From 3bc06e6a4f6e28bb8c98ca60350a9ad656327fbb Mon Sep 17 00:00:00 2001
From: linmin
Date: Mon, 2 Dec 2024 13:18:49 +0800
Subject: [PATCH 215/222] release-1130

Changelogs:
1. feat: dump multiple DSP inputs/outputs
2. fix: add hsp clock for usb
3. Revert "fix: add hsp clock for usb"
4. fix: add hsp clock for usb
5. fix: split dmabuf
6. refactor: add EVB A3 dts and platform model
7. fix: change voltage of 7702 NPU to 1.08V
8. fix: modify machine model of 7702
9. feat: add GPU KMD driver for DDK 24.2
10. refactor: refactor split dmabuf
11. fix: modify license information
12. fix(drm): add VIRTUAL on 7702 board
13. fix: increase the idle timer delay from 500ms to 1s
14. refactor: remove ECC range from 7702 EVB
15. fix: fix slow command ID error
16. fix: fan running at maximum speed
17. fix: resolve the DMA interrupt issue
18. fix(jenc): 32K JPEG encode hang
19. Merge branch 'feature-eic7702' into dev
20. feat: DSP driver uses DSP pool for flat memory
21. fix: porting changes for dual E31
22. fix: add DSP op buffer count
23. feat: support loading NPU firmware by name
24. feat: change audio proc license statement

Signed-off-by: linmin
---
 arch/riscv/boot/dts/eswin/Makefile | 5 +- arch/riscv/boot/dts/eswin/eic7700-evb-a2.dts | 16 +- arch/riscv/boot/dts/eswin/eic7700-evb-a3.dts | 55 + arch/riscv/boot/dts/eswin/eic7700-evb.dts | 3 +- .../dts/eswin/eic7700-hifive-premier-p550.dts | 13 +- .../riscv/boot/dts/eswin/eic7700-pinctrl.dtsi | 14 +- .../boot/dts/eswin/eic7702-evb-a1-d0.dts | 35 +- .../boot/dts/eswin/eic7702-evb-a1-d1.dts | 7 +- arch/riscv/boot/dts/eswin/eic7702-evb-a1.dts | 1508 ++ .../boot/dts/eswin/eic7x-die0-pinctrl.dtsi | 1447 ++ .../boot/dts/eswin/eic7x-die1-pinctrl.dtsi | 1452 ++ .../dts/eswin/eswin-win2030-arch-d2d.dtsi | 1171 +- .../boot/dts/eswin/eswin-win2030-arch.dtsi | 75 +- .../dts/eswin/eswin-win2030-die0-soc.dtsi | 74 +- .../dts/eswin/eswin-win2030-die1-soc.dtsi | 779 +- .../dts/eswin/eswin-win2030-platform.dtsi | 2 +- arch/riscv/boot/dts/eswin/eswin-win2030.dts | 4 +- arch/riscv/configs/eic7700_defconfig | 4 +- arch/riscv/configs/eic7702_defconfig | 844 + arch/riscv/configs/win2030_defconfig | 2 +- arch/riscv/mm/init.c | 9 +- drivers/clk/eswin/clk.c | 1 + .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 124 + drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 21 + drivers/edac/eswin_edac.c | 16 +- drivers/gpu/drm/eswin/Kconfig | 7 - drivers/gpu/drm/eswin/Makefile | 2 + drivers/gpu/drm/eswin/dw-hdmi.c | 25 +- drivers/gpu/drm/eswin/dw_hdmi_hdcp.c | 74 +- drivers/gpu/drm/eswin/dw_hdmi_hdcp.h | 38 +- drivers/gpu/drm/eswin/dw_hdmi_hdcp2.c | 513 +- drivers/gpu/drm/eswin/eswin_dw_hdmi.c | 2 +- drivers/gpu/drm/img/img-volcanic/Makefile | 6 + .../gpu/drm/img/img-volcanic/config_kernel.h | 197 +- .../gpu/drm/img/img-volcanic/config_kernel.mk | 31 +- .../rogue/cache_bridge/server_cache_bridge.c | 17 +- .../rogue/cmm_bridge/server_cmm_bridge.c | 17 +- .../server_devicememhistory_bridge.c | 61 +- .../rogue/di_bridge/server_di_bridge.c | 70 +- .../rogue/dma_bridge/server_dma_bridge.c | 26 +- .../dmabuf_bridge/common_dmabuf_bridge.h | 43 +- .../dmabuf_bridge/server_dmabuf_bridge.c | 258 +- .../htbuffer_bridge/client_htbuffer_bridge.h | 7 - .../client_htbuffer_direct_bridge.c | 15 - .../htbuffer_bridge/common_htbuffer_bridge.h | 24 +- .../htbuffer_bridge/server_htbuffer_bridge.c | 130 +- .../rogue/mm_bridge/client_mm_bridge.h | 122 +- .../rogue/mm_bridge/client_mm_direct_bridge.c | 422 +- .../rogue/mm_bridge/common_mm_bridge.h | 433 +- .../rogue/mm_bridge/server_mm_bridge.c | 2745 ++-- .../mmextmem_bridge/server_mmextmem_bridge.c | 4 +- .../rogue/pdump_bridge/server_pdump_bridge.c 
| 40 +- .../client_pdumpctrl_bridge.h | 3 +- .../client_pdumpctrl_direct_bridge.c | 7 +- .../common_pdumpctrl_bridge.h | 1 + .../server_pdumpctrl_bridge.c | 20 +- .../client_pdumpmm_direct_bridge.c | 4 +- .../pdumpmm_bridge/server_pdumpmm_bridge.c | 62 +- .../rogue/pvrtl_bridge/server_pvrtl_bridge.c | 59 +- .../common_rgxbreakpoint_bridge.h | 2 +- .../server_rgxbreakpoint_bridge.c | 22 +- .../rgxcmp_bridge/common_rgxcmp_bridge.h | 142 +- .../rgxcmp_bridge/server_rgxcmp_bridge.c | 798 +- .../rgxfwdbg_bridge/common_rgxfwdbg_bridge.h | 144 +- .../rgxfwdbg_bridge/server_rgxfwdbg_bridge.c | 291 +- .../common_rgxhwperf_bridge.h | 168 +- .../server_rgxhwperf_bridge.c | 831 +- .../server_rgxkicksync_bridge.c | 38 +- .../client_rgxpdump_direct_bridge.c | 10 - .../rgxpdump_bridge/server_rgxpdump_bridge.c | 49 +- .../server_rgxregconfig_bridge.c | 17 +- .../rgxta3d_bridge/common_rgxta3d_bridge.h | 288 +- .../rgxta3d_bridge/server_rgxta3d_bridge.c | 3953 +++-- .../server_rgxtimerquery_bridge.c | 10 +- .../rgxtq2_bridge/common_rgxtq2_bridge.h | 40 +- .../rgxtq2_bridge/server_rgxtq2_bridge.c | 504 +- .../rogue/rgxtq_bridge/common_rgxtq_bridge.h | 77 +- .../rogue/rgxtq_bridge/server_rgxtq_bridge.c | 960 +- .../rogue/ri_bridge/client_ri_bridge.h | 3 +- .../rogue/ri_bridge/client_ri_direct_bridge.c | 15 +- .../rogue/ri_bridge/common_ri_bridge.h | 3 +- .../rogue/ri_bridge/server_ri_bridge.c | 56 +- .../srvcore_bridge/common_srvcore_bridge.h | 4 +- .../srvcore_bridge/server_srvcore_bridge.c | 109 +- .../rogue/sync_bridge/server_sync_bridge.c | 51 +- .../server_syncfallback_bridge.c | 109 +- .../server_synctracking_bridge.c | 16 +- .../cache_bridge/server_cache_bridge.c | 17 +- .../volcanic/cmm_bridge/server_cmm_bridge.c | 17 +- .../server_devicememhistory_bridge.c | 61 +- .../volcanic/di_bridge/server_di_bridge.c | 70 +- .../volcanic/dma_bridge/server_dma_bridge.c | 26 +- .../dmabuf_bridge/common_dmabuf_bridge.h | 43 +- .../dmabuf_bridge/server_dmabuf_bridge.c | 258 +- .../htbuffer_bridge/client_htbuffer_bridge.h | 7 - .../client_htbuffer_direct_bridge.c | 15 - .../htbuffer_bridge/common_htbuffer_bridge.h | 24 +- .../htbuffer_bridge/server_htbuffer_bridge.c | 130 +- .../volcanic/mm_bridge/client_mm_bridge.h | 122 +- .../mm_bridge/client_mm_direct_bridge.c | 422 +- .../volcanic/mm_bridge/common_mm_bridge.h | 433 +- .../volcanic/mm_bridge/server_mm_bridge.c | 2745 ++-- .../mmextmem_bridge/common_mmextmem_bridge.h | 80 + .../mmextmem_bridge/server_mmextmem_bridge.c | 166 + .../pdump_bridge/server_pdump_bridge.c | 40 +- .../client_pdumpctrl_bridge.h | 3 +- .../client_pdumpctrl_direct_bridge.c | 7 +- .../common_pdumpctrl_bridge.h | 1 + .../server_pdumpctrl_bridge.c | 20 +- .../client_pdumpmm_direct_bridge.c | 4 +- .../pdumpmm_bridge/server_pdumpmm_bridge.c | 62 +- .../pvrtl_bridge/server_pvrtl_bridge.c | 59 +- .../rgxcmp_bridge/common_rgxcmp_bridge.h | 142 +- .../rgxcmp_bridge/server_rgxcmp_bridge.c | 798 +- .../rgxfwdbg_bridge/common_rgxfwdbg_bridge.h | 144 +- .../rgxfwdbg_bridge/server_rgxfwdbg_bridge.c | 291 +- .../common_rgxhwperf_bridge.h | 164 +- .../server_rgxhwperf_bridge.c | 675 +- .../server_rgxkicksync_bridge.c | 38 +- .../client_rgxpdump_direct_bridge.c | 10 - .../rgxpdump_bridge/server_rgxpdump_bridge.c | 49 +- .../rgxray_bridge/server_rgxray_bridge.c | 27 +- .../server_rgxregconfig_bridge.c | 17 +- .../rgxta3d_bridge/common_rgxta3d_bridge.h | 243 +- .../rgxta3d_bridge/server_rgxta3d_bridge.c | 3863 +++-- .../server_rgxtimerquery_bridge.c | 10 +- .../rgxtq2_bridge/common_rgxtq2_bridge.h | 40 +- 
.../rgxtq2_bridge/server_rgxtq2_bridge.c | 504 +- .../volcanic/ri_bridge/client_ri_bridge.h | 3 +- .../ri_bridge/client_ri_direct_bridge.c | 15 +- .../volcanic/ri_bridge/common_ri_bridge.h | 3 +- .../volcanic/ri_bridge/server_ri_bridge.c | 56 +- .../srvcore_bridge/common_srvcore_bridge.h | 4 +- .../srvcore_bridge/server_srvcore_bridge.c | 109 +- .../volcanic/sync_bridge/server_sync_bridge.c | 51 +- .../server_syncfallback_bridge.c | 109 +- .../server_synctracking_bridge.c | 16 +- .../rogue/km/configs/rgxconfig_km_1.V.4.19.h | 1 + .../rogue/km/configs/rgxconfig_km_1.V.4.5.h | 1 + .../rogue/km/configs/rgxconfig_km_15.V.1.64.h | 1 + .../km/configs/rgxconfig_km_22.V.104.18.h | 1 + .../km/configs/rgxconfig_km_22.V.104.218.h | 1 + .../km/configs/rgxconfig_km_22.V.208.318.h | 1 + .../km/configs/rgxconfig_km_22.V.21.16.h | 1 + .../km/configs/rgxconfig_km_22.V.54.25.h | 1 + .../km/configs/rgxconfig_km_22.V.54.30.h | 1 + .../km/configs/rgxconfig_km_22.V.54.330.h | 1 + .../km/configs/rgxconfig_km_22.V.54.38.h | 1 + .../km/configs/rgxconfig_km_24.V.104.504.h | 1 + .../km/configs/rgxconfig_km_24.V.208.504.h | 1 + .../km/configs/rgxconfig_km_24.V.208.505.h | 1 + .../km/configs/rgxconfig_km_24.V.54.204.h | 1 + .../km/configs/rgxconfig_km_29.V.108.208.h | 1 + .../km/configs/rgxconfig_km_29.V.52.202.h | 1 + .../rogue/km/configs/rgxconfig_km_33.V.11.3.h | 1 + .../rogue/km/configs/rgxconfig_km_33.V.22.1.h | 1 + .../km/configs/rgxconfig_km_36.V.104.182.h | 2 + .../km/configs/rgxconfig_km_36.V.104.183.h | 2 + .../km/configs/rgxconfig_km_36.V.104.796.h | 3 + .../km/configs/rgxconfig_km_36.V.52.182.h | 105 + .../km/configs/rgxconfig_km_36.V.54.103.h | 2 + .../km/configs/rgxconfig_km_36.V.54.182.h | 2 + .../km/configs/rgxconfig_km_36.V.54.183.h | 2 + .../km/configs/rgxconfig_km_36.V.54.280.h | 2 + .../rogue/km/configs/rgxconfig_km_4.V.2.51.h | 1 + .../rogue/km/configs/rgxconfig_km_4.V.2.58.h | 1 + .../rogue/km/configs/rgxconfig_km_4.V.4.55.h | 1 + .../rogue/km/configs/rgxconfig_km_4.V.6.62.h | 1 + .../km/configs/rgxconfig_km_46.V.102.389.h | 112 + .../km/configs/rgxconfig_km_46.V.204.390.h | 114 + .../rogue/km/configs/rgxconfig_km_5.V.1.46.h | 1 + .../rogue/km/configs/rgxconfig_km_6.V.4.35.h | 1 + .../rogue/km/cores/rgxcore_km_33.15.11.3.h | 3 +- .../rogue/km/cores/rgxcore_km_36.29.52.182.h | 75 + .../rogue/km/cores/rgxcore_km_36.50.54.182.h | 3 +- .../rogue/km/cores/rgxcore_km_36.52.104.182.h | 3 +- .../rogue/km/cores/rgxcore_km_46.72.102.389.h | 76 + .../rogue/km/cores/rgxcore_km_46.94.204.390.h | 76 + .../hwdefs/rogue/km/rgx_bvnc_defs_km.h | 359 +- .../hwdefs/rogue/km/rgx_bvnc_table_km.h | 305 +- .../hwdefs/rogue/km/rgx_cr_defs_km.h | 11588 ++++++------- .../img-volcanic/hwdefs/rogue/km/rgxdefs_km.h | 88 +- .../hwdefs/rogue/km/rgxmhdefs_km.h | 442 +- .../hwdefs/rogue/km/rgxmmudefs_km.h | 210 +- .../km/configs/rgxconfig_km_27.V.254.2.h | 8 + .../km/configs/rgxconfig_km_30.V.408.101.h | 8 + .../km/configs/rgxconfig_km_30.V.816.20.h | 8 + .../km/configs/rgxconfig_km_35.V.1632.21.h | 8 + .../km/configs/rgxconfig_km_35.V.1632.23.h | 8 + .../km/configs/rgxconfig_km_35.V.1632.34.h | 8 + .../km/configs/rgxconfig_km_35.V.1632.35.h | 154 + .../km/configs/rgxconfig_km_35.V.408.101.h | 8 + .../km/configs/rgxconfig_km_35.V.408.23.h | 8 + .../km/configs/rgxconfig_km_35.V.408.33.h | 9 + .../km/configs/rgxconfig_km_35.V.408.34.h | 9 + .../km/configs/rgxconfig_km_38.V.2448.402.h | 153 + .../km/configs/rgxconfig_km_70.V.2448.1041.h | 154 + .../km/configs/rgxconfig_km_70.V.2448.1042.h | 154 + 
.../km/configs/rgxconfig_km_70.V.2448.1360.h | 154 + .../km/configs/rgxconfig_km_70.V.2448.418.h | 154 + .../km/configs/rgxconfig_km_71.V.2448.1210.h | 154 + .../km/configs/rgxconfig_km_71.V.2448.1211.h | 154 + .../km/configs/rgxconfig_km_71.V.2448.1212.h | 154 + .../km/configs/rgxconfig_km_71.V.3672.2080.h | 154 + .../km/configs/rgxconfig_km_71.V.3672.2081.h | 154 + .../km/configs/rgxconfig_km_71.V.3672.2082.h | 154 + .../km/configs/rgxconfig_km_71.V.3672.2083.h | 154 + .../km/configs/rgxconfig_km_71.V.3672.2084.h | 154 + .../km/cores/rgxcore_km_30.3.408.101.h | 3 +- .../km/cores/rgxcore_km_30.3.816.20.h | 3 +- .../km/cores/rgxcore_km_35.2.1632.21.h | 3 +- .../km/cores/rgxcore_km_35.2.1632.23.h | 3 +- .../km/cores/rgxcore_km_35.2.1632.34.h | 3 +- .../km/cores/rgxcore_km_35.2.1632.35.h} | 50 +- .../km/cores/rgxcore_km_35.2.408.34.h | 5 +- .../km/cores/rgxcore_km_35.3.1632.23.h | 3 +- .../km/cores/rgxcore_km_35.3.408.101.h | 3 +- .../km/cores/rgxcore_km_35.3.408.33.h | 73 + .../km/cores/rgxcore_km_35.3.408.34.h | 73 + .../km/cores/rgxcore_km_35.4.1632.23.h | 3 +- .../km/cores/rgxcore_km_35.5.408.23.h | 3 +- .../km/cores/rgxcore_km_38.4.2448.402.h | 74 + .../km/cores/rgxcore_km_38.6.2448.402.h | 74 + .../km/cores/rgxcore_km_70.2.2448.1041.h | 71 + .../km/cores/rgxcore_km_70.2.2448.1042.h | 71 + .../km/cores/rgxcore_km_70.2.2448.1360.h | 71 + .../km/cores/rgxcore_km_70.2.2448.418.h | 71 + .../km/cores/rgxcore_km_70.3.2448.1042.h | 71 + .../km/cores/rgxcore_km_70.3.2448.1360.h | 71 + .../km/cores/rgxcore_km_71.2.2448.1210.h | 72 + .../km/cores/rgxcore_km_71.2.2448.1211.h | 72 + .../km/cores/rgxcore_km_71.2.2448.1212.h | 72 + .../km/cores/rgxcore_km_71.2.3672.2080.h | 72 + .../km/cores/rgxcore_km_71.2.3672.2081.h | 72 + .../km/cores/rgxcore_km_71.2.3672.2082.h | 72 + .../km/cores/rgxcore_km_71.2.3672.2083.h | 72 + .../km/cores/rgxcore_km_71.3.3672.2083.h | 72 + .../km/cores/rgxcore_km_71.3.3672.2084.h | 72 + .../hwdefs/volcanic/km/rgx_bvnc_defs_km.h | 370 +- .../hwdefs/volcanic/km/rgx_bvnc_table_km.h | 322 +- .../hwdefs/volcanic/km/rgx_cr_defs_km.h | 13382 ++++++++++------ .../hwdefs/volcanic/km/rgxdefs_km.h | 80 +- .../hwdefs/volcanic/km/rgxmmudefs_km.h | 279 +- .../hwdefs/volcanic/km/rgxtbdefs_km.h | 2023 ++- .../img-volcanic/hwdefs/volcanic/rgxpmdefs.h | 28 +- .../img-volcanic/include/devicemem_typedefs.h | 17 - .../gpu/drm/img/img-volcanic/include/dllist.h | 25 +- .../img/img-volcanic/include/drm/pvr_drm.h | 18 + .../img/img-volcanic/include/img_3dtypes.h | 2 +- .../drm/img/img-volcanic/include/img_defs.h | 74 +- .../include/img_drm_fourcc_internal.h | 14 +- .../drm/img/img-volcanic/include/img_types.h | 23 +- .../drm/img/img-volcanic/include/lock_types.h | 6 +- .../gpu/drm/img/img-volcanic/include/log2.h | 4 +- .../img/img-volcanic/include/multicore_defs.h | 2 +- .../img/img-volcanic/include/osfunc_common.h | 60 +- .../drm/img/img-volcanic/include/pdumpdefs.h | 21 +- .../drm/img/img-volcanic/include/pdumpdesc.h | 8 +- .../include/public/powervr/img_drm_fourcc.h | 21 + .../include/public/powervr/pvrsrv_sync_ext.h | 2 + .../drm/img/img-volcanic/include/pvr_debug.h | 343 +- .../include/pvrsrv_device_types.h | 4 +- .../img/img-volcanic/include/pvrsrv_error.h | 4 +- .../img/img-volcanic/include/pvrsrv_errors.h | 19 +- .../include/pvrsrv_memalloc_physheap.h | 200 +- .../include/pvrsrv_memallocflags.h | 266 +- .../img-volcanic/include/pvrsrv_tlcommon.h | 2 +- .../img-volcanic/include/pvrsrv_tlstreams.h | 4 +- .../drm/img/img-volcanic/include/pvrversion.h | 18 +- 
.../drm/img/img-volcanic/include/rgx_common.h | 39 +- .../img/img-volcanic/include/rgx_fwif_sf.h | 757 +- .../img-volcanic/include/rgx_heap_firmware.h | 19 +- .../img-volcanic/include/rgx_hwperf_common.h | 1193 +- .../drm/img/img-volcanic/include/rgx_meta.h | 22 +- .../drm/img/img-volcanic/include/rgx_mips.h | 42 +- .../drm/img/img-volcanic/include/rgx_riscv.h | 22 +- .../include/rogue/rgx_fwif_alignchecks.h | 2 +- .../include/rogue/rgx_fwif_hwperf.h | 12 +- .../img-volcanic/include/rogue/rgx_fwif_km.h | 1261 +- .../include/rogue/rgx_fwif_shared.h | 829 +- .../img-volcanic/include/rogue/rgx_heaps.h | 1 - .../img-volcanic/include/rogue/rgx_hwperf.h | 1152 +- .../img-volcanic/include/rogue/rgx_options.h | 81 +- .../include/rogue/rgxheapconfig.h | 32 +- .../include/rogue/rgxheapconfig_65273.h | 9 +- .../img/img-volcanic/include/servicesext.h | 46 +- .../include/sync_checkpoint_external.h | 5 + .../img-volcanic/include/sync_prim_internal.h | 1 - .../include/system/rgx_tc/odin_defs.h | 12 + .../include/volcanic/rgx_fwif_alignchecks.h | 3 +- .../volcanic/rgx_fwif_customer.h} | 24 +- .../include/volcanic/rgx_fwif_hwperf.h | 4 +- .../include/volcanic/rgx_fwif_km.h | 1485 +- .../include/volcanic/rgx_fwif_shared.h | 779 +- .../img-volcanic/include/volcanic/rgx_heaps.h | 1 - .../include/volcanic/rgx_hwperf.h | 1138 +- .../include/volcanic/rgx_hwperf_table.h | 12 +- .../include/volcanic/rgx_options.h | 79 +- .../include/volcanic/rgxheapconfig.h | 34 +- .../img-volcanic/services/include/dma_flags.h | 2 +- .../services/include/htbuffer_sf.h | 119 +- .../services/include/htbuffer_types.h | 4 + .../services/include/info_page_client.h | 4 +- .../services/include/info_page_defs.h | 110 +- .../services/include/km_apphint_defs_common.h | 48 +- .../img-volcanic/services/include/physheap.h | 201 +- .../services/include/physheap_config.h | 258 +- .../services/include/pvr_bridge.h | 24 +- .../services/include/rgx_bridge.h | 13 +- .../services/include/rgx_fw_info.h | 11 +- .../services/include/rgx_tq_shared.h | 2 - .../services/include/rgxtransfer_shader.h | 4 +- .../services/include/rogue/km_apphint_defs.h | 2 + .../services/include/rogue/rgxapi_km.h | 14 - .../include/sync_checkpoint_internal.h | 31 +- .../include/volcanic/km_apphint_defs.h | 13 +- .../services/server/common/cache_km.c | 68 +- .../server/common/connection_server.c | 164 +- .../services/server/common/dc_server.c | 2330 --- .../services/server/common/debug_common.c | 1109 +- .../server/common/devicemem_heapcfg.c | 10 +- .../server/common/devicemem_history_server.c | 188 +- .../services/server/common/devicemem_server.c | 3515 +++- .../services/server/common/di_impl_brg.c | 126 +- .../services/server/common/di_impl_brg.h | 2 + .../services/server/common/di_server.c | 4 +- .../services/server/common/dma_km.c | 195 +- .../services/server/common/handle.c | 93 +- .../services/server/common/handle_generic.c | 1009 -- .../services/server/common/htb_debug.c | 77 +- .../services/server/common/htb_debug.h | 12 +- .../services/server/common/htbserver.c | 483 +- .../services/server/common/info_page_km.c | 2 +- .../services/server/common/mmu_common.c | 2732 +++- .../services/server/common/pdump_mmu.c | 44 +- .../services/server/common/pdump_physmem.c | 110 +- .../services/server/common/pdump_server.c | 237 +- .../services/server/common/physheap.c | 1777 +- .../services/server/common/physmem.c | 413 +- .../server/common/physmem_cpumap_history.c | 2 +- .../services/server/common/physmem_dlm.c | 341 + .../services/server/common/physmem_hostmem.c | 66 +- 
.../services/server/common/physmem_ima.c | 655 + .../services/server/common/physmem_lma.c | 2380 +-- .../services/server/common/physmem_osmem.c | 91 + .../services/server/common/physmem_ramem.c | 2609 +++ .../img-volcanic/services/server/common/pmr.c | 2902 +++- .../services/server/common/power.c | 404 +- .../services/server/common/process_stats.c | 1937 ++- .../services/server/common/pvr_notifier.c | 50 +- .../services/server/common/pvrsrv.c | 1720 +- .../server/common/pvrsrv_bridge_init.c | 24 +- .../services/server/common/pvrsrv_pool.c | 2 +- .../services/server/common/ri_server.c | 1253 +- .../img-volcanic/services/server/common/scp.c | 687 - .../services/server/common/srvcore.c | 265 +- .../services/server/common/sync_checkpoint.c | 1024 +- .../server/common/sync_fallback_server.c | 225 +- .../services/server/common/sync_server.c | 32 +- .../services/server/common/tlintern.c | 33 +- .../services/server/common/tlserver.c | 14 +- .../services/server/common/tlstream.c | 79 +- .../services/server/common/vmm_pvz_client.c | 14 +- .../services/server/common/vmm_pvz_server.c | 71 +- .../services/server/common/vz_vmm_pvz.c | 75 +- .../services/server/common/vz_vmm_vm.c | 161 +- .../services/server/devices/rgx_bridge_init.c | 89 +- .../services/server/devices/rgx_bridge_init.h | 5 +- .../rgxhwperf.h => rgx_heaps_server.h} | 63 +- .../services/server/devices/rgxbreakpoint.c | 4 +- .../services/server/devices/rgxbreakpoint.h | 2 +- .../services/server/devices/rgxbvnc.c | 332 +- .../services/server/devices/rgxccb.c | 671 +- .../services/server/devices/rgxccb.h | 9 +- .../services/server/devices/rgxcompute.c | 716 +- .../services/server/devices/rgxcompute.h | 42 +- .../services/server/devices/rgxdebug_common.c | 3236 ++++ .../services/server/devices/rgxdebug_common.h | 433 + .../services/server/devices/rgxfwcmnctx.c | 810 + .../services/server/devices/rgxfwcmnctx.h | 150 + .../services/server/devices/rgxfwdbg.c | 369 +- .../services/server/devices/rgxfwdbg.h | 46 +- .../services/server/devices/rgxfwimageutils.c | 221 +- .../services/server/devices/rgxfwimageutils.h | 3 +- .../services/server/devices/rgxfwmemctx.h | 156 + .../services/server/devices/rgxfwriscv.c | 1074 ++ .../services/server/devices/rgxfwriscv.h | 217 + .../server/devices/{rogue => }/rgxfwutils.h | 555 +- .../server/devices/{rogue => }/rgxhwperf.h | 17 +- .../server/devices/rgxhwperf_common.c | 1382 +- .../server/devices/rgxhwperf_common.h | 132 +- .../server/devices/{volcanic => }/rgxinit.h | 64 +- .../server/devices/rgxinit_apphints.h | 120 + .../services/server/devices/rgxkicksync.c | 52 +- .../server/devices/{rogue => }/rgxlayer.h | 166 +- .../devices/{rogue => }/rgxlayer_impl.h | 2 + .../server/devices/rgxlayer_impl_common.c | 914 ++ .../services/server/devices/rgxmem.c | 320 +- .../services/server/devices/rgxmem.h | 22 +- .../devices/{volcanic => }/rgxmmuinit.c | 315 +- .../server/devices/{rogue => }/rgxmmuinit.h | 1 - .../services/server/devices/rgxpdump.h | 15 - .../services/server/devices/rgxpdump_common.c | 121 + .../services/server/devices/rgxpdump_common.h | 106 + .../services/server/devices/rgxpdvfs.c | 154 +- .../services/server/devices/rgxpdvfs.h | 24 +- .../server/devices/{rogue => }/rgxpower.c | 1403 +- .../server/devices/{rogue => }/rgxpower.h | 113 +- .../services/server/devices/rgxregconfig.c | 36 +- .../services/server/devices/rgxshader.c | 49 +- .../services/server/devices/rgxshader.h | 4 +- .../services/server/devices/rgxsyncutils.h | 2 +- .../services/server/devices/rgxtdmtransfer.c | 328 +- 
.../services/server/devices/rgxtdmtransfer.h | 28 +- .../services/server/devices/rgxtimecorr.c | 245 +- .../services/server/devices/rgxtimecorr.h | 66 +- .../services/server/devices/rgxtimerquery.c | 31 +- .../services/server/devices/rgxtimerquery.h | 6 - .../services/server/devices/rgxutils.c | 331 +- .../services/server/devices/rgxutils.h | 128 +- .../services/server/devices/rgxworkest.c | 73 +- .../services/server/devices/rogue/rgxdebug.c | 5697 ++----- .../services/server/devices/rogue/rgxdebug.h | 229 - .../services/server/devices/rogue/rgxdevice.h | 211 +- .../server/devices/rogue/rgxfwutils.c | 4763 ++---- .../services/server/devices/rogue/rgxhwperf.c | 106 +- .../services/server/devices/rogue/rgxinit.c | 3662 +++-- .../services/server/devices/rogue/rgxinit.h | 281 - .../server/devices/rogue/rgxlayer_impl.c | 1030 +- .../server/devices/rogue/rgxmipsmmuinit.c | 41 +- .../server/devices/rogue/rgxmmuinit.c | 1079 -- .../server/devices/rogue/rgxmulticore.c | 162 +- .../services/server/devices/rogue/rgxpdump.c | 290 +- .../server/devices/rogue/rgxsrvinit.c | 1156 +- .../server/devices/rogue/rgxstartstop.c | 936 +- .../services/server/devices/rogue/rgxta3d.c | 2822 ++-- .../services/server/devices/rogue/rgxta3d.h | 138 +- .../server/devices/rogue/rgxtransfer.c | 274 +- .../server/devices/rogue/rgxtransfer.h | 34 +- .../server/devices/volcanic/rgxdebug.c | 3331 +--- .../server/devices/volcanic/rgxdebug.h | 191 - .../server/devices/volcanic/rgxdevice.h | 233 +- .../server/devices/volcanic/rgxfwutils.c | 5233 ++---- .../server/devices/volcanic/rgxfwutils.h | 1418 -- .../server/devices/volcanic/rgxhwperf.c | 146 +- .../server/devices/volcanic/rgxinit.c | 4045 +++-- .../server/devices/volcanic/rgxlayer.h | 510 - .../server/devices/volcanic/rgxlayer_impl.c | 631 +- .../server/devices/volcanic/rgxmulticore.c | 148 +- .../server/devices/volcanic/rgxpdump.c | 634 +- .../server/devices/volcanic/rgxpower.c | 1602 -- .../server/devices/volcanic/rgxpower.h | 272 - .../services/server/devices/volcanic/rgxray.c | 239 +- .../services/server/devices/volcanic/rgxray.h | 2 +- .../server/devices/volcanic/rgxsrvinit.c | 1371 +- .../server/devices/volcanic/rgxstartstop.c | 960 +- .../server/devices/volcanic/rgxta3d.c | 2037 ++- .../server/devices/volcanic/rgxta3d.h | 125 +- .../services/server/env/linux/Kbuild.mk | 124 +- .../services/server/env/linux/Linux.mk | 4 - .../services/server/env/linux/allocmem.c | 63 +- .../services/server/env/linux/dkf_server.c | 409 + .../services/server/env/linux/dkf_server.h | 103 + .../server/env/linux/env_connection.h | 4 + .../services/server/env/linux/event.c | 6 +- .../services/server/env/linux/fwload.c | 8 +- .../services/server/env/linux/handle_idr.c | 27 +- .../services/server/env/linux/km_apphint.c | 107 +- .../services/server/env/linux/module_common.c | 106 +- .../services/server/env/linux/module_common.h | 8 +- .../server/env/linux/osconnection_server.c | 29 +- .../services/server/env/linux/osfunc.c | 613 +- .../services/server/env/linux/osfunc_arm.c | 70 +- .../services/server/env/linux/osfunc_arm64.c | 125 +- .../services/server/env/linux/osfunc_riscv.c | 37 +- .../services/server/env/linux/osfunc_x86.c | 7 +- .../server/env/linux/ossecure_export.c | 2 +- .../server/env/linux/pg_walk_through.c | 213 - .../server/env/linux/physmem_dmabuf.c | 1947 ++- .../env/linux/physmem_dmabuf_internal.h | 69 + .../server/env/linux/physmem_extmem_linux.c | 251 +- .../server/env/linux/physmem_extmem_wrap.h | 2 +- .../server/env/linux/physmem_osmem_linux.c | 2514 ++- 
.../server/env/linux/physmem_osmem_linux.h | 13 +- .../services/server/env/linux/physmem_test.c | 416 +- .../services/server/env/linux/physmem_test.h | 5 +- .../services/server/env/linux/pmr_env.c | 83 + .../services/server/env/linux/pmr_env.h | 90 + .../services/server/env/linux/pmr_os.c | 420 +- .../services/server/env/linux/pvr_bridge_k.c | 91 +- .../services/server/env/linux/pvr_bridge_k.h | 26 +- .../server/env/linux/pvr_buffer_sync.c | 18 +- .../server/env/linux/pvr_counting_timeline.c | 12 +- .../services/server/env/linux/pvr_debug.c | 42 +- .../services/server/env/linux/pvr_debugfs.c | 18 +- .../services/server/env/linux/pvr_drm.c | 166 +- .../services/server/env/linux/pvr_drv.h | 8 +- .../server/env/linux/pvr_dvfs_common.c | 363 + .../server/env/linux/pvr_dvfs_common.h | 123 + .../server/env/linux/pvr_dvfs_device.c | 1000 +- .../server/env/linux/pvr_export_fence.c | 472 + .../server/env/linux/pvr_export_fence.h | 77 + .../services/server/env/linux/pvr_fence.c | 71 +- .../services/server/env/linux/pvr_fence.h | 15 +- .../server/env/linux/pvr_fence_trace.h | 2 +- .../services/server/env/linux/pvr_gputrace.c | 844 +- .../services/server/env/linux/pvr_ion_stats.h | 16 + .../services/server/env/linux/pvr_pci_drv.c | 229 - .../server/env/linux/pvr_platform_drv.c | 60 +- .../services/server/env/linux/pvr_procfs.c | 20 +- .../services/server/env/linux/pvr_sw_fence.c | 5 +- .../services/server/env/linux/pvr_sync2.c | 2759 ---- .../services/server/env/linux/pvr_sync_api.h | 4 +- .../services/server/env/linux/pvr_sync_file.c | 271 +- .../server/env/linux/pvr_sync_ioctl_common.c | 276 +- .../server/env/linux/pvr_sync_ioctl_common.h | 20 +- .../server/env/linux/pvr_sync_ioctl_drm.c | 32 +- .../server/env/linux/pvr_sync_ioctl_drm.h | 4 + .../server/env/linux/rogue_trace_events.h | 5 +- .../server/env/linux/services_kernel_client.h | 23 +- .../services/server/env/linux/srvinit_param.c | 216 - .../services/server/env/linux/trace_events.c | 15 +- .../services/server/env/linux/trace_events.h | 25 +- .../server/include/connection_server.h | 5 +- .../services/server/include/dc_server.h | 173 - .../services/server/include/debug_common.h | 16 + .../services/server/include/device.h | 299 +- .../server/include/devicemem_heapcfg.h | 10 - .../server/include/devicemem_history_server.h | 5 + .../server/include/devicemem_server.h | 510 +- .../server/include/devicemem_server_utils.h | 27 +- .../services/server/include/di_server.h | 9 +- .../services/server/include/dkp_impl.h | 163 + .../services/server/include/dma_km.h | 6 +- .../services/server/include/handle.h | 9 +- .../services/server/include/handle_impl.h | 1 + .../services/server/include/handle_types.h | 5 +- .../services/server/include/htbserver.h | 96 +- .../services/server/include/mmu_common.h | 336 +- .../include/{oskm_apphint.h => os_apphint.h} | 74 +- .../server/include/os_srvinit_param.h | 328 - .../server/include/osconnection_server.h | 4 + .../services/server/include/osdi_impl.h | 6 +- .../services/server/include/osfunc.h | 319 +- .../services/server/include/ospvr_gputrace.h | 74 +- .../services/server/include/pdump_km.h | 47 +- .../services/server/include/pdump_mmu.h | 3 + .../services/server/include/pdump_physmem.h | 70 +- .../services/server/include/physmem.h | 130 +- .../services/server/include/physmem_dlm.h | 88 + .../services/server/include/physmem_dmabuf.h | 35 +- .../services/server/include/physmem_ima.h | 77 + .../services/server/include/physmem_lma.h | 32 +- .../services/server/include/physmem_osmem.h | 63 +- 
.../services/server/include/physmem_ramem.h | 109 + .../services/server/include/pmr.h | 421 +- .../services/server/include/pmr_impl.h | 153 +- .../services/server/include/pmr_os.h | 19 + .../services/server/include/power.h | 184 +- .../services/server/include/process_stats.h | 36 +- .../services/server/include/pvr_dvfs.h | 42 +- .../services/server/include/pvr_notifier.h | 8 +- .../services/server/include/pvrsrv.h | 130 +- .../server/include/pvrsrv_bridge_init.h | 2 +- .../services/server/include/pvrsrv_cleanup.h | 105 +- .../services/server/include/pvrsrv_device.h | 76 +- ...rv_firmware_boot.h => rgx_firmware_boot.h} | 12 +- .../services/server/include/ri_server.h | 15 +- .../services/server/include/scp.h | 241 - .../services/server/include/srvcore.h | 55 +- .../services/server/include/srvkm.h | 17 +- .../services/server/include/sync_checkpoint.h | 121 +- .../services/server/include/tlintern.h | 2 + .../services/server/include/tlstream.h | 21 +- .../services/server/include/tutils_km.h | 174 - .../services/server/include/vmm_impl.h | 59 +- .../services/server/include/vmm_pvz_server.h | 10 +- .../services/server/include/vz_vm.h | 6 +- .../services/server/include/vz_vmm_pvz.h | 6 +- .../services/shared/common/devicemem.c | 349 +- .../services/shared/common/devicemem_pdump.c | 40 - .../services/shared/common/devicemem_utils.c | 73 +- .../services/shared/common/hash.c | 13 + .../services/shared/common/hash_functions.c | 73 + .../services/shared/common/htbuffer.c | 91 - .../services/shared/common/mem_utils.c | 72 +- .../img-volcanic/services/shared/common/ra.c | 1010 +- .../services/shared/common/sync.c | 171 +- .../services/shared/common/tlclient.c | 2 +- .../shared/common/uniq_key_splay_tree.c | 34 +- .../shared/devices/rogue/rgx_hwperf_table.c | 24 +- .../services/shared/include/devicemem.h | 17 +- .../services/shared/include/devicemem_utils.h | 2 +- .../services/shared/include/devicememx.h | 22 +- .../services/shared/include/hash.h | 8 + .../services/shared/include/hash_functions.h | 66 + .../services/shared/include/htbuffer.h | 83 +- .../services/shared/include/lock.h | 108 +- .../services/shared/include/proc_stats.h | 167 +- .../img-volcanic/services/shared/include/ra.h | 189 +- .../services/shared/include/sync_internal.h | 21 +- .../system/common/env/linux/pci_support.c | 83 - .../services/system/common/sysconfig_cmn.c | 35 + .../services/system/common/uma_heap_fns.c | 88 + .../system/eswin_cpu/common/vmm_type_stub.c | 10 +- .../services/system/eswin_cpu/sysconfig.c | 30 +- .../services/system/eswin_cpu/sysinfo.h | 1 + .../services/system/include/syscommon.h | 29 + .../services/system/include/sysvalidation.h | 11 +- .../services/system/include/uma_heap_fns.h | 53 + .../services/system/rgx_linux_plato/Kbuild.mk | 11 +- .../system/rgx_linux_plato/sysconfig.c | 130 +- .../services/system/rgx_linux_plato/sysinfo.h | 24 +- .../services/system/rgx_nohw/Kbuild.mk | 52 + .../services/system/rgx_nohw/sysconfig.c | 330 + .../rgx_nohw/sysconfig.h} | 27 +- .../services/system/rgx_nohw/sysinfo.h | 58 + .../rogue/common/env/linux/dma_support.c | 4 +- .../system/rogue/common/vmm_type_stub.c | 10 +- .../system/rogue/common/vmm_type_vzfpga.c | 170 + .../services/system/rogue/mt8173/Kbuild.mk | 13 +- .../system/rogue/mt8173/mt8173_mfgsys.c | 2 +- .../system/rogue/mt8173/mt8173_mfgsys.h | 3 - .../system/rogue/mt8173/mt8173_sysconfig.c | 235 +- .../services/system/rogue/mt8173/sysinfo.h | 1 + .../system/rogue/rgx_linux_tc/Kbuild.mk | 11 +- .../system/rogue/rgx_linux_tc/sysconfig.c | 815 +- 
.../system/rogue/rgx_linux_tc/sysinfo.h | 8 + .../system/volcanic/common/vmm_type_stub.c | 13 +- .../system/volcanic/common/vmm_type_vzfpga.c | 169 + .../system/volcanic/rgx_linux_tc/Kbuild.mk | 16 +- .../system/volcanic/rgx_linux_tc/fpga.c | 2 +- .../system/volcanic/rgx_linux_tc/sysconfig.c | 1122 +- .../system/volcanic/rgx_linux_tc/sysinfo.h | 10 + .../services/system/vz_example/Kbuild.mk | 21 +- .../services/system/vz_example/sysconfig.c | 298 +- .../services/system/vz_example/sysconfig.h | 67 +- .../services/system/vz_example/sysinfo.h | 1 + drivers/gpu/drm/img/kernel_compatibility.h | 330 +- drivers/gpu/drm/img/kernel_nospec.h | 4 +- drivers/gpu/drm/img/pvr_dma_resv.h | 6 - drivers/gpu/drm/img/pvr_vmap.h | 10 +- drivers/gpu/drm/img/pvrversion.h | 18 +- drivers/hwmon/eswin-fan-control.c | 22 +- drivers/hwmon/pac193x.c | 2 +- drivers/iommu/eswin/eswin-win2030-sid.c | 10 +- drivers/memory/eswin/es_iommu_rsv/Makefile | 4 - drivers/memory/eswin/es_iommu_rsv/iommu_rsv.c | 59 +- drivers/memory/eswin/es_mmz_vb/mmz_vb.c | 23 +- .../dmabuf-heap-import-helper.c | 787 +- .../ethernet/stmicro/stmmac/dwmac-win2030.c | 7 +- drivers/pinctrl/Kconfig | 6 +- drivers/pinctrl/Makefile | 2 +- .../{pinctrl-eic7700.c => pinctrl-eic7x.c} | 351 +- drivers/regulator/Kconfig | 7 +- drivers/regulator/Makefile | 1 + drivers/regulator/es5340.c | 871 + drivers/soc/eswin/Kconfig | 6 + drivers/soc/eswin/Makefile | 3 +- .../ai_driver/common/devices/mailbox_regs.h | 19 +- .../ai_driver/common/devices/npu_base_regs.h | 20 +- .../soc/eswin/ai_driver/common/hetero_arch.h | 4 + .../soc/eswin/ai_driver/common/hetero_ipc.h | 2 + drivers/soc/eswin/ai_driver/dsp/Makefile | 1 - drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c | 11 +- .../soc/eswin/ai_driver/dsp/dsp_platform.c | 12 +- drivers/soc/eswin/ai_driver/npu/Makefile | 1 - drivers/soc/eswin/ai_driver/npu/debug.c | 462 +- drivers/soc/eswin/ai_driver/npu/dla_driver.h | 1 + drivers/soc/eswin/ai_driver/npu/edma.c | 4 +- drivers/soc/eswin/ai_driver/npu/engine.c | 47 +- drivers/soc/eswin/ai_driver/npu/npu_e31.c | 10 +- drivers/soc/eswin/ai_driver/npu/npu_main.c | 46 +- drivers/soc/eswin/ai_driver/npu/npu_spram.c | 5 +- drivers/soc/eswin/ai_driver/npu/nvdla_hw.c | 54 +- .../soc/eswin/ai_driver/npu/nvdla_lowlevel.h | 3 +- .../soc/eswin/ai_driver/npu/user_context.c | 8 +- drivers/soc/eswin/d2d.c | 231 + drivers/soc/sifive/sifive_ccache.c | 245 +- .../media/eswin/dewarp/vvcam_dwe_driver.c | 448 +- .../hal/kernel/arch/gc_hal_kernel_hardware.c | 4 +- .../arch/gc_hal_kernel_hardware_waitlink_fe.c | 3 +- .../eswin/hae/hal/kernel/gc_hal_kernel.c | 7 + .../hae/hal/kernel/gc_hal_kernel_event.c | 23 +- .../os/linux/kernel/gc_hal_kernel_driver.c | 11 +- .../os/linux/kernel/gc_hal_kernel_parameter.h | 2 +- .../eswin/gc_hal_kernel_platform_win2030.c | 38 +- drivers/staging/media/eswin/vdec/hantro_dec.c | 78 +- .../staging/media/eswin/vdec/hantro_vcmd.c | 97 +- drivers/staging/media/eswin/vdec/hantrovcmd.h | 2 + drivers/staging/media/eswin/vdec/subsys.h | 2 +- .../staging/media/eswin/venc/vc8000_driver.h | 1 + .../media/eswin/venc/vc8000_vcmd_driver.c | 138 +- include/linux/dmabuf-heap-import-helper.h | 47 +- .../include => include}/linux/es_iommu_rsv.h | 1 + include/linux/eswin_npu.h | 24 +- sound/soc/eswin/esw-i2s.c | 214 +- 697 files changed, 118325 insertions(+), 86175 deletions(-) create mode 100644 arch/riscv/boot/dts/eswin/eic7700-evb-a3.dts create mode 100644 arch/riscv/boot/dts/eswin/eic7702-evb-a1.dts create mode 100644 arch/riscv/boot/dts/eswin/eic7x-die0-pinctrl.dtsi create mode 100644 
arch/riscv/boot/dts/eswin/eic7x-die1-pinctrl.dtsi create mode 100644 arch/riscv/configs/eic7702_defconfig create mode 100644 drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/common_mmextmem_bridge.h create mode 100644 drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/server_mmextmem_bridge.c create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.52.182.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.102.389.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.204.390.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.29.52.182.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.72.102.389.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.94.204.390.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.35.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_38.V.2448.402.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1041.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1042.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1360.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.418.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1210.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1211.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1212.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2080.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2081.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2082.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2083.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2084.h rename drivers/gpu/drm/img/img-volcanic/{services/server/include/secure_export.h => hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.35.h} (69%) create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.33.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.34.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.4.2448.402.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.6.2448.402.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1041.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1042.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1360.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.418.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1042.h create mode 
100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1360.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1210.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1211.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1212.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2080.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2081.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2082.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2083.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2083.h create mode 100644 drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2084.h rename drivers/gpu/drm/img/img-volcanic/{services/server/devices/volcanic/rgxlayer_impl.h => include/volcanic/rgx_fwif_customer.h} (82%) delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/dc_server.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/handle_generic.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_dlm.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ima.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_osmem.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ramem.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/common/scp.c rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{volcanic/rgxhwperf.h => rgx_heaps_server.h} (64%) create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwmemctx.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.h rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxfwutils.h (74%) rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxhwperf.h (87%) rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{volcanic => }/rgxinit.h (85%) create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit_apphints.h rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxlayer.h (87%) rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxlayer_impl.h (98%) create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl_common.c rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{volcanic => }/rgxmmuinit.c (81%) rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxmmuinit.h (99%) create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.c create mode 100644 
drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.h rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxpower.c (51%) rename drivers/gpu/drm/img/img-volcanic/services/server/devices/{rogue => }/rgxpower.h (70%) delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pg_walk_through.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf_internal.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_pci_drv.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync2.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/env/linux/srvinit_param.c delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/dc_server.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/dkp_impl.h rename drivers/gpu/drm/img/img-volcanic/services/server/include/{oskm_apphint.h => os_apphint.h} (73%) delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/os_srvinit_param.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dlm.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ima.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ramem.h rename drivers/gpu/drm/img/img-volcanic/services/server/include/{pvrsrv_firmware_boot.h => rgx_firmware_boot.h} (93%) delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/scp.h delete mode 100644 drivers/gpu/drm/img/img-volcanic/services/server/include/tutils_km.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/shared/common/hash_functions.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/shared/include/hash_functions.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/system/common/uma_heap_fns.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/system/include/uma_heap_fns.h create mode 
100644 drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/Kbuild.mk create mode 100644 drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.c rename drivers/gpu/drm/img/img-volcanic/services/{server/devices/volcanic/rgxmmuinit.h => system/rgx_nohw/sysconfig.h} (74%) create mode 100644 drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysinfo.h create mode 100644 drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_vzfpga.c create mode 100644 drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_vzfpga.c rename drivers/pinctrl/{pinctrl-eic7700.c => pinctrl-eic7x.c} (84%) create mode 100644 drivers/regulator/es5340.c create mode 100644 drivers/soc/eswin/d2d.c rename {drivers/memory/eswin/es_iommu_rsv/include => include}/linux/es_iommu_rsv.h (77%) diff --git a/arch/riscv/boot/dts/eswin/Makefile b/arch/riscv/boot/dts/eswin/Makefile index c5b439ec7f27..b3925240449f 100644 --- a/arch/riscv/boot/dts/eswin/Makefile +++ b/arch/riscv/boot/dts/eswin/Makefile @@ -2,8 +2,7 @@ dtb-$(CONFIG_SOC_SIFIVE) += eswin-win2030.dtb \ eic7700-evb.dtb \ eic7700-evb-a2.dtb \ + eic7700-evb-a3.dtb \ eic7700-hifive-premier-p550.dtb \ - eic7702-evb-a1-d0.dtb \ - eic7702-evb-a1-d1.dtb - + eic7702-evb-a1.dtb obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/riscv/boot/dts/eswin/eic7700-evb-a2.dts b/arch/riscv/boot/dts/eswin/eic7700-evb-a2.dts index d329b5d3652e..1ea82a170c60 100644 --- a/arch/riscv/boot/dts/eswin/eic7700-evb-a2.dts +++ b/arch/riscv/boot/dts/eswin/eic7700-evb-a2.dts @@ -39,7 +39,7 @@ / { #address-cells = <2>; #size-cells = <2>; - model = "ESWIN EIC7700"; + model = "ESWIN EIC7700 EVB A2"; compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000", "sifive,fu740", "eswin,eic7700"; @@ -734,8 +734,18 @@ npu_vcc1:npu_svcc{ }; &d0_i2c2 { - /* mipi dsi */ - status = "disabled"; + /* touch screen*/ + status = "ok"; + gt911:touchscreen@14 { + compatible = "goodix,gt911"; + reg = <0x14>; + interrupt-parent = <&porta>; + interrupts = <10 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio10_default>; + irq-gpios = <&porta 10 GPIO_ACTIVE_HIGH>; + reset-gpios = <&portd 10 GPIO_ACTIVE_HIGH>; + }; }; &d0_i2c3 { diff --git a/arch/riscv/boot/dts/eswin/eic7700-evb-a3.dts b/arch/riscv/boot/dts/eswin/eic7700-evb-a3.dts new file mode 100644 index 000000000000..47b490f8c6c1 --- /dev/null +++ b/arch/riscv/boot/dts/eswin/eic7700-evb-a3.dts @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree file for Eswin EIC7700 SoC. + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * SPDX-License-Identifier: GPL-2.0 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "eic7700-evb-a2.dts" + +/ { + model = "ESWIN EIC7700 EVB A3"; +}; + +&d0_clock { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio95_default>; + cpu-voltage-gpios = <&portc 31 GPIO_ACTIVE_HIGH>; +}; + +&d0_cpu_opp_table { + opp-1500000000 { + opp-hz = /bits/ 64 ; + opp-microvolt = <900000>; + clock-latency-ns = <70000>; + }; + opp-1600000000 { + opp-hz = /bits/ 64 ; + opp-microvolt = <900000>; + clock-latency-ns = <70000>; + }; + opp-1700000000 { + opp-hz = /bits/ 64 ; + opp-microvolt = <900000>; + clock-latency-ns = <70000>; + }; + opp-1800000000 { + opp-hz = /bits/ 64 ; + opp-microvolt = <900000>; + clock-latency-ns = <70000>; + }; +}; \ No newline at end of file diff --git a/arch/riscv/boot/dts/eswin/eic7700-evb.dts b/arch/riscv/boot/dts/eswin/eic7700-evb.dts index 456890a19c8c..fe56de5b1699 100644 --- a/arch/riscv/boot/dts/eswin/eic7700-evb.dts +++ b/arch/riscv/boot/dts/eswin/eic7700-evb.dts @@ -38,7 +38,7 @@ / { #address-cells = <2>; #size-cells = <2>; - model = "ESWIN EIC7700"; + model = "ESWIN EIC7700 EVB A1"; compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000", "sifive,fu740", "eswin,eic7700"; @@ -889,3 +889,4 @@ &dev_llc_d0{ npu-supply=<&npu_vcc1>; status = "okay"; }; + diff --git a/arch/riscv/boot/dts/eswin/eic7700-hifive-premier-p550.dts b/arch/riscv/boot/dts/eswin/eic7700-hifive-premier-p550.dts index 0a8342376dbc..d4225cf45d5b 100644 --- a/arch/riscv/boot/dts/eswin/eic7700-hifive-premier-p550.dts +++ b/arch/riscv/boot/dts/eswin/eic7700-hifive-premier-p550.dts @@ -838,7 +838,6 @@ gpio111 : gphy1 resern(O), active low &gpio0 { status = "okay"; }; - &dev_llc_d0{ /* apply_npu_1G_freq; */ npu-supply=<&npu_vcc1>; @@ -851,14 +850,24 @@ &d0_clock { }; &d0_cpu_opp_table { + opp-1500000000 { + opp-hz = /bits/ 64 ; + opp-microvolt = <900000>; + clock-latency-ns = <70000>; + }; opp-1600000000 { opp-hz = /bits/ 64 ; opp-microvolt = <900000>; clock-latency-ns = <70000>; }; + opp-1700000000 { + opp-hz = /bits/ 64 ; + opp-microvolt = <900000>; + clock-latency-ns = <70000>; + }; opp-1800000000 { opp-hz = /bits/ 64 ; opp-microvolt = <900000>; clock-latency-ns = <70000>; }; -}; \ No newline at end of file +}; diff --git a/arch/riscv/boot/dts/eswin/eic7700-pinctrl.dtsi b/arch/riscv/boot/dts/eswin/eic7700-pinctrl.dtsi index 0529f98d4dd3..26aa840c9882 100644 --- a/arch/riscv/boot/dts/eswin/eic7700-pinctrl.dtsi +++ b/arch/riscv/boot/dts/eswin/eic7700-pinctrl.dtsi @@ -25,7 +25,7 @@ / { #size-cells = <2>; soc { pinctrl: pinctrl@0x51600080 { - compatible = "eswin,eic7700-pinctrl", "syscon"; + compatible = "eswin,eic7x-pinctrl", "syscon"; reg = <0x0 0x51600080 0x0 0x1FFF80>; status = "disabled"; //func0 @@ -41,12 +41,6 @@ mux { function = "sdio1_func"; }; }; - pinctrl_por_sel_default: por_sel-default{ - mux { - groups = "por_sel_group"; - function = "por_sel_func"; - }; - }; pinctrl_jtag0_default: jtag0-default{ mux { groups = "jtag0_group"; @@ -275,10 +269,10 @@ mux { function = "s_mode_func"; }; }; - pinctrl_pinmux_ddr_refclk_sel_default: pinmux_ddr_refclk_sel-default{ + pinctrl_ddr_refclk_sel_default: ddr_refclk_sel-default{ mux { - groups = "pinmux_ddr_refclk_sel_group"; - function = "pinmux_ddr_refclk_sel_func"; + groups = "ddr_refclk_sel_group"; + function = "ddr_refclk_sel_func"; }; }; pinctrl_boot_sel_default: boot_sel-default{ diff --git a/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d0.dts b/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d0.dts index 8c558119a286..1bacf33dd7c0 100644 --- a/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d0.dts +++ 
b/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d0.dts @@ -609,11 +609,10 @@ d0_codec1_endpoint: endpoint { }; &d0_i2c1 { - /* mpq8785 */ status = "okay"; - mpq8785@10 { - compatible = "mps,mpq8785"; - reg = <0x10>; + es5430@f { + compatible = "einno,es5340"; + reg = <0xf>; eswin,regulator_default-microvolt=<1000000>; eswin,regulator_label = "supply vdd1", "npu vdd1", "npu current1", "npu temperature1"; label = "npu_vdd"; @@ -693,37 +692,36 @@ &d0_i2c9 { }; &d0_aon_i2c0 { - /* ina226x4 & tmp102aidrlr */ + /* ina226x3 */ status = "okay"; i2c-sda-hold-time-ns = <0x40>; - ina226@45 { + ina226@40 { compatible = "ti,ina226"; #io-channel-cells = <1>; label = "cpu_vdd"; - reg = <0x45>; - shunt-resistor = <1000>; - }; - ina226@44 { - compatible = "ti,ina226"; - #io-channel-cells = <1>; - label = "soc_vdd"; - reg = <0x44>; + reg = <0x40>; shunt-resistor = <1000>; }; ina226@41 { compatible = "ti,ina226"; #io-channel-cells = <1>; - label = "lpddr_vdd"; + label = "d0_lpddr_vdd"; reg = <0x41>; shunt-resistor = <1000>; }; - ina226@48 { + ina226@44 { compatible = "ti,ina226"; #io-channel-cells = <1>; - label = "dc_vdd"; - reg = <0x48>; + label = "d0_soc_vdd"; + reg = <0x44>; shunt-resistor = <1000>; }; + tmp102@48 { + compatible = "ti,tmp102"; + reg = <0x48>; + label = "d0_board_tmp"; + #thermal-sensor-cells = <1>; + }; }; &d0_aon_i2c1 { @@ -812,4 +810,3 @@ &dev_llc_d0{ npu-supply=<&npu_vcc1>; status = "okay"; }; - diff --git a/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d1.dts b/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d1.dts index 2733649b4c3f..4154b2da9d22 100644 --- a/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d1.dts +++ b/arch/riscv/boot/dts/eswin/eic7702-evb-a1-d1.dts @@ -623,11 +623,10 @@ d0_codec1_endpoint: endpoint { }; &d0_i2c1 { - /* mpq8785 */ status = "okay"; - mpq8785@10 { - compatible = "mps,mpq8785"; - reg = <0x10>; + es5430@f { + compatible = "einno,es5340"; + reg = <0xf>; eswin,regulator_default-microvolt=<1000000>; eswin,regulator_label = "supply vdd1", "npu vdd1", "npu current1", "npu temperature1"; label = "npu_vdd"; diff --git a/arch/riscv/boot/dts/eswin/eic7702-evb-a1.dts b/arch/riscv/boot/dts/eswin/eic7702-evb-a1.dts new file mode 100644 index 000000000000..dfb0f03463bc --- /dev/null +++ b/arch/riscv/boot/dts/eswin/eic7702-evb-a1.dts @@ -0,0 +1,1508 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree file for Eswin EIC7700 SoC. + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * SPDX-License-Identifier: GPL-2.0 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +/dts-v1/; + +#define RTCCLK_FREQ 1000000 +#define LSPCLK_FREQ 200000000 + +/* If wanna enable ECC capability of DDR, should reserve highest zone of 1/8 all space for it */ +#define MEMORY_SIZE_H 0x4 +#define MEMORY_SIZE_L 0x0 +#define CMA_SIZE 0x20000000 + +#include "eswin-win2030-die0-soc.dtsi" +#include "eswin-win2030-die1-soc.dtsi" + +#include "eic7x-die0-pinctrl.dtsi" +#include "eic7x-die1-pinctrl.dtsi" +#include +#include +#include + +/* Clock frequency (in Hz) of the PCB crystal for rtcclk */ + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "ESWIN EIC7702 EVB A1"; + compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000", + "sifive,fu740", "eswin,eic7700"; + + aliases { + serial0 = &d0_uart0; + ethernet0 = &d0_gmac0; + ethernet1 = &d0_gmac1; + ethernet2 = &d1_gmac0; + ethernet3 = &d1_gmac1; + rtc0 = &die0_rtc; + rtc1 = &d1_rtc; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + cpus { + timebase-frequency = ; + }; + + memory@59000000 { + device_type = "memory"; + reg = <0x0 0x59000000 0x0 0x400000>; + numa-node-id = <0>; + }; + + memory@79000000 { + device_type = "memory"; + reg = <0x0 0x79000000 0x0 0x400000>; + numa-node-id = <1>; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x0 0x80000000 MEMORY_SIZE_H MEMORY_SIZE_L>; + numa-node-id = <0>; + }; + + memory@2000000000 { + device_type = "memory"; + reg = <0x20 0x00000000 MEMORY_SIZE_H MEMORY_SIZE_L>; + numa-node-id = <1>; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + linux,cma { + compatible = "shared-dma-pool"; + reusable; + size = <0x0 CMA_SIZE>; + alignment = <0x0 0x1000>; + alloc-ranges = <0x20 0x00000000 MEMORY_SIZE_H MEMORY_SIZE_L>; + linux,cma-default; + }; + + npu0_reserved: sprammemory@59000000 { + no-map; + reg = <0x0 0x59000000 0x0 0x400000>; + }; + + npu1_reserved: sprammemory@79000000 { + no-map; + reg = <0x0 0x79000000 0x0 0x400000>; + }; + + g2d_4GB_boundary_reserved_4k { + no-map; + reg = <0x0 0xfffff000 0x0 0x1000>; + }; + + g2d_8GB_boundary_reserved_4k { + no-map; + reg = <0x1 0xfffff000 0x0 0x1000>; + }; + + g2d_12GB_boundary_reserved_4k { + no-map; + reg = <0x2 0xfffff000 0x0 0x1000>; + }; + + mmz_nid_0_part_0 { + compatible = "eswin-reserve-memory"; + reg = <0x3 0x0 0x1 0x80000000>; + no-map; + }; + + d1_g2d_4GB_boundary_reserved_4k { + no-map; + reg = <0x20 0xfffff000 0x0 0x1000>; + }; + + d1_g2d_8GB_boundary_reserved_4k { + no-map; + reg = <0x21 0xfffff000 0x0 0x1000>; + }; + + mmz_nid_1_part_0 { + compatible = "eswin-reserve-memory"; + reg = <0x22 0x80000000 0x1 0x80000000>; + no-map; + }; + }; + + distance-map { + compatible = "numa-distance-map-v1"; + distance-matrix = <0 0 10>, + <0 1 100>, + <1 0 100>, + <1 1 10>; + }; + + /* die1 gpio68 system led */ + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die1_gpio68_default>; + + gpio-68 { + gpios = <&d1_portc 4 GPIO_ACTIVE_LOW>; + label = "heartbeat"; + linux,default-trigger = "heartbeat"; + }; + }; + +}; + +&d0_clock { + status = "okay"; +}; +&d1_clock { + status = "okay"; +}; + +&d0_reset { + status = "okay"; +}; +&d1_reset { + status = "okay"; +}; + +&d0_pmu { + status = "okay"; +}; +&d1_pmu { + status = "disabled"; +}; + +&ddr0 { + status = "okay"; +}; + +&ddr1 { + status = "okay"; +}; + +&d1_ddr0 { + status = "okay"; +}; + +&d1_ddr1 { + status = "okay"; +}; + +&smmu0 { + status = "okay"; +}; +&smmu1 { + status = "okay"; +}; + +&smmu_pmu0 { + status = "disabled"; +}; +&smmu_pmu1 { + status = "disabled"; +}; + + 
+&dev_foo_a { + status = "okay"; +}; + +&d0_cfg_noc { + status = "okay"; +}; + +&d0_llc_noc { + status = "okay"; + stat,0 = "TracePort:ddr0_p0_req"; + stat,1 = "TracePort:ddr1_p0_req"; + //latency,0 = "TracePort:llcnoc_trans_probe"; + //pending,0 = "TracePort:llcnoc_trans_probe"; +}; + +&d0_sys_noc { + status = "okay"; + + //eswin,DSPT-qos-owner; + //eswin,NPU-qos-owner; + //eswin,SPISLV_TBU3-qos-owner; + + stat,0 = "TracePort:ddr0_p1_req", + "InitFlow:mcput_snoc_mp/I/0"; + + stat,1 = "TracePort:ddr0_p2_req", + "InitFlow:dspt_snoc/I/0", + "AddrBase:0x81000000", "AddrSize:0x30", + "Opcode:RdWrLockUrg", "Status:ReqRsp", "Length:0x8000", "Urgency:0x0"; + + stat,2 = "TracePort:ddr1_p1_req", + "Status:Req", "AddrSize:0x28"; + + stat,3 = "TracePort:ddr1_p2_req"; + + latency,0 = "TracePort:sysnoc_trans_probe_0", "AddrSize:0x0"; + latency,1 = "TracePort:sysnoc_trans_probe_1","Mode:latency","AddrBase:0x82000000","AddrSize:0x28","Opcode:RdWr"; + //latency,2 = "TracePort:sysnoc_trans_probe_2"; + + //pending,0 = "TracePort:sysnoc_trans_probe_0"; + //pending,1 = "TracePort:sysnoc_trans_probe_1","Mode:latency","AddrBase:0x82000000","AddrSize:0x0","Opcode:RdWr"; + pending,0 = "TracePort:sysnoc_trans_probe_2", "AddrSize:0x3"; +}; + +&d0_media_noc { + status = "okay"; + + //eswin,GPU-qos-owner; + //eswin,TBU2-qos-owner; + //eswin,VC-qos-owner; + + stat,0 = "TracePort:ddr0_p3_req"; + stat,1 = "TracePort:ddr1_p3_req"; + //latency,0 = "TracePort:mnoc_trans_probe"; + //pending,0 = "TracePort:mnoc_trans_probe"; +}; + +&d0_realtime_noc { + status = "okay"; + + //eswin,TBU0-qos-owner; + //eswin,VO-qos-owner; + + stat,0 = "TracePort:ddr0_p4_req"; + stat,1 = "TracePort:ddr1_p4_req"; + //latency,0 = "TracePort:rnoc_trans_probe"; + //pending,0 = "TracePort:rnoc_trans_probe"; +}; + +&d0_noc_wdt { + status = "okay"; +}; + +&d1_cfg_noc { + status = "okay"; +}; + +&d1_llc_noc { + status = "okay"; + stat,0 = "TracePort:ddr0_p0_req"; + stat,1 = "TracePort:ddr1_p0_req"; + //latency,0 = "TracePort:llcnoc_trans_probe"; + //pending,0 = "TracePort:llcnoc_trans_probe"; +}; + +&d1_sys_noc { + status = "okay"; + + //eswin,DSPT-qos-owner; + //eswin,NPU-qos-owner; + //eswin,SPISLV_TBU3-qos-owner; + + stat,0 = "TracePort:ddr0_p1_req", + "InitFlow:mcput_snoc_mp/I/0"; + + stat,1 = "TracePort:ddr0_p2_req", + "InitFlow:dspt_snoc/I/0", + "AddrBase:0x81000000", "AddrSize:0x30", + "Opcode:RdWrLockUrg", "Status:ReqRsp", "Length:0x8000", "Urgency:0x0"; + + stat,2 = "TracePort:ddr1_p1_req", + "Status:Req", "AddrSize:0x28"; + + stat,3 = "TracePort:ddr1_p2_req"; + + latency,0 = "TracePort:sysnoc_trans_probe_0", "AddrSize:0x0"; + latency,1 = "TracePort:sysnoc_trans_probe_1","Mode:latency","AddrBase:0x82000000","AddrSize:0x28","Opcode:RdWr"; + //latency,2 = "TracePort:sysnoc_trans_probe_2"; + + //pending,0 = "TracePort:sysnoc_trans_probe_0"; + //pending,1 = "TracePort:sysnoc_trans_probe_1","Mode:latency","AddrBase:0x82000000","AddrSize:0x0","Opcode:RdWr"; + pending,0 = "TracePort:sysnoc_trans_probe_2", "AddrSize:0x3"; +}; + +&d1_media_noc { + status = "okay"; + + //eswin,GPU-qos-owner; + //eswin,TBU2-qos-owner; + //eswin,VC-qos-owner; + + stat,0 = "TracePort:ddr0_p3_req"; + stat,1 = "TracePort:ddr1_p3_req"; + //latency,0 = "TracePort:mnoc_trans_probe"; + //pending,0 = "TracePort:mnoc_trans_probe"; +}; + +&d1_realtime_noc { + status = "okay"; + + //eswin,TBU0-qos-owner; + //eswin,VO-qos-owner; + + stat,0 = "TracePort:ddr0_p4_req"; + stat,1 = "TracePort:ddr1_p4_req"; + //latency,0 = "TracePort:rnoc_trans_probe"; + //pending,0 = 
"TracePort:rnoc_trans_probe"; +}; + +&d1_noc_wdt { + status = "okay"; +}; + +&d0_ipc_scpu { + status = "okay"; +}; +&d1_ipc_scpu { + status = "disabled"; +}; + +&d0_lpcpu { + status = "okay"; +}; +&d1_lpcpu { + status = "disabled"; +}; + +&pcie { + status = "okay"; +}; +&d1_pcie { + status = "okay"; +}; + +&d0_npu{ + status = "okay"; +}; +&d1_npu { + status = "okay"; +}; + +&d0_dsp_subsys { + status = "okay"; +}; +&d1_dsp_subsys { + status = "okay"; +}; + +&d0_dsp0 { + status = "okay"; +}; +&d1_dsp0 { + status = "okay"; +}; + +&d0_dsp1 { + status = "okay"; +}; +&d1_dsp1 { + status = "okay"; +}; + +&d0_dsp2 { + status = "okay"; +}; +&d1_dsp2 { + status = "okay"; +}; + +&d0_dsp3 { + status = "okay"; +}; +&d1_dsp3 { + status = "okay"; +}; + +&gpu0 { + status = "okay"; +}; +&d1_gpu { + status = "disabled"; +}; + +&gc820 { + status = "okay"; +}; +&d1_gc820 { + status = "okay"; +}; + +&vdec0 { + status = "okay"; +}; +&vdec1 { + status = "okay"; +}; + +&venc0 { + status = "okay"; +}; +&venc1 { + status = "okay"; +}; + +&video_output { + status = "okay"; +}; + +&d1_video_output { + status = "okay"; +}; + +&dc { + status = "okay"; +}; + +&d1_dc { + status = "okay"; +}; + +&dc_test { + status = "disabled"; +}; +&d1_dc_test { + status = "disabled"; +}; + +&virtual_display { + status = "okay"; +}; + +&d1_virtual_display { + status = "okay"; +}; + +&dsi_controller { + status = "okay"; +}; +&d1_dsi_controller { + status = "okay"; +}; + +&dsi_panel { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die0_gpio27_default &pinctrl_die0_gpio106_default>; + backlight0-gpios = <&porta 27 GPIO_ACTIVE_HIGH>; + rst-gpios = <&portd 10 GPIO_ACTIVE_HIGH>; +}; +&d1_dsi_panel { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die1_gpio69_default &pinctrl_die1_gpio21_default>; + backlight0-gpios = <&d1_portc 5 GPIO_ACTIVE_HIGH>; + rst-gpios = <&d1_porta 21 GPIO_ACTIVE_HIGH>; +}; + +&dw_hdmi { + status = "okay"; + ports { + port@2 { + reg = <2>; + hdmi_in_i2s: endpoint@1 { + system-clock-frequency = <12288000>; + remote-endpoint = <&d0_i2s0_endpoint>; + }; + }; + }; +}; +&d1_dw_hdmi { + status = "okay"; + ports { + port@2 { + reg = <2>; + d1_hdmi_in_i2s: endpoint@1 { + system-clock-frequency = <12288000>; + remote-endpoint = <&d1_i2s0_endpoint>; + }; + }; + }; +}; + +&dw_hdmi_hdcp2 { + status = "okay"; +}; +&d1_dw_hdmi_hdcp2 { + status = "disabled"; +}; + + +&d0_i2s0 { + status = "okay"; + numa-node-id = <0>; + d0_i2s0_port: port { + d0_i2s0_endpoint: endpoint { + remote-endpoint = <&hdmi_in_i2s>; + dai-format = "i2s"; + }; + }; +}; + +&d1_i2s0 { + status = "okay"; + numa-node-id = <1>; + d1_i2s0_port: port { + d1_i2s0_endpoint: endpoint { + remote-endpoint = <&d1_hdmi_in_i2s>; + dai-format = "i2s"; + }; + }; +}; + +&d0_i2s1 { + status = "okay"; + numa-node-id = <0>; + d0_i2s1_port: port { + d0_i2s1_endpoint: endpoint { + remote-endpoint = <&d0_codec0_endpoint>; + dai-format = "i2s"; + }; + }; +}; + +&d1_i2s1 { + status = "okay"; + numa-node-id = <1>; + d1_i2s1_port: port { + d1_i2s1_endpoint: endpoint { + remote-endpoint = <&d1_codec0_endpoint>; + dai-format = "i2s"; + }; + }; +}; + +&d0_i2s2 { + status = "okay"; + numa-node-id = <0>; + d0_i2s2_port: port { + d0_i2s2_endpoint: endpoint { + remote-endpoint = <&d0_codec1_endpoint>; + dai-format = "i2s"; + }; + }; +}; + +&d1_i2s2 { + status = "okay"; + numa-node-id = <1>; + d1_i2s2_port: port { + d1_i2s2_endpoint: endpoint { + remote-endpoint = <&d1_codec1_endpoint>; + dai-format = "i2s"; + }; + }; +}; + +&d0_graphcard0 { + 
status = "okay"; + label = "Analog Audio-0"; + dais = <&d0_i2s1_port>; +}; + +&d0_graphcard1 { + status = "okay"; + label = "Analog Audio-1"; + dais = <&d0_i2s2_port>; +}; + +&d0_graphcard2 { + status = "okay"; + label = "HDMI Audio-0"; + dais = <&d0_i2s0_port>; +}; + +&d1_graphcard0 { + status = "okay"; + label = "Analog Audio-2"; + dais = <&d1_i2s1_port>; +}; + +&d1_graphcard1 { + status = "okay"; + label = "Analog Audio-3"; + dais = <&d1_i2s2_port>; +}; + +&d1_graphcard2 { + status = "okay"; + label = "HDMI Audio-1"; + dais = <&d1_i2s0_port>; +}; + +&isp_0 { + status = "okay"; +}; +&d1_isp_0 { + status = "disabled"; +}; + +&isp_1 { + status = "okay"; +}; +&d1_isp_1 { + status = "disabled"; +}; + +&dewarp { + status = "okay"; +}; +&d1_dewarp { + status = "okay"; +}; + +&mipi_dphy_rx { + status = "okay"; +}; +&d1_mipi_dphy_rx { + status = "disabled"; +}; + +&csi_dma0 { + status = "okay"; +}; +&d1_csi_dma0 { + status = "disabled"; +}; + +&csi_dma1 { + status = "disabled"; +}; +&d1_csi_dma1 { + status = "disabled"; +}; + +&csi2_0 { + status = "okay"; +}; +&d1_csi2_0 { + status = "disabled"; +}; + +&csi2_1 { + status = "disabled"; +}; +&d1_csi2_1 { + status = "disabled"; +}; + +&sdhci_emmc { + /* emmc */ + status = "okay"; + delay_code = <0x17>; + drive-impedance-ohm = <50>; + enable-cmd-pullup; + enable-data-pullup; + no-sdio; + no-sd; +}; + +&d1_sdhci_emmc { + /* emmc */ + status = "okay"; + delay_code = <0x17>; + drive-impedance-ohm = <50>; + enable-cmd-pullup; + enable-data-pullup; + no-sdio; + no-sd; +}; + +&sdio0 { + /* sd card */ + status = "okay"; + delay_code = <0x55>; + drive-impedance-ohm = <33>; + enable-cmd-pullup; + enable-data-pullup; + no-sdio; + no-mmc; +}; + +&d1_sdio0 { + /* wifi module */ + status = "okay"; + delay_code = <0x29>; + drive-impedance-ohm = <33>; + enable-cmd-pullup; + enable-data-pullup; + keep-power-in-suspend; + non-removable; + no-sd; + no-mmc; + aw315:wifi_aw3155@0 { + compatible = "aml_w1_sdio"; + reg = <0x0>; + interrupt-parent = <&d1_porta>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default", "default"; + pinctrl-0 = <&pinctrl_die1_gpio18_default>; + pinctrl-1 = <&pinctrl_die1_gpio16_default>; + irq-gpios = <&d1_porta 18 GPIO_ACTIVE_HIGH>; + rst-gpios = <&d1_porta 16 GPIO_ACTIVE_HIGH>; + }; +}; + +&sdio1 { + status = "disabled"; +}; + +&d1_sdio1 { + status = "disabled"; +}; + +&d0_gmac0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die0_gpio15_default>; + rst-gpios = <&porta 15 GPIO_ACTIVE_LOW>; + eswin,rgmiisel = <&pinctrl 0x290 0x3>; + eswin,led-cfgs = <0x6100 0xa40 0x420>; + + status = "okay"; +}; + +&d1_gmac0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die1_gpio13_default>; + rst-gpios = <&d1_porta 13 GPIO_ACTIVE_LOW>; + eswin,rgmiisel = <&d1_pinctrl 0x290 0x3>; + eswin,led-cfgs = <0x6100 0xa40 0x420>; + + status = "okay"; +}; + +&d0_gmac1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die0_gpio14_default>; + rst-gpios = <&porta 14 GPIO_ACTIVE_LOW>; + eswin,rgmiisel = <&pinctrl 0x294 0x3>; + eswin,led-cfgs = <0x6100 0xa40 0x420>; + + status = "okay"; +}; + +&d1_gmac1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die1_gpio14_default>; + rst-gpios = <&d1_porta 14 GPIO_ACTIVE_LOW>; + eswin,rgmiisel = <&d1_pinctrl 0x294 0x3>; + eswin,led-cfgs = <0x6100 0xa40 0x420>; + + status = "okay"; +}; + +&d0_sata { + status = "okay"; +}; + +&d1_sata { + status = "okay"; +}; + +&d0_usbdrd3_0 { + status = "okay"; +}; +&d1_usbdrd3_0 { + status = "okay"; +}; + +&d0_usbdrd_dwc3_0 { + status = "okay"; + dr_mode = 
"host"; + maximum-speed = "super-speed"; +}; + +&d1_usbdrd_dwc3_0 { + status = "okay"; + dr_mode = "host"; + maximum-speed = "super-speed"; +}; + +&d0_usbdrd3_1 { + status = "okay"; +}; + +&d1_usbdrd3_1 { + status = "okay"; +}; + +&d0_usbdrd_dwc3_1 { + status = "okay"; + dr_mode = "host"; + maximum-speed = "super-speed"; +}; + +&d1_usbdrd_dwc3_1 { + status = "okay"; + dr_mode = "host"; + maximum-speed = "super-speed"; +}; + +&d0_dmac0 { + status = "okay"; +}; + +&d1_dmac0 { + status = "disabled"; +}; + +&d0_aon_dmac { + status = "okay"; +}; + +&d1_aon_dmac { + status = "okay"; +}; + +&d0_uart0 { + /* debug */ + status = "okay"; +}; + +&d1_uart0 { + status = "okay"; +}; + +&d0_uart1 { + /* RS232 DB9 */ + status = "okay"; +}; + +&d1_uart1 { + /* BT M.2 KEY-E */ + status = "okay"; +}; + +&d0_uart2 { + /* pin header */ + status = "okay"; +}; + +&d1_uart2 { + /* pin header */ + status = "okay"; +}; + +&d0_uart3 { + /* unused */ + status = "disabled"; +}; + +&d1_uart3 { + /* unused */ + status = "disabled"; +}; + +&d0_uart4 { + /* unused */ + status = "disabled"; +}; + +&d1_uart4 { + /* unused */ + status = "disabled"; +}; + +&ssi0 { + /* spi flash */ + status = "okay"; + num-cs = <2>; + spi-flash@0 { + compatible = "winbond,w25q128fw", + "jedec,spi-nor"; + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <4800000>; + rx-sample-delay-ns = <10>; + }; + spi-flash@1 { + compatible = "winbond,w25q128fw", + "jedec,spi-nor"; + reg = <1>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <4800000>; + rx-sample-delay-ns = <10>; + }; +}; +&d1_ssi0 { + /* spi flash */ + status = "okay"; + num-cs = <2>; + spi-flash@0 { + compatible = "winbond,w25q128fw", + "jedec,spi-nor"; + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <4800000>; + rx-sample-delay-ns = <10>; + }; + spi-flash@1 { + compatible = "winbond,w25q128fw", + "jedec,spi-nor"; + reg = <1>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <4800000>; + rx-sample-delay-ns = <10>; + }; +}; + +&ssi1 { + /* unused */ + status = "disabled"; +}; + +&d1_ssi1 { + /* unused */ + status = "disabled"; +}; + +&bootspi { + /* spi flash */ + status = "okay"; + num-cs = <1>; + cs-gpios = <&portd 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&portd 4 GPIO_ACTIVE_LOW>; + spi-flash@0 { + compatible = "winbond,w25q128jw", + "jedec,spi-nor"; + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <4800000>; + rx-sample-delay-ns = <10>; + }; +}; + +&d1_bootspi { + /* spi flash */ + status = "disabled"; + num-cs = <1>; + cs-gpios = <&d1_portd 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&d1_portd 4 GPIO_ACTIVE_LOW>; + spi-flash@0 { + compatible = "winbond,w25q128jw", + "jedec,spi-nor"; + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <4800000>; + rx-sample-delay-ns = <10>; + }; +}; + +&d0_mbox0 { + status = "okay"; +}; +&d1_mbox0 { + status = "okay"; +}; + +&d0_mbox1 { + status = "okay"; +}; +&d1_mbox1 { + status = "okay"; +}; + +&d0_mbox2 { + status = "okay"; +}; +&d1_mbox2 { + status = "okay"; +}; + +&d0_mbox3 { + status = "okay"; +}; +&d1_mbox3 { + status = "okay"; +}; + +&d0_mbox4 { + status = "okay"; +}; +&d1_mbox4 { + status = "okay"; +}; + +&d0_mbox5 { + status = "okay"; +}; +&d1_mbox5 { + status = "okay"; +}; + +&d0_mbox6 { + status = "okay"; +}; +&d1_mbox6 { + status = "okay"; +}; + +&d0_mbox7 { + status = "okay"; +}; +&d1_mbox7 { + status = "okay"; +}; + +&fan_control { + status = "okay"; +}; +&d1_fan_control { + status = "disabled"; +}; 
+ +&d0_i2c0 { + /* codec es8388 */ + status = "okay"; + d0_es8388_0: es8388-0@10 { + compatible = "eswin,es8388"; + reg = <0x10>; + #sound-dai-cells = <0>; + port { + d0_codec0_endpoint: endpoint { + system-clock-frequency = <12288000>; + remote-endpoint = <&d0_i2s1_endpoint>; + }; + }; + }; + d0_es8388_1: es8388-1@11 { + compatible = "eswin,es8388"; + reg = <0x11>; + #sound-dai-cells = <0>; + port { + d0_codec1_endpoint: endpoint { + system-clock-frequency = <12288000>; + remote-endpoint = <&d0_i2s2_endpoint>; + }; + }; + }; +}; + +&d1_i2c0 { + /* codec es8388 */ + status = "okay"; + d1_es8388_0: es8388-0@10 { + compatible = "eswin,es8388"; + reg = <0x10>; + #sound-dai-cells = <0>; + port { + d1_codec0_endpoint: endpoint { + system-clock-frequency = <12288000>; + remote-endpoint = <&d1_i2s1_endpoint>; + }; + }; + }; + d1_es8388_1: es8388-1@11 { + compatible = "eswin,es8388"; + reg = <0x11>; + #sound-dai-cells = <0>; + port { + d1_codec1_endpoint: endpoint { + system-clock-frequency = <12288000>; + remote-endpoint = <&d1_i2s2_endpoint>; + }; + }; + }; +}; + +&d0_i2c1 { + status = "okay"; + es5430@f { + compatible = "einno,es5340"; + reg = <0xf>; + eswin,regulator_default-microvolt=<1000000>; + eswin,regulator_label = "supply vdd1", "npu vdd1", "npu current1", "npu temperature1"; + label = "npu_vdd"; + regulators{ + d0_npu_vcc:npu_svcc{ + regulator-name="NPU_SVCC"; + regulator-min-microvolt=<700000>; + regulator-max-microvolt=<1100000>; + regulator-min-microamp=<20000000>; + regulator-max-microamp=<40000000>; + regulator-ov-protection-microvolt=<1100000>; + regulator-always-on; + }; + }; + }; +}; +&d1_i2c1 { + status = "okay"; + es5430@f { + compatible = "einno,es5340"; + reg = <0xf>; + eswin,regulator_default-microvolt=<1000000>; + eswin,regulator_label = "supply vdd1", "npu vdd1", "npu current1", "npu temperature1"; + label = "d1_npu_vdd"; + regulators{ + d1_npu_vcc:npu_svcc{ + regulator-name="NPU_SVCC2"; + regulator-min-microvolt=<700000>; + regulator-max-microvolt=<1100000>; + regulator-min-microamp=<20000000>; + regulator-max-microamp=<40000000>; + regulator-ov-protection-microvolt=<1100000>; + regulator-always-on; + }; + }; + }; +}; + +&d0_i2c2 { + /* touch screen*/ + status = "ok"; + d0_gt911:touchscreen@14 { + compatible = "goodix,gt911"; + reg = <0x14>; + interrupt-parent = <&porta>; + interrupts = <20 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die0_gpio20_default>; + irq-gpios = <&porta 20 GPIO_ACTIVE_HIGH>; + reset-gpios = <&portd 15 GPIO_ACTIVE_HIGH>; + }; +}; + +&d1_i2c2 { + /* touch screen*/ + status = "ok"; + d1_gt911:touchscreen@14 { + compatible = "goodix,gt911"; + reg = <0x14>; + interrupt-parent = <&d1_porta>; + interrupts = <0 IRQ_TYPE_EDGE_RISING>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_die1_gpio0_default>; + irq-gpios = <&d1_porta 0 GPIO_ACTIVE_HIGH>; + reset-gpios = <&d1_portd 15 GPIO_ACTIVE_HIGH>; + }; +}; + +&d0_i2c3 { + /* mipi csi0/csi1 */ + status = "okay"; +}; +&d1_i2c3 { + status = "disabled"; +}; + +&d0_i2c4 { + /* mipi csi2/csi3 */ + status = "disabled"; +}; +&d1_i2c4 { + status = "disabled"; +}; + +&d0_i2c5 { + /* mipi csi4/csi5 */ + status = "disabled"; +}; +&d1_i2c5 { + status = "disabled"; +}; + +&d0_i2c6 { + /* unused */ + status = "disabled"; +}; +&d1_i2c6 { + status = "disabled"; +}; + +&d0_i2c7 { + /* unused */ + status = "disabled"; +}; +&d1_i2c7 { + status = "disabled"; +}; + +&d0_i2c8 { + /* io extended for mipi csi */ + status = "okay"; + tca6416_0: gpio@20 { + compatible = "ti,tca6416"; + reg = 
<0x20>; + gpio-controller; /* IRQ not connected */ + #gpio-cells = <2>; + gpio-line-names = "MIPI_CSI0_PWDN", "MIPI_CSI0_RESET", "MIPI_CSI1_FBC", "MIPI_CSI1_ENB", + "MIPI_CSI1_RESET", "MIPI_CSI1_PWDN", "FREX_GP0", "", + "MIPI_CSI0_ENB", "MIPI_CSI0_FBC", "FREX_GP2", "MIPI_CSI2_FBC", + "MIPI_CSI2_ENB", "FREX_GP1", "MIPI_CSI2_RESET", "MIPI_CSI2_PWDN"; + }; + tca6416_1: gpio@21 { + compatible = "ti,tca6416"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + /* IRQ not connected */ + gpio-line-names = "MIPI_CSI3_PWDN", "MIPI_CSI3_RESET", "MIPI_CSI3_ENB", "MIPI_CSI3_FBC", + "MIPI_CSI4_PWDN", "MIPI_CSI4_RESET", "MIPI_CSI4_ENB", "MIPI_CSI4_FBC", + "MIPI_CSI5_FBC", "MIPI_CSI5_ENB", "MIPI_CSI5_RESET", "MIPI_CSI5_PWDN", + "", "", "", ""; + }; +}; +&d1_i2c8 { + status = "disabled"; +}; + +&d0_i2c9 { + /* unused */ + status = "disabled"; +}; +&d1_i2c9 { + status = "disabled"; +}; + +&d0_aon_i2c0 { + /* ina226x3 */ + status = "okay"; + i2c-sda-hold-time-ns = <0x40>; + ina226@40 { + compatible = "ti,ina226"; + #io-channel-cells = <1>; + label = "cpu_vdd"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + ina226@41 { + compatible = "ti,ina226"; + #io-channel-cells = <1>; + label = "d0_lpddr_vdd"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + ina226@44 { + compatible = "ti,ina226"; + #io-channel-cells = <1>; + label = "d0_soc_vdd"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + tmp102@48 { + compatible = "ti,tmp102"; + reg = <0x48>; + label = "d0_board_tmp"; + #thermal-sensor-cells = <1>; + }; +}; + + +&d1_aon_i2c0 { + /* ina226x4 */ + status = "okay"; + i2c-sda-hold-time-ns = <0x40>; + ina226@40 { + compatible = "ti,ina226"; + #io-channel-cells = <1>; + label = "vdd_dc_in"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + ina226@41 { + compatible = "ti,ina226"; + #io-channel-cells = <1>; + label = "d1_lpddr_vdd"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + ina226@44 { + compatible = "ti,ina226"; + #io-channel-cells = <1>; + label = "d1_soc_vdd"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + tmp102@48 { + compatible = "ti,tmp102"; + reg = <0x48>; + label = "d1_board_tmp"; + #thermal-sensor-cells = <1>; + }; +}; +&d1_aon_i2c1 { + status = "disabled"; +}; + +&pwm0 { + /* fan */ + status = "okay"; +}; +&d1_pwm0 { + status = "disabled"; +}; + +&pvt0 { + status = "okay"; +}; +&d1_pvt0 { + status = "okay"; +}; + +&pvt1 { + status = "okay"; +}; +&d1_pvt1 { + status = "okay"; +}; + +&wdt0 { + status = "disabled"; +}; +&d1_wdt0 { + status = "disabled"; +}; + +&wdt1 { + status = "disabled"; +}; +&d1_wdt1 { + status = "disabled"; +}; + +&wdt2 { + status = "disabled"; +}; +&d1_wdt2 { + status = "disabled"; +}; + +&wdt3 { + status = "disabled"; +}; +&d1_wdt3 { + status = "disabled"; +}; + +&die0_rtc { + status = "okay"; +}; +&d1_rtc { + status = "okay"; +}; + +&timer0 { + status = "okay"; +}; +&d1_timer0 { + status = "okay"; +}; + +&timer1 { + status = "okay"; +}; +&d1_timer1 { + status = "okay"; +}; + +&timer2 { + status = "okay"; +}; +&d1_timer2 { + status = "okay"; +}; + +&timer3 { + status = "okay"; +}; +&d1_timer3 { + status = "okay"; +}; + +&pinctrl { + status = "okay"; +}; +&d1_pinctrl { + status = "okay"; +}; + +&gpio0 { + status = "okay"; +}; +&d1_gpio0 { + status = "okay"; +}; + +&dev_llc_d0{ + /* apply_npu_1G_freq; */ + npu-supply=<&d0_npu_vcc>; + status = "okay"; +}; +&d1_llc_dev{ + /* apply_npu_1G_freq; */ + npu-supply=<&d1_npu_vcc>; + status = "okay"; +}; + +&d2d { + status = "okay"; +}; + +&d1_d2d { + status = "okay"; +}; + +/* die0 GPIO Function Description + + gpio0 : pcie 
prstn(I) + gpio11 : tmp alert(I) + gpio13 : tf card detect(I) + gpio14 : gphy1 resetn(O) + gpio15 : gphy0 resetn(O) + gpio18 : ina226 alert(I) + gpio20 : lcd touch int(I) + gpio21 : head phone plug/unplug detection2(I) + gpio27 : black light pwr_en(O) + gpio28 : head phone plug/unplug detection1(I) + gpio68 : fan pwm(O) + gpio69 : fan touch(I) + gpio92 : frex gp0(O) + gpio93 : frex gp1(O) + gpio106 : mipi dsi resetn(O) + gpio111 : lcd touch resetn(O) +*/ +/* die1 GPIO Function Description + + gpio0 : lcd touch int(I) + gpio13 : gphy0 resetn(O) + gpio14 : gphy1 resetn(O) + gpio15 : bt wake host(I) + gpio16 : sdio wifi disable(O) + gpio18 : sdio wifi wake host(I) + gpio19 : pcie prstn(I) + gpio20 : head phone plug/unplug detection2(I) + gpio21 : mipi dsi resetn(O) + gpio27 : tmp alert(I) + gpio28 : head phone plug/unplug detection1(I) + gpio68 : system led(O) + gpio69 : black light pwr_en(O) + gpio92 : frex gp0(O) + gpio93 : frex gp1(O) + gpio106 : host wak bt(O) + gpio111 : lcd touch resetn(O) +*/ diff --git a/arch/riscv/boot/dts/eswin/eic7x-die0-pinctrl.dtsi b/arch/riscv/boot/dts/eswin/eic7x-die0-pinctrl.dtsi new file mode 100644 index 000000000000..8409a0886b7d --- /dev/null +++ b/arch/riscv/boot/dts/eswin/eic7x-die0-pinctrl.dtsi @@ -0,0 +1,1447 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Include file for pin control of Eswin EIC770x family SoC. + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * SPDX-License-Identifier: GPL-2.0 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include "eswin-win2030-die0-soc.dtsi" +#include + +/ { + #address-cells = <2>; + #size-cells = <2>; + soc { + pinctrl: pinctrl@0x51600080 { + compatible = "eswin,eic7x-pinctrl", "syscon"; + reg = <0x0 0x51600080 0x0 0x1FFF80>; + status = "disabled"; + + //Dual Die Configuration + //func0 + /* used with pinctrl_die1_jtag1_off_default*/ + pinctrl_die0_jtag1_on_default: jtag1_on-default{ + mux { + groups = "jtag1_on_group"; + function = "jtag1_on_func"; + }; + }; + /* used with pinctrl_die1_jtag1_on_default*/ + pinctrl_die0_jtag1_off_default: jtag1_off-default{ + mux { + groups = "jtag1_off_group"; + function = "jtag1_off_func"; + }; + }; + + /* used with pinctrl_die1_jtag2_off_default*/ + pinctrl_die0_jtag2_on_default: jtag2_on-default{ + mux { + groups = "jtag2_on_group"; + function = "jtag2_on_func"; + }; + }; + /* used with pinctrl_die1_jtag2_on_default*/ + pinctrl_die0_jtag2_off_default: jtag2_off-default{ + mux { + groups = "jtag2_off_group"; + function = "jtag2_off_func"; + }; + }; + + //func2 + /* used with pinctrl_die1_gpio7_off_default*/ + pinctrl_die0_gpio7_on_default: gpio7_on-default{ + mux { + groups = "gpio7_on_group"; + function = "gpio7_on_func"; + }; + conf { + groups = "gpio7_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die1_gpio7_on_default*/ + pinctrl_die0_gpio7_off_default: gpio7_off-default{ + mux { + groups = "gpio7_off_group"; + function = "gpio7_off_func"; + }; + }; + + /* used with pinctrl_die1_gpio8_off_default*/ + pinctrl_die0_gpio8_on_default: gpio8_on-default{ + mux { + groups = "gpio8_on_group"; + function = "gpio8_on_func"; + }; + conf { + groups = "gpio8_on_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + /* used with pinctrl_die1_gpio8_on_default*/ + pinctrl_die0_gpio8_off_default: gpio8_off-default{ + mux { + groups = "gpio8_off_group"; + function = "gpio8_off_func"; + }; + }; + + /* used with pinctrl_die1_gpio9_off_default*/ + pinctrl_die0_gpio9_on_default: gpio9_on-default{ + mux { + groups = "gpio9_on_group"; + function = "gpio9_on_func"; + }; + conf { + groups = "gpio9_on_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + /* used with pinctrl_die1_gpio9_on_default*/ + pinctrl_die0_gpio9_off_default: gpio9_off-default{ + mux { + groups = "gpio9_off_group"; + function = "gpio9_off_func"; + }; + }; + + /* used with pinctrl_die1_gpio17_off_default*/ + pinctrl_die0_gpio17_on_default: gpio17_on-default{ + mux { + groups = "gpio17_on_group"; + function = "gpio17_on_func"; + }; + conf { + groups = "gpio17_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die1_gpio17_on_default*/ + pinctrl_die0_gpio17_off_default: gpio17_off-default{ + mux { + groups = "gpio17_off_group"; + function = "gpio17_off_func"; + }; + }; + + /* used with pinctrl_die1_gpio64_off_default*/ + pinctrl_die0_gpio64_on_default: gpio64_on-default{ + mux { + groups = "gpio64_on_group"; + function = "gpio64_on_func"; + }; + conf { + groups = "gpio64_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die1_gpio64_on_default*/ + pinctrl_die0_gpio64_off_default: gpio64_off-default{ + mux { + groups = "gpio64_off_group"; + function = "gpio64_off_func"; + }; + }; + + /* used with pinctrl_die1_gpio65_off_default*/ + pinctrl_die0_gpio65_on_default: gpio65_on-default{ + mux { + groups = "gpio65_on_group"; + function = "gpio65_on_func"; + }; + conf { + groups = "gpio65_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with 
pinctrl_die1_gpio65_on_default*/ + pinctrl_die0_gpio65_off_default: gpio65_off-default{ + mux { + groups = "gpio65_off_group"; + function = "gpio65_off_func"; + }; + }; + + /* used with pinctrl_die1_gpio66_off_default*/ + pinctrl_die0_gpio66_on_default: gpio66_on-default{ + mux { + groups = "gpio66_on_group"; + function = "gpio66_on_func"; + }; + conf { + groups = "gpio66_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die1_gpio66_on_default*/ + pinctrl_die0_gpio66_off_default: gpio66_off-default{ + mux { + groups = "gpio66_off_group"; + function = "gpio66_off_func"; + }; + }; + + //func6 + /* used with pinctrl_die1_vc_g2d0_debug_out_off_default*/ + pinctrl_die0_vc_g2d0_debug_out_on_default: vc_g2d0_debug_out_on-default{ + mux { + groups = "vc_g2d0_debug_out_on_group"; + function = "vc_g2d0_debug_out_on_func"; + }; + }; + /* used with pinctrl_die1_vc_g2d0_debug_out_on_default*/ + pinctrl_die0_vc_g2d0_debug_out_off_default: vc_g2d0_debug_out_off-default{ + mux { + groups = "vc_g2d0_debug_out_off_group"; + function = "vc_g2d0_debug_out_off_func"; + }; + }; + + //func7 + /* used with pinctrl_die1_ftm_test_out_off_default*/ + pinctrl_die0_ftm_test_out_on_default: ftm_test_out_on-default{ + mux { + groups = "ftm_test_out_on_group"; + function = "ftm_test_out_on_func"; + }; + }; + /* used with pinctrl_die1_ftm_test_out_on_default*/ + pinctrl_die0_ftm_test_out_off_default: ftm_test_out_off-default{ + mux { + groups = "ftm_test_out_off_group"; + function = "ftm_test_out_off_func"; + }; + }; + + //func0 + pinctrl_die0_sdio0_default: sdio0-default{ + mux { + groups = "sdio0_group"; + function = "sdio0_func"; + }; + }; + pinctrl_die0_sdio1_default: sdio1-default{ + mux { + groups = "sdio1_group"; + function = "sdio1_func"; + }; + }; + pinctrl_die0_jtag0_default: jtag0-default{ + mux { + groups = "jtag0_group"; + function = "jtag0_func"; + }; + }; + pinctrl_die0_pcie_default: pcie-default{ + mux{ + groups = "pcie_group"; + function = "pcie_func"; + }; + }; + pinctrl_die0_hdmi_default: hdmi-default{ + mux{ + groups = "hdmi_group"; + function = "hdmi_func"; + }; + }; + pinctrl_die0_rgmii0_default: rgmii0-default{ + mux { + groups = "rgmii0_group"; + function = "rgmii0_func"; + }; + }; + pinctrl_die0_rgmii1_default: rgmii1-default{ + mux{ + groups = "rgmii1_group"; + function = "rgmii1_func"; + }; + }; + pinctrl_die0_spi0_default: spi0-default{ + mux { + groups = "spi0_group"; + function = "spi0_func"; + }; + }; + pinctrl_die0_spi1_default: spi1-default{ + mux{ + groups = "spi1_group"; + function = "spi1_func"; + }; + }; + pinctrl_die0_spi3_default: spi3-default{ + mux { + groups = "spi3_group"; + function = "spi3_func"; + }; + }; + pinctrl_die0_por_time_sel0_default: por_time_sel0-default{ + mux { + groups = "por_time_sel0_group"; + function = "por_time_sel0_func"; + }; + }; + pinctrl_die0_por_time_sel1_default: por_time_sel1-default{ + mux { + groups = "por_time_sel1_group"; + function = "por_time_sel1_func"; + }; + }; + pinctrl_die0_i2s0_default: i2s0-default{ + mux { + groups = "i2s0_group"; + function = "i2s0_func"; + }; + }; + pinctrl_die0_i2s1_default: i2s1-default{ + mux { + groups = "i2s1_group"; + function = "i2s1_func"; + }; + }; + pinctrl_die0_i2s2_default: i2s2-default{ + mux { + groups = "i2s2_group"; + function = "i2s2_func"; + }; + }; + pinctrl_die0_usb0_pwren_default: usb0_pwren-default{ + mux { + groups = "usb0_pwren_group"; + function = "usb0_pwren_func"; + }; + }; + pinctrl_die0_usb1_pwren_default: usb1_pwren-default{ + mux { + groups = 
"usb1_pwren_group"; + function = "usb1_pwren_func"; + }; + }; + pinctrl_die0_i2c0_default: i2c0-default{ + mux { + groups = "i2c0_group"; + function = "i2c0_func"; + }; + }; + pinctrl_die0_i2c1_default: i2c1-default{ + mux { + groups = "i2c1_group"; + function = "i2c1_func"; + }; + }; + pinctrl_die0_i2c2_default: i2c2-default{ + mux { + groups = "i2c2_group"; + function = "i2c2_func"; + }; + }; + pinctrl_die0_i2c3_default: i2c3-default{ + mux { + groups = "i2c3_group"; + function = "i2c3_func"; + }; + }; + pinctrl_die0_i2c4_default: i2c4-default{ + mux { + groups = "i2c4_group"; + function = "i2c4_func"; + }; + }; + pinctrl_die0_i2c5_default: i2c5-default{ + mux { + groups = "i2c5_group"; + function = "i2c5_func"; + }; + }; + pinctrl_die0_i2c8_default: i2c8-default{ + mux { + groups = "i2c8_group"; + function = "i2c8_func"; + }; + }; + pinctrl_die0_i2c10_default: i2c10-default{ + mux { + groups = "i2c10_group"; + function = "i2c10_func"; + }; + }; + pinctrl_die0_i2c11_default: i2c11-default{ + mux { + groups = "i2c11_group"; + function = "i2c11_func"; + }; + }; + pinctrl_die0_uart0_default: uart0-default{ + mux { + groups = "uart0_group"; + function = "uart0_func"; + }; + }; + pinctrl_die0_uart1_default: uart1-default{ + mux { + groups = "uart1_group"; + function = "uart1_func"; + }; + }; + pinctrl_die0_uart2_default: uart2-default{ + mux { + groups = "uart2_group"; + function = "uart2_func"; + }; + }; + pinctrl_die0_pwm0_default: pwm0-default{ + mux { + groups = "pwm0_group"; + function = "pwm0_func"; + }; + }; + pinctrl_die0_fan_tach_default: fan_tach-default{ + mux { + groups = "fan_tach_group"; + function = "fan_tach_func"; + }; + }; + pinctrl_die0_mipi_csi0_default: mipi_csi0-default{ + mux { + groups = "mipi_csi0_group"; + function = "mipi_csi0_func"; + }; + }; + pinctrl_die0_mipi_csi1_default: mipi_csi1-default{ + mux { + groups = "mipi_csi1_group"; + function = "mipi_csi1_func"; + }; + }; + pinctrl_die0_mipi_csi2_default: mipi_csi2-default{ + mux { + groups = "mipi_csi2_group"; + function = "mipi_csi2_func"; + }; + }; + pinctrl_die0_mipi_csi3_default: mipi_csi3-default{ + mux { + groups = "mipi_csi3_group"; + function = "mipi_csi3_func"; + }; + }; + pinctrl_die0_mipi_csi4_default: mipi_csi4-default{ + mux { + groups = "mipi_csi4_group"; + function = "mipi_csi4_func"; + }; + }; + pinctrl_die0_mipi_csi5_default: mipi_csi5-default{ + mux { + groups = "mipi_csi5_group"; + function = "mipi_csi5_func"; + }; + }; + pinctrl_die0_s_mode_default: s_mode-default{ + mux { + groups = "s_mode_group"; + function = "s_mode_func"; + }; + }; + pinctrl_die0_ddr_refclk_sel_default: ddr_refclk_sel-default{ + mux { + groups = "ddr_refclk_sel_group"; + function = "ddr_refclk_sel_func"; + }; + }; + pinctrl_die0_boot_sel_default: boot_sel-default{ + mux { + groups = "boot_sel_group"; + function = "boot_sel_func"; + }; + }; + pinctrl_die0_lpddr_ref_clk_default: lpddr_ref_clk-default{ + mux { + groups = "lpddr_ref_clk_group"; + function = "lpddr_ref_clk_func"; + }; + }; + + //func1 + pinctrl_die0_spi2_default: spi2-default{ + mux1 { + groups = "spi2_clk_group"; + function = "spi2_clk_func"; + }; + conf1 { + groups = "spi2_clk_group"; + input-enable = <1>; + bias-pull-up = <1>; + bias-pull-down = <0>; + }; + mux2 { + groups = "spi2_d0_group"; + function = "spi2_d0_func"; + }; + conf2 { + groups = "spi2_d0_group"; + input-enable = <1>; + bias-pull-up = <1>; + bias-pull-down = <0>; + }; + mux3 { + groups = "spi2_d1_d2_d3_group"; + function = "spi2_d1_d2_d3_func"; + }; + conf3 { + groups = "spi2_d1_d2_d3_group"; 
+ input-enable = <1>; + bias-pull-up = <1>; + bias-pull-down = <0>; + }; + mux4 { + groups = "spi2_cs_group"; + function = "spi2_cs_func"; + }; + }; + + pinctrl_die0_sata_act_led_default: sata_act_led-default{ + mux { + groups = "sata_act_led_group"; + function = "sata_act_led_func"; + }; + conf { + groups = "sata_act_led_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_emmc_led_control_default: emmc_led_control-default{ + mux { + groups = "emmc_led_control_group"; + function = "emmc_led_control_func"; + }; + conf { + groups = "emmc_led_control_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_sd0_led_control_default: sd0_led_control-default{ + mux { + groups = "sd0_led_control_group"; + function = "sd0_led_control_func"; + }; + }; + pinctrl_die0_sd1_led_control_default: sd1_led_control-default{ + mux { + groups = "sd1_led_control_group"; + function = "sd1_led_control_func"; + }; + }; + pinctrl_die0_i2c6_default: i2c6-default{ + mux { + groups = "i2c6_group"; + function = "i2c6_func"; + }; + }; + pinctrl_die0_i2c7_default: i2c7-default{ + mux { + groups = "i2c7_group"; + function = "i2c7_func"; + }; + }; + pinctrl_die0_i2c9_default: i2c9-default{ + mux { + groups = "i2c9_group"; + function = "i2c9_func"; + }; + }; + pinctrl_die0_pwm1_default: pwm1-default{ + mux { + groups = "pwm1_group"; + function = "pwm1_func"; + }; + }; + pinctrl_die0_pwm2_default: pwm2-default{ + mux { + groups = "pwm2_group"; + function = "pwm2_func"; + }; + }; + pinctrl_die0_mipi_csi_xtrig_default: mipi_csi_xtrig-default{ + mux { + groups = "mipi_csi_xtrig_group"; + function = "mipi_csi_xtrig_func"; + }; + }; + + //func3 + pinctrl_die0_uart3_default: uart3-default{ + mux { + groups = "uart3_group"; + function = "uart3_func"; + }; + }; + pinctrl_die0_uart4_default: uart4-default{ + mux { + groups = "uart4_group"; + function = "uart4_func"; + }; + }; + + //gpio + pinctrl_die0_gpio0_default: gpio0-default{ + mux { + groups = "gpio0_group"; + function = "gpio0_func"; + }; + conf { + groups = "gpio0_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die0_gpio1_default: gpio1-default{ + mux { + groups = "gpio1_group"; + function = "gpio1_func"; + }; + }; + pinctrl_die0_gpio2_default: gpio2-default{ + mux { + groups = "gpio2_group"; + function = "gpio2_func"; + }; + }; + pinctrl_die0_gpio3_default: gpio3-default{ + mux { + groups = "gpio3_group"; + function = "gpio3_func"; + }; + }; + pinctrl_die0_gpio4_default: gpio4-default{ + mux { + groups = "gpio4_group"; + function = "gpio4_func"; + }; + }; + pinctrl_die0_gpio5_default: gpio5-default{ + mux { + groups = "gpio5_group"; + function = "gpio5_func"; + }; + conf { + groups = "gpio5_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die0_gpio6_default: gpio6-default{ + mux { + groups = "gpio6_group"; + function = "gpio6_func"; + }; + conf { + groups = "gpio6_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio10_default: gpio10-default{ + mux { + groups = "gpio10_group"; + function = "gpio10_func"; + }; + conf { + groups = "gpio10_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die0_gpio11_default: gpio11-default{ + mux { + groups = "gpio11_group"; + function = "gpio11_func"; + }; + conf { + groups = "gpio11_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die0_gpio12_default: gpio12-default{ + mux { + groups = "gpio12_group"; + function = "gpio12_func"; + }; + }; + pinctrl_die0_gpio13_default: 
gpio13-default{ + mux { + groups = "gpio13_group"; + function = "gpio13_func"; + }; + }; + pinctrl_die0_gpio14_default: gpio14-default{ + mux { + groups = "gpio14_group"; + function = "gpio14_func"; + }; + }; + pinctrl_die0_gpio15_default: gpio15-default{ + mux { + groups = "gpio15_group"; + function = "gpio15_func"; + }; + }; + pinctrl_die0_gpio16_default: gpio16-default{ + mux { + groups = "gpio16_group"; + function = "gpio16_func"; + }; + conf { + groups = "gpio16_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio18_default: gpio18-default{ + mux { + groups = "gpio18_group"; + function = "gpio18_func"; + }; + conf { + groups = "gpio18_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio19_default: gpio19-default{ + mux { + groups = "gpio19_group"; + function = "gpio19_func"; + }; + conf { + groups = "gpio19_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio20_default: gpio20-default{ + mux { + groups = "gpio20_group"; + function = "gpio20_func"; + }; + conf { + groups = "gpio20_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio21_default: gpio21-default{ + mux { + groups = "gpio21_group"; + function = "gpio21_func"; + }; + conf { + groups = "gpio21_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio22_default: gpio22-default{ + mux { + groups = "gpio22_group"; + function = "gpio22_func"; + }; + }; + pinctrl_die0_gpio23_default: gpio23-default{ + mux { + groups = "gpio23_group"; + function = "gpio23_func"; + }; + }; + pinctrl_die0_gpio24_default: gpio24-default{ + mux { + groups = "gpio24_group"; + function = "gpio24_func"; + }; + }; + pinctrl_die0_gpio25_default: gpio25-default{ + mux { + groups = "gpio25_group"; + function = "gpio25_func"; + }; + }; + pinctrl_die0_gpio26_default: gpio26-default{ + mux { + groups = "gpio26_group"; + function = "gpio26_func"; + }; + }; + pinctrl_die0_gpio27_default: gpio27-default{ + mux { + groups = "gpio27_group"; + function = "gpio27_func"; + }; + }; + pinctrl_die0_gpio28_default: gpio28-default{ + mux { + groups = "gpio28_group"; + function = "gpio28_func"; + }; + conf { + groups = "gpio28_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die0_gpio29_default: gpio29-default{ + mux { + groups = "gpio29_group"; + function = "gpio29_func"; + }; + }; + pinctrl_die0_gpio30_default: gpio30-default{ + mux { + groups = "gpio30_group"; + function = "gpio30_func"; + }; + }; + pinctrl_die0_gpio31_default: gpio31-default{ + mux { + groups = "gpio31_group"; + function = "gpio31_func"; + }; + }; + pinctrl_die0_gpio32_default: gpio32-default{ + mux { + groups = "gpio32_group"; + function = "gpio32_func"; + }; + }; + pinctrl_die0_gpio33_default: gpio33-default{ + mux { + groups = "gpio33_group"; + function = "gpio33_func"; + }; + }; + pinctrl_die0_gpio34_default: gpio34-default{ + mux { + groups = "gpio34_group"; + function = "gpio34_func"; + }; + }; + pinctrl_die0_gpio35_default: gpio35-default{ + mux { + groups = "gpio35_group"; + function = "gpio35_func"; + }; + conf { + groups = "gpio35_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio36_default: gpio36-default{ + mux { + groups = "gpio36_group"; + function = "gpio36_func"; + }; + conf { + groups = "gpio36_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio37_default: gpio37-default{ + mux { + groups = "gpio37_group"; + function = "gpio37_func"; + }; + conf { + 
groups = "gpio37_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio38_default: gpio38-default{ + mux { + groups = "gpio38_group"; + function = "gpio38_func"; + }; + conf { + groups = "gpio38_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio39_default: gpio39-default{ + mux { + groups = "gpio39_group"; + function = "gpio39_func"; + }; + conf { + groups = "gpio39_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio40_default: gpio40-default{ + mux { + groups = "gpio40_group"; + function = "gpio40_func"; + }; + conf { + groups = "gpio40_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio41_default: gpio41-default{ + mux { + groups = "gpio41_group"; + function = "gpio41_func"; + }; + }; + pinctrl_die0_gpio42_default: gpio42-default{ + mux { + groups = "gpio42_group"; + function = "gpio42_func"; + }; + conf { + groups = "gpio42_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio43_default: gpio43-default{ + mux { + groups = "gpio43_group"; + function = "gpio43_func"; + }; + }; + pinctrl_die0_gpio44_default: gpio44-default{ + mux { + groups = "gpio44_group"; + function = "gpio44_func"; + }; + }; + pinctrl_die0_gpio45_default: gpio45-default{ + mux { + groups = "gpio45_group"; + function = "gpio45_func"; + }; + }; + pinctrl_die0_gpio46_default: gpio46-default{ + mux { + groups = "gpio46_group"; + function = "gpio46_func"; + }; + conf { + groups = "gpio46_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio47_default: gpio47-default{ + mux { + groups = "gpio47_group"; + function = "gpio47_func"; + }; + conf { + groups = "gpio47_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio48_default: gpio48-default{ + mux { + groups = "gpio48_group"; + function = "gpio48_func"; + }; + }; + pinctrl_die0_gpio49_default: gpio49-default{ + mux { + groups = "gpio49_group"; + function = "gpio49_func"; + }; + }; + pinctrl_die0_gpio50_default: gpio50-default{ + mux { + groups = "gpio50_group"; + function = "gpio50_func"; + }; + }; + pinctrl_die0_gpio51_default: gpio51-default{ + mux { + groups = "gpio51_group"; + function = "gpio51_func"; + }; + }; + pinctrl_die0_gpio52_default: gpio52-default{ + mux { + groups = "gpio52_group"; + function = "gpio52_func"; + }; + conf { + groups = "gpio52_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio53_default: gpio53-default{ + mux { + groups = "gpio53_group"; + function = "gpio53_func"; + }; + conf { + groups = "gpio53_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio54_default: gpio54-default{ + mux { + groups = "gpio54_group"; + function = "gpio54_func"; + }; + }; + pinctrl_die0_gpio55_default: gpio55-default{ + mux { + groups = "gpio55_group"; + function = "gpio55_func"; + }; + }; + pinctrl_die0_gpio56_default: gpio56-default{ + mux { + groups = "gpio56_group"; + function = "gpio56_func"; + }; + }; + pinctrl_die0_gpio57_default: gpio57-default{ + mux { + groups = "gpio57_group"; + function = "gpio57_func"; + }; + }; + pinctrl_die0_gpio58_default: gpio58-default{ + mux { + groups = "gpio58_group"; + function = "gpio58_func"; + }; + }; + pinctrl_die0_gpio59_default: gpio59-default{ + mux { + groups = "gpio59_group"; + function = "gpio59_func"; + }; + }; + pinctrl_die0_gpio60_default: gpio60-default{ + mux { + groups = "gpio60_group"; + function = "gpio60_func"; + }; + }; + 
pinctrl_die0_gpio61_default: gpio61-default{ + mux { + groups = "gpio61_group"; + function = "gpio61_func"; + }; + }; + pinctrl_die0_gpio62_default: gpio62-default{ + mux { + groups = "gpio62_group"; + function = "gpio62_func"; + }; + }; + pinctrl_die0_gpio63_default: gpio63-default{ + mux { + groups = "gpio63_group"; + function = "gpio63_func"; + }; + }; + pinctrl_die0_gpio67_default: gpio67-default{ + mux { + groups = "gpio67_group"; + function = "gpio67_func"; + }; + conf { + groups = "gpio67_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio68_default: gpio68-default{ + mux { + groups = "gpio68_group"; + function = "gpio68_func"; + }; + }; + pinctrl_die0_gpio69_default: gpio69-default{ + mux { + groups = "gpio69_group"; + function = "gpio69_func"; + }; + }; + pinctrl_die0_gpio70_default: gpio70-default{ + mux { + groups = "gpio70_group"; + function = "gpio70_func"; + }; + }; + pinctrl_die0_gpio71_default: gpio71-default{ + mux { + groups = "gpio71_group"; + function = "gpio71_func"; + }; + }; + pinctrl_die0_gpio72_default: gpio72-default{ + mux { + groups = "gpio72_group"; + function = "gpio72_func"; + }; + }; + pinctrl_die0_gpio73_default: gpio73-default{ + mux { + groups = "gpio73_group"; + function = "gpio73_func"; + }; + }; + pinctrl_die0_gpio74_default: gpio74-default{ + mux { + groups = "gpio74_group"; + function = "gpio74_func"; + }; + }; + pinctrl_die0_gpio75_default: gpio75-default{ + mux { + groups = "gpio75_group"; + function = "gpio75_func"; + }; + }; + pinctrl_die0_gpio76_default: gpio76-default{ + mux { + groups = "gpio76_group"; + function = "gpio76_func"; + }; + }; + pinctrl_die0_gpio77_default: gpio77-default{ + mux { + groups = "gpio77_group"; + function = "gpio77_func"; + }; + }; + pinctrl_die0_gpio78_default: gpio78-default{ + mux { + groups = "gpio78_group"; + function = "gpio78_func"; + }; + }; + pinctrl_die0_gpio79_default: gpio79-default{ + mux { + groups = "gpio79_group"; + function = "gpio79_func"; + }; + }; + pinctrl_die0_gpio80_default: gpio80-default{ + mux { + groups = "gpio80_group"; + function = "gpio80_func"; + }; + }; + pinctrl_die0_gpio81_default: gpio81-default{ + mux { + groups = "gpio81_group"; + function = "gpio81_func"; + }; + }; + pinctrl_die0_gpio88_default: gpio88-default{ + mux { + groups = "gpio88_group"; + function = "gpio88_func"; + }; + }; + pinctrl_die0_gpio89_default: gpio89-default{ + mux { + groups = "gpio89_group"; + function = "gpio89_func"; + }; + }; + pinctrl_die0_gpio90_default: gpio90-default{ + mux { + groups = "gpio90_group"; + function = "gpio90_func"; + }; + }; + pinctrl_die0_gpio91_default: gpio91-default{ + mux { + groups = "gpio91_group"; + function = "gpio91_func"; + }; + }; + pinctrl_die0_gpio92_default: gpio92-default{ + mux { + groups = "gpio92_group"; + function = "gpio92_func"; + }; + conf { + groups = "gpio92_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio93_default: gpio93-default{ + mux { + groups = "gpio93_group"; + function = "gpio93_func"; + }; + conf { + groups = "gpio93_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio94_default: gpio94-default{ + mux { + groups = "gpio94_group"; + function = "gpio94_func"; + }; + conf { + groups = "gpio94_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio95_default: gpio95-default{ + mux { + groups = "gpio95_group"; + function = "gpio95_func"; + }; + }; + pinctrl_die0_gpio96_default: gpio96-default{ + mux { + groups = "gpio96_group"; + 
function = "gpio96_func"; + }; + }; + pinctrl_die0_gpio97_default: gpio97-default{ + mux { + groups = "gpio97_group"; + function = "gpio97_func"; + }; + }; + pinctrl_die0_gpio98_default: gpio98-default{ + mux { + groups = "gpio98_group"; + function = "gpio98_func"; + }; + }; + pinctrl_die0_gpio99_default: gpio99-default{ + mux { + groups = "gpio99_group"; + function = "gpio99_func"; + }; + }; + pinctrl_die0_gpio100_default: gpio100-default{ + mux { + groups = "gpio100_group"; + function = "gpio100_func"; + }; + }; + pinctrl_die0_gpio101_default: gpio101-default{ + mux { + groups = "gpio101_group"; + function = "gpio101_func"; + }; + }; + pinctrl_die0_gpio102_default: gpio102-default{ + mux { + groups = "gpio102_group"; + function = "gpio102_func"; + }; + }; + pinctrl_die0_gpio103_default: gpio103-default{ + mux { + groups = "gpio103_group"; + function = "gpio103_func"; + }; + }; + pinctrl_die0_gpio104_default: gpio104-default{ + mux { + groups = "gpio104_group"; + function = "gpio104_func"; + }; + }; + pinctrl_die0_gpio105_default: gpio105-default{ + mux { + groups = "gpio105_group"; + function = "gpio105_func"; + }; + }; + pinctrl_die0_gpio106_default: gpio106-default{ + mux { + groups = "gpio106_group"; + function = "gpio106_func"; + }; + conf { + groups = "gpio106_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio107_default: gpio107-default{ + mux { + groups = "gpio107_group"; + function = "gpio107_func"; + }; + conf { + groups = "gpio107_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio108_default: gpio108-default{ + mux { + groups = "gpio108_group"; + function = "gpio108_func"; + }; + conf { + groups = "gpio108_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio109_default: gpio109-default{ + mux { + groups = "gpio109_group"; + function = "gpio109_func"; + }; + conf { + groups = "gpio109_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio110_default: gpio110-default{ + mux { + groups = "gpio110_group"; + function = "gpio110_func"; + }; + conf { + groups = "gpio110_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die0_gpio111_default: gpio111-default{ + mux { + groups = "gpio111_group"; + function = "gpio111_func"; + }; + conf { + groups = "gpio111_group"; + input-enable = <0>; + bias-pull-up = <1>; + }; + }; + + //func6 + pinctrl_die0_csi_mon_out_default: csi_mon_out-default{ + mux { + groups = "csi_mon_out_group"; + function = "csi_mon_out_func"; + }; + }; + pinctrl_die0_csi_ocla_clk_default: csi_ocla_clk-default{ + mux { + groups = "csi_ocla_clk_group"; + function = "csi_ocla_clk_func"; + }; + }; + pinctrl_die0_csi_mon_out_valid_default: csi_mon_out_valid-default{ + mux { + groups = "csi_mon_out_valid_group"; + function = "csi_mon_out_valid_func"; + }; + }; + pinctrl_die0_csi_parity_error_default: csi_parity_error-default{ + mux { + groups = "csi_parity_error_group"; + function = "csi_parity_error_func"; + }; + }; + pinctrl_die0_csi_dtb_out_default: csi_dtb_out-default{ + mux { + groups = "csi_dtb_out_group"; + function = "csi_dtb_out_func"; + }; + }; + pinctrl_die0_csi_phy_sel_default: csi_phy_sel-default{ + mux { + groups = "csi_phy_sel_group"; + function = "csi_phy_sel_func"; + }; + }; + pinctrl_die0_vc_g2d0_debug_out_default: vc_g2d0_debug_out-default{ + mux { + groups = "vc_g2d0_debug_out_group"; + function = "vc_g2d0_debug_out_func"; + }; + }; + pinctrl_die0_vc_g2d1_debug_out_default: vc_g2d1_debug_out-default{ + mux { + 
groups = "vc_g2d1_debug_out_group"; + function = "vc_g2d1_debug_out_func"; + }; + }; + pinctrl_die0_sata_mpll_clk_default: sata_mpll_clk-default{ + mux { + groups = "sata_mpll_clk_group"; + function = "sata_mpll_clk_func"; + }; + }; + pinctrl_die0_sata_ref_repeat_clk_m_default: sata_ref_repeat_clk_m-default{ + mux { + groups = "sata_ref_repeat_clk_m_group"; + function = "sata_ref_repeat_clk_m_func"; + }; + }; + pinctrl_die0_sata_ref_repeat_clk_p_default: sata_ref_repeat_clk_p-default{ + mux { + groups = "sata_ref_repeat_clk_p_group"; + function = "sata_ref_repeat_clk_p_func"; + }; + }; + + //func7 + pinctrl_die0_clk_d2d_test_out_default: clk_d2d_test_out-default{ + mux { + groups = "clk_d2d_test_out_group"; + function = "clk_d2d_test_out_func"; + }; + }; + pinctrl_die0_clk_spll0_test_out_default: clk_spll0_test_out-default{ + mux { + groups = "clk_spll0_test_out_group"; + function = "clk_spll0_test_out_func"; + }; + }; + }; + }; +}; diff --git a/arch/riscv/boot/dts/eswin/eic7x-die1-pinctrl.dtsi b/arch/riscv/boot/dts/eswin/eic7x-die1-pinctrl.dtsi new file mode 100644 index 000000000000..fcb3d855c105 --- /dev/null +++ b/arch/riscv/boot/dts/eswin/eic7x-die1-pinctrl.dtsi @@ -0,0 +1,1452 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Include file for pin control of Eswin EIC770x family SoC. + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * SPDX-License-Identifier: GPL-2.0 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include "eswin-win2030-die1-soc.dtsi" +#include + +/ { + #address-cells = <2>; + #size-cells = <2>; + soc { + d1_pinctrl: pinctrl@0x71600080 { + compatible = "eswin,eic7x-pinctrl", "syscon"; + reg = <0x0 0x71600080 0x0 0x1FFF80>; + status = "disabled"; + + //Dual Die Configuration + //func0 + /* used with pinctrl_die0_jtag1_off_default*/ + pinctrl_die1_jtag1_on_default: jtag1_on-default{ + mux { + groups = "jtag1_on_group"; + function = "jtag1_on_func"; + }; + }; + /* used with pinctrl_die0_jtag1_on_default*/ + pinctrl_die1_jtag1_off_default: jtag1_off-default{ + mux { + groups = "jtag1_off_group"; + function = "jtag1_off_func"; + }; + }; + + /* used with pinctrl_die0_jtag2_off_default*/ + pinctrl_die1_jtag2_on_default: jtag2_on-default{ + mux { + groups = "jtag2_on_group"; + function = "jtag2_on_func"; + }; + }; + /* used with pinctrl_die0_jtag2_on_default*/ + pinctrl_die1_jtag2_off_default: jtag2_off-default{ + mux { + groups = "jtag2_off_group"; + function = "jtag2_off_func"; + }; + }; + + //func2 + /* used with pinctrl_die0_gpio7_off_default*/ + pinctrl_die1_gpio7_on_default: gpio7_on-default{ + mux { + groups = "gpio7_on_group"; + function = "gpio7_on_func"; + }; + conf { + groups = "gpio7_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die0_gpio7_on_default*/ + pinctrl_die1_gpio7_off_default: gpio7_off-default{ + mux { + groups = "gpio7_off_group"; + function = "gpio7_off_func"; + }; + }; + + /* used with pinctrl_die0_gpio8_off_default*/ + pinctrl_die1_gpio8_on_default: gpio8_on-default{ + mux { + groups = "gpio8_on_group"; + function = "gpio8_on_func"; + }; + conf { + groups = "gpio8_on_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + /* used with pinctrl_die0_gpio8_on_default*/ + pinctrl_die1_gpio8_off_default: gpio8_off-default{ + mux { + groups = "gpio8_off_group"; + function = "gpio8_off_func"; + }; + }; + + /* used with pinctrl_die0_gpio9_off_default*/ + pinctrl_die1_gpio9_on_default: gpio9_on-default{ + mux { + groups = "gpio9_on_group"; + function = "gpio9_on_func"; + }; + conf { + groups = "gpio9_on_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + /* used with pinctrl_die0_gpio9_on_default*/ + pinctrl_die1_gpio9_off_default: gpio9_off-default{ + mux { + groups = "gpio9_off_group"; + function = "gpio9_off_func"; + }; + }; + + /* used with pinctrl_die0_gpio17_off_default*/ + pinctrl_die1_gpio17_on_default: gpio17_on-default{ + mux { + groups = "gpio17_on_group"; + function = "gpio17_on_func"; + }; + conf { + groups = "gpio17_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die0_gpio17_on_default*/ + pinctrl_die1_gpio17_off_default: gpio17_off-default{ + mux { + groups = "gpio17_off_group"; + function = "gpio17_off_func"; + }; + }; + + /* used with pinctrl_die0_gpio64_off_default*/ + pinctrl_die1_gpio64_on_default: gpio64_on-default{ + mux { + groups = "gpio64_on_group"; + function = "gpio64_on_func"; + }; + conf { + groups = "gpio64_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die0_gpio64_on_default*/ + pinctrl_die1_gpio64_off_default: gpio64_off-default{ + mux { + groups = "gpio64_off_group"; + function = "gpio64_off_func"; + }; + }; + + /* used with pinctrl_die0_gpio65_off_default*/ + pinctrl_die1_gpio65_on_default: gpio65_on-default{ + mux { + groups = "gpio65_on_group"; + function = "gpio65_on_func"; + }; + conf { + groups = "gpio65_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used 
with pinctrl_die0_gpio65_on_default*/ + pinctrl_die1_gpio65_off_default: gpio65_off-default{ + mux { + groups = "gpio65_off_group"; + function = "gpio65_off_func"; + }; + }; + + /* used with pinctrl_die0_gpio66_off_default*/ + pinctrl_die1_gpio66_on_default: gpio66_on-default{ + mux { + groups = "gpio66_on_group"; + function = "gpio66_on_func"; + }; + conf { + groups = "gpio66_on_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + /* used with pinctrl_die0_gpio66_on_default*/ + pinctrl_die1_gpio66_off_default: gpio66_off-default{ + mux { + groups = "gpio66_off_group"; + function = "gpio66_off_func"; + }; + }; + + //func6 + /* used with pinctrl_die0_vc_g2d0_debug_out_off_default*/ + pinctrl_die1_vc_g2d0_debug_out_on_default: vc_g2d0_debug_out_on-default{ + mux { + groups = "vc_g2d0_debug_out_on_group"; + function = "vc_g2d0_debug_out_on_func"; + }; + }; + /* used with pinctrl_die0_vc_g2d0_debug_out_on_default*/ + pinctrl_die1_vc_g2d0_debug_out_off_default: vc_g2d0_debug_out_off-default{ + mux { + groups = "vc_g2d0_debug_out_off_group"; + function = "vc_g2d0_debug_out_off_func"; + }; + }; + + //func7 + /* used with pinctrl_die0_ftm_test_out_off_default*/ + pinctrl_die1_ftm_test_out_on_default: ftm_test_out_on-default{ + mux { + groups = "ftm_test_out_on_group"; + function = "ftm_test_out_on_func"; + }; + }; + /* used with pinctrl_die0_ftm_test_out_on_default*/ + pinctrl_die1_ftm_test_out_off_default: ftm_test_out_off-default{ + mux { + groups = "ftm_test_out_off_group"; + function = "ftm_test_out_off_func"; + }; + }; + + //func0 + pinctrl_die1_sdio0_default: sdio0-default{ + mux { + groups = "sdio0_group"; + function = "sdio0_func"; + }; + }; + pinctrl_die1_sdio1_default: sdio1-default{ + mux { + groups = "sdio1_group"; + function = "sdio1_func"; + }; + }; + pinctrl_die1_jtag0_default: jtag0-default{ + mux { + groups = "jtag0_group"; + function = "jtag0_func"; + }; + }; + pinctrl_die1_pcie_default: pcie-default{ + mux{ + groups = "pcie_group"; + function = "pcie_func"; + }; + }; + pinctrl_die1_hdmi_default: hdmi-default{ + mux{ + groups = "hdmi_group"; + function = "hdmi_func"; + }; + }; + pinctrl_die1_rgmii0_default: rgmii0-default{ + mux { + groups = "rgmii0_group"; + function = "rgmii0_func"; + }; + }; + pinctrl_die1_rgmii1_default: rgmii1-default{ + mux{ + groups = "rgmii1_group"; + function = "rgmii1_func"; + }; + }; + pinctrl_die1_spi0_default: spi0-default{ + mux { + groups = "spi0_group"; + function = "spi0_func"; + }; + }; + pinctrl_die1_spi1_default: spi1-default{ + mux{ + groups = "spi1_group"; + function = "spi1_func"; + }; + }; + pinctrl_die1_spi3_default: spi3-default{ + mux { + groups = "spi3_group"; + function = "spi3_func"; + }; + }; + pinctrl_die1_por_time_sel0_default: por_time_sel0-default{ + mux { + groups = "por_time_sel0_group"; + function = "por_time_sel0_func"; + }; + }; + pinctrl_die1_por_time_sel1_default: por_time_sel1-default{ + mux { + groups = "por_time_sel1_group"; + function = "por_time_sel1_func"; + }; + }; + pinctrl_die1_i2s0_default: i2s0-default{ + mux { + groups = "i2s0_group"; + function = "i2s0_func"; + }; + }; + pinctrl_die1_i2s1_default: i2s1-default{ + mux { + groups = "i2s1_group"; + function = "i2s1_func"; + }; + }; + pinctrl_die1_i2s2_default: i2s2-default{ + mux { + groups = "i2s2_group"; + function = "i2s2_func"; + }; + }; + pinctrl_die1_usb0_pwren_default: usb0_pwren-default{ + mux { + groups = "usb0_pwren_group"; + function = "usb0_pwren_func"; + }; + }; + pinctrl_die1_usb1_pwren_default: usb1_pwren-default{ + mux { + groups = 
"usb1_pwren_group"; + function = "usb1_pwren_func"; + }; + }; + pinctrl_die1_i2c0_default: i2c0-default{ + mux { + groups = "i2c0_group"; + function = "i2c0_func"; + }; + }; + pinctrl_die1_i2c1_default: i2c1-default{ + mux { + groups = "i2c1_group"; + function = "i2c1_func"; + }; + }; + pinctrl_die1_i2c2_default: i2c2-default{ + mux { + groups = "i2c2_group"; + function = "i2c2_func"; + }; + }; + pinctrl_die1_i2c3_default: i2c3-default{ + mux { + groups = "i2c3_group"; + function = "i2c3_func"; + }; + }; + pinctrl_die1_i2c4_default: i2c4-default{ + mux { + groups = "i2c4_group"; + function = "i2c4_func"; + }; + }; + pinctrl_die1_i2c5_default: i2c5-default{ + mux { + groups = "i2c5_group"; + function = "i2c5_func"; + }; + }; + pinctrl_die1_i2c8_default: i2c8-default{ + mux { + groups = "i2c8_group"; + function = "i2c8_func"; + }; + }; + pinctrl_die1_i2c10_default: i2c10-default{ + mux { + groups = "i2c10_group"; + function = "i2c10_func"; + }; + }; + pinctrl_die1_i2c11_default: i2c11-default{ + mux { + groups = "i2c11_group"; + function = "i2c11_func"; + }; + }; + pinctrl_die1_uart0_default: uart0-default{ + mux { + groups = "uart0_group"; + function = "uart0_func"; + }; + }; + pinctrl_die1_uart1_default: uart1-default{ + mux { + groups = "uart1_group"; + function = "uart1_func"; + }; + }; + pinctrl_die1_uart2_default: uart2-default{ + mux { + groups = "uart2_group"; + function = "uart2_func"; + }; + }; + pinctrl_die1_pwm0_default: pwm0-default{ + mux { + groups = "pwm0_group"; + function = "pwm0_func"; + }; + }; + pinctrl_die1_fan_tach_default: fan_tach-default{ + mux { + groups = "fan_tach_group"; + function = "fan_tach_func"; + }; + }; + pinctrl_die1_mipi_csi0_default: mipi_csi0-default{ + mux { + groups = "mipi_csi0_group"; + function = "mipi_csi0_func"; + }; + }; + pinctrl_die1_mipi_csi1_default: mipi_csi1-default{ + mux { + groups = "mipi_csi1_group"; + function = "mipi_csi1_func"; + }; + }; + pinctrl_die1_mipi_csi2_default: mipi_csi2-default{ + mux { + groups = "mipi_csi2_group"; + function = "mipi_csi2_func"; + }; + }; + pinctrl_die1_mipi_csi3_default: mipi_csi3-default{ + mux { + groups = "mipi_csi3_group"; + function = "mipi_csi3_func"; + }; + }; + pinctrl_die1_mipi_csi4_default: mipi_csi4-default{ + mux { + groups = "mipi_csi4_group"; + function = "mipi_csi4_func"; + }; + }; + pinctrl_die1_mipi_csi5_default: mipi_csi5-default{ + mux { + groups = "mipi_csi5_group"; + function = "mipi_csi5_func"; + }; + }; + pinctrl_die1_s_mode_default: s_mode-default{ + mux { + groups = "s_mode_group"; + function = "s_mode_func"; + }; + }; + pinctrl_die1_ddr_refclk_sel_default: ddr_refclk_sel-default{ + mux { + groups = "ddr_refclk_sel_group"; + function = "ddr_refclk_sel_func"; + }; + }; + pinctrl_die1_boot_sel_default: boot_sel-default{ + mux { + groups = "boot_sel_group"; + function = "boot_sel_func"; + }; + }; + pinctrl_die1_lpddr_ref_clk_default: lpddr_ref_clk-default{ + mux { + groups = "lpddr_ref_clk_group"; + function = "lpddr_ref_clk_func"; + }; + }; + + //func1 + pinctrl_die1_spi2_default: spi2-default{ + mux1 { + groups = "spi2_clk_group"; + function = "spi2_clk_func"; + }; + conf1 { + groups = "spi2_clk_group"; + input-enable = <1>; + bias-pull-up = <1>; + bias-pull-down = <0>; + }; + mux2 { + groups = "spi2_d0_group"; + function = "spi2_d0_func"; + }; + conf2 { + groups = "spi2_d0_group"; + input-enable = <1>; + bias-pull-up = <1>; + bias-pull-down = <0>; + }; + mux3 { + groups = "spi2_d1_d2_d3_group"; + function = "spi2_d1_d2_d3_func"; + }; + conf3 { + groups = "spi2_d1_d2_d3_group"; 
+ input-enable = <1>; + bias-pull-up = <1>; + bias-pull-down = <0>; + }; + mux4 { + groups = "spi2_cs_group"; + function = "spi2_cs_func"; + }; + }; + + pinctrl_die1_sata_act_led_default: sata_act_led-default{ + mux { + groups = "sata_act_led_group"; + function = "sata_act_led_func"; + }; + conf { + groups = "sata_act_led_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_emmc_led_control_default: emmc_led_control-default{ + mux { + groups = "emmc_led_control_group"; + function = "emmc_led_control_func"; + }; + conf { + groups = "emmc_led_control_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_sd0_led_control_default: sd0_led_control-default{ + mux { + groups = "sd0_led_control_group"; + function = "sd0_led_control_func"; + }; + }; + pinctrl_die1_sd1_led_control_default: sd1_led_control-default{ + mux { + groups = "sd1_led_control_group"; + function = "sd1_led_control_func"; + }; + }; + pinctrl_die1_i2c6_default: i2c6-default{ + mux { + groups = "i2c6_group"; + function = "i2c6_func"; + }; + }; + pinctrl_die1_i2c7_default: i2c7-default{ + mux { + groups = "i2c7_group"; + function = "i2c7_func"; + }; + }; + pinctrl_die1_i2c9_default: i2c9-default{ + mux { + groups = "i2c9_group"; + function = "i2c9_func"; + }; + }; + pinctrl_die1_pwm1_default: pwm1-default{ + mux { + groups = "pwm1_group"; + function = "pwm1_func"; + }; + }; + pinctrl_die1_pwm2_default: pwm2-default{ + mux { + groups = "pwm2_group"; + function = "pwm2_func"; + }; + }; + pinctrl_die1_mipi_csi_xtrig_default: mipi_csi_xtrig-default{ + mux { + groups = "mipi_csi_xtrig_group"; + function = "mipi_csi_xtrig_func"; + }; + }; + + //func3 + pinctrl_die1_uart3_default: uart3-default{ + mux { + groups = "uart3_group"; + function = "uart3_func"; + }; + }; + pinctrl_die1_uart4_default: uart4-default{ + mux { + groups = "uart4_group"; + function = "uart4_func"; + }; + }; + + //gpio + pinctrl_die1_gpio0_default: gpio0-default{ + mux { + groups = "gpio0_group"; + function = "gpio0_func"; + }; + conf { + groups = "gpio0_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die1_gpio1_default: gpio1-default{ + mux { + groups = "gpio1_group"; + function = "gpio1_func"; + }; + }; + pinctrl_die1_gpio2_default: gpio2-default{ + mux { + groups = "gpio2_group"; + function = "gpio2_func"; + }; + }; + pinctrl_die1_gpio3_default: gpio3-default{ + mux { + groups = "gpio3_group"; + function = "gpio3_func"; + }; + }; + pinctrl_die1_gpio4_default: gpio4-default{ + mux { + groups = "gpio4_group"; + function = "gpio4_func"; + }; + }; + pinctrl_die1_gpio5_default: gpio5-default{ + mux { + groups = "gpio5_group"; + function = "gpio5_func"; + }; + conf { + groups = "gpio5_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die1_gpio6_default: gpio6-default{ + mux { + groups = "gpio6_group"; + function = "gpio6_func"; + }; + conf { + groups = "gpio6_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio10_default: gpio10-default{ + mux { + groups = "gpio10_group"; + function = "gpio10_func"; + }; + conf { + groups = "gpio10_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die1_gpio11_default: gpio11-default{ + mux { + groups = "gpio11_group"; + function = "gpio11_func"; + }; + conf { + groups = "gpio11_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die1_gpio12_default: gpio12-default{ + mux { + groups = "gpio12_group"; + function = "gpio12_func"; + }; + }; + pinctrl_die1_gpio13_default: 
gpio13-default{ + mux { + groups = "gpio13_group"; + function = "gpio13_func"; + }; + }; + pinctrl_die1_gpio14_default: gpio14-default{ + mux { + groups = "gpio14_group"; + function = "gpio14_func"; + }; + }; + pinctrl_die1_gpio15_default: gpio15-default{ + mux { + groups = "gpio15_group"; + function = "gpio15_func"; + }; + }; + pinctrl_die1_gpio16_default: gpio16-default{ + mux { + groups = "gpio16_group"; + function = "gpio16_func"; + }; + conf { + groups = "gpio16_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio18_default: gpio18-default{ + mux { + groups = "gpio18_group"; + function = "gpio18_func"; + }; + conf { + groups = "gpio18_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio19_default: gpio19-default{ + mux { + groups = "gpio19_group"; + function = "gpio19_func"; + }; + conf { + groups = "gpio19_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio20_default: gpio20-default{ + mux { + groups = "gpio20_group"; + function = "gpio20_func"; + }; + conf { + groups = "gpio20_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio21_default: gpio21-default{ + mux { + groups = "gpio21_group"; + function = "gpio21_func"; + }; + conf { + groups = "gpio21_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio22_default: gpio22-default{ + mux { + groups = "gpio22_group"; + function = "gpio22_func"; + }; + }; + pinctrl_die1_gpio23_default: gpio23-default{ + mux { + groups = "gpio23_group"; + function = "gpio23_func"; + }; + }; + pinctrl_die1_gpio24_default: gpio24-default{ + mux { + groups = "gpio24_group"; + function = "gpio24_func"; + }; + }; + pinctrl_die1_gpio25_default: gpio25-default{ + mux { + groups = "gpio25_group"; + function = "gpio25_func"; + }; + }; + pinctrl_die1_gpio26_default: gpio26-default{ + mux { + groups = "gpio26_group"; + function = "gpio26_func"; + }; + }; + pinctrl_die1_gpio27_default: gpio27-default{ + mux { + groups = "gpio27_group"; + function = "gpio27_func"; + }; + }; + pinctrl_die1_gpio28_default: gpio28-default{ + mux { + groups = "gpio28_group"; + function = "gpio28_func"; + }; + conf { + groups = "gpio28_group"; + input-enable = <1>; + bias-pull-up = <1>; + }; + }; + pinctrl_die1_gpio29_default: gpio29-default{ + mux { + groups = "gpio29_group"; + function = "gpio29_func"; + }; + }; + pinctrl_die1_gpio30_default: gpio30-default{ + mux { + groups = "gpio30_group"; + function = "gpio30_func"; + }; + }; + pinctrl_die1_gpio31_default: gpio31-default{ + mux { + groups = "gpio31_group"; + function = "gpio31_func"; + }; + }; + pinctrl_die1_gpio32_default: gpio32-default{ + mux { + groups = "gpio32_group"; + function = "gpio32_func"; + }; + }; + pinctrl_die1_gpio33_default: gpio33-default{ + mux { + groups = "gpio33_group"; + function = "gpio33_func"; + }; + }; + pinctrl_die1_gpio34_default: gpio34-default{ + mux { + groups = "gpio34_group"; + function = "gpio34_func"; + }; + }; + pinctrl_die1_gpio35_default: gpio35-default{ + mux { + groups = "gpio35_group"; + function = "gpio35_func"; + }; + conf { + groups = "gpio35_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio36_default: gpio36-default{ + mux { + groups = "gpio36_group"; + function = "gpio36_func"; + }; + conf { + groups = "gpio36_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio37_default: gpio37-default{ + mux { + groups = "gpio37_group"; + function = "gpio37_func"; + }; + conf { + 
groups = "gpio37_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio38_default: gpio38-default{ + mux { + groups = "gpio38_group"; + function = "gpio38_func"; + }; + conf { + groups = "gpio38_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio39_default: gpio39-default{ + mux { + groups = "gpio39_group"; + function = "gpio39_func"; + }; + conf { + groups = "gpio39_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio40_default: gpio40-default{ + mux { + groups = "gpio40_group"; + function = "gpio40_func"; + }; + conf { + groups = "gpio40_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio41_default: gpio41-default{ + mux { + groups = "gpio41_group"; + function = "gpio41_func"; + }; + }; + pinctrl_die1_gpio42_default: gpio42-default{ + mux { + groups = "gpio42_group"; + function = "gpio42_func"; + }; + conf { + groups = "gpio42_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio43_default: gpio43-default{ + mux { + groups = "gpio43_group"; + function = "gpio43_func"; + }; + }; + pinctrl_die1_gpio44_default: gpio44-default{ + mux { + groups = "gpio44_group"; + function = "gpio44_func"; + }; + }; + pinctrl_die1_gpio45_default: gpio45-default{ + mux { + groups = "gpio45_group"; + function = "gpio45_func"; + }; + }; + pinctrl_die1_gpio46_default: gpio46-default{ + mux { + groups = "gpio46_group"; + function = "gpio46_func"; + }; + conf { + groups = "gpio46_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio47_default: gpio47-default{ + mux { + groups = "gpio47_group"; + function = "gpio47_func"; + }; + conf { + groups = "gpio47_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio48_default: gpio48-default{ + mux { + groups = "gpio48_group"; + function = "gpio48_func"; + }; + }; + pinctrl_die1_gpio49_default: gpio49-default{ + mux { + groups = "gpio49_group"; + function = "gpio49_func"; + }; + }; + pinctrl_die1_gpio50_default: gpio50-default{ + mux { + groups = "gpio50_group"; + function = "gpio50_func"; + }; + }; + pinctrl_die1_gpio51_default: gpio51-default{ + mux { + groups = "gpio51_group"; + function = "gpio51_func"; + }; + }; + pinctrl_die1_gpio52_default: gpio52-default{ + mux { + groups = "gpio52_group"; + function = "gpio52_func"; + }; + conf { + groups = "gpio52_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio53_default: gpio53-default{ + mux { + groups = "gpio53_group"; + function = "gpio53_func"; + }; + conf { + groups = "gpio53_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio54_default: gpio54-default{ + mux { + groups = "gpio54_group"; + function = "gpio54_func"; + }; + }; + pinctrl_die1_gpio55_default: gpio55-default{ + mux { + groups = "gpio55_group"; + function = "gpio55_func"; + }; + }; + pinctrl_die1_gpio56_default: gpio56-default{ + mux { + groups = "gpio56_group"; + function = "gpio56_func"; + }; + }; + pinctrl_die1_gpio57_default: gpio57-default{ + mux { + groups = "gpio57_group"; + function = "gpio57_func"; + }; + }; + pinctrl_die1_gpio58_default: gpio58-default{ + mux { + groups = "gpio58_group"; + function = "gpio58_func"; + }; + }; + pinctrl_die1_gpio59_default: gpio59-default{ + mux { + groups = "gpio59_group"; + function = "gpio59_func"; + }; + }; + pinctrl_die1_gpio60_default: gpio60-default{ + mux { + groups = "gpio60_group"; + function = "gpio60_func"; + }; + }; + 
pinctrl_die1_gpio61_default: gpio61-default{ + mux { + groups = "gpio61_group"; + function = "gpio61_func"; + }; + }; + pinctrl_die1_gpio62_default: gpio62-default{ + mux { + groups = "gpio62_group"; + function = "gpio62_func"; + }; + }; + pinctrl_die1_gpio63_default: gpio63-default{ + mux { + groups = "gpio63_group"; + function = "gpio63_func"; + }; + }; + pinctrl_die1_gpio67_default: gpio67-default{ + mux { + groups = "gpio67_group"; + function = "gpio67_func"; + }; + conf { + groups = "gpio67_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio68_default: gpio68-default{ + mux { + groups = "gpio68_group"; + function = "gpio68_func"; + }; + conf { + groups = "gpio68_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio69_default: gpio69-default{ + mux { + groups = "gpio69_group"; + function = "gpio69_func"; + }; + }; + pinctrl_die1_gpio70_default: gpio70-default{ + mux { + groups = "gpio70_group"; + function = "gpio70_func"; + }; + }; + pinctrl_die1_gpio71_default: gpio71-default{ + mux { + groups = "gpio71_group"; + function = "gpio71_func"; + }; + }; + pinctrl_die1_gpio72_default: gpio72-default{ + mux { + groups = "gpio72_group"; + function = "gpio72_func"; + }; + }; + pinctrl_die1_gpio73_default: gpio73-default{ + mux { + groups = "gpio73_group"; + function = "gpio73_func"; + }; + }; + pinctrl_die1_gpio74_default: gpio74-default{ + mux { + groups = "gpio74_group"; + function = "gpio74_func"; + }; + }; + pinctrl_die1_gpio75_default: gpio75-default{ + mux { + groups = "gpio75_group"; + function = "gpio75_func"; + }; + }; + pinctrl_die1_gpio76_default: gpio76-default{ + mux { + groups = "gpio76_group"; + function = "gpio76_func"; + }; + }; + pinctrl_die1_gpio77_default: gpio77-default{ + mux { + groups = "gpio77_group"; + function = "gpio77_func"; + }; + }; + pinctrl_die1_gpio78_default: gpio78-default{ + mux { + groups = "gpio78_group"; + function = "gpio78_func"; + }; + }; + pinctrl_die1_gpio79_default: gpio79-default{ + mux { + groups = "gpio79_group"; + function = "gpio79_func"; + }; + }; + pinctrl_die1_gpio80_default: gpio80-default{ + mux { + groups = "gpio80_group"; + function = "gpio80_func"; + }; + }; + pinctrl_die1_gpio81_default: gpio81-default{ + mux { + groups = "gpio81_group"; + function = "gpio81_func"; + }; + }; + pinctrl_die1_gpio88_default: gpio88-default{ + mux { + groups = "gpio88_group"; + function = "gpio88_func"; + }; + }; + pinctrl_die1_gpio89_default: gpio89-default{ + mux { + groups = "gpio89_group"; + function = "gpio89_func"; + }; + }; + pinctrl_die1_gpio90_default: gpio90-default{ + mux { + groups = "gpio90_group"; + function = "gpio90_func"; + }; + }; + pinctrl_die1_gpio91_default: gpio91-default{ + mux { + groups = "gpio91_group"; + function = "gpio91_func"; + }; + }; + pinctrl_die1_gpio92_default: gpio92-default{ + mux { + groups = "gpio92_group"; + function = "gpio92_func"; + }; + conf { + groups = "gpio92_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio93_default: gpio93-default{ + mux { + groups = "gpio93_group"; + function = "gpio93_func"; + }; + conf { + groups = "gpio93_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio94_default: gpio94-default{ + mux { + groups = "gpio94_group"; + function = "gpio94_func"; + }; + conf { + groups = "gpio94_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio95_default: gpio95-default{ + mux { + groups = "gpio95_group"; + function = "gpio95_func"; + }; + }; + 
pinctrl_die1_gpio96_default: gpio96-default{ + mux { + groups = "gpio96_group"; + function = "gpio96_func"; + }; + }; + pinctrl_die1_gpio97_default: gpio97-default{ + mux { + groups = "gpio97_group"; + function = "gpio97_func"; + }; + }; + pinctrl_die1_gpio98_default: gpio98-default{ + mux { + groups = "gpio98_group"; + function = "gpio98_func"; + }; + }; + pinctrl_die1_gpio99_default: gpio99-default{ + mux { + groups = "gpio99_group"; + function = "gpio99_func"; + }; + }; + pinctrl_die1_gpio100_default: gpio100-default{ + mux { + groups = "gpio100_group"; + function = "gpio100_func"; + }; + }; + pinctrl_die1_gpio101_default: gpio101-default{ + mux { + groups = "gpio101_group"; + function = "gpio101_func"; + }; + }; + pinctrl_die1_gpio102_default: gpio102-default{ + mux { + groups = "gpio102_group"; + function = "gpio102_func"; + }; + }; + pinctrl_die1_gpio103_default: gpio103-default{ + mux { + groups = "gpio103_group"; + function = "gpio103_func"; + }; + }; + pinctrl_die1_gpio104_default: gpio104-default{ + mux { + groups = "gpio104_group"; + function = "gpio104_func"; + }; + }; + pinctrl_die1_gpio105_default: gpio105-default{ + mux { + groups = "gpio105_group"; + function = "gpio105_func"; + }; + }; + pinctrl_die1_gpio106_default: gpio106-default{ + mux { + groups = "gpio106_group"; + function = "gpio106_func"; + }; + conf { + groups = "gpio106_group"; + input-enable = <0>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio107_default: gpio107-default{ + mux { + groups = "gpio107_group"; + function = "gpio107_func"; + }; + conf { + groups = "gpio107_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio108_default: gpio108-default{ + mux { + groups = "gpio108_group"; + function = "gpio108_func"; + }; + conf { + groups = "gpio108_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio109_default: gpio109-default{ + mux { + groups = "gpio109_group"; + function = "gpio109_func"; + }; + conf { + groups = "gpio109_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio110_default: gpio110-default{ + mux { + groups = "gpio110_group"; + function = "gpio110_func"; + }; + conf { + groups = "gpio110_group"; + input-enable = <1>; + bias-pull-down = <1>; + }; + }; + pinctrl_die1_gpio111_default: gpio111-default{ + mux { + groups = "gpio111_group"; + function = "gpio111_func"; + }; + conf { + groups = "gpio111_group"; + input-enable = <0>; + bias-pull-up = <1>; + }; + }; + + //func6 + pinctrl_die1_csi_mon_out_default: csi_mon_out-default{ + mux { + groups = "csi_mon_out_group"; + function = "csi_mon_out_func"; + }; + }; + pinctrl_die1_csi_ocla_clk_default: csi_ocla_clk-default{ + mux { + groups = "csi_ocla_clk_group"; + function = "csi_ocla_clk_func"; + }; + }; + pinctrl_die1_csi_mon_out_valid_default: csi_mon_out_valid-default{ + mux { + groups = "csi_mon_out_valid_group"; + function = "csi_mon_out_valid_func"; + }; + }; + pinctrl_die1_csi_parity_error_default: csi_parity_error-default{ + mux { + groups = "csi_parity_error_group"; + function = "csi_parity_error_func"; + }; + }; + pinctrl_die1_csi_dtb_out_default: csi_dtb_out-default{ + mux { + groups = "csi_dtb_out_group"; + function = "csi_dtb_out_func"; + }; + }; + pinctrl_die1_csi_phy_sel_default: csi_phy_sel-default{ + mux { + groups = "csi_phy_sel_group"; + function = "csi_phy_sel_func"; + }; + }; + pinctrl_die1_vc_g2d0_debug_out_default: vc_g2d0_debug_out-default{ + mux { + groups = "vc_g2d0_debug_out_group"; + function = "vc_g2d0_debug_out_func"; + }; + 
}; + pinctrl_die1_vc_g2d1_debug_out_default: vc_g2d1_debug_out-default{ + mux { + groups = "vc_g2d1_debug_out_group"; + function = "vc_g2d1_debug_out_func"; + }; + }; + pinctrl_die1_sata_mpll_clk_default: sata_mpll_clk-default{ + mux { + groups = "sata_mpll_clk_group"; + function = "sata_mpll_clk_func"; + }; + }; + pinctrl_die1_sata_ref_repeat_clk_m_default: sata_ref_repeat_clk_m-default{ + mux { + groups = "sata_ref_repeat_clk_m_group"; + function = "sata_ref_repeat_clk_m_func"; + }; + }; + pinctrl_die1_sata_ref_repeat_clk_p_default: sata_ref_repeat_clk_p-default{ + mux { + groups = "sata_ref_repeat_clk_p_group"; + function = "sata_ref_repeat_clk_p_func"; + }; + }; + + //func7 + pinctrl_die1_clk_d2d_test_out_default: clk_d2d_test_out-default{ + mux { + groups = "clk_d2d_test_out_group"; + function = "clk_d2d_test_out_func"; + }; + }; + pinctrl_die1_clk_spll0_test_out_default: clk_spll0_test_out-default{ + mux { + groups = "clk_spll0_test_out_group"; + function = "clk_spll0_test_out_func"; + }; + }; + }; + }; +}; diff --git a/arch/riscv/boot/dts/eswin/eswin-win2030-arch-d2d.dtsi b/arch/riscv/boot/dts/eswin/eswin-win2030-arch-d2d.dtsi index 828840c3e719..c9afd60d2434 100644 --- a/arch/riscv/boot/dts/eswin/eswin-win2030-arch-d2d.dtsi +++ b/arch/riscv/boot/dts/eswin/eswin-win2030-arch-d2d.dtsi @@ -18,868 +18,383 @@ * along with this program. If not, see . */ -#include -#include "eswin-win2030-platform.dtsi" +#define CHIPLET_AND_DIE (0x2) +#include "eswin-win2030-arch.dtsi" -#define UART0_INT 100 -#define UART1_INT 101 -#define UART2_INT 102 -/ { - #address-cells = <2>; - #size-cells = <2>; - compatible = "SiFive,FU800-dev", "fu800-dev", "sifive-dev", "eic770x-dev"; - - L64: cpus { - #address-cells = <1>; - #size-cells = <0>; - timebase-frequency = ; - idle-states { - CPU_RET: cpu-retentive { - compatible = "riscv,idle-state"; - riscv,sbi-suspend-param = <0x00000000>; - entry-latency-us = <20>; - exit-latency-us = <40>; - min-residency-us = <80>; +&L64 { + cpu-map { + #if (CHIPLET_AND_DIE & 0x2) + cluster0 { + core0 { + cpu = <&cpu_0>; }; - }; - cpu-map { - #if (CHIPLET_AND_DIE & 0x2) - cluster0 { - core0 { - cpu = <&cpu_0>; - }; - core1 { - cpu = <&cpu_1>; - }; - core2 { - cpu = <&cpu_2>; - }; - core3 { - cpu = <&cpu_3>; - }; - }; - #endif - cluster1 { - core0 { - cpu = <&cpu_4>; - }; - #ifndef PLATFORM_HAPS - core1 { - cpu = <&cpu_5>; - }; - core2 { - cpu = <&cpu_6>; - }; - core3 { - cpu = <&cpu_7>; - }; - #endif + core1 { + cpu = <&cpu_1>; }; - - }; - - #if (CHIPLET_AND_DIE & 0x2) - cpu_0: cpu@0 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&L15>; - reg = <0x0>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - sifive,buserror = <&L16>; - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <0>; - clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_0>; - operating-points-v2 = 
<&d0_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu0_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - L13: pmu { - compatible = "riscv,pmu0", "riscv,pmu"; - interrupts-extended = <&cpu0_intc 13>; - }; - }; - cpu_1: cpu@1 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&L20>; - reg = <0x1>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - sifive,buserror = <&L21>; - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <0>; - clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_1>; - operating-points-v2 = <&d0_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu1_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; + core2 { + cpu = <&cpu_2>; }; - L18: pmu { - compatible = "riscv,pmu0", "riscv,pmu"; - interrupts-extended = <&cpu1_intc 13>; + core3 { + cpu = <&cpu_3>; }; }; - cpu_2: cpu@2 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&L25>; - reg = <0x2>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - sifive,buserror = <&L26>; - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <0>; - clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_2>; - operating-points-v2 = <&d0_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu2_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; + #endif + cluster1 { + core0 { + cpu = <&cpu_4>; }; - L23: pmu { - compatible = "riscv,pmu0", "riscv,pmu"; - interrupts-extended = <&cpu2_intc 13>; + #ifndef PLATFORM_HAPS + core1 { + cpu = <&cpu_5>; }; - }; - cpu_3: cpu@3 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - 
i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&L30>; - reg = <0x3>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - sifive,buserror = <&L31>; - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <0>; - clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_3>; - operating-points-v2 = <&d0_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu3_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; + core2 { + cpu = <&cpu_6>; }; - L28: pmu { - compatible = "riscv,pmu0", "riscv,pmu"; - interrupts-extended = <&cpu3_intc 13>; + core3 { + cpu = <&cpu_7>; }; - }; - #endif - - cpu_4: cpu@4 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&D2L2_0>; - #if (CHIPLET_AND_DIE == 1) - #ifdef PLATFORM_HAPS - reg = <0x1>; - #else - reg = <0x4>; - #endif - #else - reg = <0x4>; #endif - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - #if (CHIPLET_AND_DIE & 0x2) - sifive,buserror = <&L16>; - #endif - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <1>; - clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_0>; - operating-points-v2 = <&d1_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu4_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - }; - #ifndef PLATFORM_HAPS - cpu_5: cpu@5 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&D2L2_1>; - reg = <0x5>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - #if (CHIPLET_AND_DIE & 0x2) - sifive,buserror = <&L21>; - #endif - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <1>; - clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_1>; - operating-points-v2 = <&d1_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu5_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - - }; - cpu_6: cpu@6 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - 
d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&D2L2_2>; - reg = <0x6>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - #if (CHIPLET_AND_DIE & 0x2) - sifive,buserror = <&L26>; - #endif - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <1>; - clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_2>; - operating-points-v2 = <&d1_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu6_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - - }; - cpu_7: cpu@7 { - clock-frequency = <0>; - compatible = "eswin,eic770x", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <128>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - hardware-exec-breakpoint-count = <4>; - hwpf-distanceBits = <6>; - hwpf-hitCacheThrdBits = <5>; - hwpf-hitMSHRThrdBits = <4>; - hwpf-l2pfPoolSize = <10>; - hwpf-nIssQEnt = <6>; - hwpf-nPrefetchQueueEntries = <8>; - hwpf-nStreams = <16>; - hwpf-qFullnessThrdBits = <4>; - hwpf-windowBits = <6>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv48"; - next-level-cache = <&D2L2_3>; - reg = <0x7>; - riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb"; - riscv,pmpgranularity = <4096>; - riscv,pmpregions = <8>; - #if (CHIPLET_AND_DIE & 0x2) - sifive,buserror = <&L31>; - #endif - status = "okay"; - timebase-frequency = ; - tlb-split; - numa-node-id = <1>; - clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_3>; - operating-points-v2 = <&d1_cpu_opp_table>; - cpu-idle-states = <&CPU_RET>; - cpu7_intc: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - }; - #endif }; - //D1MEM: memory@80000000 { - // compatible = "sifive,axi4-mem-port", "sifive,axi4-port", "sifive,mem-port"; - // device_type = "memory"; - // reg = <0x0 0x80000000 0xf 0x80000000>; - // sifive,port-width-bytes = <32>; - //}; - //D2MEM: memory@2000000000 { - // compatible = "sifive,axi4-mem-port", "sifive,axi4-port", "sifive,mem-port"; - // device_type = "memory"; - // reg = <0x20 0x00000000 0x10 0x00000000>; - // sifive,port-width-bytes = <32>; - //}; - - SOC: soc { - #address-cells = <2>; - #size-cells = <2>; - compatible = "SiFive,FU800-soc", "fu800-soc", "sifive-soc", "simple-bus"; - ranges; - L40: authentication-controller { - compatible = "sifive,authentication0"; - sifive,auth-types = "fuse"; - }; - L51: axi4-sys-port@40000000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "sifive,axi4-sys-port", "sifive,axi4-port", "sifive,sys-port", "simple-external-bus", "simple-bus"; - ranges = <0x40000000 0x0 0x40000000 0x40000000>; - sifive,port-width-bytes = <16>; - }; - L52: axi4-sys-port@8000000000 { - #address-cells = <2>; - #size-cells = <2>; - compatible = "sifive,axi4-sys-port", "sifive,axi4-port", 
"sifive,sys-port", "simple-external-bus", "simple-bus"; - ranges = <0x80 0x0 0x80 0x0 0x180 0x0>; - sifive,port-width-bytes = <16>; - }; - L46: basic-bus-blocker@200000 { - compatible = "sifive,basic-bus-blocker1"; - reg = <0x0 0x200000 0x0 0x1000>; - reg-names = "control"; - }; - L43: basic-bus-blocker@202000 { - compatible = "sifive,basic-bus-blocker1"; - reg = <0x0 0x202000 0x0 0x1000>; - reg-names = "control"; - }; - L48: basic-bus-blocker@204000 { - compatible = "sifive,basic-bus-blocker1"; - reg = <0x0 0x204000 0x0 0x1000>; - reg-names = "control"; - }; - L54: burst-bundler@10010000 { - compatible = "sifive,burst-bundler0"; - reg = <0x0 0x10010000 0x0 0x1000>; - reg-names = "control"; - }; + cpu_4: cpu@4 { + clock-frequency = <0>; + compatible = "eswin,eic770x", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <128>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + hardware-exec-breakpoint-count = <4>; + hwpf-distanceBits = <6>; + hwpf-hitCacheThrdBits = <5>; + hwpf-hitMSHRThrdBits = <4>; + hwpf-l2pfPoolSize = <10>; + hwpf-nIssQEnt = <6>; + hwpf-nPrefetchQueueEntries = <8>; + hwpf-nStreams = <16>; + hwpf-qFullnessThrdBits = <4>; + hwpf-windowBits = <6>; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv48"; + next-level-cache = <&d1_l2_cache_0>; + #if (CHIPLET_AND_DIE == 1) + #ifdef PLATFORM_HAPS + reg = <0x1>; + #else + reg = <0x4>; + #endif + #else + reg = <0x4>; + #endif + riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; + riscv,pmpgranularity = <4096>; + riscv,pmpregions = <8>; #if (CHIPLET_AND_DIE & 0x2) - L16: bus-error-unit@1700000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic0>; - interrupts = <517>; - reg = <0x0 0x1700000 0x0 0x1000>; - reg-names = "control"; - }; - L21: bus-error-unit@1701000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic0>; - interrupts = <518>; - reg = <0x0 0x1701000 0x0 0x1000>; - reg-names = "control"; - }; - L26: bus-error-unit@1702000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic0>; - interrupts = <519>; - reg = <0x0 0x1702000 0x0 0x1000>; - reg-names = "control"; - }; - L31: bus-error-unit@1703000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic0>; - interrupts = <520>; - reg = <0x0 0x1703000 0x0 0x1000>; - reg-names = "control"; - }; + sifive,buserror = <&d1_bus_err0>; #endif - - d1_bus_err0: bus-error-unit@21700000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic1>; - interrupts = <517>; - reg = <0x0 0x21700000 0x0 0x1000>; - reg-names = "control"; - }; - d1_bus_err1: bus-error-unit@21701000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic1>; - interrupts = <518>; - reg = <0x0 0x21701000 0x0 0x1000>; - reg-names = "control"; - }; - d1_bus_err2: bus-error-unit@21702000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic1>; - interrupts = <519>; - reg = <0x0 0x21702000 0x0 0x1000>; - reg-names = "control"; - }; - d1_bus_err3: bus-error-unit@21703000 { - compatible = "sifive,buserror"; - interrupt-parent = <&plic1>; - interrupts = <520>; - reg = <0x0 0x21703000 0x0 0x1000>; - reg-names = "control"; + status = "okay"; + timebase-frequency = ; + tlb-split; + numa-node-id = <1>; + clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_0>; + operating-points-v2 = <&d1_cpu_opp_table>; + cpu-idle-states = <&CPU_RET>; + cpu4_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = 
"riscv,cpu-intc"; + interrupt-controller; }; + }; +#ifndef PLATFORM_HAPS + cpu_5: cpu@5 { + clock-frequency = <0>; + compatible = "eswin,eic770x", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <128>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + hardware-exec-breakpoint-count = <4>; + hwpf-distanceBits = <6>; + hwpf-hitCacheThrdBits = <5>; + hwpf-hitMSHRThrdBits = <4>; + hwpf-l2pfPoolSize = <10>; + hwpf-nIssQEnt = <6>; + hwpf-nPrefetchQueueEntries = <8>; + hwpf-nStreams = <16>; + hwpf-qFullnessThrdBits = <4>; + hwpf-windowBits = <6>; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv48"; + next-level-cache = <&d1_l2_cache_1>; + reg = <0x5>; + riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; + riscv,pmpgranularity = <4096>; + riscv,pmpregions = <8>; #if (CHIPLET_AND_DIE & 0x2) - D1CACHE: cache-controller@2010000 { - cache-block-size = <64>; - cache-level = <3>; - cache-sets = <4096>; - cache-size = <4194304>; - cache-unified; - compatible = "sifive,ccache1", "cache", "sifive,fu740-c000-ccache"; - interrupt-parent = <&plic0>; - interrupts = <1>, <3>, <4>, <2>; - //next-level-cache = <&L9 &L10 &L11 &D1MEM>; - next-level-cache = <&L9 &L10 &L11>; - reg = <0x0 0x2010000 0x0 0x4000 0x0 0x8000000 0x0 0x400000>; - reg-names = "control", "sideband"; - sifive,a-mshr-count = <60>; - sifive,bank-count = <4>; - sifive,ecc-granularity = <8>; - sifive,max-master-id = <13>; - sifive,perfmon-counters = <6>; - numa-node-id = <0>; - }; + sifive,buserror = <&d1_bus_err1>; #endif - - D2CACHE: cache-controller@22010000 { - cache-block-size = <64>; - cache-level = <3>; - cache-sets = <4096>; - cache-size = <4194304>; - cache-unified; - compatible = "sifive,ccache1", "cache", "sifive,fu740-c000-ccache"; - interrupt-parent = <&plic1>; - interrupts = <1>, <3>, <4>, <2>; - //next-level-cache = <&L9 &L10 &L11 &D2MEM>; - next-level-cache = <&L9 &L10 &L11>; - reg = <0x0 0x22010000 0x0 0x4000 0x0 0x8000000 0x0 0x400000>; - reg-names = "control", "sideband"; - sifive,a-mshr-count = <60>; - sifive,bank-count = <4>; - sifive,ecc-granularity = <8>; - sifive,max-master-id = <13>; - sifive,perfmon-counters = <6>; - numa-node-id = <1>; - }; - - /* - clint0: clint@2000000 { - compatible = "riscv,clint0"; - interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 &cpu1_intc 3 &cpu1_intc 7 &cpu2_intc 3 &cpu2_intc 7 &cpu3_intc 3 &cpu3_intc 7>; - reg = <0x0 0x2000000 0x0 0x10000>; - reg-names = "control"; - }; - - clint1: clint@22000000 { - compatible = "riscv,clint0"; - interrupts-extended = <&cpu4_intc 3 &cpu4_intc 7 &cpu5_intc 3 &cpu5_intc 7 &cpu6_intc 3 &cpu6_intc 7 &cpu7_intc 3 &cpu7_intc 7>; - reg = <0x0 0x2000000 0x0 0x10000>; - reg-names = "control"; + status = "okay"; + timebase-frequency = ; + tlb-split; + numa-node-id = <1>; + clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_1>; + operating-points-v2 = <&d1_cpu_opp_table>; + cpu-idle-states = <&CPU_RET>; + cpu5_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; }; - */ - L34: debug-controller@0 { - compatible = "sifive,debug-100", "riscv,debug-100"; - debug-attach = "jtag"; - reg = <0x0 0x0 0x0 0x1000>; - reg-names = "control"; - }; - L8: error-device@1000 { - compatible = "sifive,error0"; - reg = <0x0 0x1000 0x0 0x3000 0x0 0x5000 0x0 0x13000 0x0 0x19000 0x0 0xe7000 0x0 0x114000 0x0 0xec000 0x0 0x201000 0x0 0x1000 0x0 0x203000 0x0 0x1000 0x0 0x205000 0x0 
0x14fb000 0x0 0x1704000 0x0 0x8fc000 0x0 0x2014000 0x0 0x5fec000 0x0 0x8400000 0x0 0x3c00000 0x0 0x10000000 0x0 0x3000 0x0 0x10004000 0x0 0xc000 0x0 0x10011000 0x0 0x1f000 0x0 0x10034000 0x0 0x9fcc000 0x0 0x1a400000 0x0 0x5c00000>; - }; - L9: error-device@10003000 { - compatible = "sifive,error0"; - reg = <0x0 0x10003000 0x0 0x1000>; - }; - /* - L49: global-external-interrupts { - compatible = "sifive,global-external-interrupts0"; - interrupt-parent = <&plic0>; - interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515>; - }; - */ + }; + cpu_6: cpu@6 { + clock-frequency = <0>; + compatible = "eswin,eic770x", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <128>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + hardware-exec-breakpoint-count = <4>; + hwpf-distanceBits = <6>; + hwpf-hitCacheThrdBits = <5>; + hwpf-hitMSHRThrdBits = <4>; + hwpf-l2pfPoolSize = <10>; + hwpf-nIssQEnt = <6>; + hwpf-nPrefetchQueueEntries = <8>; + hwpf-nStreams = <16>; + hwpf-qFullnessThrdBits = <4>; + hwpf-windowBits = <6>; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv48"; + next-level-cache = <&d1_l2_cache_2>; + reg = <0x6>; + riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; + riscv,pmpgranularity = <4096>; + riscv,pmpregions = <8>; #if (CHIPLET_AND_DIE & 0x2) - plic0: interrupt-controller@c000000 { + sifive,buserror = <&d1_bus_err2>; + #endif + status = "okay"; + timebase-frequency = ; + tlb-split; + numa-node-id = <1>; + clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_2>; + 
operating-points-v2 = <&d1_cpu_opp_table>; + cpu-idle-states = <&CPU_RET>; + cpu6_intc: interrupt-controller { #interrupt-cells = <1>; - compatible = "sifive,plic-1.0.0"; + compatible = "riscv,cpu-intc"; interrupt-controller; - interrupts-extended = < - &cpu0_intc 0xffffffff &cpu0_intc 9 - &cpu1_intc 0xffffffff &cpu1_intc 9 - &cpu2_intc 0xffffffff &cpu2_intc 9 - &cpu3_intc 0xffffffff &cpu3_intc 9>; - reg = <0x0 0xc000000 0x0 0x4000000>; - reg-names = "control"; - riscv,max-priority = <7>; - riscv,ndev = <520>; - numa-node-id = <0>; }; - #endif - plic1: interrupt-controller@2c000000 { + }; + cpu_7: cpu@7 { + clock-frequency = <0>; + compatible = "eswin,eic770x", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <128>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + hardware-exec-breakpoint-count = <4>; + hwpf-distanceBits = <6>; + hwpf-hitCacheThrdBits = <5>; + hwpf-hitMSHRThrdBits = <4>; + hwpf-l2pfPoolSize = <10>; + hwpf-nIssQEnt = <6>; + hwpf-nPrefetchQueueEntries = <8>; + hwpf-nStreams = <16>; + hwpf-qFullnessThrdBits = <4>; + hwpf-windowBits = <6>; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv48"; + next-level-cache = <&d1_l2_cache_3>; + reg = <0x7>; + riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; + riscv,pmpgranularity = <4096>; + riscv,pmpregions = <8>; + #if (CHIPLET_AND_DIE & 0x2) + sifive,buserror = <&d1_bus_err3>; + #endif + status = "okay"; + timebase-frequency = ; + tlb-split; + numa-node-id = <1>; + clocks = <&d1_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_3>; + operating-points-v2 = <&d1_cpu_opp_table>; + cpu-idle-states = <&CPU_RET>; + cpu7_intc: interrupt-controller { #interrupt-cells = <1>; - compatible = "sifive,plic-1.0.0"; + compatible = "riscv,cpu-intc"; interrupt-controller; - interrupts-extended = < - &cpu4_intc 0xffffffff &cpu4_intc 9 - #ifndef PLATFORM_HAPS - &cpu5_intc 0xffffffff &cpu5_intc 9 - &cpu6_intc 0xffffffff &cpu6_intc 9 - &cpu7_intc 0xffffffff &cpu7_intc 9 - #endif - >; - reg = <0x0 0x2c000000 0x0 0x4000000>; - reg-names = "control"; - riscv,max-priority = <7>; - riscv,ndev = <520>; - numa-node-id = <1>; }; - #if (CHIPLET_AND_DIE & 0x2) - L53: order-obliterator@10030000 { - compatible = "sifive,order-obliterator0"; - interrupt-parent = <&plic0>; - interrupts = <516>; - reg = <0x0 0x10030000 0x0 0x4000>; - reg-names = "control"; - }; - L15: pl2@104000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D1CACHE>; - reg = <0x0 0x104000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - L20: pl2@108000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D1CACHE>; - reg = <0x0 0x108000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - L25: pl2@10c000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D1CACHE>; - reg = <0x0 0x10c000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - L30: pl2@110000 { - cache-block-size = <64>; - cache-level = <2>; - 
cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D1CACHE>; - reg = <0x0 0x110000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - #endif + }; +#endif +}; - D2L2_0: pl2@20104000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D2CACHE>; - reg = <0x0 0x20104000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - D2L2_1: pl2@20108000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D2CACHE>; - reg = <0x0 0x20108000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - D2L2_2: pl2@2010c000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D2CACHE>; - reg = <0x0 0x2010c000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; - D2L2_3: pl2@20110000 { - cache-block-size = <64>; - cache-level = <2>; - cache-sets = <512>; - cache-size = <262144>; - cache-unified; - compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&D2CACHE>; - reg = <0x0 0x20110000 0x0 0x4000>; - reg-names = "control"; - sifive,ecc-granularity = <16>; - sifive,perfmon-counters = <6>; - }; +&SOC { + #address-cells = <2>; + #size-cells = <2>; + compatible = "SiFive,FU800-soc", "fu800-soc", "sifive-soc", "simple-bus"; + ranges; + PMU1: pmu@1 { + riscv,raw-event-to-mhpmcounters = <0x0 0x0 0xffffffff 0xff 0x1f8 + 0x0 0x1 0xffffffff 0xfff800ff 0x1f8 + 0x0 0x2 0xffffffff 0xffffc0ff 0x1f8>; + riscv,event-to-mhpmcounters = <0x01 0x01 0x01 0x02 0x02 0x02 0x4 0x6 0x1f8 0x10009 0x10009 0x1f8 0x10019 0x10019 0x1f8 0x10021 0x10021 0x1f8>; + riscv,event-to-mhpmevent = <0x4 0x0 0x202 0x5 0x0 0x4000 0x6 0x0 0x2001 0x10009 0x0 0x102 0x10019 0x0 0x1002 0x10021 0x0 0x802>; + compatible = "riscv,pmu0", "riscv,pmu"; + interrupts-extended = <&cpu4_intc 13 &cpu5_intc 13 &cpu6_intc 13 &cpu7_intc 13>; + }; - L10: rom@1a000000 { - compatible = "ucbbar,cacheable-zero0"; - reg = <0x0 0x1a000000 0x0 0x400000>; - }; - L11: rom@3a000000 { - compatible = "ucbbar,cacheable-zero0"; - reg = <0x0 0x3a000000 0x0 0x400000>; - }; - L6: subsystem_pbus_clock { - #clock-cells = <0>; - clock-frequency = <10000000>; - clock-output-names = "subsystem_pbus_clock"; - compatible = "fixed-clock"; - }; - L61: teststatus@4000 { - compatible = "sifive,test0"; - reg = <0x0 0x4000 0x0 0x1000>; - reg-names = "control"; - }; - L45: tl-address-adjuster@20000000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "sifive,tl-inter-sys-port", "sifive,tl-port", "simple-external-bus", "simple-bus"; - ranges = <0x20000000 0x0 0x20000000 0x1a000000 0x3a400000 0x0 0x3a400000 0x5c00000>; - sifive,port-width-bytes = <8>; - }; - L42: tl-inter-mem-master-port@80000000 { - #address-cells = <2>; - #size-cells = <2>; - compatible = "sifive,tl-inter-mem-master-port", "sifive,tl-port", "sifive,inter-mem-master-port", "simple-external-bus", "simple-bus"; - ranges = <0x0 0x80000000 0x0 0x80000000 0x7f 0x80000000>; - sifive,port-width-bytes = <32>; - }; - L55: trace-encoder-0@100000 { - 
compatible = "sifive,trace0"; - reg = <0x0 0x100000 0x0 0x1000>; - reg-names = "control"; - }; - L56: trace-encoder-1@101000 { - compatible = "sifive,trace0"; - reg = <0x0 0x101000 0x0 0x1000>; - reg-names = "control"; - }; - L57: trace-encoder-2@102000 { - compatible = "sifive,trace0"; - reg = <0x0 0x102000 0x0 0x1000>; - reg-names = "control"; - }; - L58: trace-encoder-3@103000 { - compatible = "sifive,trace0"; - reg = <0x0 0x103000 0x0 0x1000>; - reg-names = "control"; - }; - L59: trace-funnel-0@18000 { - compatible = "sifive,trace0"; - reg = <0x0 0x18000 0x0 0x1000>; - reg-names = "control"; - }; + d1_bus_err0: bus-error-unit@21700000 { + compatible = "sifive,buserror"; + interrupt-parent = <&plic1>; + interrupts = <517>; + reg = <0x0 0x21700000 0x0 0x1000>; + reg-names = "control"; + }; + d1_bus_err1: bus-error-unit@21701000 { + compatible = "sifive,buserror"; + interrupt-parent = <&plic1>; + interrupts = <518>; + reg = <0x0 0x21701000 0x0 0x1000>; + reg-names = "control"; + }; + d1_bus_err2: bus-error-unit@21702000 { + compatible = "sifive,buserror"; + interrupt-parent = <&plic1>; + interrupts = <519>; + reg = <0x0 0x21702000 0x0 0x1000>; + reg-names = "control"; + }; + d1_bus_err3: bus-error-unit@21703000 { + compatible = "sifive,buserror"; + interrupt-parent = <&plic1>; + interrupts = <520>; + reg = <0x0 0x21703000 0x0 0x1000>; + reg-names = "control"; + }; + + D1CACHE: cache-controller@22010000 { + cache-block-size = <64>; + cache-level = <3>; + cache-sets = <4096>; + cache-size = <4194304>; + cache-unified; + compatible = "sifive,ccache1", "cache", "sifive,fu740-c000-ccache"; + interrupt-parent = <&plic1>; + interrupts = <1>, <3>, <4>, <2>; + //next-level-cache = <&L9 &L10 &L11 &D2MEM>; + next-level-cache = <&L9 &L10 &L11>; + reg = <0x0 0x22010000 0x0 0x4000 0x0 0x8000000 0x0 0x400000>; + reg-names = "control", "sideband"; + sifive,a-mshr-count = <60>; + sifive,bank-count = <4>; + sifive,ecc-granularity = <8>; + sifive,max-master-id = <13>; + sifive,perfmon-counters = <6>; + numa-node-id = <1>; + }; + + + plic1: interrupt-controller@2c000000 { + #interrupt-cells = <1>; + compatible = "sifive,plic-1.0.0"; + interrupt-controller; + interrupts-extended = < + &cpu4_intc 0xffffffff &cpu4_intc 9 + #ifndef PLATFORM_HAPS + &cpu5_intc 0xffffffff &cpu5_intc 9 + &cpu6_intc 0xffffffff &cpu6_intc 9 + &cpu7_intc 0xffffffff &cpu7_intc 9 + #endif + >; + reg = <0x0 0x2c000000 0x0 0x4000000>; + reg-names = "control"; + riscv,max-priority = <7>; + riscv,ndev = <520>; + numa-node-id = <1>; + }; + + d1_l2_cache_0: pl2@20104000 { + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <512>; + cache-size = <262144>; + cache-unified; + compatible = "sifive,pL2Cache0", "cache"; + next-level-cache = <&D1CACHE>; + reg = <0x0 0x20104000 0x0 0x4000>; + reg-names = "control"; + sifive,ecc-granularity = <16>; + sifive,perfmon-counters = <6>; + }; + d1_l2_cache_1: pl2@20108000 { + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <512>; + cache-size = <262144>; + cache-unified; + compatible = "sifive,pL2Cache0", "cache"; + next-level-cache = <&D1CACHE>; + reg = <0x0 0x20108000 0x0 0x4000>; + reg-names = "control"; + sifive,ecc-granularity = <16>; + sifive,perfmon-counters = <6>; + }; + d1_l2_cache_2: pl2@2010c000 { + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <512>; + cache-size = <262144>; + cache-unified; + compatible = "sifive,pL2Cache0", "cache"; + next-level-cache = <&D1CACHE>; + reg = <0x0 0x2010c000 0x0 0x4000>; + reg-names = "control"; + sifive,ecc-granularity = 
<16>; + sifive,perfmon-counters = <6>; + }; + d1_l2_cache_3: pl2@20110000 { + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <512>; + cache-size = <262144>; + cache-unified; + compatible = "sifive,pL2Cache0", "cache"; + next-level-cache = <&D1CACHE>; + reg = <0x0 0x20110000 0x0 0x4000>; + reg-names = "control"; + sifive,ecc-granularity = <16>; + sifive,perfmon-counters = <6>; }; }; + diff --git a/arch/riscv/boot/dts/eswin/eswin-win2030-arch.dtsi b/arch/riscv/boot/dts/eswin/eswin-win2030-arch.dtsi index bde485b4abfa..64671ff693e3 100644 --- a/arch/riscv/boot/dts/eswin/eswin-win2030-arch.dtsi +++ b/arch/riscv/boot/dts/eswin/eswin-win2030-arch.dtsi @@ -48,7 +48,7 @@ CPU_RET: cpu-retentive { min-residency-us = <80>; }; }; - L17: cpu@0 { + cpu_0: cpu@0 { clock-frequency = <0>; compatible = "eswin,eic770x", "riscv"; d-cache-block-size = <64>; @@ -73,27 +73,28 @@ L17: cpu@0 { i-tlb-sets = <1>; i-tlb-size = <32>; mmu-type = "riscv,sv48"; - next-level-cache = <&L15>; + next-level-cache = <&d0_l2_cache_0>; reg = <0x0>; riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; riscv,pmpgranularity = <4096>; riscv,pmpregions = <8>; - sifive,buserror = <&L16>; + sifive,buserror = <&d0_bus_err0>; status = "okay"; timebase-frequency = ; tlb-split; + numa-node-id = <0>; clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_0>; operating-points-v2 = <&d0_cpu_opp_table>; #cooling-cells = <2>; dynamic-power-coefficient = <324>; cpu-idle-states = <&CPU_RET>; - L14: interrupt-controller { + cpu0_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; interrupt-controller; }; }; - L22: cpu@1 { + cpu_1: cpu@1 { clock-frequency = <0>; compatible = "eswin,eic770x", "riscv"; d-cache-block-size = <64>; @@ -118,27 +119,28 @@ L22: cpu@1 { i-tlb-sets = <1>; i-tlb-size = <32>; mmu-type = "riscv,sv48"; - next-level-cache = <&L20>; + next-level-cache = <&d0_l2_cache_1>; reg = <0x1>; riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; riscv,pmpgranularity = <4096>; riscv,pmpregions = <8>; - sifive,buserror = <&L21>; + sifive,buserror = <&d0_bus_err1>; status = "okay"; timebase-frequency = ; tlb-split; + numa-node-id = <0>; clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_1>; operating-points-v2 = <&d0_cpu_opp_table>; #cooling-cells = <2>; dynamic-power-coefficient = <324>; cpu-idle-states = <&CPU_RET>; - L19: interrupt-controller { + cpu1_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; interrupt-controller; }; }; - L27: cpu@2 { + cpu_2: cpu@2 { clock-frequency = <0>; compatible = "eswin,eic770x", "riscv"; d-cache-block-size = <64>; @@ -163,27 +165,28 @@ L27: cpu@2 { i-tlb-sets = <1>; i-tlb-size = <32>; mmu-type = "riscv,sv48"; - next-level-cache = <&L25>; + next-level-cache = <&d0_l2_cache_2>; reg = <0x2>; riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; riscv,pmpgranularity = <4096>; riscv,pmpregions = <8>; - sifive,buserror = <&L26>; + sifive,buserror = <&d0_bus_err2>; status = "okay"; timebase-frequency = ; tlb-split; + numa-node-id = <0>; clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_2>; operating-points-v2 = <&d0_cpu_opp_table>; #cooling-cells = <2>; dynamic-power-coefficient = <324>; cpu-idle-states = <&CPU_RET>; - L24: interrupt-controller { + cpu2_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; interrupt-controller; }; }; - L32: cpu@3 { + cpu_3: cpu@3 { clock-frequency = <0>; compatible = "eswin,eic770x", "riscv"; d-cache-block-size = <64>; @@ -208,21 +211,22 @@ L32: cpu@3 { 
i-tlb-sets = <1>; i-tlb-size = <32>; mmu-type = "riscv,sv48"; - next-level-cache = <&L30>; + next-level-cache = <&d0_l2_cache_3>; reg = <0x3>; riscv,isa = "rv64imafdch_zicsr_zifencei_zba_zbb_sscofpmf"; riscv,pmpgranularity = <4096>; riscv,pmpregions = <8>; - sifive,buserror = <&L31>; + sifive,buserror = <&d0_bus_err3>; status = "okay"; timebase-frequency = ; tlb-split; + numa-node-id = <0>; clocks = <&d0_clock WIN2030_CLK_CPU_EXT_SRC_CORE_CLK_3>; operating-points-v2 = <&d0_cpu_opp_table>; #cooling-cells = <2>; dynamic-power-coefficient = <324>; cpu-idle-states = <&CPU_RET>; - L29: interrupt-controller { + cpu3_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; interrupt-controller; @@ -240,14 +244,14 @@ SOC: soc { #size-cells = <2>; compatible = "SiFive,FU800-soc", "fu800-soc", "sifive-soc", "simple-bus"; ranges; - pmu { + PMU0: pmu@0 { riscv,raw-event-to-mhpmcounters = <0x0 0x0 0xffffffff 0xff 0x1f8 0x0 0x1 0xffffffff 0xfff800ff 0x1f8 0x0 0x2 0xffffffff 0xffffc0ff 0x1f8>; riscv,event-to-mhpmcounters = <0x01 0x01 0x01 0x02 0x02 0x02 0x4 0x6 0x1f8 0x10009 0x10009 0x1f8 0x10019 0x10019 0x1f8 0x10021 0x10021 0x1f8>; riscv,event-to-mhpmevent = <0x4 0x0 0x202 0x5 0x0 0x4000 0x6 0x0 0x2001 0x10009 0x0 0x102 0x10019 0x0 0x1002 0x10021 0x0 0x802>; compatible = "riscv,pmu0", "riscv,pmu"; - interrupts-extended = <&L14 13 &L19 13 &L24 13 &L29 13>; + interrupts-extended = <&cpu0_intc 13 &cpu1_intc 13 &cpu2_intc 13 &cpu3_intc 13>; }; L40: authentication-controller { compatible = "sifive,authentication0"; @@ -287,35 +291,35 @@ L54: burst-bundler@10010000 { reg = <0x0 0x10010000 0x0 0x1000>; reg-names = "control"; }; - L16: bus-error-unit@hart0 { + d0_bus_err0: bus-error-unit@hart0 { compatible = "sifive,buserror"; interrupt-parent = <&plic0>; interrupts = <517>; reg = <0x0 0x1700000 0x0 0x1000>; reg-names = "control"; }; - L21: bus-error-unit@hart1 { + d0_bus_err1: bus-error-unit@hart1 { compatible = "sifive,buserror"; interrupt-parent = <&plic0>; interrupts = <518>; reg = <0x0 0x1701000 0x0 0x1000>; reg-names = "control"; }; - L26: bus-error-unit@hart2 { + d0_bus_err2: bus-error-unit@hart2 { compatible = "sifive,buserror"; interrupt-parent = <&plic0>; interrupts = <519>; reg = <0x0 0x1702000 0x0 0x1000>; reg-names = "control"; }; - L31: bus-error-unit@hart3 { + d0_bus_err3: bus-error-unit@hart3 { compatible = "sifive,buserror"; interrupt-parent = <&plic0>; interrupts = <520>; reg = <0x0 0x1703000 0x0 0x1000>; reg-names = "control"; }; - L7: cache-controller@2010000 { + D0CACHE: cache-controller@2010000 { cache-block-size = <64>; cache-level = <3>; cache-sets = <4096>; @@ -353,14 +357,15 @@ plic0: interrupt-controller@c000000 { compatible = "sifive,plic-1.0.0"; interrupt-controller; interrupts-extended = < - &L14 0xffffffff &L14 9 - &L19 0xffffffff &L19 9 - &L24 0xffffffff &L24 9 - &L29 0xffffffff &L29 9>; + &cpu0_intc 0xffffffff &cpu0_intc 9 + &cpu1_intc 0xffffffff &cpu1_intc 9 + &cpu2_intc 0xffffffff &cpu2_intc 9 + &cpu3_intc 0xffffffff &cpu3_intc 9>; reg = <0x0 0xc000000 0x0 0x4000000>; reg-names = "control"; riscv,max-priority = <7>; riscv,ndev = <520>; + numa-node-id = <0>; }; L53: order-obliterator@10030000 { compatible = "sifive,order-obliterator0"; @@ -369,53 +374,53 @@ L53: order-obliterator@10030000 { reg = <0x0 0x10030000 0x0 0x4000>; reg-names = "control"; }; - L15: pl2@104000 { + d0_l2_cache_0: pl2@104000 { cache-block-size = <64>; cache-level = <2>; cache-sets = <512>; cache-size = <262144>; cache-unified; compatible = "sifive,pL2Cache0", "cache"; - 
next-level-cache = <&L7>; + next-level-cache = <&D0CACHE>; reg = <0x0 0x104000 0x0 0x4000>; reg-names = "control"; sifive,ecc-granularity = <16>; sifive,perfmon-counters = <6>; }; - L20: pl2@108000 { + d0_l2_cache_1: pl2@108000 { cache-block-size = <64>; cache-level = <2>; cache-sets = <512>; cache-size = <262144>; cache-unified; compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&L7>; + next-level-cache = <&D0CACHE>; reg = <0x0 0x108000 0x0 0x4000>; reg-names = "control"; sifive,ecc-granularity = <16>; sifive,perfmon-counters = <6>; }; - L25: pl2@10c000 { + d0_l2_cache_2: pl2@10c000 { cache-block-size = <64>; cache-level = <2>; cache-sets = <512>; cache-size = <262144>; cache-unified; compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&L7>; + next-level-cache = <&D0CACHE>; reg = <0x0 0x10c000 0x0 0x4000>; reg-names = "control"; sifive,ecc-granularity = <16>; sifive,perfmon-counters = <6>; }; - L30: pl2@110000 { + d0_l2_cache_3: pl2@110000 { cache-block-size = <64>; cache-level = <2>; cache-sets = <512>; cache-size = <262144>; cache-unified; compatible = "sifive,pL2Cache0", "cache"; - next-level-cache = <&L7>; + next-level-cache = <&D0CACHE>; reg = <0x0 0x110000 0x0 0x4000>; reg-names = "control"; sifive,ecc-granularity = <16>; diff --git a/arch/riscv/boot/dts/eswin/eswin-win2030-die0-soc.dtsi b/arch/riscv/boot/dts/eswin/eswin-win2030-die0-soc.dtsi index 9de7c694998a..7df6029a9451 100644 --- a/arch/riscv/boot/dts/eswin/eswin-win2030-die0-soc.dtsi +++ b/arch/riscv/boot/dts/eswin/eswin-win2030-die0-soc.dtsi @@ -18,11 +18,8 @@ * along with this program. If not, see . */ -#if (!(CHIPLET_AND_DIE & 0x2)) #include "eswin-win2030-arch.dtsi" -#else -#include "eswin-win2030-arch-d2d.dtsi" -#endif + #include #include @@ -136,10 +133,10 @@ map0 { trip = <&target>; contribution = <1024>; cooling-device = - <&L17 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&L22 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&L27 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&L32 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + <&cpu_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; map1 { trip = <&target>; @@ -485,7 +482,7 @@ d0_gmac0: ethernet@50400000 { clock-names = "app", "stmmaceth","tx"; resets = <&d0_reset HSPDMA_RST_CTRL SW_HSP_ETH0_ARSTN>; reset-names = "ethrst"; - // iommus = <&smmu0 WIN2030_SID_ETH0>; + iommus = <&smmu0 WIN2030_SID_ETH0>; tbus = ; dma-noncoherent; eswin,hsp_sp_csr = <&d0_hsp_sp_csr 0x1030 0x100 0x108>; @@ -515,7 +512,7 @@ d0_gmac1: ethernet@50410000 { clock-names = "app", "stmmaceth","tx"; resets = <&d0_reset HSPDMA_RST_CTRL SW_HSP_ETH1_ARSTN>; reset-names = "ethrst"; - // iommus = <&smmu0 WIN2030_SID_ETH1>; + iommus = <&smmu0 WIN2030_SID_ETH1>; tbus = ; dma-noncoherent; eswin,hsp_sp_csr = <&d0_hsp_sp_csr 0x1034 0x200 0x208>; @@ -537,7 +534,7 @@ noc { }; d0_npu: eswin-npu@51c00000 { - compatible = "eswin,npu0"; + compatible = "eswin,npu"; reg = <0x0 0x51c00000 0x0 0x400000>; interrupt-parent = <&plic0>; interrupts = <387 16>; @@ -559,6 +556,7 @@ d0_npu: eswin-npu@51c00000 { reset-names = "e31_core"; numa-node-id = <0>; + firmware-name = "eic7700_die0_e31_fw"; dma-noncoherent; #cooling-cells = <2>; @@ -753,6 +751,7 @@ gc820: g2d@50140000 { contiguous-size = <0xa00000>; recovery = <0>; dma-noncoherent; + numa-node-id = <0>; }; gpu0: gpu@51400000 { @@ -914,6 +913,7 @@ sdhci_emmc: mmc@50450000 { <&d0_reset HSPDMA_RST_CTRL SW_HSP_EMMC_ARSTN>; reset-names = "txrx_rst", 
"phy_rst", "prstn", "arstn"; + core-clk-reg = <0x51828160>; disable-cqe-dcmd; bus-width = <8>; non-removable; @@ -1333,7 +1333,7 @@ fan_control: fan_control@50b50000 { pwm-minimum-period = <1000>; pwms = <&pwm0 0 100000>; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_fan_tach_default>; + pinctrl-0 = <&pinctrl_die0_fan_tach_default>; status = "disabled"; label = "fan_control"; }; @@ -1504,9 +1504,21 @@ d0_aon_i2c1: i2c@51838000 { interrupt-parent = <&plic0>; }; pinctrl: pinctrl@0x51600080 { - compatible = "eswin,eic7700-pinctrl"; + compatible = "eswin,eic7x-pinctrl"; reg = <0x0 0x51600080 0x0 0x1FFF80>; status = "disabled"; + pinctrl_die0_pwm0_default: pwm0-default{ + mux { + groups = "pwm0_group"; + function = "pwm0_func"; + }; + }; + pinctrl_die0_fan_tach_default: fan_tach-default{ + mux { + groups = "fan_tach_group"; + function = "fan_tach_func"; + }; + }; }; gpio0: gpio@51600000 { @@ -1563,7 +1575,7 @@ pwm0: pwm@0x50818000 { resets = <&d0_reset TIMER_RST_CTRL SW_TIMER_RST_N>; reset-names = "rst"; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_pwm0_default>; + pinctrl-0 = <&pinctrl_die0_pwm0_default>; status = "disabled"; }; @@ -1935,6 +1947,7 @@ dw_hdmi: hdmi@502a0000 { <&d0_reset VO_PHYRST_CTRL SW_HDMI_PHYCTRL_RSTN>, <&d0_reset VO_PHYRST_CTRL SW_VO_HDMI_RSTN>; reset-names = "prstn", "phyrstn", "rstn"; + numa-node-id = <0>; ports { #address-cells = <1>; @@ -1953,18 +1966,21 @@ dw_hdmi_hdcp2: hdmi-hdcp2@50290000 { reg = <0x0 0x50290000 0x0 0x10000>; interrupt-parent = <&plic0>; interrupts = <275>; - clocks = <&d0_clock WIN2030_CLK_VO_CFG_CLK>, + clocks = <&d0_clock WIN2030_CLK_VO_ACLK>, <&d0_clock WIN2030_CLK_VO_HDMI_IESMCLK>; - clock-names ="pclk_hdcp2", "hdcp2_clk_hdmi"; + clock-names ="aclk", "iesmclk"; dma-noncoherent; + numa-node-id = <0>; }; d0_usbdrd3_0: usb0@50480000 { compatible = "eswin,win2030-dwc3"; #address-cells = <2>; #size-cells = <2>; - clocks =<&d0_clock WIN2030_GATE_HSP_USB0_SUSPEND_CLK>; - clock-names = "suspend"; + clocks =<&d0_clock WIN2030_GATE_HSP_USB0_SUSPEND_CLK>, + <&d0_clock WIN2030_CLK_HSP_ACLK>, + <&d0_clock WIN2030_CLK_HSP_CFG_CLK>; + clock-names = "suspend","aclk", "cfg_clk"; eswin,hsp_sp_csr = <&d0_hsp_sp_csr 0x800 0x808 0x83c 0x840>; resets = <&d0_reset HSPDMA_RST_CTRL SW_USB0_VAUX_RSTN>; reset-names = "vaux"; @@ -2000,8 +2016,10 @@ d0_usbdrd3_1: usb1@50490000 { compatible = "eswin,win2030-dwc3"; #address-cells = <2>; #size-cells = <2>; - clocks =<&d0_clock WIN2030_GATE_HSP_USB1_SUSPEND_CLK>; - clock-names = "suspend"; + clocks =<&d0_clock WIN2030_GATE_HSP_USB1_SUSPEND_CLK>, + <&d0_clock WIN2030_CLK_HSP_ACLK>, + <&d0_clock WIN2030_CLK_HSP_CFG_CLK>; + clock-names = "suspend","aclk", "cfg_clk"; eswin,hsp_sp_csr = <&d0_hsp_sp_csr 0x900 0x908 0x93c 0x940>; resets = <&d0_reset HSPDMA_RST_CTRL SW_USB1_VAUX_RSTN>; reset-names = "vaux"; @@ -2294,23 +2312,35 @@ d0_numa_sample:numa_sample@0 { dma-noncoherent; }; - ddr0: ddr-controller@0 { + ddr0: ddr-controller@52300000 { compatible = "eswin,ddrc-1.20a"; interrupt-parent = <&plic0>; interrupts = <10>; interrupt-names = "ddr-ecc"; reg = <0x0 0x52300000 0x0 0x40000>; ctrl-id = <0>; + numa-node-id = <0>; status = "disabled"; }; - ddr1: ddr-controller@1 { + ddr1: ddr-controller@52380000 { compatible = "eswin,ddrc-1.20a"; interrupt-parent = <&plic0>; interrupts = <299>; interrupt-names = "ddr-ecc"; reg = <0x0 0x52380000 0x0 0x40000>; ctrl-id = <1>; + numa-node-id = <0>; + status = "disabled"; + }; + + d2d: d2d-unit@52100000 { + compatible = "eswin,eic7x-d2d"; + interrupt-parent = <&plic0>; + interrupts = 
<287>, <288>; + reg = <0x0 0x52100000 0x0 0x50000>; + reg-names = "control"; + numa-node-id = <0>; status = "disabled"; }; }; diff --git a/arch/riscv/boot/dts/eswin/eswin-win2030-die1-soc.dtsi b/arch/riscv/boot/dts/eswin/eswin-win2030-die1-soc.dtsi index d271b6843621..7ef1be2f7a36 100644 --- a/arch/riscv/boot/dts/eswin/eswin-win2030-die1-soc.dtsi +++ b/arch/riscv/boot/dts/eswin/eswin-win2030-die1-soc.dtsi @@ -18,8 +18,6 @@ * along with this program. If not, see . */ -#include "eswin-win2030-platform.dtsi" - #include "eswin-win2030-arch-d2d.dtsi" @@ -29,16 +27,10 @@ #include #include #include +#include #include / { -#if (CHIPLET_AND_DIE == 0x1) - aliases { - serial0 = &d1_uart0; - ethernet0 = &d1_gmac0; - ethernet1 = &d1_gmac1; - }; -#endif d1_cpu_opp_table: opp-table1 { compatible = "operating-points-v2"; opp-shared; @@ -173,7 +165,50 @@ d1_uart0: serial@0x70900000 { }; d1_uart1: serial@0x70910000 { + compatible = "snps,dw-apb-uart"; reg = <0x0 0x70910000 0x0 0x10000>; + clock-frequency = ; + interrupt-parent = <&plic1>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + numa-node-id = <1>; + }; + + d1_uart2: serial@0x70920000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70920000 0x0 0x10000>; + clock-frequency = ; + interrupt-parent = <&plic1>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + numa-node-id = <1>; + status = "disabled"; + }; + + d1_uart3: serial@0x70930000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70930000 0x0 0x10000>; + clock-frequency = ; + interrupt-parent = <&plic1>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + numa-node-id = <1>; + status = "disabled"; + }; + + d1_uart4: serial@0x70940000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70940000 0x0 0x10000>; + clock-frequency = ; + interrupt-parent = <&plic1>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + numa-node-id = <1>; + status = "disabled"; }; d1_sys_con: scu_sys_con@0x71810000 { @@ -347,12 +382,12 @@ d1_pmu_dsp3: win2030-pmu-controller-port@240 { }; d1_dmac0: dma-controller-hsp@0x70430000 { - compatible = "snps,axi-dma-1.01a"; + compatible = "eswin,eic770x-axi-dma"; reg = <0x0 0x70430000 0x0 0x10000>; interrupt-parent = <&plic1>; interrupts = <57>; #dma-cells = <2>; // change dma-cells value <1> to <2>, to support peripheral selection dma-controller,See the parameter dmas for details; - clocks = <&d1_clock WIN2030_CLK_HSP_DMA0_CLK>; + clocks = <&d1_clock WIN2030_CLK_HSP_DMA0_CLK>, <&d1_clock WIN2030_CLK_HSP_DMA0_CLK_TEST>; clock-names = "core-clk", "cfgr-clk"; resets = <&d1_reset HSPDMA_RST_CTRL SW_HSP_DMA0_RSTN>, @@ -374,7 +409,7 @@ d1_dmac0: dma-controller-hsp@0x70430000 { }; d1_aon_dmac: dma-controller-aon@0x718c0000 { - compatible = "snps,axi-dma-1.01a"; + compatible = "eswin,eic770x-axi-dma"; reg = <0x0 0x718c0000 0x0 0x10000>; interrupt-parent = <&plic1>; interrupts = <289>; @@ -544,7 +579,7 @@ d1_mbox1: mbox@70a20000 { /*mailbox between u84 & npu_0*/ d1_mbox2: mbox@70a40000 { - compatible = "eswin,win2030-mailbox"; + compatible = "eswin,npu1-mailbox"; reg = <0 (ESWIN_MAILBOX_U84_TO_NPU_0_REG_BASE + 0x20000000) 0 0x10000>, <0 (ESWIN_MAILBOX_NPU_0_TO_U84_REG_BASE + 0x20000000) 0 0x10000>; interrupt-parent = <&plic1>; @@ -671,7 +706,7 @@ d1_ipc_scpu:ipc@1 { d1_lpcpu:lpcpu@1 { compatible = "eswin,win2030-lpcpu"; - fw-region = <&lpcpu1_reserved>; + /* fw-region = <&lpcpu1_reserved>; */ clocks = <&d1_clock WIN2030_CLK_CLK_LPCPU_CORE>, <&d1_clock WIN2030_CLK_CLK_LPCPU_BUS>; clock-names = "core_clk", "bus_clk"; @@ -790,19 +825,6 @@ d1_i2c2: 
i2c@70970000 { interrupts = <107>; interrupt-parent = <&plic1>; status = "disabled"; - d1_es8316: es8316@10 { - compatible = "everest,es8316"; - reg = <0x10>; - interrupts = <107>; - interrupt-parent = <&plic1>; - #sound-dai-cells = <0>; - port { - d1_codec_endpoint: endpoint { - system-clock-frequency = <12288000>; - remote-endpoint = <&d1_i2s0_endpoint>; - }; - }; - }; }; d1_i2c3: i2c@70980000 { compatible = "snps,designware-i2c"; @@ -940,7 +962,7 @@ d1_aon_i2c1: i2c@71838000 { }; d1_npu: eswin-npu@71c00000 { - compatible = "eswin,npu1"; + compatible = "eswin,npu"; reg = <0x0 0x71c00000 0x0 0x400000>; interrupt-parent = <&plic1>; interrupts = <387 16>; @@ -962,13 +984,14 @@ d1_npu: eswin-npu@71c00000 { reset-names = "e31_core"; numa-node-id = <1>; + firmware-name = "eic7700_die1_e31_fw"; dma-noncoherent; #cooling-cells = <2>; dynamic-power-coefficient = <0>; /*TBD*/ }; - dev_llc_d1: llc@71c00000 { + d1_llc_dev: llc@71c00000 { compatible = "eswin,llc"; reg = <0x0 0x71c00000 0x0 0x400000>; eswin,syscfg = <&d1_sys_con 0x324>; @@ -979,7 +1002,7 @@ dev_llc_d1: llc@71c00000 { <&d1_clock WIN2030_CLK_NPU_CLK>, <&d1_clock WIN2030_MUX_U_NPU_CORE_3MUX1_GFREE>, <&d1_clock WIN2030_SPLL2_FOUT2>, - <&d0_clock WIN2030_SPLL1_FOUT1>; + <&d1_clock WIN2030_SPLL1_FOUT1>; clock-names = "aclk", "cfg_clk", "llc_clk", "core_clk", "mux_u_npu_core_3mux1_gfree", "fixed_rate_clk_spll2_fout2", "fixed_rate_clk_spll1_fout1"; @@ -1035,7 +1058,7 @@ d1_sata: sata@0x70420000 { }; d1_pinctrl: pinctrl@0x71600080 { - compatible = "eswin,win2030-pinctrl"; + compatible = "eswin,eic7x-pinctrl"; reg = <0x0 0x71600080 0x0 0x1FFF80>; status = "disabled"; d1_pinctrl_pwm0_default: pwm0-default{ @@ -1076,58 +1099,47 @@ conf { }; }; - d1_gpio0: gpio@0x71600000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "eswin,win2030-gpio"; - reg = <0x0 0x71600000 0x0 0x80>; - status = "disabled"; - eswin,syscfg = <&d1_sys_con 0x3c0>; + d1_gpio0: gpio@71600000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dw-apb-gpio"; + reg = <0x0 0x71600000 0x0 0x80>; d1_porta: gpio-port@0 { - compatible = "eswin,win2030-gpio-port"; + compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; ngpios = <32>; reg = <0>; - interrupts = <303>; interrupt-parent = <&plic1>; - interrupt-state = <0 1 1 1>; - direction-input = <5 8 9 16>; - direction-output = <1 0 3 1>; - gpio-state = <11 1 12 1>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 + 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334>; }; + d1_portb: gpio-port@1 { - compatible = "eswin,win2030-gpio-port"; + compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; ngpios = <32>; reg = <1>; - direction-input = <5 13 9 25>; - direction-output = <26 0 3 1>; - gpio-state = <11 1 17 1>; }; d1_portc: gpio-port@2 { - compatible = "eswin,win2030-gpio-port"; + compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; ngpios = <32>; reg = <2>; - direction-input = <5 13 9 25>; - direction-output = <26 0 3 1>; - gpio-state = <11 1 17 1>; }; d1_portd: gpio-port@3 { - compatible = "eswin,win2030-gpio-port"; + compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; ngpios = <16>; reg = <3>; - direction-input = <9 1 8 1>; - direction-output = <3 1 15 1>; - gpio-state = <6 1 5 1>; }; }; @@ -1289,6 +1301,18 @@ d1_i2s2: i2s@70220000 { dma-noncoherent; }; + d1_graphcard0: graphcard4 { + compatible = "audio-graph-card"; + }; + + 
d1_graphcard1: graphcard5 { + compatible = "audio-graph-card"; + }; + + d1_graphcard2: graphcard6 { + compatible = "audio-graph-card"; + }; + d1_dsp_subsys:dsp_subsys@72280400 { #address-cells = <2>; #size-cells = <2>; @@ -1298,7 +1322,7 @@ d1_dsp_subsys:dsp_subsys@72280400 { dma-ranges = <0x0 0x30000000 0x0 0xc0000000 0x0 0xce000000>; compatible = "es-dsp-subsys", "simple-bus"; clocks = <&d1_clock WIN2030_CLK_DSPT_CFG_CLK>, <&d1_clock WIN2030_CLK_DSPT_ACLK>; - clock-names = "cfg_clk", aclk; + clock-names = "cfg_clk", "aclk"; resets = <&d1_reset DSP_RST_CTRL SW_DSP_AXI_RSTN>, <&d1_reset DSP_RST_CTRL SW_DSP_CFG_RSTN>, <&d1_reset DSP_RST_CTRL SW_DSP_DIV4_RSTN>, @@ -1326,7 +1350,7 @@ ESWIN_MAIBOX_U84_IRQ_BIT device-uart = <0x70910000>; device-irq-mode = <1>; host-irq-mode = <1>; - firmware-name = "eic7702_dsp_fw"; + firmware-name = "eic7700_dsp_fw"; process-id = <0>; iommus = <&smmu1 WIN2030_SID_DSP_0>; tbus = ; @@ -1355,7 +1379,7 @@ ESWIN_MAIBOX_U84_IRQ_BIT device-uart = <0x70910000>; device-irq-mode = <1>; host-irq-mode = <1>; - firmware-name = "eic7702_dsp_fw"; + firmware-name = "eic7700_dsp_fw"; process-id = <1>; iommus = <&smmu1 WIN2030_SID_DSP_1>; tbus = ; @@ -1384,7 +1408,7 @@ ESWIN_MAIBOX_U84_IRQ_BIT device-uart = <0x70910000>; device-irq-mode = <1>; host-irq-mode = <1>; - firmware-name = "eic7702_dsp_fw"; + firmware-name = "eic7700_dsp_fw"; process-id = <2>; iommus = <&smmu1 WIN2030_SID_DSP_2>; tbus = ; @@ -1413,7 +1437,7 @@ ESWIN_MAIBOX_U84_IRQ_BIT device-uart = <0x70910000>; device-irq-mode = <1>; host-irq-mode = <1>; - firmware-name = "eic7702_dsp_fw"; + firmware-name = "eic7700_dsp_fw"; process-id = <3>; iommus = <&smmu1 WIN2030_SID_DSP_3>; tbus = ; @@ -1424,7 +1448,7 @@ dsp@0 { }; }; }; - die1_rtc: rtc@71818000 { + d1_rtc: rtc@71818000 { compatible = "eswin,win2030-rtc"; reg = <0x0 0x71818000 0x0 0x400>; eswin,syscfg = <&d1_sys_con 0x3c0>; @@ -1438,7 +1462,7 @@ die1_rtc: rtc@71818000 { status = "disabled"; }; - pcie_die1: pcie@0x74000000 { + d1_pcie: pcie@0x74000000 { compatible = "eswin,win2030-pcie"; clocks = <&d1_clock WIN2030_CLK_PCIET_ACLK>, <&d1_clock WIN2030_CLK_PCIET_CFG_CLK>, @@ -1455,33 +1479,32 @@ pcie_die1: pcie@0x74000000 { #size-cells = <2>; #interrupt-cells = <1>; reg = <0x0 0x74000000 0x0 0x4000000>, /* IP registers */ - <0x0 0x60000000 0x0 0x800000>, /* Configuration space */ - <0x0 0x70000000 0x0 0x100000>; + <0x0 0x60000000 0x0 0x800000>, /* Configuration space */ + <0x0 0x70000000 0x0 0x100000>; reg-names = "dbi", "config", "mgmt"; device_type = "pci"; /* dma-coherent; */ bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x60800000 0x0 0x60800000 0x0 0x800000>, /* I/O */ - <0x82000000 0x0 0x61000000 0x0 0x61000000 0x0 0xf000000>, /* mem */ - <0xc3000000 0xa0 0x00000000 0xa0 0x00000000 0x20 0x00000000>; /* mem prefetchable */ + ranges = <0x81000000 0x0 0x60800000 0x0 0x60800000 0x0 0x800000>, /* I/O */ + <0x82000000 0x0 0x61000000 0x0 0x61000000 0x0 0xf000000>, /* mem */ + <0xc3000000 0xa0 0x00000000 0xa0 0x00000000 0x2 0x00000000>; /* mem prefetchable */ - /* num-lanes = <0x4>; */ + num-lanes = <0x4>; /********************************** msi_ctrl_io[0~31] : 188~219 msi_ctrl_int : 220 **********************************/ - interrupts = <220>; - interrupt-names = "msi"; + interrupts = <220>, <179>, <180>, <181>, <182>, <183>, <184>, <185>, <186>; + interrupt-names = "msi", "inta", "intb", "intc", "intd"; interrupt-parent = <&plic1>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &plic1 179>, + <0x0 0x0 0x0 0x2 &plic1 180>, + <0x0 
0x0 0x0 0x3 &plic1 181>, + <0x0 0x0 0x0 0x4 &plic1 182>; iommus = <&smmu1 0xfe0000>; iommu-map = <0x0 &smmu1 0xff0000 0xffffff>; - #ifdef PLATFORM_HAPS - gen-x = <1>; - #else - gen-x = <3>; - #endif - lane-x = <4>; tbus = ; status = "disabled"; numa-node-id = <1>; @@ -1504,7 +1527,7 @@ d1_gmac0: ethernet@70400000 { clock-names = "app", "stmmaceth","tx"; resets = <&d1_reset HSPDMA_RST_CTRL SW_HSP_ETH0_ARSTN>; reset-names = "ethrst"; - // iommus = <&smmu1 WIN2030_SID_ETH0>; + iommus = <&smmu1 WIN2030_SID_ETH0>; tbus = ; dma-noncoherent; eswin,hsp_sp_csr = <&d1_hsp_sp_csr 0x1030 0x100 0x108>; @@ -1534,7 +1557,7 @@ d1_gmac1: ethernet@70410000 { clock-names = "app", "stmmaceth","tx"; resets = <&d1_reset HSPDMA_RST_CTRL SW_HSP_ETH1_ARSTN>; reset-names = "ethrst"; - // iommus = <&smmu1 WIN2030_SID_ETH1>; + iommus = <&smmu1 WIN2030_SID_ETH1>; tbus = ; dma-noncoherent; eswin,hsp_sp_csr = <&d1_hsp_sp_csr 0x1034 0x200 0x208>; @@ -1655,9 +1678,9 @@ d1_sdio1: mmc@0x70470000{ d1_ssi0: spi1@70810000 { compatible = "snps,eic770x-spi"; - #address-cells = <2>; - #size-cells = <2>; - reg = <0x0 0x70810000 0x0 0x5000>; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0 0x70810000 0x0 0x4000>; spi-max-frequency = <4800000>; clocks = <&d1_clock WIN2030_CLK_LSP_SSI0_PCLK>; clock-names = "clk"; @@ -1668,200 +1691,213 @@ d1_ssi0: spi1@70810000 { numa-node-id = <1>; status = "disabled"; dma-noncoherent; - d1_spi_demo: spi-demo@0 { - compatible = "eswin,demo-spi"; - reg = <0 0 0 0>; - spi-max-frequency = <4800000>; - }; }; - d1_video_output: display-subsystem { - compatible = "eswin,display-subsystem"; - ports = <&d1_dc_out>; - dma-noncoherent; - }; - - d1_dc: display_control@702c0000 { - compatible = "eswin,dc"; - reg = <0x0 0x702c0000 0x0 0x100>, <0x0 0x702c0180 0x0 0x700>, <0x0 0x702c1400 0x0 0x1400>; - interrupt-parent = <&plic1>; - interrupts = <238>; - - clocks = <&d1_clock WIN2030_CLK_VO_CFG_CLK>, - <&d1_clock WIN2030_CLK_VO_PIXEL_CLK>, - <&d1_clock WIN2030_CLK_VO_ACLK>, - <&d1_clock WIN2030_SPLL0_FOUT1>, - <&d1_clock WIN2030_MUX_U_VO_ACLK_ROOT_2MUX1_GFREE>; - clock-names = "cfg_clk", "pix_clk", "axi_clk", "spll0_fout1", "vo_mux"; + d1_ssi1: spi@70814000 { + compatible = "snps,eic770x-spi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0 0x70814000 0x0 0x4000>; + spi-max-frequency = <4800000>; + clocks = <&d1_clock WIN2030_CLK_LSP_SSI1_PCLK>; + clock-names = "clk"; + interrupt-parent = <&plic1>; + interrupts = <92>; + resets = <&d1_reset SSI_RST_CTRL SW_SSI_RST_N_1>; + reset-names = "spi"; + numa-node-id = <1>; + status = "disabled"; + dma-noncoherent; + }; + + d1_bootspi: spi@71800000 { + compatible = "eswin,bootspi"; + reg = <0x0 0x71800000 0x0 0x8000>, + <0x0 0x71828000 0x0 0x8000>, + <0x0 0x7c000000 0x0 0x8000>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&d1_clock WIN2030_CLK_CLK_BOOTSPI_CFG>, + <&d1_clock WIN2030_CLK_CLK_BOOTSPI>; + clock-names = "cfg_clk", "clk"; + resets = <&d1_reset BOOTSPI_RST_CTRL SW_BOOTSPI_RSTN>; + reset-names = "rst"; + spi-max-frequency = <4800000>; + reg-io-width = <4>; + status = "disabled"; + }; + + d1_video_output: display-subsystem@1 { + compatible = "eswin,display-subsystem"; + ports = <&d1_dc_out>; + numa-node-id = <1>; + dma-noncoherent; + }; + + d1_dc: display_control@702c0000 { + compatible = "eswin,dc"; + reg = <0x0 0x702c0000 0x0 0x100>, <0x0 0x702c0180 0x0 0x700>, <0x0 0x702c1400 0x0 0x1400>; + interrupt-parent = <&plic1>; + interrupts = <238>; + + clocks = <&d1_clock WIN2030_CLK_VO_CFG_CLK>, + <&d1_clock WIN2030_CLK_VO_PIXEL_CLK>, + 
<&d1_clock WIN2030_CLK_VO_ACLK>, + <&d1_clock WIN2030_SPLL0_FOUT1>, + <&d1_clock WIN2030_MUX_U_VO_ACLK_ROOT_2MUX1_GFREE>; + clock-names = "cfg_clk", "pix_clk", "axi_clk", "spll0_fout1", "vo_mux"; resets = <&d1_reset VO_RST_CTRL SW_VO_AXI_RSTN>, <&d1_reset VO_RST_CTRL SW_VO_CFG_RSTN>, <&d1_reset VO_RST_CTRL SW_VO_DC_RSTN>, <&d1_reset VO_RST_CTRL SW_VO_DC_PRSTN>; reset-names = "vo_arst", "vo_prst", "dc_arst", "dc_prst"; - dma-noncoherent; - - - d1_dc_out: port { - #address-cells = <1>; - #size-cells = <0>; - - d1_dc_out_dpi0: endpoint@0 { - reg = <0>; - remote-endpoint = <&d1_dsi_input0>; - }; - - d1_dc_out_dpi1: endpoint@1 { - reg = <1>; - remote-endpoint = <&d1_vd_input>; - }; - - d1_dc_out_hdmi: endpoint@2 { - reg = <2>; - remote-endpoint = <&d1_hdmi_in_dc>; - }; - }; - }; - - d1_virtual_display: es_wb { - compatible = "eswin,virtual_display"; - bpp = /bits/ 8 <8>; - - port { - d1_vd_input: endpoint { - remote-endpoint = <&d1_dc_out_dpi1>; - }; - }; - }; - - d1_dsi_output: dsi-output { - compatible = "eswin,dsi-encoder"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - /* input */ - port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - d1_dsi_input0: endpoint@0 { - reg = <0>; - remote-endpoint = <&d1_dc_out_dpi0>; - }; - }; - - /* output */ - port@1 { - reg = <1>; - d1_dsi_out:endpoint { - remote-endpoint = <&d1_mipi_dsi_in>; - }; - }; - }; - }; - - d1_dsi_controller: mipi_dsi@70270000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "eswin,dw-mipi-dsi"; - reg = <0x0 0x70270000 0x0 0x10000>; - clocks = <&d1_clock WIN2030_CLK_CLK_MIPI_TXESC>; - clock-names = "pclk"; - - /* - phys = <&dphy>; - phy-names = "dphy"; - */ - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - d1_mipi_dsi_in: endpoint { - remote-endpoint = <&d1_dsi_out>; - }; - }; - - port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - d1_mipi_dsi_out: endpoint { - remote-endpoint = <&d1_panel_in>; - }; - }; - }; - - panel@1 { - compatible = "eswin,generic-panel"; - reg = <0>; - dsi,format = <0>; //RGB888:0, RGB666:1, RGB565:3 - dsi,lanes = <4>; - - port { - d1_panel_in: endpoint { - remote-endpoint = <&d1_mipi_dsi_out>; - }; - }; - }; - }; - - d1_dc_test: dctest@702c0000 { - compatible = "eswin,dc"; - reg = <0x0 0x702c0000 0x0 0x10000>; - interrupt-parent = <&plic1>; - interrupts = <238>; - }; - - d1_dw_hdmi: hdmi@702a0000 { - compatible = "eswin,eswin-dw-hdmi"; - reg = <0x0 0x702a0000 0x0 0x20000>; - pinctrl-names = "default"; - //pinctrl-0 = <&hdmi_i2c_xfer>; - interrupt-parent = <&plic1>; - interrupts = <274>; - clocks = <&d1_clock WIN2030_CLK_VO_CFG_CLK>, <&d1_clock WIN2030_CLK_VO_CEC_CLK>, + dma-noncoherent; + numa-node-id = <1>; + + d1_dc_out: port { + #address-cells = <1>; + #size-cells = <0>; + + d1_dc_out_dsi: endpoint@0 { + reg = <0>; + remote-endpoint = <&d1_dsi_input>; + }; + + d1_dc_out_dpi1: endpoint@1 { + reg = <1>; + remote-endpoint = <&d1_vd_input>; + }; + + d1_dc_out_hdmi: endpoint@2 { + reg = <2>; + remote-endpoint = <&d1_hdmi_in_dc>; + }; + }; + }; + + d1_virtual_display: es_wb@1 { + compatible = "eswin,virtual_display"; + bpp = /bits/ 8 <8>; + numa-node-id = <1>; + + port { + d1_vd_input: endpoint { + remote-endpoint = <&d1_dc_out_dpi1>; + }; + }; + }; + + d1_dsi_output: dsi-output@1 { + compatible = "eswin,dsi-encoder"; + numa-node-id = <1>; + status = "disabled"; + }; + + d1_dsi_controller: mipi_dsi@70270000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = 
"eswin,dsi"; + reg = <0x0 0x70270000 0x0 0x10000>; + clocks = <&d1_clock WIN2030_CLK_CLK_MIPI_TXESC>; + clock-names = "pclk"; + resets = <&d1_reset VO_PHYRST_CTRL SW_VO_MIPI_PRSTN>; + reset-names ="phyrstn"; + numa-node-id = <1>; + + /* + phys = <&dphy>; + phy-names = "dphy"; + */ + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + d1_dsi_input: endpoint { + remote-endpoint = <&d1_dc_out_dsi>; + }; + }; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + d1_mipi_dsi_out: endpoint { + remote-endpoint = <&d1_panel_in>; + }; + }; + }; + + d1_dsi_panel:dsi_panel@1 { + compatible = "eswin,generic-panel"; + reg = <0>; + dsi,format = <0>; //RGB888:0, RGB666:1, RGB565:3 + dsi,lanes = <4>; + port { + d1_panel_in: endpoint { + remote-endpoint = <&d1_mipi_dsi_out>; + }; + }; + }; + }; + + d1_dc_test: dctest@702c0000 { + compatible = "eswin,dc"; + reg = <0x0 0x702c0000 0x0 0x10000>; + interrupt-parent = <&plic1>; + interrupts = <238>; + }; + + d1_dw_hdmi: hdmi@702a0000 { + compatible = "eswin,eswin-dw-hdmi"; + reg = <0x0 0x702a0000 0x0 0x20000>; + pinctrl-names = "default"; + //pinctrl-0 = <&hdmi_i2c_xfer>; + interrupt-parent = <&plic1>; + interrupts = <274>; + clocks = <&d1_clock WIN2030_CLK_VO_CFG_CLK>, <&d1_clock WIN2030_CLK_VO_CEC_CLK>, <&d1_clock WIN2030_CLK_VO_CR_CLK>; - clock-names = "iahb", "cec", "isfr"; - //power-domains = <&power WIN2030_PD_HDCP>; - reg-io-width = <4>; - ddc-i2c-scl-high-time-ns = <4708>; - ddc-i2c-scl-low-time-ns = <4916>; - #sound-dai-cells = <0>; - resets = <&d1_reset VO_PHYRST_CTRL SW_VO_HDMI_PRSTN>, - <&d1_reset VO_PHYRST_CTRL SW_HDMI_PHYCTRL_RSTN>, - <&d1_reset VO_PHYRST_CTRL SW_VO_HDMI_RSTN>; - reset-names = "prstn", "phyrstn", "rstn"; - - ports { - port@0 { - #address-cells = <1>; - #size-cells = <0>; - d1_hdmi_in_dc: endpoint@0 { - reg = <0>; - remote-endpoint = <&d1_dc_out_hdmi>; - }; - }; - }; - }; - - d1_dw_hdmi_hdcp2: hdmi-hdcp2@70290000 { - compatible = "eswin,dw-hdmi-hdcp2"; - reg = <0x0 0x70290000 0x0 0x10000>; - interrupt-parent = <&plic1>; - interrupts = <275>; - clocks = <&d1_clock WIN2030_CLK_VO_CFG_CLK>, - <&d1_clock WIN2030_CLK_VO_HDMI_IESMCLK>; - clock-names ="pclk_hdcp2", "hdcp2_clk_hdmi"; + clock-names = "iahb", "cec", "isfr"; + //power-domains = <&power WIN2030_PD_HDCP>; + reg-io-width = <4>; + ddc-i2c-scl-high-time-ns = <4708>; + ddc-i2c-scl-low-time-ns = <4916>; + #sound-dai-cells = <0>; + resets = <&d1_reset VO_PHYRST_CTRL SW_VO_HDMI_PRSTN>, + <&d1_reset VO_PHYRST_CTRL SW_HDMI_PHYCTRL_RSTN>, + <&d1_reset VO_PHYRST_CTRL SW_VO_HDMI_RSTN>; + reset-names = "prstn", "phyrstn", "rstn"; + numa-node-id = <1>; + + ports { + port@0 { + #address-cells = <1>; + #size-cells = <0>; + d1_hdmi_in_dc: endpoint@0 { + reg = <0>; + remote-endpoint = <&d1_dc_out_hdmi>; + }; + }; + }; + }; + + d1_dw_hdmi_hdcp2: hdmi-hdcp2@70290000 { + compatible = "eswin,dw-hdmi-hdcp2"; + reg = <0x0 0x70290000 0x0 0x10000>; + interrupt-parent = <&plic1>; + interrupts = <275>; + clocks = <&d1_clock WIN2030_CLK_VO_ACLK>, + <&d1_clock WIN2030_CLK_VO_HDMI_IESMCLK>; + clock-names ="aclk", "iesmclk"; dma-noncoherent; - }; + numa-node-id = <1>; + }; d1_wdt0: watchdog@0x70800000 { compatible = "snps,dw-wdt"; @@ -1911,34 +1947,72 @@ d1_wdt3: watchdog@0x7080c000 { status = "disabled"; }; - d1_gc820: g2d@70140000 { - compatible = "eswin,galcore_d1"; - clocks = <&d1_clock WIN2030_CLK_VC_ACLK>, - <&d1_clock WIN2030_CLK_VC_CFG_CLK>, - <&d1_clock WIN2030_CLK_G2D_CFG_CLK>, - <&d1_clock 
WIN2030_CLK_CLK_G2D_ST2>, - <&d1_clock WIN2030_CLK_G2D_CLK>, - <&d1_clock WIN2030_CLK_G2D_ACLK>, - <&d1_clock WIN2030_CLK_VC_MON_PCLK>; - clock-names = "vc_aclk", "vc_cfg", "g2d_cfg", "g2d_st2", "g2d_clk", "g2d_aclk","mon_pclk"; - resets = <&d1_reset VC_RST_CTRL SW_VC_AXI_RSTN>, - <&d1_reset VC_RST_CTRL SW_VC_CFG_RSTN>, - <&d1_reset VC_RST_CTRL SW_VC_MONCFG_RSTN>, - <&d1_reset G2D_RST_CTRL SW_G2D_CORE_RSTN>, - <&d1_reset G2D_RST_CTRL SW_G2D_CFG_RSTN>, - <&d1_reset G2D_RST_CTRL SW_G2D_AXI_RSTN>; - reset-names = "axi", "cfg", "moncfg", "g2d_core", "g2d_cfg", "g2d_axi"; - reg = <0 0x70140000 0 0x40000>, <0 0x70180000 0 0x40000>; - reg-names = "core_2d", "core_2d1"; - fe-apb-offset = <0x800>; - interrupt-parent = <&plic1>; - interrupts = <49>, <50>; - interrupt-names = "core_2d", "core_2d1"; - enable-mmu = <1>; - contiguous-size = <0xa00000>; - recovery = <0>; + d1_gc820: g2d@70140000 { + compatible = "eswin,galcore_d1"; + clocks = <&d1_clock WIN2030_CLK_VC_ACLK>, + <&d1_clock WIN2030_CLK_VC_CFG_CLK>, + <&d1_clock WIN2030_CLK_G2D_CFG_CLK>, + <&d1_clock WIN2030_CLK_CLK_G2D_ST2>, + <&d1_clock WIN2030_CLK_G2D_CLK>, + <&d1_clock WIN2030_CLK_G2D_ACLK>, + <&d1_clock WIN2030_CLK_VC_MON_PCLK>; + clock-names = "vc_aclk", "vc_cfg", "g2d_cfg", "g2d_st2", "g2d_clk", "g2d_aclk","mon_pclk"; + resets = <&d1_reset VC_RST_CTRL SW_VC_AXI_RSTN>, + <&d1_reset VC_RST_CTRL SW_VC_CFG_RSTN>, + <&d1_reset VC_RST_CTRL SW_VC_MONCFG_RSTN>, + <&d1_reset G2D_RST_CTRL SW_G2D_CORE_RSTN>, + <&d1_reset G2D_RST_CTRL SW_G2D_CFG_RSTN>, + <&d1_reset G2D_RST_CTRL SW_G2D_AXI_RSTN>; + reset-names = "axi", "cfg", "moncfg", "g2d_core", "g2d_cfg", "g2d_axi"; + reg = <0 0x70140000 0 0x40000>, <0 0x70180000 0 0x40000>; + reg-names = "core_2d", "core_2d1"; + fe-apb-offset = <0x800>; + interrupt-parent = <&plic1>; + interrupts = <49>, <50>; + interrupt-names = "core_2d", "core_2d1"; + enable-mmu = <1>; + contiguous-size = <0xa00000>; + recovery = <0>; + dma-noncoherent; + numa-node-id = <1>; + }; + + d1_sdhci_emmc: mmc@70450000 { + compatible = "eswin,emmc-sdhci-5.1"; + reg = <0x0 0x70450000 0x0 0x10000>; + interrupt-parent = <&plic1>; + interrupts = <79>; + assigned-clocks = <&d1_clock WIN2030_CLK_HSP_MSHC0_CORE_CLK>; + assigned-clock-rates = <200000000>; + clocks = <&d1_clock WIN2030_CLK_HSP_MSHC0_CORE_CLK>, <&d1_clock WIN2030_CLK_HSP_CFG_CLK>; + clock-names = "clk_xin", "clk_ahb"; + clock-output-names = "d1_emmc_cardclock"; + #clock-cells = <0>; + + resets = <&d1_reset HSPDMA_RST_CTRL SW_MSHC0_TXRX_RSTN>, + <&d1_reset HSPDMA_RST_CTRL SW_MSHC0_PHY_RSTN>, + <&d1_reset HSPDMA_RST_CTRL SW_HSP_EMMC_PRSTN>, + <&d1_reset HSPDMA_RST_CTRL SW_HSP_EMMC_ARSTN>; + reset-names = "txrx_rst", "phy_rst", "prstn", "arstn"; + + core-clk-reg = <0x71828160>; + disable-cqe-dcmd; + bus-width = <8>; + non-removable; + /* mmc-ddr-1_8v; */ + mmc-hs400-1_8v; + max-frequency = <200000000>; + /* sdhci-caps-mask = <0x0 0x3200000>; */ + /* smmu */ + #size-cells = <2>; + iommus = <&smmu1 WIN2030_SID_EMMC0>; + tbus = ; + dma-ranges = <0x0 0x00000000 0x0 0xc0000000 0x1 0x0>; + eswin,hsp_sp_csr = <&d1_hsp_sp_csr 0x1038>; + status = "disabled"; + numa-node-id = <1>; dma-noncoherent; - }; + }; d1_graphcard: graphcard { compatible = "audio-graph-card"; @@ -1948,8 +2022,10 @@ d1_usbdrd3_0: usb0@70480000 { compatible = "eswin,win2030-dwc3"; #address-cells = <2>; #size-cells = <2>; - clocks =<&d1_clock WIN2030_GATE_HSP_USB0_SUSPEND_CLK>; - clock-names = "suspend"; + clocks =<&d1_clock WIN2030_GATE_HSP_USB0_SUSPEND_CLK>, + <&d1_clock WIN2030_CLK_HSP_ACLK>, + <&d1_clock 
WIN2030_CLK_HSP_CFG_CLK>; + clock-names = "suspend","aclk", "cfg_clk"; eswin,hsp_sp_csr = <&d1_hsp_sp_csr 0x800 0x808 0x83c 0x840>; resets = <&d1_reset HSPDMA_RST_CTRL SW_USB0_VAUX_RSTN>; reset-names = "vaux"; @@ -1985,8 +2061,10 @@ d1_usbdrd3_1: usb1@70490000 { compatible = "eswin,win2030-dwc3"; #address-cells = <2>; #size-cells = <2>; - clocks =<&d1_clock WIN2030_GATE_HSP_USB1_SUSPEND_CLK>; - clock-names = "suspend"; + clocks =<&d1_clock WIN2030_GATE_HSP_USB1_SUSPEND_CLK>, + <&d1_clock WIN2030_CLK_HSP_ACLK>, + <&d1_clock WIN2030_CLK_HSP_CFG_CLK>; + clock-names = "suspend","aclk", "cfg_clk"; eswin,hsp_sp_csr = <&d1_hsp_sp_csr 0x900 0x908 0x93c 0x940>; resets = <&d1_reset HSPDMA_RST_CTRL SW_USB1_VAUX_RSTN>; reset-names = "vaux"; @@ -2021,36 +2099,36 @@ d1_usbdrd_dwc3_1: dwc3@70490000 { d1_vi_top_csr: vi_common_top_csr@0x71030000 { compatible = "esw,vi-common-csr", "syscon"; clocks = <&d1_clock WIN2030_CLK_VI_ACLK>, - <&d1_clock WIN2030_CLK_VI_CFG_CLK>, - <&d1_clock WIN2030_CLK_VI_DIG_ISP_CLK>, - <&d1_clock WIN2030_CLK_VI_DVP_CLK>, - <&d1_clock WIN2030_CLK_VI_PHY_CFG>, - <&d1_clock WIN2030_CLK_VI_PHY_TXCLKESC>, - <&d1_clock WIN2030_CLK_VI_SHUTTER_0>, - <&d1_clock WIN2030_CLK_VI_SHUTTER_1>, - <&d1_clock WIN2030_CLK_VI_SHUTTER_2>, - <&d1_clock WIN2030_CLK_VI_SHUTTER_3>, - <&d1_clock WIN2030_CLK_VI_SHUTTER_4>, - <&d1_clock WIN2030_CLK_VI_SHUTTER_5>, - <&d1_clock WIN2030_MUX_U_VI_ACLK_ROOT_2MUX1_GFREE>, - <&d1_clock WIN2030_MUX_U_VI_DVP_ROOT_2MUX1_GFREE>, - <&d1_clock WIN2030_MUX_U_VI_DIG_ISP_ROOT_2MUX1_GFREE>, - <&d1_clock WIN2030_SPLL0_FOUT1>, - <&d1_clock WIN2030_VPLL_FOUT1>; + <&d1_clock WIN2030_CLK_VI_CFG_CLK>, + <&d1_clock WIN2030_CLK_VI_DIG_ISP_CLK>, + <&d1_clock WIN2030_CLK_VI_DVP_CLK>, + <&d1_clock WIN2030_CLK_VI_PHY_CFG>, + <&d1_clock WIN2030_CLK_VI_PHY_TXCLKESC>, + <&d1_clock WIN2030_CLK_VI_SHUTTER_0>, + <&d1_clock WIN2030_CLK_VI_SHUTTER_1>, + <&d1_clock WIN2030_CLK_VI_SHUTTER_2>, + <&d1_clock WIN2030_CLK_VI_SHUTTER_3>, + <&d1_clock WIN2030_CLK_VI_SHUTTER_4>, + <&d1_clock WIN2030_CLK_VI_SHUTTER_5>, + <&d1_clock WIN2030_MUX_U_VI_ACLK_ROOT_2MUX1_GFREE>, + <&d1_clock WIN2030_MUX_U_VI_DVP_ROOT_2MUX1_GFREE>, + <&d1_clock WIN2030_MUX_U_VI_DIG_ISP_ROOT_2MUX1_GFREE>, + <&d1_clock WIN2030_SPLL0_FOUT1>, + <&d1_clock WIN2030_VPLL_FOUT1>; clock-names = "aclk", "cfg_clk", "isp_aclk", "dvp_clk", "phy_cfg", - "phy_escclk", "sht0", "sht1", "sht2", "sht3", "sht4", - "sht5", "aclk_mux", "dvp_mux", "isp_mux", "spll0_fout1", "vpll_fout1"; + "phy_escclk", "sht0", "sht1", "sht2", "sht3", "sht4", + "sht5", "aclk_mux", "dvp_mux", "isp_mux", "spll0_fout1", "vpll_fout1"; resets = <&d1_reset VI_RST_CTRL SW_VI_AXI_RSTN>, - <&d1_reset VI_RST_CTRL SW_VI_CFG_RSTN>, - <&d1_reset ISP0_RST_CTRL SW_VI_ISP0_RSTN>, - <&d1_reset ISP1_RST_CTRL SW_VI_ISP1_RSTN>, - <&d1_reset DVP_RST_CTRL SW_VI_DVP_RSTN>, - <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_0>, - <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_1>, - <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_2>, - <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_3>, - <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_4>, - <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_5>; + <&d1_reset VI_RST_CTRL SW_VI_CFG_RSTN>, + <&d1_reset ISP0_RST_CTRL SW_VI_ISP0_RSTN>, + <&d1_reset ISP1_RST_CTRL SW_VI_ISP1_RSTN>, + <&d1_reset DVP_RST_CTRL SW_VI_DVP_RSTN>, + <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_0>, + <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_1>, + <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_2>, + <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_3>, + <&d1_reset 
SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_4>, + <&d1_reset SHUTTER_RST_CTRL SW_VI_SHUTTER_RSTN_5>; reset-names = "axi", "cfg", "isp0", "isp1", "dvp", "sht0", "sht1", "sht2", "sht3", "sht4", "sht5"; id = <0>; @@ -2089,16 +2167,16 @@ d1_isp_1: isp@0x71010000 { d1_dewarp: dewarp@71020000 { compatible = "eswin,dewarp"; clocks = <&d1_clock WIN2030_CLK_VI_ACLK>, - <&d1_clock WIN2030_CLK_VI_CFG_CLK>, - <&d1_clock WIN2030_CLK_VI_DIG_DW_CLK>, - <&d1_clock WIN2030_MUX_U_VI_ACLK_ROOT_2MUX1_GFREE>, - <&d1_clock WIN2030_MUX_U_VI_DW_ROOT_2MUX1>, - <&d1_clock WIN2030_SPLL0_FOUT1>, - <&d1_clock WIN2030_VPLL_FOUT1>; + <&d1_clock WIN2030_CLK_VI_CFG_CLK>, + <&d1_clock WIN2030_CLK_VI_DIG_DW_CLK>, + <&d1_clock WIN2030_MUX_U_VI_ACLK_ROOT_2MUX1_GFREE>, + <&d1_clock WIN2030_MUX_U_VI_DW_ROOT_2MUX1>, + <&d1_clock WIN2030_SPLL0_FOUT1>, + <&d1_clock WIN2030_VPLL_FOUT1>; clock-names = "aclk", "cfg_clk", "dw_aclk", "aclk_mux", "dw_mux", "spll0_fout1", "vpll_fout1"; resets = <&d1_reset VI_RST_CTRL SW_VI_AXI_RSTN>, - <&d1_reset VI_RST_CTRL SW_VI_CFG_RSTN>, - <&d1_reset VI_RST_CTRL SW_VI_DWE_RSTN>; + <&d1_reset VI_RST_CTRL SW_VI_CFG_RSTN>, + <&d1_reset VI_RST_CTRL SW_VI_DWE_RSTN>; reset-names = "axi", "cfg", "dwe"; interrupt-parent = <&plic1>; @@ -2106,6 +2184,7 @@ d1_dewarp: dewarp@71020000 { #size-cells = <2>; dma-ranges = <0x0 0x20000000 0x0 0x80000000 0x0 0x40000000>; iommus = <&smmu1 WIN2030_SID_DW>; + tbus = ; eswin,vi_top_csr = <&d1_vi_top_csr 0x1008>; reg = <0x0 0x71020000 0x0 0xc00>, <0x0 0x71020c00 0x0 0x120>; numa-node-id = <1>; @@ -2267,5 +2346,37 @@ d1_numa_sample:numa_sample@1 { numa-node-id = <1>; dma-noncoherent; }; + + d1_ddr0: ddr-controller@72300000 { + compatible = "eswin,ddrc-1.20a"; + interrupt-parent = <&plic1>; + interrupts = <10>; + interrupt-names = "ddr-ecc"; + reg = <0x0 0x72300000 0x0 0x40000>; + ctrl-id = <0>; + numa-node-id = <1>; + status = "disabled"; + }; + + d1_ddr1: ddr-controller@72380000 { + compatible = "eswin,ddrc-1.20a"; + interrupt-parent = <&plic1>; + interrupts = <299>; + interrupt-names = "ddr-ecc"; + reg = <0x0 0x72380000 0x0 0x40000>; + ctrl-id = <1>; + numa-node-id = <1>; + status = "disabled"; + }; + + d1_d2d: d2d-unit@72100000 { + compatible = "eswin,eic7x-d2d"; + interrupt-parent = <&plic1>; + interrupts = <287>, <288>; + reg = <0x0 0x72100000 0x0 0x50000>; + reg-names = "control"; + numa-node-id = <1>; + status = "disabled"; + }; }; diff --git a/arch/riscv/boot/dts/eswin/eswin-win2030-platform.dtsi b/arch/riscv/boot/dts/eswin/eswin-win2030-platform.dtsi index 6d8817dfc2b6..0754fce5b9d7 100644 --- a/arch/riscv/boot/dts/eswin/eswin-win2030-platform.dtsi +++ b/arch/riscv/boot/dts/eswin/eswin-win2030-platform.dtsi @@ -31,7 +31,7 @@ BIT1 BIT0 #define MEMMODE_FLAT #define AVAILABLE_DIE 0 -#define CHIPLET_AND_DIE ((CHIPLET_NUM << 1) | AVAILABLE_DIE) +//#define CHIPLET_AND_DIE ((CHIPLET_NUM << 1) | AVAILABLE_DIE) /* VI related Macros */ #define MIPI_CSI2_IMX290_ENBALE diff --git a/arch/riscv/boot/dts/eswin/eswin-win2030.dts b/arch/riscv/boot/dts/eswin/eswin-win2030.dts index 4f8c9f146ce3..8d9e0e76c350 100644 --- a/arch/riscv/boot/dts/eswin/eswin-win2030.dts +++ b/arch/riscv/boot/dts/eswin/eswin-win2030.dts @@ -1131,7 +1131,7 @@ &d1_dsp3 { status = "disabled"; }; -&pcie_die1 { +&d0_pcie { status = "disabled"; }; @@ -1246,7 +1246,7 @@ &d1_timer3 { status = "disabled"; }; -&die1_rtc { +&d1_rtc { status = "disabled"; }; diff --git a/arch/riscv/configs/eic7700_defconfig b/arch/riscv/configs/eic7700_defconfig index ce5fb86d2d9b..cb9f99ea41b6 100644 --- a/arch/riscv/configs/eic7700_defconfig 
+++ b/arch/riscv/configs/eic7700_defconfig @@ -430,6 +430,8 @@ CONFIG_RTL8192SE=m CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_GOODIX=y CONFIG_SERIO_LIBPS2=y # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_8250=y @@ -455,7 +457,7 @@ CONFIG_SPI_DW_MMIO=y CONFIG_SPI_ESWIN_BOOTSPI=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_PINCTRL=y -CONFIG_PINCTRL_EIC7700=y +CONFIG_PINCTRL_EIC7X=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_DWAPB=y diff --git a/arch/riscv/configs/eic7702_defconfig b/arch/riscv/configs/eic7702_defconfig new file mode 100644 index 000000000000..830481fffc7e --- /dev/null +++ b/arch/riscv/configs/eic7702_defconfig @@ -0,0 +1,844 @@ +CONFIG_LOCALVERSION="-eic7x" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_WATCH_QUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_MISC=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_EXPERT=y +# CONFIG_SYSFS_SYSCALL is not set +CONFIG_PERF_EVENTS=y +CONFIG_SOC_SIFIVE=y +CONFIG_SOC_VIRT=y +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=1 +CONFIG_RISCV_SBI_V01=y +# CONFIG_RISCV_BOOT_SPINWAIT is not set +CONFIG_CMDLINE="earlycon=sbi console=ttyS0,115200n8 clk_ignore_unused cma_pernuma=0x20000000 disable_bypass=false firmware_class.path=/lib/firmware/eic7x/" +CONFIG_CMDLINE_EXTEND=y +# CONFIG_SUSPEND is not set +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPUFREQ_DT=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_JUMP_LABEL=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BINFMT_MISC=m +CONFIG_CMA=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPVTI=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=y +CONFIG_DEFAULT_BBR=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_IPV6_IOAM6_LWTUNNEL=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SIP=m 
+CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m 
+CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_BRIDGE_CFM=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_LLC2=m +CONFIG_NET_SCHED=y +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_ACT=y +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_MCTP=y +CONFIG_CFG80211=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=y +CONFIG_RFKILL=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM_PERFORMANCE=y +CONFIG_PCIE_PTM=y +CONFIG_PCIE_ESWIN=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_MTD=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=2 +CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_EEPROM_AT24=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_EMC=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_AHCI_ESWIN=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_RAID=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_MULTIPATH_HST=m +CONFIG_DM_MULTIPATH_IOA=m +CONFIG_DM_DELAY=m +CONFIG_DM_VERITY=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# 
CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_ENGLEDER is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_LITEX is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MICROSOFT is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_SELFTESTS=y +CONFIG_DWMAC_WIN2030=y +# CONFIG_DWMAC_GENERIC is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VERTEXCOM is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_MICROCHIP is not set +# CONFIG_WLAN_VENDOR_PURELIFI is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +CONFIG_RTL8192SE=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_SILABS is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_GOODIX=y +CONFIG_SERIO_LIBPS2=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y +CONFIG_HVC_RISCV_SBI=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_SLAVE_EEPROM=y +CONFIG_SPI=y +CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_DMA=y +CONFIG_SPI_DW_MMIO=y +CONFIG_SPI_ESWIN_BOOTSPI=y +# CONFIG_PTP_1588_CLOCK is not set +CONFIG_PINCTRL=y +CONFIG_PINCTRL_EIC7X=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_DWAPB=y +CONFIG_SENSORS_ESWIN_FAN_CONTROL=y +CONFIG_SENSORS_ESWIN_PVT=y +CONFIG_SENSORS_INA2XX=y +CONFIG_SENSORS_TMP102=y +CONFIG_SENSORS_PAC1934=y +CONFIG_WATCHDOG=y +CONFIG_DW_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_ES5340=y +CONFIG_REGULATOR_PCA9450=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_SUPPORT_FILTER=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_PWC=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_V4L_MEM2MEM_DRIVERS=y 
+CONFIG_VIDEO_XILINX=y +CONFIG_CXD2880_SPI_DRV=m +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m +CONFIG_DVB_DRXK=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_TDA18271C2DD=m +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MT312=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB9000=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_EC100=m +CONFIG_DVB_L64781=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_S5H1432=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_CXD2880=m +CONFIG_DVB_STV0297=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_VES1820=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_MXL692=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_S921=m +CONFIG_DVB_MN88443X=m +CONFIG_DVB_TC90522=m +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m +CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBH29=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA9950=y +CONFIG_DRM_RADEON=m +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_AMD_ACP=y +CONFIG_DRM_AMD_DC_SI=y +CONFIG_DRM_NOUVEAU=y +CONFIG_DRM_DISPLAY_CONNECTOR=y +CONFIG_DRM_SIMPLE_BRIDGE=y +CONFIG_DRM_TOSHIBA_TC358768=m 
+CONFIG_DRM_SIMPLEDRM=m +CONFIG_DRM_ESWIN=y +CONFIG_ESWIN_MMU=y +CONFIG_ESWIN_DW_HDMI=y +CONFIG_ESWIN_VIRTUAL_DISPLAY=y +CONFIG_DW_HDMI_I2S_AUDIO=y +CONFIG_DW_HDMI_CEC=y +CONFIG_DRM_IMG_VOLCANIC=m +CONFIG_DRM_LEGACY=y +CONFIG_FB=y +CONFIG_FB_RADEON=m +CONFIG_FB_SIMPLE=m +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_PREALLOC_SIZE=2048 +CONFIG_SND_SOC=y +CONFIG_SND_SOC_SOF_TOPLEVEL=y +CONFIG_SND_SOC_SOF_OF=y +CONFIG_SND_ESWIN_DW_I2S=y +CONFIG_ESWIN_SND_SOC_CODECS=y +CONFIG_ESWIN_SND_ES8388_CODEC=y +CONFIG_SND_SIMPLE_CARD=y +CONFIG_SND_AUDIO_GRAPH_CARD=y +CONFIG_USB_ULPI_BUS=y +CONFIG_USB_CONN_GPIO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +# CONFIG_USB_DEFAULT_PERSIST is not set +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_ACM=y +CONFIG_USB_WDM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_ULPI=y +CONFIG_USB_DWC2=y +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_CP210X=y +CONFIG_USB_SERIAL_FTDI_SIO=y +CONFIG_USB_SERIAL_KEYSPAN=y +CONFIG_USB_SERIAL_PL2303=y +CONFIG_USB_SERIAL_OTI6858=y +CONFIG_USB_SERIAL_QUALCOMM=y +CONFIG_USB_SERIAL_OPTION=y +CONFIG_USB_TEST=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_LB_SS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_ZERO=m +CONFIG_USB_MASS_STORAGE=m +CONFIG_TYPEC=y +CONFIG_TYPEC_FUSB303B=y +CONFIG_MMC=y +CONFIG_MMC_TEST=y +CONFIG_MMC_DEBUG=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_OF_ESWIN=y +CONFIG_MMC_SDHCI_OF_SDIO_ESWIN=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_ONESHOT=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_EDAC=y +CONFIG_EDAC_ESWIN=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PCF8563=y +CONFIG_RTC_DRV_ESWIN=y +CONFIG_DMADEVICES=y +CONFIG_DW_AXI_DMAC=y +CONFIG_DMATEST=m +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_SYSTEM=y +CONFIG_DMABUF_HEAPS_CMA=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_STAGING_MEDIA=y +CONFIG_VIDEO_ESWIN=y +CONFIG_VIDEO_ESWIN_VDEC=y +CONFIG_VIDEO_ESWIN_VENC=y +CONFIG_VIDEO_ESWIN_HAE=y +CONFIG_VIDEO_ESWIN_DEWARP=y +CONFIG_VIDEO_ESWIN_MEDIA_EXT_DRIVER=y +CONFIG_COMMON_CLK_WIN2030=y +CONFIG_TIMER_ESWIN=y +CONFIG_MAILBOX=y +CONFIG_ESWIN_MBOX=y +CONFIG_ESWIN_LPCPU=m +CONFIG_ARM_SMMU_V3=y +CONFIG_RPMSG_VIRTIO=y +CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY=y +CONFIG_ARCH_ESWIN_EIC7702_SOC=y +CONFIG_ESWIN_DSP=m +CONFIG_ESWIN_NPU=m +CONFIG_EIC7X_D2D=y +CONFIG_EXTCON=y +CONFIG_MEMORY=y +CONFIG_ESWIN_BUDDY=y +CONFIG_ESWIN_PROC=y +CONFIG_ESWIN_RSVMEM_HEAP=y +CONFIG_ESWIN_MMZ_VB=y +CONFIG_ESWIN_DEV_DMA_BUF=y +CONFIG_ESWIN_IOMMU_RSV=y +CONFIG_PWM=y +CONFIG_PWM_ESWIN=y +CONFIG_RESET_ESWIN_WIN2030=y +CONFIG_INTERCONNECT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FANOTIFY=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y 
+CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_NTFS_RW=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=m +CONFIG_SECURITY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_DEFAULT_SECURITY_APPARMOR=y +CONFIG_LSM="landlock,lockdown,yama,integrity,apparmor" +CONFIG_CRYPTO_CBC=m +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_XTS is not set +CONFIG_CRYPTO_HMAC=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_DEV_VIRTIO=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=y +CONFIG_XZ_DEC=y +CONFIG_DMA_CMA=y +CONFIG_DMA_NUMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 +CONFIG_CONSOLE_LOGLEVEL_QUIET=15 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7 +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_VM_PGFLAGS=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_WQ_WATCHDOG=y +CONFIG_DEBUG_TIMEKEEPING=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_DEBUG_PLIST=y +CONFIG_DEBUG_SG=y +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_EQS_DEBUG=y +# CONFIG_FTRACE is not set +# CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_MEMTEST=y diff --git a/arch/riscv/configs/win2030_defconfig b/arch/riscv/configs/win2030_defconfig index cc2484e64c2c..c2c4f5a620b7 100644 --- a/arch/riscv/configs/win2030_defconfig +++ b/arch/riscv/configs/win2030_defconfig @@ -455,7 +455,7 @@ CONFIG_SPI_DW_MMIO=y CONFIG_SPI_ESWIN_BOOTSPI=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_PINCTRL=y -CONFIG_PINCTRL_EIC7700=y +CONFIG_PINCTRL_EIC7X=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_DWAPB=y diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 3245bb525212..3f8fc5dc772f 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -283,7 +283,6 @@ static void __init setup_bootmem(void) if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); - dma_contiguous_reserve(dma32_phys_limit); if (IS_ENABLED(CONFIG_64BIT)) hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); } @@ -1518,6 +1517,14 @@ void __init misc_mem_init(void) local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END); #endif zone_sizes_init(); + + /* + * Reserve the CMA area after dma32_phys_limit was initialised. + * It must be called after arch_numa_init() which calls numa_init() to + * initialize node_online_map that gets used by dma_contiguous_reserve() + */ + dma_contiguous_reserve(dma32_phys_limit); + reserve_crashkernel(); memblock_dump_all(); } diff --git a/drivers/clk/eswin/clk.c b/drivers/clk/eswin/clk.c index c035da206e7b..3c3ba2be4bfa 100755 --- a/drivers/clk/eswin/clk.c +++ b/drivers/clk/eswin/clk.c @@ -379,6 +379,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, } break; case CLK_FREQ_1600M: + case CLK_FREQ_1500M: cpu_target_volatge = true == cpu_no_boost_1_6ghz ? 
VOLTAGE_0_8V : VOLTAGE_0_9V; ret = eswin_clk_set_cpu_volatge(clk->cpu_voltage_gpio, cpu_target_volatge); if (ret) { diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 6700a9b44f0f..78fbfd5d8f8c 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -6,6 +6,25 @@ * * Author: Eugeniy Paltsev */ +/***************************************************************************** + * ESWIN dma driver + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Authors: Eswin Driver team + */ #include #include @@ -126,37 +145,49 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan, static inline void axi_dma_disable(struct axi_dma_chip *chip) { u32 val; + unsigned long flags; + spin_lock_irqsave(&chip->lock, flags); val = axi_dma_ioread32(chip, DMAC_CFG); val &= ~DMAC_EN_MASK; axi_dma_iowrite32(chip, DMAC_CFG, val); + spin_unlock_irqrestore(&chip->lock, flags); } static inline void axi_dma_enable(struct axi_dma_chip *chip) { u32 val; + unsigned long flags; + spin_lock_irqsave(&chip->lock, flags); val = axi_dma_ioread32(chip, DMAC_CFG); val |= DMAC_EN_MASK; axi_dma_iowrite32(chip, DMAC_CFG, val); + spin_unlock_irqrestore(&chip->lock, flags); } static inline void axi_dma_irq_disable(struct axi_dma_chip *chip) { u32 val; + unsigned long flags; + spin_lock_irqsave(&chip->lock, flags); val = axi_dma_ioread32(chip, DMAC_CFG); val &= ~INT_EN_MASK; axi_dma_iowrite32(chip, DMAC_CFG, val); + spin_unlock_irqrestore(&chip->lock, flags); } static inline void axi_dma_irq_enable(struct axi_dma_chip *chip) { u32 val; + unsigned long flags; + spin_lock_irqsave(&chip->lock, flags); val = axi_dma_ioread32(chip, DMAC_CFG); val |= INT_EN_MASK; axi_dma_iowrite32(chip, DMAC_CFG, val); + spin_unlock_irqrestore(&chip->lock, flags); } static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask) @@ -1454,6 +1485,93 @@ static int parse_device_properties(struct axi_dma_chip *chip) return 0; } +static int dw_axi_dma_ctrl_show(struct seq_file *s, void *v) +{ + struct axi_dma_chip *chip = s->private; + u32 i = 0, j = 0, val = 0; + u32 chan_num; + + if (of_node_name_prefix(chip->dev->of_node, "dma-controller-aon")) { + chan_num = 16; + } else { + chan_num = 11; + } + + seq_puts(s, "\n-------------------dma common reg------------------------"); + + for (i = 0; i < 15; i++) { + if (i * 8 == 0x38) + continue; + + val = axi_dma_ioread32(chip, i * 8); + seq_printf(s, "\n>>>dma reg 0x%02x:0x%02x", i*8, val); + } + seq_puts(s, "\n-------------------------------------------------------\n"); + + seq_puts(s, "\n--------------------dma chan reg-------------------------"); + for (i = 0; i < chan_num; i++) { + seq_printf(s, "\n>>>dma chan[%d]reg:", i); + for (j=0; j<19; j++) { + if (j * 8 == 0x48) + continue; + val = axi_chan_ioread32(&chip->dw->chan[i], j*8); + seq_printf(s, " %02x:%02x", j*8, 
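/*
 * Illustrative sketch: the hunks above wrap every read-modify-write of the
 * shared DMAC_CFG register in chip->lock, since the enable/disable and
 * irq_enable/irq_disable helpers can now race from different contexts.
 * Assuming the driver's own axi_dma_ioread32()/axi_dma_iowrite32()
 * accessors, the guarded pattern is:
 *
 *     unsigned long flags;
 *     u32 val;
 *
 *     spin_lock_irqsave(&chip->lock, flags);
 *     val = axi_dma_ioread32(chip, DMAC_CFG);
 *     val |= DMAC_EN_MASK;             (or val &= ~DMAC_EN_MASK to disable)
 *     axi_dma_iowrite32(chip, DMAC_CFG, val);
 *     spin_unlock_irqrestore(&chip->lock, flags);
 */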
val); + } + } + seq_puts(s, "\n------------------------------------------------------\n"); + return 0; +} + +static int dw_axi_dma_ctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, dw_axi_dma_ctrl_show, inode->i_private); +} + +static ssize_t dw_axi_dma_ctrl_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct axi_dma_chip *chip = ((struct seq_file *)file->private_data)->private; + u32 reg, val; + char kbuf[25]; + + if (copy_from_user(kbuf, buf, count)) + return -EFAULT; + if (sscanf(kbuf, "%x%x", ®, &val) == -1) + return -EFAULT; + if ((reg < 0) || (reg > 0x78)) { + dev_err(chip->dev, "it is no a dma register\n"); + return count; + } + dev_info(chip->dev, "write dma common reg=0x%x, val=0x%x\n", reg, val); + axi_dma_iowrite32(chip, reg, val); + return count; +} + +static const struct file_operations dw_axi_dma_status_fops = { + .owner = THIS_MODULE, + .open = dw_axi_dma_ctrl_open, + .read = seq_read, + .write = dw_axi_dma_ctrl_write, + .llseek = seq_lseek, + .release = single_release, +}; + +void dw_axi_dma_register_debugfs(struct axi_dma_chip *chip) +{ + if (of_node_name_prefix(chip->dev->of_node, "dma-controller-aon")) { + chip->debugfs_dir = debugfs_create_dir("dw-axi-aon-dma", NULL); + } else { + chip->debugfs_dir = debugfs_create_dir("dw-axi-hsp-dma", NULL); + } + if (IS_ERR(chip->debugfs_dir)) { + dev_err(chip->dev, "failed to create debugfs dir!\n"); + return; + } + + debugfs_create_file("ctrl", 0400, chip->debugfs_dir, chip, + &dw_axi_dma_status_fops); +} + static int dw_probe(struct platform_device *pdev) { struct axi_dma_chip *chip; @@ -1488,6 +1606,8 @@ static int dw_probe(struct platform_device *pdev) if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); + spin_lock_init(&chip->lock); + flags = (uintptr_t)of_device_get_match_data(&pdev->dev); if (flags & AXI_DMA_FLAG_HAS_APB_REGS) { chip->apb_regs = devm_platform_ioremap_resource(pdev, 1); @@ -1631,6 +1751,8 @@ static int dw_probe(struct platform_device *pdev) dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n", dw->hdata->nr_channels); + dw_axi_dma_register_debugfs(chip); + return 0; err_pm_disable: @@ -1675,6 +1797,8 @@ static int dw_remove(struct platform_device *pdev) win2030_tbu_power(chip->dev, false); } + debugfs_remove_recursive(chip->debugfs_dir); + return 0; } diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index cc09abf15aad..3bd0e65ed010 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -6,6 +6,25 @@ * * Author: Eugeniy Paltsev */ +/***************************************************************************** + * ESWIN dma driver + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * Authors: Eswin Driver team + */ #ifndef _AXI_DMA_PLATFORM_H #define _AXI_DMA_PLATFORM_H @@ -71,6 +90,8 @@ struct axi_dma_chip { struct clk *core_clk; struct clk *cfgr_clk; struct dw_axi_dma *dw; + struct dentry *debugfs_dir; + spinlock_t lock; }; /* LLI == Linked List Item */ diff --git a/drivers/edac/eswin_edac.c b/drivers/edac/eswin_edac.c index 82d75406cc48..508010cf4434 100644 --- a/drivers/edac/eswin_edac.c +++ b/drivers/edac/eswin_edac.c @@ -1337,13 +1337,13 @@ static int mc_probe(struct platform_device *pdev) goto free_edac_mc; } - rc = edac_mc_add_mc(mci); - if (rc) - { - edac_printk(KERN_ERR, EDAC_MC, - "Failed to register with EDAC core\n"); - goto free_edac_mc; - } + // rc = edac_mc_add_mc(mci); + // if (rc) + // { + // edac_printk(KERN_ERR, EDAC_MC, + // "Failed to register with EDAC core\n"); + // goto free_edac_mc; + // } #ifdef CONFIG_EDAC_DEBUG if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) @@ -1366,7 +1366,7 @@ static int mc_probe(struct platform_device *pdev) */ if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)) writel(0x0, baseaddr + ECC_CTRL_OFST); - + edac_printk(KERN_INFO, EDAC_MC, "%s init succ\n",pdev->name); return rc; free_edac_mc: diff --git a/drivers/gpu/drm/eswin/Kconfig b/drivers/gpu/drm/eswin/Kconfig index 77733cd77352..7edd441f446d 100644 --- a/drivers/gpu/drm/eswin/Kconfig +++ b/drivers/gpu/drm/eswin/Kconfig @@ -73,13 +73,6 @@ config DW_HDMI_HDCP2 Support the HDCP2 interface which is part of the Synopsys Designware HDMI block. -config DW_HDMI_HDCP1X_ENABLED - bool "Synopsis Designware HDCP1X enabled" - depends on DW_HDMI_HDCP - help - Enable/Disable HDCP1X of the Synopsys Designware HDMI block. - - config ESWIN_MIPI_DSI tristate "Eswin mipi dsi and 1080p panel driver" select DRM_ESWIN diff --git a/drivers/gpu/drm/eswin/Makefile b/drivers/gpu/drm/eswin/Makefile index 6d4043ca833c..09cb7092e2e5 100644 --- a/drivers/gpu/drm/eswin/Makefile +++ b/drivers/gpu/drm/eswin/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +ccflags-y += -I$(srctree)/drivers/memory/eswin/es_dev_buf/include/linux + es_drm-objs := es_dc_hw.o \ es_dc.o \ es_crtc.o \ diff --git a/drivers/gpu/drm/eswin/dw-hdmi.c b/drivers/gpu/drm/eswin/dw-hdmi.c index 8c73453d2beb..c301d589a508 100644 --- a/drivers/gpu/drm/eswin/dw-hdmi.c +++ b/drivers/gpu/drm/eswin/dw-hdmi.c @@ -71,7 +71,7 @@ #define HDMI_CEC_CLK 32768 -static bool hpd_flag = false; +static bool hpd_flag[2] = {0, 0}; static const u16 csc_coeff_default[3][4] = { { 0x2000, 0x0000, 0x0000, 0x0000 }, @@ -194,6 +194,7 @@ struct dw_hdmi { const struct dw_hdmi_plat_data *plat_data; struct dw_hdcp *hdcp; int vic; + int numa_id; u8 edid[HDMI_EDID_LEN]; @@ -339,7 +340,7 @@ static void repo_hpd_event(struct work_struct *p_work) if (hdmi->hdcp && hdmi->hdcp->hdcp2 && hdmi->hdcp->hdcp2->enable && (tv_hdmi_hdcp2_support(hdmi)) == 1) { - hdmi->hdcp->hdcp2->start(); + hdmi->hdcp->hdcp2->start(hdmi->hdcp->hdcp2); } else { hdmi_tx_hdcp_config(hdmi, &hdmi->previous_mode); } @@ -364,7 +365,7 @@ static bool check_hdmi_irq(struct dw_hdmi *hdmi, int intr_stat, int phy_int_pol) hdmi->hpd_state = false; if (hdmi->hdcp && hdmi->hdcp->hdcp2 && hdmi->hdcp->hdcp2->enable) { - hdmi->hdcp->hdcp2->stop(); + hdmi->hdcp->hdcp2->stop(hdmi->hdcp->hdcp2); } if (hdmi->hdcp && hdmi->hdcp->hdcp_stop) { hdmi->hdcp->hdcp_stop(hdmi->hdcp); @@ -2636,7 +2637,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, hdmi_video_packetize(hdmi); hdmi_video_csc(hdmi); hdmi_video_sample(hdmi); - if (!hpd_flag) { + if (!hpd_flag[hdmi->numa_id]) { 
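/*
 * Illustrative sketch: the former single hpd_flag/g_hdcp globals become
 * two-entry arrays indexed by the die the controller belongs to. The index
 * comes from the optional "numa-node-id" device-tree property read in
 * dw_hdmi_probe(), assumed to default to 0 when the property is absent:
 *
 *     int numa_id = 0;
 *
 *     if (of_property_read_u32(pdev->dev.of_node, "numa-node-id", &numa_id))
 *             numa_id = 0;                    (property absent: die 0)
 *     hdmi->numa_id = numa_id;
 *     hpd_flag[hdmi->numa_id] = true;         (per-die hot-plug state)
 */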
hdmi_tx_hdcp_config(hdmi, mode); } dw_hdmi_clear_overflow(hdmi); @@ -3576,7 +3577,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) */ if (intr_stat & (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { - hpd_flag = true; + hpd_flag[hdmi->numa_id] = true; dw_hdmi_setup_rx_sense(hdmi, phy_stat & HDMI_PHY_HPD, phy_stat & HDMI_PHY_RX_SENSE); @@ -3596,7 +3597,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) HDMI_IH_MUTE_PHY_STAT0); } - hpd_flag = false; + hpd_flag[hdmi->numa_id] = false; hdcp_stat = hdmi_readb(hdmi, HDMI_A_APIINTSTAT); if (hdcp_stat) { @@ -3985,7 +3986,9 @@ static const struct file_operations dw_hdmi_phy_fops = { static void dw_hdmi_register_debugfs(struct device *dev, struct dw_hdmi *hdmi) { - hdmi->debugfs_dir = debugfs_create_dir("dw-hdmi", NULL); + char name[15]; + sprintf(name, "dw-hdmi-%d", hdmi->numa_id); + hdmi->debugfs_dir = debugfs_create_dir(name, NULL); if (IS_ERR(hdmi->debugfs_dir)) { dev_err(dev, "failed to create debugfs dir!\n"); return; @@ -4011,6 +4014,7 @@ static void dw_hdmi_register_hdcp(struct device *dev, struct dw_hdmi *hdmi, .regs = hdmi->regs, .reg_io_width = val, .enable = hdcp1x_enable, + .numa_id = hdmi->numa_id, }; struct platform_device_info hdcp_device_info = { .parent = dev, @@ -4158,6 +4162,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, u8 config0; u8 config3; bool hdcp1x_enable = false; + int numa_id = 0; hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) @@ -4181,6 +4186,12 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, if (ret < 0) return ERR_PTR(ret); + if(of_property_read_u32(pdev->dev.of_node, "numa-node-id", &numa_id)) { + numa_id = 0; + } + dev_info(&pdev->dev, "numa_id=%d\n", numa_id); + hdmi->numa_id = numa_id; + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); if (ddc_node) { hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node); diff --git a/drivers/gpu/drm/eswin/dw_hdmi_hdcp.c b/drivers/gpu/drm/eswin/dw_hdmi_hdcp.c index e342e766cb25..12933da6568b 100644 --- a/drivers/gpu/drm/eswin/dw_hdmi_hdcp.c +++ b/drivers/gpu/drm/eswin/dw_hdmi_hdcp.c @@ -132,7 +132,7 @@ enum { HDMI_A_KSVMEMCTRL_KSV_SHA1_STATUS = 0x08, }; -static struct dw_hdcp *g_hdcp; +static struct dw_hdcp *g_hdcp[2]; static int trytimes = 0; static void hdcp_modb(struct dw_hdcp *hdcp, u8 data, u8 mask, unsigned int reg) @@ -413,55 +413,55 @@ static int dw_hdmi_hdcp1x_stop(struct dw_hdcp *hdcp) void dw_hdmi_hdcp2_init(struct dw_hdcp2 *hdcp2) { - if (g_hdcp) - g_hdcp->hdcp2 = hdcp2; + if (g_hdcp[hdcp2->numa_id]) + g_hdcp[hdcp2->numa_id]->hdcp2 = hdcp2; } EXPORT_SYMBOL_GPL(dw_hdmi_hdcp2_init); -void dw_hdmi_hdcp2_remove(void) +void dw_hdmi_hdcp2_remove(struct dw_hdcp2 *hdcp2) { - printk("func: %s; line: %d\n", __func__, __LINE__); - if (g_hdcp->hdcp2) - g_hdcp->hdcp2->stop(); - g_hdcp->hdcp2 = NULL; + dev_info(hdcp2->dev, "%s numa id:%d\n", __func__, hdcp2->numa_id); + if (g_hdcp[hdcp2->numa_id]->hdcp2) + g_hdcp[hdcp2->numa_id]->hdcp2->stop(hdcp2); + g_hdcp[hdcp2->numa_id]->hdcp2 = NULL; } EXPORT_SYMBOL_GPL(dw_hdmi_hdcp2_remove); -void dw_hdmi_hdcp2_start(int enable) +void dw_hdmi_hdcp2_start(int enable, int numa_id) { int val; - if (!(g_hdcp->hdcp2)) + if (!(g_hdcp[numa_id]->hdcp2)) return; - dev_dbg(g_hdcp->dev, "%s enable = %d\n", __func__, enable); + dev_dbg(g_hdcp[numa_id]->dev, "%s enable = %d\n", __func__, enable); if (enable == 0) { - hdcp_modb(g_hdcp, + hdcp_modb(g_hdcp[numa_id], HDMI_HDCP2_OVR_ENABLE | HDMI_HDCP2_FORCE_DISABLE, HDMI_HDCP2_OVR_EN_MASK | HDMI_HDCP2_FORCE_MASK, HDMI_HDCP2REG_CTRL); - 
hdcp_modb(g_hdcp, HDMI_MC_CLKDIS_HDCPCLK_DISABLE, + hdcp_modb(g_hdcp[numa_id], HDMI_MC_CLKDIS_HDCPCLK_DISABLE, HDMI_MC_CLKDIS_HDCPCLK_MASK, HDMI_MC_CLKDIS); } else if (enable == 1) { - hdcp_modb(g_hdcp, HDMI_MC_CLKDIS_HDCPCLK_ENABLE, + hdcp_modb(g_hdcp[numa_id], HDMI_MC_CLKDIS_HDCPCLK_ENABLE, HDMI_MC_CLKDIS_HDCPCLK_MASK, HDMI_MC_CLKDIS); - hdcp_modb(g_hdcp, + hdcp_modb(g_hdcp[numa_id], HDMI_HDCP2_OVR_ENABLE | HDMI_HDCP2_FORCE_ENABLE, HDMI_HDCP2_OVR_EN_MASK | HDMI_HDCP2_FORCE_MASK, HDMI_HDCP2REG_CTRL); - hdcp_modb(g_hdcp, HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE, + hdcp_modb(g_hdcp[numa_id], HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE, HDMI_FC_INVIDCONF_HDCP_KEEPOUT_MASK, HDMI_FC_INVIDCONF); } else if (enable == 2) { - val = g_hdcp->read(g_hdcp->hdmi, HDMI_PHY_STAT0); + val = g_hdcp[numa_id]->read(g_hdcp[numa_id]->hdmi, HDMI_PHY_STAT0); if (val & HDMI_PHY_HPD) - dw_hdmi_hdcp1x_start(g_hdcp); + dw_hdmi_hdcp1x_start(g_hdcp[numa_id]); } else if (enable == 3) { - if (g_hdcp->hdcp2 && g_hdcp->hdcp2->enable && - (tv_hdmi_hdcp2_support(g_hdcp->hdmi) == 1)) { - if (g_hdcp->status != DW_HDCP_DISABLED) - dw_hdmi_hdcp1x_stop(g_hdcp); - g_hdcp->hdcp2->start(); + if (g_hdcp[numa_id]->hdcp2 && g_hdcp[numa_id]->hdcp2->enable && + (tv_hdmi_hdcp2_support(g_hdcp[numa_id]->hdmi) == 1)) { + if (g_hdcp[numa_id]->status != DW_HDCP_DISABLED) + dw_hdmi_hdcp1x_stop(g_hdcp[numa_id]); + g_hdcp[numa_id]->hdcp2->start(g_hdcp[numa_id]->hdcp2); } } } @@ -536,7 +536,8 @@ static ssize_t hdcp_enable_read(struct device *device, struct device_attribute *attr, char *buf) { bool enable = 0; - struct dw_hdcp *hdcp = g_hdcp; + struct miscdevice *mdev= dev_get_drvdata(device); + struct dw_hdcp *hdcp = mdev->parent->platform_data; if (hdcp) enable = hdcp->enable; @@ -549,7 +550,8 @@ static ssize_t hdcp_enable_write(struct device *device, size_t count) { bool enable; - struct dw_hdcp *hdcp = g_hdcp; + struct miscdevice *mdev= dev_get_drvdata(device); + struct dw_hdcp *hdcp = mdev->parent->platform_data; int ret; if (!hdcp) @@ -589,7 +591,8 @@ static ssize_t hdcp_trytimes_read(struct device *device, struct device_attribute *attr, char *buf) { int trytimes = 0; - struct dw_hdcp *hdcp = g_hdcp; + struct miscdevice *mdev= dev_get_drvdata(device); + struct dw_hdcp *hdcp = mdev->parent->platform_data; if (hdcp) trytimes = hdcp->retry_times; @@ -602,7 +605,8 @@ static ssize_t hdcp_trytimes_write(struct device *device, const char *buf, size_t count) { int trytimes; - struct dw_hdcp *hdcp = g_hdcp; + struct miscdevice *mdev= dev_get_drvdata(device); + struct dw_hdcp *hdcp = mdev->parent->platform_data; if (!hdcp) return -EINVAL; @@ -624,7 +628,8 @@ static ssize_t hdcp_status_read(struct device *device, struct device_attribute *attr, char *buf) { int status = DW_HDCP_DISABLED; - struct dw_hdcp *hdcp = g_hdcp; + struct miscdevice *mdev= dev_get_drvdata(device); + struct dw_hdcp *hdcp = mdev->parent->platform_data; if (hdcp) status = hdcp->status; @@ -648,11 +653,14 @@ static int dw_hdmi_hdcp_probe(struct platform_device *pdev) int ret = 0; struct dw_hdcp *hdcp = pdev->dev.platform_data; - dev_info(&pdev->dev, "%s...\n", __func__); - g_hdcp = hdcp; + dev_info(&pdev->dev, "%s numa id:%d\n", __func__, hdcp->numa_id); + g_hdcp[hdcp->numa_id] = hdcp; + char name[15]; + sprintf(name, "hdmi_%d_hdcp1x", hdcp->numa_id); hdcp->mdev.minor = MISC_DYNAMIC_MINOR; - hdcp->mdev.name = "hdmi_hdcp1x"; + hdcp->mdev.name = name; hdcp->mdev.mode = 0666; + hdcp->mdev.parent = &pdev->dev; if (misc_register(&hdcp->mdev)) { dev_err(&pdev->dev, "HDCP: Could not add character 
driver\n"); @@ -687,10 +695,6 @@ static int dw_hdmi_hdcp_probe(struct platform_device *pdev) hdcp->hdcp_stop = dw_hdmi_hdcp_stop; hdcp->hdcp_isr = dw_hdmi_hdcp_isr; -#ifdef CONFIG_DW_HDMI_HDCP1X_ENABLED - hdcp_enable_write(NULL, NULL, "1", 1); -#endif - dev_dbg(hdcp->dev, "%s success\n", __func__); return 0; diff --git a/drivers/gpu/drm/eswin/dw_hdmi_hdcp.h b/drivers/gpu/drm/eswin/dw_hdmi_hdcp.h index f11b259c1731..20fd169b387c 100644 --- a/drivers/gpu/drm/eswin/dw_hdmi_hdcp.h +++ b/drivers/gpu/drm/eswin/dw_hdmi_hdcp.h @@ -23,6 +23,7 @@ #define DW_HDMI_HDCP_H #include +#include "dsp_dma_buf.h" #define DW_HDCP_DRIVER_NAME "dw-hdmi-hdcp" #define HDCP_PRIVATE_KEY_SIZE 280 @@ -34,16 +35,44 @@ struct hdcp_keys { u8 sha1[HDCP_KEY_SHA_SIZE]; }; +typedef struct { + int allocated, initialized; + int code_loaded; + + int code_is_phys_mem; + dma_addr_t code_base; + uint32_t code_size; + uint8_t *code; + int data_is_phys_mem; + dma_addr_t data_base; + uint32_t data_size; + uint8_t *data; + + struct resource *hpi_resource; + uint8_t __iomem *hpi; + struct device *dev; +} hl_device; + struct dw_hdcp2 { + int numa_id; int enable; - void (*start)(void); - void (*stop)(void); + void (*start)(struct dw_hdcp2* hdcp2); + void (*stop)(struct dw_hdcp2* hdcp2); struct device *dev; int wait_hdcp2_reset; int hot_plug; struct miscdevice mdev; int auth_sucess; + hl_device *hld; + struct miscdevice mics_hld; + struct clk *aclk; + struct clk *iesmclk; + + struct dev_buffer_desc code_buffer; + struct dev_buffer_desc data_buffer; + struct dma_buf_attachment *code_attach; + struct dma_buf_attachment *data_attach; }; struct dw_hdcp { @@ -56,6 +85,7 @@ struct dw_hdcp { int hdcp2_enable; int status; u32 reg_io_width; + int numa_id; struct dw_hdcp2 *hdcp2; struct miscdevice mdev; @@ -73,6 +103,6 @@ struct dw_hdcp { extern u8 tv_hdmi_hdcp2_support(struct dw_hdmi *hdmi); extern void dw_hdmi_hdcp2_init(struct dw_hdcp2 *hdcp2); -extern void dw_hdmi_hdcp2_remove(void); -extern void dw_hdmi_hdcp2_start(int enable); +extern void dw_hdmi_hdcp2_remove(struct dw_hdcp2 *hdcp2); +extern void dw_hdmi_hdcp2_start(int enable, int numa_id); #endif diff --git a/drivers/gpu/drm/eswin/dw_hdmi_hdcp2.c b/drivers/gpu/drm/eswin/dw_hdmi_hdcp2.c index 63c01abfb2af..6c3b0af804f8 100644 --- a/drivers/gpu/drm/eswin/dw_hdmi_hdcp2.c +++ b/drivers/gpu/drm/eswin/dw_hdmi_hdcp2.c @@ -83,48 +83,26 @@ #define MAX_HL_DEVICES 16 #define TROOT_GRIFFIN +static hl_device hl_devices[2][MAX_HL_DEVICES]; + static bool randomize_mem = false; module_param(randomize_mem, bool, 0); MODULE_PARM_DESC(noverify, "Wipe memory allocations on startup (for debug)"); -static struct dw_hdcp2 *g_dw_hdcp2; - -static void dw_hdcp2_stop(void) +static void dw_hdcp2_stop(struct dw_hdcp2 *hdcp2) { - printk("func: %s; line: %d\n", __func__, __LINE__); - g_dw_hdcp2->hot_plug = 0; - dw_hdmi_hdcp2_start(0); + dev_info(hdcp2->dev, "%s\n", __func__); + hdcp2->hot_plug = 0; + dw_hdmi_hdcp2_start(0, hdcp2->numa_id); } -static void dw_hdcp2_start(void) +static void dw_hdcp2_start(struct dw_hdcp2 *hdcp2) { - printk("func: %s; line: %d\n", __func__, __LINE__); - dw_hdmi_hdcp2_start(1); - g_dw_hdcp2->hot_plug = 1; + dev_info(hdcp2->dev, "%s\n", __func__); + dw_hdmi_hdcp2_start(1, hdcp2->numa_id); + hdcp2->hot_plug = 1; } -// -// HL Device -// -typedef struct { - int allocated, initialized; - int code_loaded; - - int code_is_phys_mem; - dma_addr_t code_base; - uint32_t code_size; - uint8_t *code; - int data_is_phys_mem; - dma_addr_t data_base; - uint32_t data_size; - uint8_t *data; - - struct resource 
*hpi_resource; - uint8_t __iomem *hpi; -} hl_device; - -static hl_device hl_devices[MAX_HL_DEVICES]; - /* HL_DRV_IOC_MEMINFO implementation */ static long get_meminfo(hl_device *hl_dev, void __user *arg) { @@ -302,7 +280,7 @@ static long hpi_write(hl_device *hl_dev, void __user *arg) return 0; } -static hl_device *alloc_hl_dev_slot(const struct hl_drv_ioc_meminfo *info) +static hl_device *alloc_hl_dev_slot(const struct hl_drv_ioc_meminfo *info, int numa_id) { int i; @@ -312,7 +290,7 @@ static hl_device *alloc_hl_dev_slot(const struct hl_drv_ioc_meminfo *info) /* Check if we have a matching device (same HPI base) */ for (i = 0; i < MAX_HL_DEVICES; i++) { - hl_device *slot = &hl_devices[i]; + hl_device *slot = &hl_devices[numa_id][i]; if (slot->allocated && (info->hpi_base == slot->hpi_resource->start)) return slot; @@ -320,7 +298,7 @@ static hl_device *alloc_hl_dev_slot(const struct hl_drv_ioc_meminfo *info) /* Find unused slot */ for (i = 0; i < MAX_HL_DEVICES; i++) { - hl_device *slot = &hl_devices[i]; + hl_device *slot = &hl_devices[numa_id][i]; if (!slot->allocated) { slot->allocated = 1; return slot; @@ -330,28 +308,59 @@ static hl_device *alloc_hl_dev_slot(const struct hl_drv_ioc_meminfo *info) return 0; } -static void free_dma_areas(hl_device *hl_dev) +static void free_dma_areas(struct dw_hdcp2 *hdcp2) { + hl_device *hl_dev = hdcp2->hld; + if (hl_dev == 0) { return; } - if (!hl_dev->code_is_phys_mem && hl_dev->code) { - dma_free_coherent(0, hl_dev->code_size, hl_dev->code, - hl_dev->code_base); - hl_dev->code = 0; - } + if (!hdcp2->numa_id) { + if (!hl_dev->code_is_phys_mem && hl_dev->code) { + dma_free_coherent(0, hl_dev->code_size, hl_dev->code, + hl_dev->code_base); + hl_dev->code = 0; + } + + if (!hl_dev->data_is_phys_mem && hl_dev->data) { + dma_free_coherent(0, hl_dev->data_size, hl_dev->data, + hl_dev->data_base); + hl_dev->data = 0; + } + } else { + if (!hl_dev->code_is_phys_mem && hl_dev->code) { + dev_mem_vunmap(&hdcp2->code_buffer, hl_dev->code); + + if (hdcp2->code_attach) { + dev_mem_detach(hdcp2->code_attach, DMA_BIDIRECTIONAL); + } + + if (hdcp2->code_buffer.buf) { + dev_mem_free(hdcp2->code_buffer.buf); + } + } + + if (!hl_dev->data_is_phys_mem && hl_dev->data) { + dev_mem_vunmap(&hdcp2->data_buffer, hl_dev->data); + + if (hdcp2->data_attach) { + dev_mem_detach(hdcp2->data_attach, DMA_BIDIRECTIONAL); + } - if (!hl_dev->data_is_phys_mem && hl_dev->data) { - dma_free_coherent(0, hl_dev->data_size, hl_dev->data, - hl_dev->data_base); - hl_dev->data = 0; + if (hdcp2->data_buffer.buf) { + dev_mem_free(hdcp2->data_buffer.buf); + } + } } } -static int alloc_dma_areas(hl_device *hl_dev, - const struct hl_drv_ioc_meminfo *info) +static int alloc_dma_areas(struct dw_hdcp2 *hdcp2, const struct hl_drv_ioc_meminfo *info) { + int ret; + hl_device *hl_dev = hdcp2->hld; + struct device *dev = hdcp2->dev; + if ((hl_dev == 0) || (info == 0)) { return -EFAULT; } @@ -365,11 +374,34 @@ static int alloc_dma_areas(hl_device *hl_dev, hl_dev->code_base = info->code_base; hl_dev->code = phys_to_virt(hl_dev->code_base); } else { - hl_dev->code = - dma_alloc_coherent(g_dw_hdcp2->dev, hl_dev->code_size, - &hl_dev->code_base, GFP_KERNEL); - if (!hl_dev->code) { - return -ENOMEM; + if (!hdcp2->numa_id) { + hl_dev->code = dma_alloc_coherent(dev, hl_dev->code_size, + &hl_dev->code_base, GFP_KERNEL); + if (!hl_dev->code) { + return -ENOMEM; + } + dev_info(dev,"code dma addr:0x%llx, size:0x%x\n", hl_dev->code_base, hl_dev->code_size); + } else { + ret = dev_mem_alloc(hl_dev->code_size, 
ES_MEM_ALLOC_SPRAM_DIE1, + &hdcp2->code_buffer.buf); + if (ret < 0) { + dev_err(dev, "dev_mem_alloc code buf failed!\n"); + return -ENOMEM; + } + + hl_dev->code_base = dev_mem_attach(hdcp2->code_buffer.buf, dev, + DMA_BIDIRECTIONAL, &hdcp2->code_attach); + if (hl_dev->code_base == 0) { + dev_err(dev, "dev_mem_attach code buf failed!\n"); + goto release_code_buf; + } + + hl_dev->code = dev_mem_vmap(&hdcp2->code_buffer); + if (!hl_dev->code) { + dev_err(dev, "dev_mem_vmap code buf failed!\n"); + goto release_code_attach; + } + dev_info(dev, "code spram addr:0x%llx, size:0x%x\n", hl_dev->code_base, hl_dev->code_size); } } @@ -381,38 +413,75 @@ static int alloc_dma_areas(hl_device *hl_dev, hl_dev->data_base = info->data_base; hl_dev->data = phys_to_virt(hl_dev->data_base); } else { - hl_dev->data = - dma_alloc_coherent(g_dw_hdcp2->dev, hl_dev->data_size, - &hl_dev->data_base, GFP_KERNEL); - if (!hl_dev->data) { - free_dma_areas(hl_dev); - return -ENOMEM; + if (!hdcp2->numa_id) { + hl_dev->data = dma_alloc_coherent(dev, hl_dev->data_size, + &hl_dev->data_base, GFP_KERNEL); + if (!hl_dev->data) { + free_dma_areas(hdcp2); + return -ENOMEM; + } + dev_info(dev,"data dma addr:0x%llx, size:0x%x\n", hl_dev->code_base, hl_dev->data_size); + } else { + ret = dev_mem_alloc(hl_dev->data_size, ES_MEM_ALLOC_SPRAM_DIE1, + &hdcp2->data_buffer.buf); + if (ret < 0) { + dev_err(dev, "dev_mem_alloc data buf failed!\n"); + goto release_code_vmap; + } + + hl_dev->data_base = dev_mem_attach(hdcp2->data_buffer.buf, dev, + DMA_BIDIRECTIONAL, &hdcp2->data_attach); + if (hl_dev->data_base == 0) { + dev_err(dev, "dev_mem_attach data buf failed!\n"); + goto release_data_buf; + } + + hl_dev->data = dev_mem_vmap(&hdcp2->data_buffer); + if (!hl_dev->data) { + dev_err(dev, "dev_mem_vmap data buf failed!\n"); + goto release_data_attach; + } + dev_info(dev, "data spram addr:0x%llx, size:0x%x\n", hl_dev->data_base, hl_dev->data_size); } } return 0; + +release_data_attach: + dev_mem_detach(hdcp2->data_attach, DMA_BIDIRECTIONAL); +release_data_buf: + dev_mem_free(hdcp2->data_buffer.buf); +release_code_vmap: + dev_mem_vunmap(&hdcp2->code_buffer, hl_dev->code); +release_code_attach: + dev_mem_detach(hdcp2->code_attach, DMA_BIDIRECTIONAL); +release_code_buf: + dev_mem_free(hdcp2->code_buffer.buf); + + return -ENOMEM; } /* HL_DRV_IOC_INIT implementation */ -static long init(struct file *f, void __user *arg) +static long hld_init(struct dw_hdcp2 *hdcp2, void __user *arg) { struct resource *hpi_mem; struct hl_drv_ioc_meminfo info; - hl_device *hl_dev; int rc; - if ((f == 0) || (arg == 0)) { + if ((arg == 0)) { + dev_err(hdcp2->dev, "param error!\n"); return -EFAULT; } if (copy_from_user(&info, arg, sizeof info) != 0) return -EFAULT; - hl_dev = alloc_hl_dev_slot(&info); - if (!hl_dev) + + hdcp2->hld = alloc_hl_dev_slot(&info, hdcp2->numa_id); + if (!hdcp2->hld) return -EMFILE; - if (!hl_dev->initialized) { - rc = alloc_dma_areas(hl_dev, &info); + if (!hdcp2->hld->initialized) { + rc = alloc_dma_areas(hdcp2, &info); if (rc < 0) goto err_free; @@ -422,26 +491,25 @@ static long init(struct file *f, void __user *arg) goto err_free; } - hl_dev->hpi = ioremap(hpi_mem->start, resource_size(hpi_mem)); - if (!hl_dev->hpi) { + hdcp2->hld->hpi = ioremap(hpi_mem->start, resource_size(hpi_mem)); + if (!hdcp2->hld->hpi) { rc = -ENOMEM; goto err_release_region; } - hl_dev->hpi_resource = hpi_mem; - hl_dev->initialized = 1; + hdcp2->hld->hpi_resource = hpi_mem; + hdcp2->hld->initialized = 1; } - f->private_data = hl_dev; return 0; err_release_region: 
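/*
 * Illustrative sketch of the unwind order used in alloc_dma_areas() above:
 * each dev_mem_* step is rolled back in reverse on failure, assuming
 * dev_mem_alloc()/dev_mem_attach()/dev_mem_vmap() pair with
 * dev_mem_free()/dev_mem_detach()/dev_mem_vunmap() respectively:
 *
 *     dev_mem_alloc(size, heap, &desc.buf)      undone by dev_mem_free()
 *     dev_mem_attach(desc.buf, dev, dir, &att)  undone by dev_mem_detach()
 *     dev_mem_vmap(&desc)                       undone by dev_mem_vunmap()
 */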
release_resource(hpi_mem); err_free: - free_dma_areas(hl_dev); - hl_dev->initialized = 0; - hl_dev->allocated = 0; - hl_dev->hpi_resource = 0; - hl_dev->hpi = 0; + free_dma_areas(hdcp2); + hdcp2->hld->initialized = 0; + hdcp2->hld->allocated = 0; + hdcp2->hld->hpi_resource = 0; + hdcp2->hld->hpi = 0; return rc; } @@ -465,8 +533,6 @@ static void free_hl_dev_slot(hl_device *slot) release_mem_region(slot->hpi_resource->start, 128); slot->hpi_resource = 0; } - - free_dma_areas(slot); } slot->initialized = 0; @@ -475,69 +541,69 @@ static void free_hl_dev_slot(hl_device *slot) static long hld_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { - hl_device *hl_dev; + struct miscdevice *misc_hld = f->private_data; + struct dw_hdcp2 *hdcp2 = dev_get_drvdata(misc_hld->parent); void __user *data; if (f == 0) { return -EFAULT; } - hl_dev = f->private_data; data = (void __user *)arg; if (cmd == HL_DRV_IOC_INIT) { - return init(f, data); - } else if (!hl_dev) { + return hld_init(hdcp2, data); + } else if (!hdcp2->hld) { return -EAGAIN; } switch (cmd) { case HL_DRV_IOC_INIT: - return init(f, data); + return hld_init(hdcp2, data); case HL_DRV_IOC_MEMINFO: - return get_meminfo(hl_dev, data); + return get_meminfo(hdcp2->hld, data); case HL_DRV_IOC_READ_HPI: - return hpi_read(hl_dev, data); + return hpi_read(hdcp2->hld, data); case HL_DRV_IOC_WRITE_HPI: - return hpi_write(hl_dev, data); + return hpi_write(hdcp2->hld, data); case HL_DRV_IOC_LOAD_CODE: - return load_code(hl_dev, data); + return load_code(hdcp2->hld, data); case HL_DRV_IOC_WRITE_DATA: - return write_data(hl_dev, data); + return write_data(hdcp2->hld, data); case HL_DRV_IOC_READ_DATA: - return read_data(hl_dev, data); + return read_data(hdcp2->hld, data); case HL_DRV_IOC_MEMSET_DATA: - return set_data(hl_dev, data); + return set_data(hdcp2->hld, data); case DW_DRV_IOC_CONNECT_STATUS: - return g_dw_hdcp2->hot_plug; + return hdcp2->hot_plug; case DW_DRV_IOC_CONNECT_SET: - printk("set hdcp2 reset one\n"); - g_dw_hdcp2->wait_hdcp2_reset = 1; - dw_hdmi_hdcp2_start(1); + dev_info(hdcp2->dev, "set hdcp2 reset one\n"); + hdcp2->wait_hdcp2_reset = 1; + dw_hdmi_hdcp2_start(1, hdcp2->numa_id); return 0; case DW_DRV_IOC_DISCONNECT_SET: - if (g_dw_hdcp2->wait_hdcp2_reset == 1) { + if (hdcp2->wait_hdcp2_reset == 1) { printk("set hdcp2 reset zero\n"); - g_dw_hdcp2->wait_hdcp2_reset = 0; - dw_hdmi_hdcp2_start(0); + hdcp2->wait_hdcp2_reset = 0; + dw_hdmi_hdcp2_start(0, hdcp2->numa_id); } - if (g_dw_hdcp2->auth_sucess == 1) { - g_dw_hdcp2->auth_sucess = 0; + if (hdcp2->auth_sucess == 1) { + hdcp2->auth_sucess = 0; } return 0; case DW_DRV_IOC_AUTH_SUCCESS: - g_dw_hdcp2->auth_sucess = 1; + hdcp2->auth_sucess = 1; return 0; case DW_DRV_IOC_AUTH_FAIL: - g_dw_hdcp2->auth_sucess = 0; + hdcp2->auth_sucess = 0; return 0; case DW_DRV_IOC_NO_CAPACITY: printk("set hdcp2 reset zero 3005\n"); - g_dw_hdcp2->hot_plug = 0; - g_dw_hdcp2->wait_hdcp2_reset = 0; - dw_hdmi_hdcp2_start(0); - dw_hdmi_hdcp2_start(2); + hdcp2->hot_plug = 0; + hdcp2->wait_hdcp2_reset = 0; + dw_hdmi_hdcp2_start(0, hdcp2->numa_id); + dw_hdmi_hdcp2_start(2, hdcp2->numa_id); return 0; } @@ -552,66 +618,66 @@ static const struct file_operations hld_file_operations = { .owner = THIS_MODULE, }; -static struct miscdevice hld_device = { - .minor = MISC_DYNAMIC_MINOR, - .name = "hl_dev", - .fops = &hld_file_operations, -}; - -static int hld_init(void) +static int rigister_hld_device(struct dw_hdcp2 *hdcp2) { int i; + int numa_id = hdcp2->numa_id; + char name[20]; - printk("%s...\n", __func__); for (i = 0; 
i < MAX_HL_DEVICES; i++) { - hl_devices[i].allocated = 0; - hl_devices[i].initialized = 0; - hl_devices[i].code_loaded = 0; - hl_devices[i].code = 0; - hl_devices[i].data = 0; - hl_devices[i].hpi_resource = 0; - hl_devices[i].hpi = 0; - } - return misc_register(&hld_device); + hl_devices[numa_id][i].allocated = 0; + hl_devices[numa_id][i].initialized = 0; + hl_devices[numa_id][i].code_loaded = 0; + hl_devices[numa_id][i].code = 0; + hl_devices[numa_id][i].data = 0; + hl_devices[numa_id][i].hpi_resource = 0; + hl_devices[numa_id][i].hpi = 0; + } + + sprintf(name, "hl_dev%d", numa_id); + hdcp2->mics_hld.minor = MISC_DYNAMIC_MINOR; + hdcp2->mics_hld.name = name; + hdcp2->mics_hld.fops = &hld_file_operations; + hdcp2->mics_hld.parent = hdcp2->dev; + + dev_info(hdcp2->dev, "%s numa id:%d\n", __func__, numa_id); + + if (misc_register(&hdcp2->mics_hld)) { + dev_err(hdcp2->dev, "Could not add character driver\n"); + return -EINVAL; + } + + return 0; } -static void hld_exit(void) +static void hld_exit(struct dw_hdcp2 *hdcp2) { int i; for (i = 0; i < MAX_HL_DEVICES; i++) { - free_hl_dev_slot(&hl_devices[i]); + free_hl_dev_slot(&hl_devices[hdcp2->numa_id][i]); } - misc_deregister(&hld_device); + free_dma_areas(hdcp2); + + misc_deregister(&hdcp2->mics_hld); } -static int dw_hdmi2_hdcp2_clk_enable(struct device *dev) +static int dw_hdmi2_hdcp2_clk_enable(struct dw_hdcp2 *hdcp2) { - struct clk *pclk; - //struct clk *aclk; - struct clk *hdcp2_clk_hdmi; - - pclk = devm_clk_get(dev, "pclk_hdcp2"); - if (IS_ERR(pclk)) { - pr_err("Unable to get hdcp2 pclk\n"); + hdcp2->aclk = devm_clk_get(hdcp2->dev, "aclk"); + if (IS_ERR(hdcp2->aclk)) { + dev_err(hdcp2->dev, "Unable to get hdcp2 aclk\n"); return -1; } - clk_prepare_enable(pclk); -#if 0 - aclk = devm_clk_get(dev, "aclk_hdcp2"); - if (IS_ERR(aclk)) { - pr_err("Unable to get hdcp2 aclk\n"); - return -1; - } - clk_prepare_enable(aclk); -#endif - hdcp2_clk_hdmi = devm_clk_get(dev, "hdcp2_clk_hdmi"); - if (IS_ERR(hdcp2_clk_hdmi)) { - pr_err("Unable to get hdcp2_clk_hdmi\n"); + clk_prepare_enable(hdcp2->aclk); + + hdcp2->iesmclk = devm_clk_get(hdcp2->dev, "iesmclk"); + if (IS_ERR(hdcp2->iesmclk)) { + dev_err(hdcp2->dev, "Unable to get hdcp2_clk_hdmi\n"); return -1; } - clk_prepare_enable(hdcp2_clk_hdmi); + clk_prepare_enable(hdcp2->iesmclk); return 0; } @@ -619,7 +685,10 @@ static int dw_hdmi2_hdcp2_clk_enable(struct device *dev) static ssize_t hdcp2_show_enable(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", g_dw_hdcp2->enable); + struct miscdevice *mdev= dev_get_drvdata(dev); + struct dw_hdcp2 *hdcp2 = dev_get_drvdata(mdev->parent); + + return snprintf(buf, PAGE_SIZE, "%d\n", hdcp2->enable); } static ssize_t hdcp2_store_enable(struct device *dev, @@ -627,17 +696,20 @@ static ssize_t hdcp2_store_enable(struct device *dev, const char *buf, size_t size) { int enable; + struct miscdevice *mdev= dev_get_drvdata(dev); + struct dw_hdcp2 *hdcp2 = dev_get_drvdata(mdev->parent); + if (kstrtoint(buf, 0, &enable)) return size; - if (g_dw_hdcp2->enable != enable) { - g_dw_hdcp2->enable = enable; + if (hdcp2->enable != enable) { + hdcp2->enable = enable; if (enable) { - dw_hdmi_hdcp2_start(3); + dw_hdmi_hdcp2_start(3, hdcp2->numa_id); } else { - if (g_dw_hdcp2->hot_plug) { - g_dw_hdcp2->stop(); - dw_hdmi_hdcp2_start(2); + if (hdcp2->hot_plug) { + hdcp2->stop(hdcp2); + dw_hdmi_hdcp2_start(2, hdcp2->numa_id); } } } @@ -647,12 +719,15 @@ static ssize_t hdcp2_store_enable(struct device *dev, static ssize_t 
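/*
 * Illustrative sketch: with the g_dw_hdcp2 global removed, the sysfs
 * callbacks recover their per-instance context through the misc device,
 * assuming probe registered the node with .parent set and stored the
 * dw_hdcp2 pointer with platform_set_drvdata():
 *
 *     struct miscdevice *mdev = dev_get_drvdata(dev);
 *     struct dw_hdcp2 *hdcp2 = dev_get_drvdata(mdev->parent);
 */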
hdcp2_show_status(struct device *dev, struct device_attribute *attr, char *buf) { - if (g_dw_hdcp2->enable != 1) { + struct miscdevice *mdev= dev_get_drvdata(dev); + struct dw_hdcp2 *hdcp2 = dev_get_drvdata(mdev->parent); + + if (hdcp2->enable != 1) { return snprintf(buf, PAGE_SIZE, "%s\n", "no enable hdcp2"); - } else if (!g_dw_hdcp2->hot_plug) { + } else if (!hdcp2->hot_plug) { return snprintf(buf, PAGE_SIZE, "%s\n", "hdcp2 no auth"); } else { - if (g_dw_hdcp2->auth_sucess) + if (hdcp2->auth_sucess) return snprintf(buf, PAGE_SIZE, "%s\n", "hdcp2 auth sucess"); else @@ -664,32 +739,33 @@ static ssize_t hdcp2_show_status(struct device *dev, static DEVICE_ATTR(enable, 0644, hdcp2_show_enable, hdcp2_store_enable); static DEVICE_ATTR(status, 0444, hdcp2_show_status, NULL); -static int create_device_node(void) +static int create_device_node(struct dw_hdcp2 *hdcp2) { int ret; - - if (!g_dw_hdcp2) - return -1; - g_dw_hdcp2->mdev.minor = MISC_DYNAMIC_MINOR; - g_dw_hdcp2->mdev.name = "hdcp2_node"; - g_dw_hdcp2->mdev.mode = 0666; - if (misc_register(&(g_dw_hdcp2->mdev))) { - pr_err("HDCP2: Could not add character driver\n"); + char name[20]; + + sprintf(name, "hdcp2-%d-node", hdcp2->numa_id); + hdcp2->mdev.minor = MISC_DYNAMIC_MINOR; + hdcp2->mdev.name = name; + hdcp2->mdev.mode = 0666; + hdcp2->mdev.parent = hdcp2->dev; + if (misc_register(&(hdcp2->mdev))) { + dev_err(hdcp2->dev, "HDCP2: Could not add character driver\n"); return -1; } - ret = device_create_file(g_dw_hdcp2->mdev.this_device, + ret = device_create_file(hdcp2->mdev.this_device, &dev_attr_enable); if (ret) { - pr_err("HDCP: Could not add sys file enable\n"); + dev_err(hdcp2->dev, "HDCP: Could not add sys file enable\n"); ret = -EINVAL; goto error0; } - ret = device_create_file(g_dw_hdcp2->mdev.this_device, + ret = device_create_file(hdcp2->mdev.this_device, &dev_attr_status); if (ret) { - pr_err("HDCP: Could not add sys file status\n"); + dev_err(hdcp2->dev, "HDCP: Could not add sys file status\n"); ret = -EINVAL; goto error1; } @@ -697,87 +773,110 @@ static int create_device_node(void) return 0; error1: - device_remove_file(g_dw_hdcp2->mdev.this_device, &dev_attr_enable); + device_remove_file(hdcp2->mdev.this_device, &dev_attr_enable); error0: - misc_deregister(&g_dw_hdcp2->mdev); + misc_deregister(&hdcp2->mdev); return ret; } -static void end_device_node(void) +static void end_device_node(struct dw_hdcp2 *hdcp2) { - if (g_dw_hdcp2) - misc_deregister(&(g_dw_hdcp2->mdev)); + device_remove_file(hdcp2->mdev.this_device, &dev_attr_enable); + device_remove_file(hdcp2->mdev.this_device, &dev_attr_status); + misc_deregister(&(hdcp2->mdev)); } static int eswin_hdmi_hdcp2_probe(struct platform_device *pdev) { - struct device *hdcp2_dev = &pdev->dev; + struct dw_hdcp2 *hdcp2; + struct device *dev = &pdev->dev; + int numa_id; - printk("%s...\n", __func__); - g_dw_hdcp2 = kmalloc(sizeof(*g_dw_hdcp2), GFP_KERNEL); - if (!g_dw_hdcp2) { - printk("malloc g_dw_hdcp2 error\n"); + hdcp2 = devm_kzalloc(dev, sizeof(*hdcp2), GFP_KERNEL); + if (!hdcp2) return -ENOMEM; + + platform_set_drvdata(pdev, hdcp2); + if(of_property_read_u32(dev->of_node, "numa-node-id", &numa_id)) { + numa_id = 0; } - memset(g_dw_hdcp2, 0, sizeof(*g_dw_hdcp2)); + dev_info(dev, "%s numa_id:%d\n", __func__, numa_id); + hdcp2->numa_id = numa_id; - g_dw_hdcp2->dev = hdcp2_dev; - g_dw_hdcp2->stop = dw_hdcp2_stop; - g_dw_hdcp2->start = dw_hdcp2_start; - hld_init(); - dw_hdmi2_hdcp2_clk_enable(hdcp2_dev); - dma_set_mask_and_coherent(hdcp2_dev, DMA_BIT_MASK(32)); - 
dw_hdmi_hdcp2_init(g_dw_hdcp2); - dw_hdmi_hdcp2_start(3); + hdcp2->dev = dev; + hdcp2->stop = dw_hdcp2_stop; + hdcp2->start = dw_hdcp2_start; + rigister_hld_device(hdcp2); + dw_hdmi2_hdcp2_clk_enable(hdcp2); + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + dw_hdmi_hdcp2_init(hdcp2); + dw_hdmi_hdcp2_start(3, numa_id); - create_device_node(); + create_device_node(hdcp2); return 0; } static int eswin_hdmi_hdcp2_remove(struct platform_device *pdev) { - printk("%s...\n", __func__); - dw_hdmi_hdcp2_remove(); - end_device_node(); - hld_exit(); - kfree(g_dw_hdcp2); - g_dw_hdcp2 = NULL; + struct dw_hdcp2 *hdcp2 = platform_get_drvdata(pdev); + dev_info(hdcp2->dev, "%s numa id:%d\n", __func__, hdcp2->numa_id); + dw_hdmi_hdcp2_remove(hdcp2); + end_device_node(hdcp2); + hld_exit(hdcp2); + + clk_disable_unprepare(hdcp2->aclk); + clk_disable_unprepare(hdcp2->iesmclk); return 0; } -static void eswin_hdmi_hdcp2_shutdown(struct platform_device *pdev) +static int __maybe_unused dw_hdcp2_suspend(struct device *dev) { - printk("%s...\n", __func__); + dev_dbg(dev, "%s\n", __func__); + struct dw_hdcp2 *hdcp2 = dev_get_drvdata(dev); + + clk_disable(hdcp2->aclk); + clk_disable(hdcp2->iesmclk); + + return 0; } -#if defined(CONFIG_OF) +static int __maybe_unused dw_hdcp2_resume(struct device *dev) +{ + dev_dbg(dev, "%s\n", __func__); + struct dw_hdcp2 *hdcp2 = dev_get_drvdata(dev); + + clk_enable(hdcp2->aclk); + clk_enable(hdcp2->iesmclk); + + return 0; +} + +static const struct dev_pm_ops dw_hdcp2_pm = { + .resume = dw_hdcp2_resume, + .suspend = dw_hdcp2_suspend, +}; + static const struct of_device_id dw_hdmi_hdcp2_dt_ids[] = { { .compatible = "eswin,dw-hdmi-hdcp2", }, - {} }; - MODULE_DEVICE_TABLE(of, dw_hdmi_hdcp2_dt_ids); -#endif struct platform_driver dw_hdmi_hdcp2_driver = { - .probe = eswin_hdmi_hdcp2_probe, - .remove = eswin_hdmi_hdcp2_remove, - .shutdown = eswin_hdmi_hdcp2_shutdown, - .driver = { + .probe = eswin_hdmi_hdcp2_probe, + .remove = eswin_hdmi_hdcp2_remove, + .driver = { .name = "dw-hdmi-hdcp2", .owner = THIS_MODULE, -#if defined(CONFIG_OF) + .pm = &dw_hdcp2_pm, .of_match_table = of_match_ptr(dw_hdmi_hdcp2_dt_ids), -#endif }, }; static int __init hdmi_hdcp2_init(void) { - printk("%s...\n", __func__); return platform_driver_register(&dw_hdmi_hdcp2_driver); } diff --git a/drivers/gpu/drm/eswin/eswin_dw_hdmi.c b/drivers/gpu/drm/eswin/eswin_dw_hdmi.c index 77c1481b2d2e..19f7cccdca33 100644 --- a/drivers/gpu/drm/eswin/eswin_dw_hdmi.c +++ b/drivers/gpu/drm/eswin/eswin_dw_hdmi.c @@ -955,7 +955,7 @@ static int __maybe_unused dw_hdmi_eswin_resume(struct device *dev) return 0; } -static const struct dev_pm_ops dw_hdmi_eswin_pm = { SET_SYSTEM_SLEEP_PM_OPS( +static const struct dev_pm_ops dw_hdmi_eswin_pm = { SET_SYSTEM_SLEEP_PM_OPS( dw_hdmi_eswin_suspend, dw_hdmi_eswin_resume) }; struct platform_driver dw_hdmi_eswin_pltfm_driver = { diff --git a/drivers/gpu/drm/img/img-volcanic/Makefile b/drivers/gpu/drm/img/img-volcanic/Makefile index 60174af49e4a..087e156a263f 100644 --- a/drivers/gpu/drm/img/img-volcanic/Makefile +++ b/drivers/gpu/drm/img/img-volcanic/Makefile @@ -74,7 +74,13 @@ WINDOW_SYSTEM=nulldrmws #include $(OUT)/config_kernel.mk include $(srctree)/$(src)/config_kernel.mk +# Use the logic from kernel's Kbuild.include to select .NOTINTERMEDIATE or .SECONDARY based on make version +ifneq ($(and $(filter notintermediate, $(.FEATURES)),$(filter-out 4.4,$(MAKE_VERSION))),) +.NOTINTERMEDIATE: +else .SECONDARY: +endif + define symlink-source-file @if [ ! 
-e $(dir $@) ]; then mkdir -p $(dir $@); fi diff --git a/drivers/gpu/drm/img/img-volcanic/config_kernel.h b/drivers/gpu/drm/img/img-volcanic/config_kernel.h index d3f0a6a1b051..1e73467fd7bc 100644 --- a/drivers/gpu/drm/img/img-volcanic/config_kernel.h +++ b/drivers/gpu/drm/img/img-volcanic/config_kernel.h @@ -1,5 +1,4 @@ #define SUPPORT_RGXRAY_BRIDGE -#define PVRSRV_ENABLE_CCCB_GROW #define RGX_FW_FILENAME "rgx.fw" #define RGX_SH_FILENAME "rgx.sh" #define PVR_BUILD_DIR "eswin_linux" @@ -9,35 +8,41 @@ #define PVRSYNC_MODNAME "pvr_sync" #define SUPPORT_RGX 1 #define DISPLAY_CONTROLLER drm_nulldisp +#define PVRSRV_MAX_DEVICES 8 #define PVRSRV_HWPERF_COUNTERS_PERBLK 12 #define RELEASE #define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_30.3.408.101.h" #define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_30.V.408.101.h" +#define PVRSRV_ENABLE_XD_MEM #define PVRSRV_NEED_PVR_DPF #define PVRSRV_NEED_PVR_ASSERT +#define SUPPORT_LINUX_FDINFO +#define PVRSRV_TRACE_ROGUE_EVENTS #define SUPPORT_PHYSMEM_TEST #define PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY 5 #define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9 #define PVRSRV_POISON_ON_FREE_VALUE 0x63 #define RGX_NUM_OS_SUPPORTED 1 -#define RGX_OSID_0_DEFAULT_PRIORITY (1 - 0) -#define RGX_OSID_1_DEFAULT_PRIORITY (1 - 1) -#define RGX_OSID_2_DEFAULT_PRIORITY (1 - 2) -#define RGX_OSID_3_DEFAULT_PRIORITY (1 - 3) -#define RGX_OSID_4_DEFAULT_PRIORITY (1 - 4) -#define RGX_OSID_5_DEFAULT_PRIORITY (1 - 5) -#define RGX_OSID_6_DEFAULT_PRIORITY (1 - 6) -#define RGX_OSID_7_DEFAULT_PRIORITY (1 - 7) +#define RGX_NUM_DRIVERS_SUPPORTED 1 #define RGX_HCS_DEFAULT_DEADLINE_MS 0xFFFFFFFFU -#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF +#define RGX_FW_HEAP_USES_FIRMWARE_OSID 0 +#define RGX_FW_HEAP_USES_HOST_OSID 1 +#define RGX_FW_HEAP_USES_DEDICATED_OSID 2 +#define RGX_FW_HEAP_OSID_ASSIGNMENT RGX_FW_HEAP_USES_FIRMWARE_OSID +#define PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION 0 +#define RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION 512 +#define PVRSRV_APPHINT_DRIVERMODE "default" #define RGX_FW_HEAP_SHIFT 25 +#define PVRSRV_APPHINT_GUESTFWHEAPSTRIDE (1 << RGX_FW_HEAP_SHIFT) +#define PVRSRV_APPHINT_AUTOVZGPUPOWERDOWN IMG_FALSE #define RGX_VZ_CONNECTION_TIMEOUT_US 60000000 #define PVRSRV_VZ_BYPASS_HMMU -#define PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE 0x00000000 +#define GPUVIRT_VALIDATION_NUM_OS 8 +#define PVRSRV_ENABLE_CCCB_GROW #define SUPPORT_POWMON_COMPONENT #define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U #define PVR_POWER_MONITOR_HWPERF 1 -#define SUPPORT_POWER_VALIDATION_VIA_DEBUGFS +#define SUPPORT_POWER_SAMPLING_VIA_DEBUGFS #define PVR_LDM_PLATFORM_PRE_REGISTERED #define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm" #define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256 @@ -64,17 +69,17 @@ #define PVRSRV_APPHINT_HWVALENABLESPUPOWERMASKCHANGE 0 #define PVRSRV_APPHINT_HWVALAVAILABLESPUMASK 0xFFFFFFFF #define PVRSRV_APPHINT_HWVALAVAILABLERACMASK 0xFFFFFFFF +#define PVRSRV_APPHINT_HWVALTPUF20BILPRECISION 1 #define PVRSRV_APPHINT_KILLINGCTL 0 #define PVRSRV_APPHINT_CDMTDM_KILLINGCTL 0 #define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0 -#define PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH 0 +#define PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH 0 #define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE #define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN #define PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER 0 #define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048 #define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048 #define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50 -#define 
PVRSRV_APPHINT_JONESDISABLEMASK 0 #define PVRSRV_APPHINT_NEWFILTERINGMODE IMG_FALSE #define PVRSRV_APPHINT_TRUNCATEMODE 0 #define PVRSRV_APPHINT_EMUMAXFREQ 0 @@ -83,23 +88,30 @@ #define PVRSRV_APPHINT_RISCVDMITEST 0 #define PVRSRV_APPHINT_RCEDISABLEMASK 0x00000000 #define PVRSRV_APPHINT_PCGPKTDROPTHRESH 0x0000000b -#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 1000000 +#define PVRSRV_APPHINT_RAYSLCMMUAUTOCACHEOPS 0x0000000f +#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 2000000 #define PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE 1 #define PVRSRV_APPHINT_VALIDATESOCUSCTIMERS 0 #define PVRSRV_APPHINT_SHGEOMPIPEMASK_OVERRIDE 0 #define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5 #define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0 #define PVRSRV_APPHINT_CACHEOPTHREADPRIORITY 1 +#define PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 11 +#define PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES 10000 #define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE #define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE #define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG #define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE -#define PVRSRV_APPHINT_KCCB_SIZE_LOG2 7 +#define PVRSRV_APPHINT_KCCB_SIZE_LOG2 10 #define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT #define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0 +#define PVRSRV_APPHINT_ENABLEIDLECYCLESTEALING RGXFWIF_INICFG_OS_ICS_DM_APPHINT +#define PVRSRV_APPHINT_FAULTDETECTIONTIMEINTERVAL_USEC 40000 +#define PVRSRV_APPHINT_ICSTIMEINTERVAL_THRESHOLD 90 #define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE #define PVRSRV_APPHINT_FIRMWARELOGTYPE 0 #define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS +#define PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE 1 #define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0 #define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST #define PVRSRV_APPHINT_HTBUFFERSIZE 64 @@ -112,7 +124,7 @@ #define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0 #define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0 #define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL 0 -#define PVRSRV_APPHINT_TIMECORRCLOCK 0 +#define PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE 2 #define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE #define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD #define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE @@ -122,16 +134,22 @@ #define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0 #define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE #define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE +#define PVRSRV_APPHINT_MMUVAL_PARITYFLIPPMR "unset" #define PVRSRV_APPHINT_TESTSLRINTERVAL 0 +#define PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2 8 +#define PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2 7 +#define PVRSRV_APPHINT_CLKCTRL0 0xAAAAAAAAAAAAAAAAULL +#define PVRSRV_APPHINT_CLKCTRL1 0xAAAAAAAAAAAAAAAAULL +#define PVRSRV_APPHINT_CLKCTRL2 0xAAAAAAAAULL #define SUPPORT_SOC_TIMER -#define SOC_TIMER_FREQ 20 #define PDVFS_COM_HOST 1 #define PDVFS_COM_AP 2 #define PDVFS_COM_PMC 3 #define PDVFS_COM_IMG_CLKDIV 4 +#define PDVFS_COM_SYSREG 5 #define PDVFS_COM PDVFS_COM_HOST +#define SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY #define PVR_GPIO_MODE_GENERAL 1 -#define PVR_GPIO_MODE_POWMON_PIN 2 #define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL #define PVRSRV_ENABLE_PROCESS_STATS #define PVR_ANNOTATION_MAX_LEN 63 @@ -139,20 +157,27 @@ #define SUPPORT_DI_BRG_IMPL #define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240 #define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480 +#define PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES #define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288 #define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 #define 
PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 #define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 +#define SUPPORT_PMR_DEFERRED_FREE +#define SUPPORT_PMR_PAGES_DEFERRED_FREE +#define SUPPORT_MMU_DEFERRED_FREE +#define SUPPORT_WRAP_EXTMEM #define PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC 1 #define SUPPORT_NATIVE_FENCE_SYNC #define PVRSRV_STALLED_CCB_ACTION +#define PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP #define UPDATE_FENCE_CHECKPOINT_COUNT 1 +#define RGX_VZ_CONNECTION_COOLDOWN_PERIOD 0 #define PVR_DRM_NAME "pvr" #define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16 #define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0 #define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14 #define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14 -#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 14 #define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15 #define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16 #define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13 @@ -160,10 +185,140 @@ #define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM 13 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17 -#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 17 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17 #define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM 15 +#define RGX_DRIVERID_0_DEFAULT_PRIORITY (1 - 0) +#define RGX_DRIVERID_1_DEFAULT_PRIORITY (1 - 1) +#define RGX_DRIVERID_2_DEFAULT_PRIORITY (1 - 2) +#define RGX_DRIVERID_3_DEFAULT_PRIORITY (1 - 3) +#define RGX_DRIVERID_4_DEFAULT_PRIORITY (1 - 4) +#define RGX_DRIVERID_5_DEFAULT_PRIORITY (1 - 5) +#define RGX_DRIVERID_6_DEFAULT_PRIORITY (1 - 6) +#define RGX_DRIVERID_7_DEFAULT_PRIORITY (1 - 7) +#define RGX_DRIVERID_8_DEFAULT_PRIORITY (1 - 8) +#define RGX_DRIVERID_9_DEFAULT_PRIORITY (1 - 9) +#define RGX_DRIVERID_10_DEFAULT_PRIORITY (1 - 10) +#define RGX_DRIVERID_11_DEFAULT_PRIORITY (1 - 11) +#define RGX_DRIVERID_12_DEFAULT_PRIORITY (1 - 12) +#define RGX_DRIVERID_13_DEFAULT_PRIORITY (1 - 13) +#define RGX_DRIVERID_14_DEFAULT_PRIORITY (1 - 14) +#define RGX_DRIVERID_15_DEFAULT_PRIORITY (1 - 15) +#define RGX_DRIVERID_16_DEFAULT_PRIORITY (1 - 16) +#define RGX_DRIVERID_17_DEFAULT_PRIORITY (1 - 17) +#define RGX_DRIVERID_18_DEFAULT_PRIORITY (1 - 18) +#define RGX_DRIVERID_19_DEFAULT_PRIORITY (1 - 19) +#define RGX_DRIVERID_20_DEFAULT_PRIORITY (1 - 20) +#define RGX_DRIVERID_21_DEFAULT_PRIORITY (1 - 21) +#define RGX_DRIVERID_22_DEFAULT_PRIORITY (1 - 22) +#define RGX_DRIVERID_23_DEFAULT_PRIORITY (1 - 23) +#define RGX_DRIVERID_24_DEFAULT_PRIORITY (1 - 24) +#define RGX_DRIVERID_25_DEFAULT_PRIORITY (1 - 25) +#define RGX_DRIVERID_26_DEFAULT_PRIORITY (1 - 26) +#define RGX_DRIVERID_27_DEFAULT_PRIORITY (1 - 27) +#define RGX_DRIVERID_28_DEFAULT_PRIORITY (1 - 28) +#define RGX_DRIVERID_29_DEFAULT_PRIORITY (1 - 29) +#define RGX_DRIVERID_30_DEFAULT_PRIORITY (1 - 30) +#define RGX_DRIVERID_31_DEFAULT_PRIORITY (1 - 31) +#define RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_1_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_2_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_3_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_4_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_5_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_6_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_7_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_8_DEFAULT_ISOLATION_GROUP 0 +#define 
RGX_DRIVERID_9_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_10_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_11_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_12_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_13_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_14_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_15_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_16_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_17_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_18_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_19_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_20_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_21_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_22_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_23_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_24_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_25_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_26_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_27_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_28_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_29_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_30_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_31_DEFAULT_ISOLATION_GROUP 0 +#define RGX_DRIVERID_0_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_1_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_2_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_3_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_4_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_5_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_6_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_7_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_8_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_9_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_10_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_11_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_12_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_13_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_14_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_15_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_16_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_17_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_18_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_19_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_20_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_21_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_22_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_23_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_24_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_25_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_26_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_27_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_28_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_29_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_30_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVERID_31_DEFAULT_TIME_SLICE 0 +#define RGX_DRIVER_DEFAULT_TIME_SLICE_INTERVAL 0 +#define RGX_DRIVER_DEFAULT_TIME_SLICES_SUM (((((((((((((((((((((((((((((((( + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) + 0) +#define DRIVER0_SECURITY_SUPPORT 0 +#define DRIVER1_SECURITY_SUPPORT 0 +#define DRIVER2_SECURITY_SUPPORT 0 +#define DRIVER3_SECURITY_SUPPORT 0 +#define DRIVER4_SECURITY_SUPPORT 0 +#define DRIVER5_SECURITY_SUPPORT 0 +#define DRIVER6_SECURITY_SUPPORT 0 +#define DRIVER7_SECURITY_SUPPORT 0 +#define DRIVER8_SECURITY_SUPPORT 0 +#define DRIVER9_SECURITY_SUPPORT 0 +#define DRIVER10_SECURITY_SUPPORT 0 +#define DRIVER11_SECURITY_SUPPORT 0 +#define DRIVER12_SECURITY_SUPPORT 0 +#define DRIVER13_SECURITY_SUPPORT 0 +#define DRIVER14_SECURITY_SUPPORT 0 +#define DRIVER15_SECURITY_SUPPORT 0 +#define DRIVER16_SECURITY_SUPPORT 0 +#define 
DRIVER17_SECURITY_SUPPORT 0 +#define DRIVER18_SECURITY_SUPPORT 0 +#define DRIVER19_SECURITY_SUPPORT 0 +#define DRIVER20_SECURITY_SUPPORT 0 +#define DRIVER21_SECURITY_SUPPORT 0 +#define DRIVER22_SECURITY_SUPPORT 0 +#define DRIVER23_SECURITY_SUPPORT 0 +#define DRIVER24_SECURITY_SUPPORT 0 +#define DRIVER25_SECURITY_SUPPORT 0 +#define DRIVER26_SECURITY_SUPPORT 0 +#define DRIVER27_SECURITY_SUPPORT 0 +#define DRIVER28_SECURITY_SUPPORT 0 +#define DRIVER29_SECURITY_SUPPORT 0 +#define DRIVER30_SECURITY_SUPPORT 0 +#define DRIVER31_SECURITY_SUPPORT 0 #define SUPPORT_BUFFER_SYNC 1 diff --git a/drivers/gpu/drm/img/img-volcanic/config_kernel.mk b/drivers/gpu/drm/img/img-volcanic/config_kernel.mk index fb9e496049ba..a1ca43790015 100644 --- a/drivers/gpu/drm/img/img-volcanic/config_kernel.mk +++ b/drivers/gpu/drm/img/img-volcanic/config_kernel.mk @@ -1,7 +1,7 @@ -override HOST_PRIMARY_ARCH := host_riscv64 -override HOST_32BIT_ARCH := host_riscv32 -override HOST_FORCE_32BIT := -override HOST_ALL_ARCH := host_riscv64 host_riscv32 +override HOST_PRIMARY_ARCH := host_x86_64 +override HOST_32BIT_ARCH := host_i386 +override HOST_FORCE_32BIT := -m32 +override HOST_ALL_ARCH := host_x86_64 host_i386 override TARGET_PRIMARY_ARCH := target_riscv64 override TARGET_SECONDARY_ARCH := override TARGET_ALL_ARCH := target_riscv64 @@ -9,12 +9,13 @@ override TARGET_FORCE_32BIT := override PVR_ARCH := volcanic override PVRSRV_DIR := services override METAG_VERSION_NEEDED := 2.8.1.0.3 -override RISCV_VERSION_NEEDED := 1.0.1 -override KERNELDIR := /lib/modules/5.17.0-rc7-win2030/build/ -override KERNEL_ID := 5.17.0-rc7-win2030 -override PVRSRV_MODULE_BASEDIR := /lib/modules/5.17.0-rc7-win2030/extra/ +override RISCV_VERSION_NEEDED := 1.7.1 +override KERNELDIR := /lib/modules/6.6.21-eic7x/build/ +override KERNEL_ID := 6.6.21-eic7x +override PVRSRV_MODULE_BASEDIR := /lib/modules/6.6.21-eic7x/extra/ override KERNEL_COMPONENTS := srvkm drm_nulldisp drm_nulldisp -override WINDOW_SYSTEM := lws-generic +override KERNEL_CROSS_COMPILE := riscv64-unknown-linux-gnu- +override WINDOW_SYSTEM := xorg override PVRSRV_MODNAME := pvrsrvkm override PVRHMMU_MODNAME := override PVR_BUILD_DIR := eswin_linux @@ -27,22 +28,28 @@ override BUILD := release override SORT_BRIDGE_STRUCTS := 1 override DEBUGLINK := 1 override RGX_BNC := 30.V.408.101 +override PVRSRV_ENABLE_XD_MEM := 1 +override SUPPORT_LINUX_FDINFO := 1 +override PVRSRV_TRACE_ROGUE_EVENTS := 1 override SUPPORT_PHYSMEM_TEST := 1 -override RGX_NUM_OS_SUPPORTED := 1 +override RGX_NUM_DRIVERS_SUPPORTED := 1 +override RGX_FW_HEAP_OSID_ASSIGNMENT := RGX_FW_HEAP_USES_FIRMWARE_OSID override VMM_TYPE := stub override SUPPORT_POWMON_COMPONENT := 1 -override RGX_TIMECORR_CLOCK := mono +override RGX_SECONDARY_OS_CLOCK_SOURCE := mono override PDVFS_COM_HOST := 1 override PDVFS_COM_AP := 2 override PDVFS_COM_PMC := 3 override PDVFS_COM_IMG_CLKDIV := 4 +override PDVFS_COM_SYSREG := 5 override PDVFS_COM := PDVFS_COM_HOST +override SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY := 1 override PVR_GPIO_MODE_GENERAL := 1 -override PVR_GPIO_MODE_POWMON_PIN := 2 override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL override PVR_HANDLE_BACKEND := idr override SUPPORT_DMABUF_BRIDGE := 1 override SUPPORT_DI_BRG_IMPL := 1 +override SUPPORT_WRAP_EXTMEM := 1 override SUPPORT_NATIVE_FENCE_SYNC := 1 override SUPPORT_DMA_FENCE := 1 override SUPPORT_BUFFER_SYNC := 1 diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/cache_bridge/server_cache_bridge.c 
b/drivers/gpu/drm/img/img-volcanic/generated/rogue/cache_bridge/server_cache_bridge.c index b79d314f0e85..b2f5267cc597 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/cache_bridge/server_cache_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/cache_bridge/server_cache_bridge.c @@ -87,9 +87,7 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -116,7 +114,6 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); @@ -132,7 +129,6 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -301,11 +297,7 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -431,13 +423,16 @@ PVRSRV_ERROR InitCACHEBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, - PVRSRVBridgeCacheOpQueue, NULL); + PVRSRVBridgeCacheOpQueue, NULL, sizeof(PVRSRV_BRIDGE_IN_CACHEOPQUEUE), + sizeof(PVRSRV_BRIDGE_OUT_CACHEOPQUEUE)); SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, - PVRSRVBridgeCacheOpExec, NULL); + PVRSRVBridgeCacheOpExec, NULL, sizeof(PVRSRV_BRIDGE_IN_CACHEOPEXEC), + sizeof(PVRSRV_BRIDGE_OUT_CACHEOPEXEC)); SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, - PVRSRVBridgeCacheOpLog, NULL); + PVRSRVBridgeCacheOpLog, NULL, sizeof(PVRSRV_BRIDGE_IN_CACHEOPLOG), + sizeof(PVRSRV_BRIDGE_OUT_CACHEOPLOG)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/cmm_bridge/server_cmm_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/cmm_bridge/server_cmm_bridge.c index b95f8589ae99..cc658c793d3b 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/cmm_bridge/server_cmm_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/cmm_bridge/server_cmm_bridge.c @@ -340,17 +340,16 @@ PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psContextInt = NULL; /* Release now we have cleaned up creation handles. 
*/ UnlockHandle(psConnection->psHandleBase); } - if (psContextInt) + else if (psContextInt) { DevmemIntCtxDestroy(psContextInt); } + } return 0; @@ -373,13 +372,19 @@ PVRSRV_ERROR InitCMMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, - PVRSRVBridgeDevmemIntExportCtx, NULL); + PVRSRVBridgeDevmemIntExportCtx, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX)); SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, - PVRSRVBridgeDevmemIntUnexportCtx, NULL); + PVRSRVBridgeDevmemIntUnexportCtx, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX)); SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, - PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); + PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c index 03eb654bf195..72c6a86a0ec8 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c @@ -90,9 +90,7 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -107,7 +105,6 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long)); @@ -123,7 +120,6 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -200,11 +196,7 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -232,9 +224,7 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -249,7 +239,6 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long)); @@ -265,7 +254,6 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -342,11 +330,7 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -372,9 +356,7 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -389,7 +371,6 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long)); @@ -405,7 +386,6 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -455,11 +435,7 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -485,9 +461,7 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -502,7 +476,6 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long)); @@ -519,7 +492,6 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -571,11 +543,7 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -609,9 +577,7 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -646,7 +612,6 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long)); @@ -663,7 +628,6 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -791,11 +755,7 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -819,24 +779,33 @@ PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, - PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, - PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, - PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, - PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, - PVRSRVBridgeDevicememHistorySparseChange, - pDEVICEMEMHISTORYBridgeLock); + 
PVRSRVBridgeDevicememHistorySparseChange, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/di_bridge/server_di_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/di_bridge/server_di_bridge.c index 3c40afbd676a..154bccdaae5e 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/di_bridge/server_di_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/di_bridge/server_di_bridge.c @@ -90,9 +90,7 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -112,7 +110,6 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long)); @@ -128,7 +125,6 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -190,10 +186,36 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, if (psDICreateContextOUT->eError != PVRSRV_OK) { - if (psContextInt) + if (psDICreateContextOUT->hContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDICreateContextOUT-> + hContext, + PVRSRV_HANDLE_TYPE_DI_CONTEXT); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + else if (psContextInt) { DIDestroyContextKM(psContextInt); } + } /* Allocated space should be equal to the last updated offset */ @@ -202,11 +224,7 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -266,9 +284,7 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0; @@ -283,7 +299,6 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long)); @@ -299,7 +314,6 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -370,11 +384,7 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -402,9 +412,7 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -427,7 +435,6 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long)); @@ -443,7 +450,6 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -534,11 +540,7 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -606,19 +608,27 @@ PVRSRV_ERROR InitDIBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT, - PVRSRVBridgeDICreateContext, NULL); + PVRSRVBridgeDICreateContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_DICREATECONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_DICREATECONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT, - PVRSRVBridgeDIDestroyContext, NULL); + PVRSRVBridgeDIDestroyContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY, - PVRSRVBridgeDIReadEntry, NULL); + PVRSRVBridgeDIReadEntry, NULL, sizeof(PVRSRV_BRIDGE_IN_DIREADENTRY), + sizeof(PVRSRV_BRIDGE_OUT_DIREADENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY, - PVRSRVBridgeDIWriteEntry, NULL); + PVRSRVBridgeDIWriteEntry, NULL, sizeof(PVRSRV_BRIDGE_IN_DIWRITEENTRY), + sizeof(PVRSRV_BRIDGE_OUT_DIWRITEENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES, - PVRSRVBridgeDIListAllEntries, NULL); + PVRSRVBridgeDIListAllEntries, NULL, + sizeof(PVRSRV_BRIDGE_IN_DILISTALLENTRIES), + sizeof(PVRSRV_BRIDGE_OUT_DILISTALLENTRIES)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/dma_bridge/server_dma_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/dma_bridge/server_dma_bridge.c index 29d45c71380c..7f2c5dd495e3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/dma_bridge/server_dma_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/dma_bridge/server_dma_bridge.c @@ -85,9 +85,7 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL 
bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -113,7 +111,6 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDmaTransferIN), sizeof(unsigned long)); @@ -129,7 +126,6 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -280,11 +276,7 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -311,9 +303,7 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -337,7 +327,6 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDmaSparseMappingTableIN), sizeof(unsigned long)); @@ -353,7 +342,6 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -432,11 +420,7 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -476,13 +460,17 @@ PVRSRV_ERROR InitDMABridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER, - PVRSRVBridgeDmaTransfer, NULL); + PVRSRVBridgeDmaTransfer, NULL, sizeof(PVRSRV_BRIDGE_IN_DMATRANSFER), + sizeof(PVRSRV_BRIDGE_OUT_DMATRANSFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE, - PVRSRVBridgeDmaSparseMappingTable, NULL); + PVRSRVBridgeDmaSparseMappingTable, NULL, + sizeof(PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE), + sizeof(PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS, - PVRSRVBridgeDmaDeviceParams, NULL); + PVRSRVBridgeDmaDeviceParams, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h index 7547d9f76297..cc631e3e1116 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h @@ -55,8 +55,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 #define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 -#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 -#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 #define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3 #define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3) @@ -82,28 +82,6 @@ typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; -/******************************************* - PhysmemImportDmaBufLocked - *******************************************/ - -/* Bridge in structure for PhysmemImportDmaBufLocked */ -typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED_TAG -{ - const IMG_CHAR *puiName; - IMG_INT ifd; - IMG_UINT32 ui32NameSize; - PVRSRV_MEMALLOCFLAGS_T uiFlags; -} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED; - -/* Bridge out structure for PhysmemImportDmaBufLocked */ -typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED_TAG -{ - IMG_DEVMEM_ALIGN_T uiAlign; - IMG_DEVMEM_SIZE_T uiSize; - IMG_HANDLE hPMRPtr; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED; - /******************************************* PhysmemExportDmaBuf *******************************************/ @@ -121,6 +99,23 @@ typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG IMG_INT iFd; } __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; +/******************************************* + PhysmemExportGemHandle + *******************************************/ + +/* Bridge in structure for PhysmemExportGemHandle */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE; + +/* Bridge out structure for PhysmemExportGemHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Handle; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE; + /******************************************* PhysmemImportSparseDmaBuf *******************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c index 4e784db6ea23..89aa1477f55a 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c @@ -94,9 +94,7 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -118,7 +116,6 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long)); @@ -134,7 +131,6 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -219,175 +215,7 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} - -static PVRSRV_ERROR _PhysmemImportDmaBufLockedpsPMRPtrIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = PMRUnrefUnlockPMR((PMR *) pvData); - return eError; -} - -static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, - "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgePhysmemImportDmaBufLocked(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPhysmemImportDmaBufLockedIN_UI8, - IMG_UINT8 * psPhysmemImportDmaBufLockedOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedIN = - (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *) - IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedOUT = - (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *) - IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedOUT_UI8, 0); - - IMG_CHAR *uiNameInt = NULL; - PMR *psPMRPtrInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; - - if (unlikely(psPhysmemImportDmaBufLockedIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysmemImportDmaBufLocked_exit; - } - - if (ui64BufferSize > IMG_UINT32_MAX) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto PhysmemImportDmaBufLocked_exit; - } - - ui32BufferSize = (IMG_UINT32) ui64BufferSize; - - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufLockedIN; - - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - - if (!pArrayArgsBuffer) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto PhysmemImportDmaBufLocked_exit; - } - } - } - - if (psPhysmemImportDmaBufLockedIN->ui32NameSize != 0) - { - uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR); - } - - /* Copy the data over */ - if (psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR) > 0) - { - if (OSCopyFromUser - (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufLockedIN->puiName, - psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto PhysmemImportDmaBufLocked_exit; - } - ((IMG_CHAR *) - uiNameInt)[(psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = - '\0'; - } - - psPhysmemImportDmaBufLockedOUT->eError = - PhysmemImportDmaBufLocked(psConnection, OSGetDevNode(psConnection), - psPhysmemImportDmaBufLockedIN->ifd, - psPhysmemImportDmaBufLockedIN->uiFlags, - psPhysmemImportDmaBufLockedIN->ui32NameSize, - uiNameInt, - &psPMRPtrInt, - &psPhysmemImportDmaBufLockedOUT->uiSize, - &psPhysmemImportDmaBufLockedOUT->uiAlign); - /* Exit early if bridged call fails */ - if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) - { - goto PhysmemImportDmaBufLocked_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psPhysmemImportDmaBufLockedOUT->eError = - PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psPhysmemImportDmaBufLockedOUT->hPMRPtr, (void *)psPMRPtrInt, - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _PhysmemImportDmaBufLockedpsPMRPtrIntRelease); - if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto PhysmemImportDmaBufLocked_exit; - } - - /* Release now we have created handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - -PhysmemImportDmaBufLocked_exit: - - if (psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK) - { - if (psPMRPtrInt) - { - LockHandle(KERNEL_HANDLE_BASE); - PMRUnrefUnlockPMR(psPMRPtrInt); - UnlockHandle(KERNEL_HANDLE_BASE); - } - } - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psPhysmemImportDmaBufLockedOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -446,6 +274,59 @@ PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgePhysmemExportGemHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemExportGemHandleIN_UI8, + IMG_UINT8 * psPhysmemExportGemHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE *psPhysmemExportGemHandleIN = + (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE *) + IMG_OFFSET_ADDR(psPhysmemExportGemHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE *psPhysmemExportGemHandleOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE *) + IMG_OFFSET_ADDR(psPhysmemExportGemHandleOUT_UI8, 0); + + IMG_HANDLE hPMR = psPhysmemExportGemHandleIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPhysmemExportGemHandleOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psPhysmemExportGemHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemExportGemHandle_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPhysmemExportGemHandleOUT->eError = + PhysmemExportGemHandle(psConnection, OSGetDevNode(psConnection), + psPMRInt, &psPhysmemExportGemHandleOUT->ui32Handle); + +PhysmemExportGemHandle_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -477,9 +358,7 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -509,7 +388,6 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long)); @@ -525,7 +403,6 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -637,11 +514,7 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -661,16 +534,24 @@ PVRSRV_ERROR InitDMABUFBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, - PVRSRVBridgePhysmemImportDmaBuf, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED, - PVRSRVBridgePhysmemImportDmaBufLocked, NULL); + PVRSRVBridgePhysmemImportDmaBuf, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, - PVRSRVBridgePhysmemExportDmaBuf, NULL); + PVRSRVBridgePhysmemExportDmaBuf, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE, + PVRSRVBridgePhysmemExportGemHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, - PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); + PVRSRVBridgePhysmemImportSparseDmaBuf, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF)); return PVRSRV_OK; } @@ -683,11 +564,10 @@ void DeinitDMABUFBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, - PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h index b3514eaba9b8..2c5d18843120 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h @@ -61,11 +61,4 @@ IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, IMG_UINT32 ui32EnablePID, IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode); -IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, - IMG_UINT32 ui32PID, - IMG_UINT32 ui32TID, - IMG_UINT64 ui64TimeStamp, - IMG_UINT32 ui32SF, - IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args); - #endif /* CLIENT_HTBUFFER_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c index 9c5833116075..3e6295cdfdf3 100644 
--- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c @@ -68,18 +68,3 @@ IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, return eError; } - -IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, - IMG_UINT32 ui32PID, - IMG_UINT32 ui32TID, - IMG_UINT64 ui64TimeStamp, - IMG_UINT32 ui32SF, - IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args) -{ - PVRSRV_ERROR eError; - PVR_UNREFERENCED_PARAMETER(hBridge); - - eError = HTBLogKM(ui32PID, ui32TID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args); - - return eError; -} diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h index 69a406b253d9..32be202186dc 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h @@ -56,8 +56,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 #define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 -#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1 -#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1) +#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0) /******************************************* HTBControl @@ -80,25 +79,4 @@ typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_HTBCONTROL; -/******************************************* - HTBLog - *******************************************/ - -/* Bridge in structure for HTBLog */ -typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG -{ - IMG_UINT64 ui64TimeStamp; - IMG_UINT32 *pui32Args; - IMG_UINT32 ui32NumArgs; - IMG_UINT32 ui32PID; - IMG_UINT32 ui32SF; - IMG_UINT32 ui32TID; -} __packed PVRSRV_BRIDGE_IN_HTBLOG; - -/* Bridge out structure for HTBLog */ -typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_HTBLOG; - #endif /* COMMON_HTBUFFER_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c index dd81d914b2f2..9b730d28e5b4 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c @@ -86,9 +86,7 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -112,7 +110,6 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); @@ -128,7 +125,6 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -176,124 +172,7 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} - -static_assert(HTB_LOG_MAX_PARAMS <= IMG_UINT32_MAX, - "HTB_LOG_MAX_PARAMS must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psHTBLogIN_UI8, - IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN = - (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0); - PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT = - (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0); - - IMG_UINT32 *ui32ArgsInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0; - - if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS)) - { - psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto HTBLog_exit; - } - - PVR_UNREFERENCED_PARAMETER(psConnection); - - if (ui64BufferSize > IMG_UINT32_MAX) - { - psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto HTBLog_exit; - } - - ui32BufferSize = (IMG_UINT32) ui64BufferSize; - - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBLogIN; - - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - - if (!pArrayArgsBuffer) - { - psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto HTBLog_exit; - } - } - } - - if (psHTBLogIN->ui32NumArgs != 0) - { - ui32ArgsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ArgsInt, (const void __user *)psHTBLogIN->pui32Args, - psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto HTBLog_exit; - } - } - - psHTBLogOUT->eError = - HTBLogKM(psHTBLogIN->ui32PID, - psHTBLogIN->ui32TID, - psHTBLogIN->ui64TimeStamp, - psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt); - -HTBLog_exit: - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psHTBLogOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -319,10 +198,9 @@ PVRSRV_ERROR InitHTBUFFERBridge(void) PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate"); SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, - PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); - - SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, - PVRSRVBridgeHTBLog, pHTBUFFERBridgeLock); + PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_HTBCONTROL), + sizeof(PVRSRV_BRIDGE_OUT_HTBCONTROL)); return PVRSRV_OK; } @@ -336,8 +214,6 @@ void DeinitHTBUFFERBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG); - } #else /* EXCLUDE_HTBUFFER_BRIDGE */ /* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_bridge.h index 94262864083c..d5ba6dd74da3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_bridge.h @@ -85,8 +85,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); -IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); - IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32NumPhysChunks, @@ -101,30 +99,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags, PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); -IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 * pui32MappingTable, - 
IMG_UINT32 ui32Log2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32AnnotationLength, - const IMG_CHAR * puiAnnotation, - IMG_PID ui32PID, - IMG_HANDLE * phPMRPtr, - IMG_UINT32 ui32PDumpFlags, - PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve1(IMG_HANDLE hBridge, IMG_HANDLE hPMR); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve2(IMG_HANDLE hBridge, IMG_HANDLE hPMR); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve3(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve4(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR); - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, IMG_BOOL bbKernelMemoryCtx, IMG_HANDLE * phDevMemServerContext, @@ -138,66 +112,45 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT32 ui32HeapConfigIndex, IMG_UINT32 ui32HeapIndex, - IMG_DEV_VIRTADDR sHeapBaseAddr, - IMG_DEVMEM_SIZE_T uiHeapLength, - IMG_UINT32 ui32Log2DataPageSize, IMG_HANDLE * phDevmemHeapPtr); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemServerHeap, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - IMG_HANDLE * phMapping); + IMG_HANDLE hReservation, IMG_HANDLE hPMR); -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping); +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hReservation); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, IMG_HANDLE hDevmemServerHeap, IMG_DEV_VIRTADDR sAddress, IMG_DEVMEM_SIZE_T uiLength, + PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_HANDLE * phReservation); +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRangeAndMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE * phReservation); + IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation); IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, - IMG_HANDLE hSrvDevMemHeap, - IMG_HANDLE hPMR, IMG_UINT32 ui32AllocPageCount, IMG_UINT32 * pui32AllocPageIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 * pui32FreePageIndices, IMG_UINT32 ui32SparseFlags, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_UINT64 ui64CPUVAddr); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - IMG_UINT32 ui32PageCount, - IMG_UINT32 ui32PhysicalPgOffset, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_UINT32 ui32PageCount); + IMG_HANDLE hReservation); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_DEV_VIRTADDR sAddress); -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemCtx, - IMG_DEV_VIRTADDR sAddress, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate); - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT64 ui64FBSCEntries); @@ -226,29 +179,56 @@ IMG_INTERNAL PVRSRV_ERROR 
BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, IMG_UINT32 * pui32Log2ImportAlignmentOut); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, - IMG_HANDLE hDevm, - IMG_UINT32 ui32PID, IMG_BOOL bRegister); - -IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, - IMG_UINT32 * pui32PhysHeapCount); + IMG_HANDLE hDevmemCtx, + IMG_BOOL bRegister); IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, IMG_UINT32 ui32PhysHeapCount, PVRSRV_PHYS_HEAP * peaPhysHeapID, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); + PHYS_HEAP_MEM_STATS_V1 * pasapPhysHeapMemStats); IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, PVRSRV_PHYS_HEAP * peHeap); -IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge, - IMG_UINT32 ui32PhysHeapCount, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_DEV_VIRTADDR * psFaultAddress); -IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, - IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid); +IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge, + IMG_UINT32 ui32ui32StatType, + IMG_PID ui32pid); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE * phReservation); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hReservation); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysPageOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32VirtPageOffset, + IMG_UINT32 ui32PageCount); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapVRangeToBackingPage(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32PageCount, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset); + +IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo2(IMG_HANDLE hBridge, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP * peaPhysHeapID, + PHYS_HEAP_MEM_STATS_V2 * pasapPhysHeapMemStats); #endif /* CLIENT_MM_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_direct_bridge.c index 701af68241d0..c1810e959ca8 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/client_mm_direct_bridge.c @@ -67,6 +67,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, IMG_UINT32 * pui32Log2Contig, IMG_UINT64 * pui64Password) { +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR eError; PMR *psPMRInt; PMR_EXPORT *psPMRExportInt = NULL; @@ -78,10 +79,21 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, *phPMRExport = psPMRExportInt; return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hPMR); + PVR_UNREFERENCED_PARAMETER(phPMRExport); + PVR_UNREFERENCED_PARAMETER(pui64Size); + PVR_UNREFERENCED_PARAMETER(pui32Log2Contig); + PVR_UNREFERENCED_PARAMETER(pui64Password); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif } IMG_INTERNAL PVRSRV_ERROR 
BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport) { +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR eError; PMR_EXPORT *psPMRExportInt; PVR_UNREFERENCED_PARAMETER(hBridge); @@ -91,6 +103,12 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hP eError = PMRUnexportPMR(psPMRExportInt); return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hPMRExport); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif } IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, @@ -142,6 +160,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, IMG_UINT64 ui64uiSize, IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR) { +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR eError; PMR_EXPORT *psPMRExportInt; PMR *psPMRInt = NULL; @@ -155,6 +174,15 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, *phPMR = psPMRInt; return eError; +#else + PVR_UNREFERENCED_PARAMETER(hPMRExport); + PVR_UNREFERENCED_PARAMETER(ui64uiPassword); + PVR_UNREFERENCED_PARAMETER(ui64uiSize); + PVR_UNREFERENCED_PARAMETER(ui32uiLog2Contig); + PVR_UNREFERENCED_PARAMETER(phPMR); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif } IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, @@ -189,19 +217,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psPMRInt = (PMR *) hPMR; - - eError = PMRUnrefUnlockPMR(psPMRInt); - - return eError; -} - IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32NumPhysChunks, @@ -235,97 +250,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 * pui32MappingTable, - IMG_UINT32 ui32Log2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32AnnotationLength, - const IMG_CHAR * puiAnnotation, - IMG_PID ui32PID, - IMG_HANDLE * phPMRPtr, - IMG_UINT32 ui32PDumpFlags, - PVRSRV_MEMALLOCFLAGS_T * puiOutFlags) -{ - PVRSRV_ERROR eError; - PMR *psPMRPtrInt = NULL; - - eError = - PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - pui32MappingTable, - ui32Log2PageSize, - uiFlags, - ui32AnnotationLength, - puiAnnotation, - ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags); - - *phPMRPtr = psPMRPtrInt; - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve1(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve1(psPMRInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve2(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve2(psPMRInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve3(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMappingInt; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psMappingInt = 
(DEVMEMINT_MAPPING *) hMapping; - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve3(psMappingInt, psPMRInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve4(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMappingInt; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psMappingInt = (DEVMEMINT_MAPPING *) hMapping; - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve4(psMappingInt, psPMRInt); - - return eError; -} - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, IMG_BOOL bbKernelMemoryCtx, IMG_HANDLE * phDevMemServerContext, @@ -364,9 +288,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT32 ui32HeapConfigIndex, IMG_UINT32 ui32HeapIndex, - IMG_DEV_VIRTADDR sHeapBaseAddr, - IMG_DEVMEM_SIZE_T uiHeapLength, - IMG_UINT32 ui32Log2DataPageSize, IMG_HANDLE * phDevmemHeapPtr) { PVRSRV_ERROR eError; @@ -378,10 +299,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, eError = DevmemIntHeapCreate(psDevmemCtxInt, - ui32HeapConfigIndex, - ui32HeapIndex, - sHeapBaseAddr, - uiHeapLength, ui32Log2DataPageSize, &psDevmemHeapPtrInt); + ui32HeapConfigIndex, ui32HeapIndex, &psDevmemHeapPtrInt); *phDevmemHeapPtr = psDevmemHeapPtrInt; return eError; @@ -401,40 +319,30 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HAN } IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemServerHeap, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - IMG_HANDLE * phMapping) + IMG_HANDLE hReservation, IMG_HANDLE hPMR) { PVRSRV_ERROR eError; - DEVMEMINT_HEAP *psDevmemServerHeapInt; DEVMEMINT_RESERVATION *psReservationInt; PMR *psPMRInt; - DEVMEMINT_MAPPING *psMappingInt = NULL; PVR_UNREFERENCED_PARAMETER(hBridge); - psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; psPMRInt = (PMR *) hPMR; - eError = - DevmemIntMapPMR(psDevmemServerHeapInt, - psReservationInt, psPMRInt, uiMapFlags, &psMappingInt); + eError = DevmemIntMapPMR(psReservationInt, psPMRInt); - *phMapping = psMappingInt; return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping) +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hReservation) { PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMappingInt; + DEVMEMINT_RESERVATION *psReservationInt; PVR_UNREFERENCED_PARAMETER(hBridge); - psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - eError = DevmemIntUnmapPMR(psMappingInt); + eError = DevmemIntUnmapPMR(psReservationInt); return eError; } @@ -443,93 +351,69 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, IMG_HANDLE hDevmemServerHeap, IMG_DEV_VIRTADDR sAddress, IMG_DEVMEM_SIZE_T uiLength, + PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_HANDLE * phReservation) { PVRSRV_ERROR eError; DEVMEMINT_HEAP *psDevmemServerHeapInt; DEVMEMINT_RESERVATION *psReservationInt = NULL; - PVR_UNREFERENCED_PARAMETER(hBridge); psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; eError = - DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); + DevmemIntReserveRange(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemServerHeapInt, + sAddress, uiLength, uiFlags, &psReservationInt); *phReservation = 
psReservationInt; return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) -{ - PVRSRV_ERROR eError; - DEVMEMINT_RESERVATION *psReservationInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - - eError = DevmemIntUnreserveRange(psReservationInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, - IMG_HANDLE hSrvDevMemHeap, - IMG_HANDLE hPMR, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 * pui32AllocPageIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 * pui32FreePageIndices, - IMG_UINT32 ui32SparseFlags, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr) +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRangeAndMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE * phReservation) { PVRSRV_ERROR eError; - DEVMEMINT_HEAP *psSrvDevMemHeapInt; + DEVMEMINT_HEAP *psDevmemServerHeapInt; PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); + DEVMEMINT_RESERVATION *psReservationInt = NULL; - psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; psPMRInt = (PMR *) hPMR; eError = - DevmemIntChangeSparse(psSrvDevMemHeapInt, - psPMRInt, - ui32AllocPageCount, - pui32AllocPageIndices, - ui32FreePageCount, - pui32FreePageIndices, - ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr); + DevmemIntReserveRangeAndMapPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemServerHeapInt, + sAddress, + uiLength, psPMRInt, uiFlags, &psReservationInt); + *phReservation = psReservationInt; return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - IMG_UINT32 ui32PageCount, - IMG_UINT32 ui32PhysicalPgOffset, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr) +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) { PVRSRV_ERROR eError; DEVMEMINT_RESERVATION *psReservationInt; - PMR *psPMRInt; PVR_UNREFERENCED_PARAMETER(hBridge); psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - psPMRInt = (PMR *) hPMR; - eError = - DevmemIntMapPages(psReservationInt, - psPMRInt, ui32PageCount, ui32PhysicalPgOffset, uiFlags, sDevVAddr); + eError = DevmemIntUnreserveRange(psReservationInt); return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_UINT32 ui32PageCount) +IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32SparseFlags, IMG_HANDLE hReservation) { PVRSRV_ERROR eError; DEVMEMINT_RESERVATION *psReservationInt; @@ -537,7 +421,11 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - eError = DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount); + eError = + DevmemIntChangeSparse(ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, ui32SparseFlags, psReservationInt); return eError; } @@ -558,33 +446,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, return eError; } 
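For orientation, the reworked DevmemInt interface above moves the mapping flags onto the reservation itself: a caller now reserves a device-virtual range with the allocation flags and then maps a PMR against that reservation, or uses the combined BridgeDevmemIntReserveRangeAndMapPMR in a single call. A minimal sketch of the two-step path, assuming the bridge handle, server heap handle and PMR handle are already valid; the helper name and its error handling are illustrative only and not part of the generated bridge code:

#include "client_mm_bridge.h"

static PVRSRV_ERROR ExampleReserveThenMap(IMG_HANDLE hBridge,
					  IMG_HANDLE hDevmemServerHeap,
					  IMG_HANDLE hPMR,
					  IMG_DEV_VIRTADDR sAddress,
					  IMG_DEVMEM_SIZE_T uiLength,
					  PVRSRV_MEMALLOCFLAGS_T uiFlags,
					  IMG_HANDLE *phReservation)
{
	PVRSRV_ERROR eError;

	/* Mapping flags are now carried by the reservation, not by DevmemIntMapPMR. */
	eError = BridgeDevmemIntReserveRange(hBridge, hDevmemServerHeap,
					     sAddress, uiLength, uiFlags, phReservation);
	if (eError != PVRSRV_OK)
		return eError;

	/* Map the PMR against the reservation; no separate mapping handle is returned. */
	eError = BridgeDevmemIntMapPMR(hBridge, *phReservation, hPMR);
	if (eError != PVRSRV_OK)
		(void) BridgeDevmemIntUnreserveRange(hBridge, *phReservation);

	return eError;
}

The single-call BridgeDevmemIntReserveRangeAndMapPMR(hBridge, hDevmemServerHeap, sAddress, uiLength, hPMR, uiFlags, phReservation) covers the common case where the reservation exists only to back one PMR.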
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemCtx, - IMG_DEV_VIRTADDR sAddress, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate) -{ -#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) - PVRSRV_ERROR eError; - DEVMEMINT_CTX *psDevmemCtxInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; - - eError = DevmemIntFlushDevSLCRange(psDevmemCtxInt, sAddress, uiSize, bInvalidate); - - return eError; -#else - PVR_UNREFERENCED_PARAMETER(hBridge); - PVR_UNREFERENCED_PARAMETER(hDevmemCtx); - PVR_UNREFERENCED_PARAMETER(sAddress); - PVR_UNREFERENCED_PARAMETER(uiSize); - PVR_UNREFERENCED_PARAMETER(bInvalidate); - - return PVRSRV_ERROR_NOT_IMPLEMENTED; -#endif -} - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT64 ui64FBSCEntries) @@ -675,28 +536,16 @@ IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, } IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, - IMG_HANDLE hDevm, - IMG_UINT32 ui32PID, IMG_BOOL bRegister) + IMG_HANDLE hDevmemCtx, + IMG_BOOL bRegister) { PVRSRV_ERROR eError; - DEVMEMINT_CTX *psDevmInt; + DEVMEMINT_CTX *psDevmemCtxInt; PVR_UNREFERENCED_PARAMETER(hBridge); - psDevmInt = (DEVMEMINT_CTX *) hDevm; - - eError = DevmemIntRegisterPFNotifyKM(psDevmInt, ui32PID, bRegister); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, - IMG_UINT32 * pui32PhysHeapCount) -{ - PVRSRV_ERROR eError; + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; - eError = - PVRSRVGetMaxPhysHeapCountKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - pui32PhysHeapCount); + eError = DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, bRegister); return eError; } @@ -704,7 +553,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, IMG_UINT32 ui32PhysHeapCount, PVRSRV_PHYS_HEAP * peaPhysHeapID, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) + PHYS_HEAP_MEM_STATS_V1 * pasapPhysHeapMemStats) { PVRSRV_ERROR eError; @@ -726,19 +575,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge, - IMG_UINT32 ui32PhysHeapCount, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) -{ - PVRSRV_ERROR eError; - - eError = - PVRSRVGetHeapPhysMemUsageKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - ui32PhysHeapCount, pasapPhysHeapMemStats); - - return eError; -} - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_DEV_VIRTADDR * psFaultAddress) @@ -755,8 +591,9 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, - IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid) +IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge, + IMG_UINT32 ui32ui32StatType, + IMG_PID ui32pid) { #if defined(PVRSRV_ENABLE_PROCESS_STATS) PVRSRV_ERROR eError; @@ -773,3 +610,110 @@ IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, return PVRSRV_ERROR_NOT_IMPLEMENTED; #endif } + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE * phReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP 
*psDevmemServerHeapInt; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + + eError = + DevmemXIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); + + *phReservation = psReservationInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hReservation) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + + eError = DevmemXIntUnreserveRange(psReservationInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysPageOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemXIntMapPages(psReservationInt, + psPMRInt, + ui32PageCount, ui32PhysPageOffset, uiFlags, ui32VirtPageOffset); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32VirtPageOffset, + IMG_UINT32 ui32PageCount) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + + eError = DevmemXIntUnmapPages(psReservationInt, ui32VirtPageOffset, ui32PageCount); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapVRangeToBackingPage(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32PageCount, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + + eError = + DevmemXIntMapVRangeToBackingPage(psReservationInt, + ui32PageCount, uiFlags, ui32VirtPageOffset); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo2(IMG_HANDLE hBridge, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP * peaPhysHeapID, + PHYS_HEAP_MEM_STATS_V2 * pasapPhysHeapMemStats) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVPhysHeapGetMemInfo2KM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32PhysHeapCount, peaPhysHeapID, pasapPhysHeapMemStats); + + return eError; +} diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/common_mm_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/common_mm_bridge.h index 56cacdffdf18..3b3c8c2dc215 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/common_mm_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/common_mm_bridge.h @@ -64,39 +64,35 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
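The new DevmemXInt entry points added above separate reservation from page-granular mapping: a device-virtual range is reserved once, and runs of physical pages from a PMR can then be mapped or unmapped at a virtual page offset within that reservation. A minimal sketch of the flow, assuming valid bridge, heap and PMR handles; the wrapper name and the page counts/offsets are illustrative only:

#include "client_mm_bridge.h"

static PVRSRV_ERROR ExampleDevmemXMapRun(IMG_HANDLE hBridge,
					 IMG_HANDLE hDevmemServerHeap,
					 IMG_HANDLE hPMR,
					 IMG_DEV_VIRTADDR sAddress,
					 IMG_DEVMEM_SIZE_T uiLength,
					 PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
	IMG_HANDLE hReservation = NULL;
	PVRSRV_ERROR eError;

	/* Reserve the device-virtual range once. */
	eError = BridgeDevmemXIntReserveRange(hBridge, hDevmemServerHeap,
					      sAddress, uiLength, &hReservation);
	if (eError != PVRSRV_OK)
		return eError;

	/* Map four physical pages of the PMR (starting at physical page 0)
	 * at virtual page offset 0 within the reservation. */
	eError = BridgeDevmemXIntMapPages(hBridge, hReservation, hPMR,
					  4 /* page count */, 0 /* phys page offset */,
					  uiFlags, 0 /* virt page offset */);
	if (eError != PVRSRV_OK)
		goto unreserve;

	/* ... use the mapping ... */

	(void) BridgeDevmemXIntUnmapPages(hBridge, hReservation,
					  0 /* virt page offset */, 4 /* page count */);

unreserve:
	(void) BridgeDevmemXIntUnreserveRange(hBridge, hReservation);
	return eError;
}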
#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5 #define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6 #define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7 -#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 -#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9 -#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE1 PVRSRV_BRIDGE_MM_CMD_FIRST+11 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE2 PVRSRV_BRIDGE_MM_CMD_FIRST+12 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE3 PVRSRV_BRIDGE_MM_CMD_FIRST+13 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE4 PVRSRV_BRIDGE_MM_CMD_FIRST+14 -#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15 -#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16 -#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17 -#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18 -#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19 -#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20 -#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21 -#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22 -#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23 -#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24 -#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25 -#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26 -#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27 -#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32 -#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33 -#define PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+34 -#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+35 -#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+36 -#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE PVRSRV_BRIDGE_MM_CMD_FIRST+37 -#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+38 -#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+39 -#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+39) +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+9 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+10 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+11 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+12 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+13 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+14 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+15 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+16 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+17 +#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM 
PVRSRV_BRIDGE_MM_CMD_FIRST+18 +#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+19 +#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+20 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+21 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+22 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+23 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+24 +#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+25 +#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+26 +#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+27 +#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+28 +#define PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT PVRSRV_BRIDGE_MM_CMD_FIRST+29 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+30 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+31 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+32 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+33 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE PVRSRV_BRIDGE_MM_CMD_FIRST+34 +#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO2 PVRSRV_BRIDGE_MM_CMD_FIRST+35 +#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+35) /******************************************* PMRExportPMR @@ -239,22 +235,6 @@ typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR; -/******************************************* - PMRUnrefUnlockPMR - *******************************************/ - -/* Bridge in structure for PMRUnrefUnlockPMR */ -typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG -{ - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; - -/* Bridge out structure for PMRUnrefUnlockPMR */ -typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; - /******************************************* PhysmemNewRamBackedPMR *******************************************/ @@ -282,99 +262,6 @@ typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG PVRSRV_MEMALLOCFLAGS_T uiOutFlags; } __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; -/******************************************* - PhysmemNewRamBackedLockedPMR - *******************************************/ - -/* Bridge in structure for PhysmemNewRamBackedLockedPMR */ -typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG -{ - IMG_DEVMEM_SIZE_T uiSize; - IMG_UINT32 *pui32MappingTable; - const IMG_CHAR *puiAnnotation; - IMG_UINT32 ui32AnnotationLength; - IMG_UINT32 ui32Log2PageSize; - IMG_UINT32 ui32NumPhysChunks; - IMG_UINT32 ui32NumVirtChunks; - IMG_UINT32 ui32PDumpFlags; - IMG_PID ui32PID; - PVRSRV_MEMALLOCFLAGS_T uiFlags; -} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR; - -/* Bridge out structure for PhysmemNewRamBackedLockedPMR */ -typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG -{ - IMG_HANDLE hPMRPtr; - PVRSRV_ERROR eError; - PVRSRV_MEMALLOCFLAGS_T uiOutFlags; -} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR; - -/******************************************* - DevmemCompatReserve1 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve1 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1_TAG -{ - IMG_HANDLE hPMR; -} __packed 
PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1; - -/* Bridge out structure for DevmemCompatReserve1 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1; - -/******************************************* - DevmemCompatReserve2 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve2 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2_TAG -{ - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2; - -/* Bridge out structure for DevmemCompatReserve2 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2; - -/******************************************* - DevmemCompatReserve3 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve3 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3_TAG -{ - IMG_HANDLE hMapping; - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3; - -/* Bridge out structure for DevmemCompatReserve3 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3; - -/******************************************* - DevmemCompatReserve4 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve4 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4_TAG -{ - IMG_HANDLE hMapping; - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4; - -/* Bridge out structure for DevmemCompatReserve4 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4; - /******************************************* DevmemIntCtxCreate *******************************************/ @@ -417,12 +304,9 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG /* Bridge in structure for DevmemIntHeapCreate */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG { - IMG_DEV_VIRTADDR sHeapBaseAddr; - IMG_DEVMEM_SIZE_T uiHeapLength; IMG_HANDLE hDevmemCtx; IMG_UINT32 ui32HeapConfigIndex; IMG_UINT32 ui32HeapIndex; - IMG_UINT32 ui32Log2DataPageSize; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; /* Bridge out structure for DevmemIntHeapCreate */ @@ -455,16 +339,13 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG /* Bridge in structure for DevmemIntMapPMR */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG { - IMG_HANDLE hDevmemServerHeap; IMG_HANDLE hPMR; IMG_HANDLE hReservation; - PVRSRV_MEMALLOCFLAGS_T uiMapFlags; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; /* Bridge out structure for DevmemIntMapPMR */ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG { - IMG_HANDLE hMapping; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; @@ -475,7 +356,7 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG /* Bridge in structure for DevmemIntUnmapPMR */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG { - IMG_HANDLE hMapping; + IMG_HANDLE hReservation; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; /* Bridge out structure for DevmemIntUnmapPMR */ @@ -494,6 +375,7 @@ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG IMG_DEV_VIRTADDR sAddress; IMG_DEVMEM_SIZE_T uiLength; IMG_HANDLE hDevmemServerHeap; + PVRSRV_MEMALLOCFLAGS_T uiFlags; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; /* Bridge out structure for DevmemIntReserveRange */ @@ -503,6 +385,27 @@ typedef struct 
PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; +/******************************************* + DevmemIntReserveRangeAndMapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntReserveRangeAndMapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; + IMG_HANDLE hDevmemServerHeap; + IMG_HANDLE hPMR; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR; + +/* Bridge out structure for DevmemIntReserveRangeAndMapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR; + /******************************************* DevmemIntUnreserveRange *******************************************/ @@ -526,16 +429,12 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG /* Bridge in structure for ChangeSparseMem */ typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG { - IMG_DEV_VIRTADDR sDevVAddr; - IMG_UINT64 ui64CPUVAddr; - IMG_HANDLE hPMR; - IMG_HANDLE hSrvDevMemHeap; + IMG_HANDLE hReservation; IMG_UINT32 *pui32AllocPageIndices; IMG_UINT32 *pui32FreePageIndices; IMG_UINT32 ui32AllocPageCount; IMG_UINT32 ui32FreePageCount; IMG_UINT32 ui32SparseFlags; - PVRSRV_MEMALLOCFLAGS_T uiFlags; } __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; /* Bridge out structure for ChangeSparseMem */ @@ -544,45 +443,6 @@ typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; -/******************************************* - DevmemIntMapPages - *******************************************/ - -/* Bridge in structure for DevmemIntMapPages */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG -{ - IMG_DEV_VIRTADDR sDevVAddr; - IMG_HANDLE hPMR; - IMG_HANDLE hReservation; - IMG_UINT32 ui32PageCount; - IMG_UINT32 ui32PhysicalPgOffset; - PVRSRV_MEMALLOCFLAGS_T uiFlags; -} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES; - -/* Bridge out structure for DevmemIntMapPages */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES; - -/******************************************* - DevmemIntUnmapPages - *******************************************/ - -/* Bridge in structure for DevmemIntUnmapPages */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG -{ - IMG_DEV_VIRTADDR sDevVAddr; - IMG_HANDLE hReservation; - IMG_UINT32 ui32PageCount; -} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES; - -/* Bridge out structure for DevmemIntUnmapPages */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES; - /******************************************* DevmemIsVDevAddrValid *******************************************/ @@ -600,25 +460,6 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; -/******************************************* - DevmemFlushDevSLCRange - *******************************************/ - -/* Bridge in structure for DevmemFlushDevSLCRange */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG -{ - IMG_DEV_VIRTADDR sAddress; - IMG_DEVMEM_SIZE_T uiSize; - IMG_HANDLE hDevmemCtx; - IMG_BOOL bInvalidate; -} __packed PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE; - -/* Bridge out 
structure for DevmemFlushDevSLCRange */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE; - /******************************************* DevmemInvalidateFBSCTable *******************************************/ @@ -721,8 +562,7 @@ typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG /* Bridge in structure for DevmemIntRegisterPFNotifyKM */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG { - IMG_HANDLE hDevm; - IMG_UINT32 ui32PID; + IMG_HANDLE hDevmemCtx; IMG_BOOL bRegister; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; @@ -732,23 +572,6 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; -/******************************************* - GetMaxPhysHeapCount - *******************************************/ - -/* Bridge in structure for GetMaxPhysHeapCount */ -typedef struct PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT_TAG -{ - IMG_UINT32 ui32EmptyStructPlaceholder; -} __packed PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT; - -/* Bridge out structure for GetMaxPhysHeapCount */ -typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG -{ - PVRSRV_ERROR eError; - IMG_UINT32 ui32PhysHeapCount; -} __packed PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT; - /******************************************* PhysHeapGetMemInfo *******************************************/ @@ -756,7 +579,7 @@ typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG /* Bridge in structure for PhysHeapGetMemInfo */ typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG { - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; + PHYS_HEAP_MEM_STATS_V1 *pasapPhysHeapMemStats; PVRSRV_PHYS_HEAP *peaPhysHeapID; IMG_UINT32 ui32PhysHeapCount; } __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO; @@ -764,7 +587,7 @@ typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG /* Bridge out structure for PhysHeapGetMemInfo */ typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO_TAG { - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; + PHYS_HEAP_MEM_STATS_V1 *pasapPhysHeapMemStats; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO; @@ -785,24 +608,6 @@ typedef struct PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP_TAG PVRSRV_PHYS_HEAP eHeap; } __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP; -/******************************************* - GetHeapPhysMemUsage - *******************************************/ - -/* Bridge in structure for GetHeapPhysMemUsage */ -typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE_TAG -{ - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; - IMG_UINT32 ui32PhysHeapCount; -} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE; - -/* Bridge out structure for GetHeapPhysMemUsage */ -typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE_TAG -{ - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE; - /******************************************* DevmemGetFaultAddress *******************************************/ @@ -821,20 +626,132 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG } __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; /******************************************* - PVRSRVUpdateOOMStats + PVRSRVStatsUpdateOOMStat *******************************************/ -/* Bridge in structure for PVRSRVUpdateOOMStats */ -typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG +/* Bridge in structure for PVRSRVStatsUpdateOOMStat */ +typedef struct 
PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT_TAG { IMG_PID ui32pid; IMG_UINT32 ui32ui32StatType; -} __packed PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS; +} __packed PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT; + +/* Bridge out structure for PVRSRVStatsUpdateOOMStat */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT; + +/******************************************* + DevmemXIntReserveRange + *******************************************/ + +/* Bridge in structure for DevmemXIntReserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; + IMG_HANDLE hDevmemServerHeap; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE; + +/* Bridge out structure for DevmemXIntReserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE; + +/******************************************* + DevmemXIntUnreserveRange + *******************************************/ + +/* Bridge in structure for DevmemXIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE; + +/* Bridge out structure for DevmemXIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE; + +/******************************************* + DevmemXIntMapPages + *******************************************/ + +/* Bridge in structure for DevmemXIntMapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES_TAG +{ + IMG_HANDLE hPMR; + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32PhysPageOffset; + IMG_UINT32 ui32VirtPageOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES; + +/* Bridge out structure for DevmemXIntMapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES; + +/******************************************* + DevmemXIntUnmapPages + *******************************************/ + +/* Bridge in structure for DevmemXIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES_TAG +{ + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32VirtPageOffset; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES; + +/* Bridge out structure for DevmemXIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES; + +/******************************************* + DevmemXIntMapVRangeToBackingPage + *******************************************/ + +/* Bridge in structure for DevmemXIntMapVRangeToBackingPage */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE_TAG +{ + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32VirtPageOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE; + +/* Bridge out structure for DevmemXIntMapVRangeToBackingPage */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE; + +/******************************************* + PhysHeapGetMemInfo2 + *******************************************/ + +/* Bridge 
in structure for PhysHeapGetMemInfo2 */ +typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2_TAG +{ + PHYS_HEAP_MEM_STATS_V2 *pasapPhysHeapMemStats; + PVRSRV_PHYS_HEAP *peaPhysHeapID; + IMG_UINT32 ui32PhysHeapCount; +} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2; -/* Bridge out structure for PVRSRVUpdateOOMStats */ -typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG +/* Bridge out structure for PhysHeapGetMemInfo2 */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2_TAG { + PHYS_HEAP_MEM_STATS_V2 *pasapPhysHeapMemStats; PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS; +} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2; #endif /* COMMON_MM_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/server_mm_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/server_mm_bridge.c index a983224d0bdd..a79a92f5e9d1 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/server_mm_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mm_bridge/server_mm_bridge.c @@ -68,17 +68,20 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include +#if defined(SUPPORT_INSECURE_EXPORT) static PVRSRV_ERROR ReleasePMRExport(void *pvData) { PVR_UNREFERENCED_PARAMETER(pvData); return PVRSRV_OK; } +#endif /* *************************************************************************** * Server-side bridge entry points */ +#if defined(SUPPORT_INSECURE_EXPORT) static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -238,23 +241,28 @@ PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psPMRExportInt = NULL; /* Release now we have cleaned up creation handles. 
*/ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); } - if (psPMRExportInt) + else if (psPMRExportInt) { LockHandle(KERNEL_HANDLE_BASE); PMRUnexportPMR(psPMRExportInt); UnlockHandle(KERNEL_HANDLE_BASE); } + } return 0; } +#else +#define PVRSRVBridgePMRExportPMR NULL +#endif + +#if defined(SUPPORT_INSECURE_EXPORT) + static IMG_INT PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psPMRUnexportPMRIN_UI8, @@ -346,6 +354,10 @@ PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#else +#define PVRSRVBridgePMRUnexportPMR NULL +#endif + static IMG_INT PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psPMRGetUIDIN_UI8, @@ -527,6 +539,7 @@ PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#if defined(SUPPORT_INSECURE_EXPORT) static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -622,6 +635,10 @@ PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#else +#define PVRSRVBridgePMRImportPMR NULL +#endif + static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -751,42 +768,6 @@ PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, return 0; } -static IMG_INT -PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, - IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = - (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = - (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psPMRUnrefUnlockPMROUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR, - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) && - (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto PMRUnrefUnlockPMR_exit; - } - - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); - -PMRUnrefUnlockPMR_exit: - - return 0; -} - static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -818,9 +799,7 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -850,7 +829,6 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long)); @@ -866,7 +844,6 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -983,385 +960,351 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void *pvData) +static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = PMRUnrefUnlockPMR((PMR *) pvData); + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); return eError; } -static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, - "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, - "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); - static IMG_INT -PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPhysmemNewRamBackedLockedPMRIN_UI8, - IMG_UINT8 * psPhysmemNewRamBackedLockedPMROUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, + IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN = - (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) - IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT = - (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) - IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0); - - IMG_UINT32 *ui32MappingTableInt = NULL; - IMG_CHAR *uiAnnotationInt = NULL; - PMR *psPMRPtrInt = NULL; + PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, + 0); - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * - sizeof(IMG_UINT32)) + - ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * - sizeof(IMG_CHAR)) + 0; + psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; - if (unlikely - (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) + psDevmemIntCtxCreateOUT->eError = + DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), + psDevmemIntCtxCreateIN->bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) { - psPhysmemNewRamBackedLockedPMROUT->eError = 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysmemNewRamBackedLockedPMR_exit; + goto DevmemIntCtxCreate_exit; } - if (unlikely - (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + (void *) + psDevMemServerContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntCtxCreatepsDevMemServerContextIntRelease); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysmemNewRamBackedLockedPMR_exit; + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; } - if (ui64BufferSize > IMG_UINT32_MAX) + psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hPrivData, + (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psDevmemIntCtxCreateOUT-> + hDevMemServerContext); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto PhysmemNewRamBackedLockedPMR_exit; + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; } - ui32BufferSize = (IMG_UINT32) ui64BufferSize; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; +DevmemIntCtxCreate_exit: - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) + if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemIntCtxCreateOUT->hDevMemServerContext) { - IMG_BYTE *pInputBuffer = - (IMG_BYTE *) (void *)psPhysmemNewRamBackedLockedPMRIN; + PVRSRV_ERROR eError; - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); - if (!pArrayArgsBuffer) + eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) { - psPhysmemNewRamBackedLockedPMROUT->eError = - PVRSRV_ERROR_OUT_OF_MEMORY; - goto PhysmemNewRamBackedLockedPMR_exit; + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); } - } - } - - if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0) - { - ui32MappingTableInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32); - } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... 
*/ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Copy the data over */ - if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32MappingTableInt, - (const void __user *)psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, - psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != - PVRSRV_OK) - { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); - goto PhysmemNewRamBackedLockedPMR_exit; } - } - if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0) - { - uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); - } - /* Copy the data over */ - if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) - { - if (OSCopyFromUser - (NULL, uiAnnotationInt, - (const void __user *)psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, - psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != - PVRSRV_OK) + else if (psDevMemServerContextInt) { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto PhysmemNewRamBackedLockedPMR_exit; + DevmemIntCtxDestroy(psDevMemServerContextInt); } - ((IMG_CHAR *) - uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * - sizeof(IMG_CHAR)) - 1] = '\0'; - } - psPhysmemNewRamBackedLockedPMROUT->eError = - PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevNode(psConnection), - psPhysmemNewRamBackedLockedPMRIN->uiSize, - psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks, - psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks, - ui32MappingTableInt, - psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize, - psPhysmemNewRamBackedLockedPMRIN->uiFlags, - psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength, - uiAnnotationInt, - psPhysmemNewRamBackedLockedPMRIN->ui32PID, - &psPMRPtrInt, - psPhysmemNewRamBackedLockedPMRIN->ui32PDumpFlags, - &psPhysmemNewRamBackedLockedPMROUT->uiOutFlags); - /* Exit early if bridged call fails */ - if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) - { - goto PhysmemNewRamBackedLockedPMR_exit; } - /* Lock over handle creation. */ + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, + IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, + 0); + + /* Lock over handle destruction. 
*/ LockHandle(psConnection->psHandleBase); - psPhysmemNewRamBackedLockedPMROUT->eError = - PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr, - (void *)psPMRPtrInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease); - if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + psDevmemIntCtxDestroyOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntCtxDestroyIN-> + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto PhysmemNewRamBackedLockedPMR_exit; + goto DevmemIntCtxDestroy_exit; } - /* Release now we have created handles. */ + /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); -PhysmemNewRamBackedLockedPMR_exit: - - if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK) - { - if (psPMRPtrInt) - { - LockHandle(KERNEL_HANDLE_BASE); - PMRUnrefUnlockPMR(psPMRPtrInt); - UnlockHandle(KERNEL_HANDLE_BASE); - } - } - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psPhysmemNewRamBackedLockedPMROUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); +DevmemIntCtxDestroy_exit: return 0; } +static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + return eError; +} + static IMG_INT -PVRSRVBridgeDevmemCompatReserve1(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve1IN_UI8, - IMG_UINT8 * psDevmemCompatReserve1OUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, + IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1 *psDevmemCompatReserve1IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1 *) IMG_OFFSET_ADDR(psDevmemCompatReserve1IN_UI8, + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1 *psDevmemCompatReserve1OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve1OUT_UI8, 0); - IMG_HANDLE hPMR = psDevmemCompatReserve1IN->hPMR; - PMR *psPMRInt = NULL; + IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemCompatReserve1OUT->eError = + psDevmemIntHeapCreateOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve1OUT->eError != PVRSRV_OK)) + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve1_exit; + goto DevmemIntHeapCreate_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemCompatReserve1OUT->eError = DevmemCompatReserve1(psPMRInt); - -DevmemCompatReserve1_exit: + psDevmemIntHeapCreateOUT->eError = + DevmemIntHeapCreate(psDevmemCtxInt, + psDevmemIntHeapCreateIN->ui32HeapConfigIndex, + psDevmemIntHeapCreateIN->ui32HeapIndex, &psDevmemHeapPtrInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntHeapCreate_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntHeapCreateOUT-> + hDevmemHeapPtr, + (void *)psDevmemHeapPtrInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapCreate_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psPMRInt) + if (psDevmemCtxInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); + if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemHeapPtrInt) + { + DevmemIntHeapDestroy(psDevmemHeapPtrInt); + } + } + return 0; } static IMG_INT -PVRSRVBridgeDevmemCompatReserve2(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve2IN_UI8, - IMG_UINT8 * psDevmemCompatReserve2OUT_UI8, +PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, + IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2 *psDevmemCompatReserve2IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2 *) IMG_OFFSET_ADDR(psDevmemCompatReserve2IN_UI8, + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2 *psDevmemCompatReserve2OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve2OUT_UI8, 0); - - IMG_HANDLE hPMR = psDevmemCompatReserve2IN->hPMR; - PMR *psPMRInt = NULL; + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); - /* Lock over handle lookup. */ + /* Lock over handle destruction. 
*/ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psDevmemCompatReserve2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve2OUT->eError != PVRSRV_OK)) + psDevmemIntHeapDestroyOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && + (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve2_exit; + goto DevmemIntHeapDestroy_exit; } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemCompatReserve2OUT->eError = DevmemCompatReserve2(psPMRInt); -DevmemCompatReserve2_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ + /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); +DevmemIntHeapDestroy_exit: + return 0; } static IMG_INT -PVRSRVBridgeDevmemCompatReserve3(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve3IN_UI8, - IMG_UINT8 * psDevmemCompatReserve3OUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3 *psDevmemCompatReserve3IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3 *) IMG_OFFSET_ADDR(psDevmemCompatReserve3IN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3 *psDevmemCompatReserve3OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve3OUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); - IMG_HANDLE hMapping = psDevmemCompatReserve3IN->hMapping; - DEVMEMINT_MAPPING *psMappingInt = NULL; - IMG_HANDLE hPMR = psDevmemCompatReserve3IN->hPMR; + IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; PMR *psPMRInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemCompatReserve3OUT->eError = + psDevmemIntMapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psMappingInt, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); - if (unlikely(psDevmemCompatReserve3OUT->eError != PVRSRV_OK)) + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve3_exit; + goto DevmemIntMapPMR_exit; } /* Look up the address from the handle */ - psDevmemCompatReserve3OUT->eError = + psDevmemIntMapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psPMRInt, hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve3OUT->eError != PVRSRV_OK)) + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve3_exit; + goto DevmemIntMapPMR_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemCompatReserve3OUT->eError = DevmemCompatReserve3(psMappingInt, psPMRInt); + psDevmemIntMapPMROUT->eError = DevmemIntMapPMR(psReservationInt, psPMRInt); -DevmemCompatReserve3_exit: +DevmemIntMapPMR_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psMappingInt) + if (psReservationInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } /* Unreference the previously looked up handle */ @@ -1377,69 +1320,48 @@ PVRSRVBridgeDevmemCompatReserve3(IMG_UINT32 ui32DispatchTableEntry, } static IMG_INT -PVRSRVBridgeDevmemCompatReserve4(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve4IN_UI8, - IMG_UINT8 * psDevmemCompatReserve4OUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4 *psDevmemCompatReserve4IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4 *) IMG_OFFSET_ADDR(psDevmemCompatReserve4IN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4 *psDevmemCompatReserve4OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve4OUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); - IMG_HANDLE hMapping = psDevmemCompatReserve4IN->hMapping; - DEVMEMINT_MAPPING *psMappingInt = NULL; - IMG_HANDLE hPMR = psDevmemCompatReserve4IN->hPMR; - PMR *psPMRInt = NULL; + IMG_HANDLE hReservation = psDevmemIntUnmapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemCompatReserve4OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psMappingInt, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); - if (unlikely(psDevmemCompatReserve4OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve4_exit; - } - - /* Look up the address from the handle */ - psDevmemCompatReserve4OUT->eError = + psDevmemIntUnmapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve4OUT->eError != PVRSRV_OK)) + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntUnmapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve4_exit; + goto DevmemIntUnmapPMR_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemCompatReserve4OUT->eError = DevmemCompatReserve4(psMappingInt, psPMRInt); + psDevmemIntUnmapPMROUT->eError = DevmemIntUnmapPMR(psReservationInt); -DevmemCompatReserve4_exit: +DevmemIntUnmapPMR_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psMappingInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) + if (psReservationInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); @@ -1447,249 +1369,218 @@ PVRSRVBridgeDevmemCompatReserve4(IMG_UINT32 ui32DispatchTableEntry, return 0; } -static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) +static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); return eError; } static IMG_INT -PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, - IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, - 0); + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); - DEVMEMINT_CTX *psDevMemServerContextInt = NULL; - IMG_HANDLE hPrivDataInt = NULL; + IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; - psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - psDevmemIntCtxCreateOUT->eError = - DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), - psDevmemIntCtxCreateIN->bbKernelMemoryCtx, - &psDevMemServerContextInt, - &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Look up the address from the handle */ + psDevmemIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + DevmemIntReserveRange(psConnection, OSGetDevNode(psConnection), + psDevmemServerHeapInt, + psDevmemIntReserveRangeIN->sAddress, + psDevmemIntReserveRangeIN->uiLength, + psDevmemIntReserveRangeIN->uiFlags, &psReservationInt); /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) { - goto DevmemIntCtxCreate_exit; + goto DevmemIntReserveRange_exit; } /* Lock over handle creation. 
*/ LockHandle(psConnection->psHandleBase); - psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntCtxCreateOUT-> - hDevMemServerContext, - (void *) - psDevMemServerContextInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntCtxCreatepsDevMemServerContextIntRelease); - if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntCtxCreate_exit; - } - - psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntCtxCreateOUT-> - hPrivData, - (void *)hPrivDataInt, - PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeOUT-> + hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - psDevmemIntCtxCreateOUT-> - hDevMemServerContext); - if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntCtxCreate_exit; + goto DevmemIntReserveRange_exit; } /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); -DevmemIntCtxCreate_exit: +DevmemIntReserveRange_exit: - if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) { - if (psDevmemIntCtxCreateOUT->hDevMemServerContext) + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) { - PVRSRV_ERROR eError; - - /* Lock over handle creation cleanup. */ - LockHandle(psConnection->psHandleBase); - - eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntCtxCreateOUT-> - hDevMemServerContext, - PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(eError))); - } - /* Releasing the handle should free/destroy/release the resource. - * This should never fail... */ - PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - - /* Avoid freeing/destroying/releasing the resource a second time below */ - psDevMemServerContextInt = NULL; - /* Release now we have cleaned up creation handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - } - - if (psDevMemServerContextInt) - { - DevmemIntCtxDestroy(psDevMemServerContextInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, - IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, - 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntCtxDestroyOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntCtxDestroyIN-> - hDevmemServerContext, - PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - if (unlikely - ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) - && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntCtxDestroy_exit; - } - - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntCtxDestroy_exit: + DevmemIntUnreserveRange(psReservationInt); + } + } return 0; } -static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +static PVRSRV_ERROR _DevmemIntReserveRangeAndMapPMRpsReservationIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); return eError; } static IMG_INT -PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, - IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntReserveRangeAndMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeAndMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeAndMapPMROUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, - 0); - - IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; - DEVMEMINT_CTX *psDevmemCtxInt = NULL; - DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR *psDevmemIntReserveRangeAndMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeAndMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR *psDevmemIntReserveRangeAndMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeAndMapPMROUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeAndMapPMRIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + IMG_HANDLE hPMR = psDevmemIntReserveRangeAndMapPMRIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemIntHeapCreateOUT->eError = + psDevmemIntReserveRangeAndMapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemCtxInt, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntHeapCreate_exit; + goto DevmemIntReserveRangeAndMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntReserveRangeAndMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRangeAndMapPMR_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemIntHeapCreateOUT->eError = - DevmemIntHeapCreate(psDevmemCtxInt, - psDevmemIntHeapCreateIN->ui32HeapConfigIndex, - psDevmemIntHeapCreateIN->ui32HeapIndex, - psDevmemIntHeapCreateIN->sHeapBaseAddr, - psDevmemIntHeapCreateIN->uiHeapLength, - psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt); + psDevmemIntReserveRangeAndMapPMROUT->eError = + DevmemIntReserveRangeAndMapPMR(psConnection, OSGetDevNode(psConnection), + psDevmemServerHeapInt, + psDevmemIntReserveRangeAndMapPMRIN->sAddress, + psDevmemIntReserveRangeAndMapPMRIN->uiLength, + psPMRInt, + psDevmemIntReserveRangeAndMapPMRIN->uiFlags, + &psReservationInt); /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) { - goto DevmemIntHeapCreate_exit; + goto DevmemIntReserveRangeAndMapPMR_exit; } /* Lock over handle creation. */ LockHandle(psConnection->psHandleBase); - psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntHeapCreateOUT-> - hDevmemHeapPtr, - (void *)psDevmemHeapPtrInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); - if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + psDevmemIntReserveRangeAndMapPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeAndMapPMROUT->hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangeAndMapPMRpsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntHeapCreate_exit; + goto DevmemIntReserveRangeAndMapPMR_exit; } /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); -DevmemIntHeapCreate_exit: +DevmemIntReserveRangeAndMapPMR_exit: /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psDevmemCtxInt) + if (psDevmemServerHeapInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); - if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + if (psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK) { - if (psDevmemHeapPtrInt) + if (psReservationInt) { - DevmemIntHeapDestroy(psDevmemHeapPtrInt); + DevmemIntUnreserveRange(psReservationInt); } } @@ -1697,406 +1588,97 @@ PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, } static IMG_INT -PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, - IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) - IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); /* Lock over handle destruction. */ LockHandle(psConnection->psHandleBase); - psDevmemIntHeapDestroyOUT->eError = + psDevmemIntUnreserveRangeOUT->eError = PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && - (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + (IMG_HANDLE) psDevmemIntUnreserveRangeIN-> + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + if (unlikely + ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) + && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) { PVR_DPF((PVR_DBG_ERROR, "%s: %s", - __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); + __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto DevmemIntHeapDestroy_exit; + goto DevmemIntUnreserveRange_exit; } /* Release now we have destroyed handles. 
*/ UnlockHandle(psConnection->psHandleBase); -DevmemIntHeapDestroy_exit: +DevmemIntUnreserveRange_exit: return 0; } -static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); - return eError; -} +static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, + "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); +static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, + "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntMapPMRIN_UI8, - IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) +PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psChangeSparseMemIN_UI8, + IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); + PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = + (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = + (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); - IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; - DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; - IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + IMG_HANDLE hReservation = psChangeSparseMemIN->hReservation; DEVMEMINT_RESERVATION *psReservationInt = NULL; - IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; - PMR *psPMRInt = NULL; - DEVMEMINT_MAPPING *psMappingInt = NULL; - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; - /* Look up the address from the handle */ - psDevmemIntMapPMROUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemServerHeapInt, - hDevmemServerHeap, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; } - /* Look up the address from the handle */ - psDevmemIntMapPMROUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psReservationInt, - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; } - /* Look up the address from the handle */ - psDevmemIntMapPMROUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + if (ui64BufferSize > IMG_UINT32_MAX) { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemIntMapPMROUT->eError = - DevmemIntMapPMR(psDevmemServerHeapInt, - psReservationInt, - psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); - /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) - { - goto DevmemIntMapPMR_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntMapPMROUT->hMapping, - (void *)psMappingInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntMapPMRpsMappingIntRelease); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntMapPMR_exit: - - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemServerHeapInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - } - - /* Unreference the previously looked up handle */ - if (psReservationInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) - { - if (psMappingInt) - { - DevmemIntUnmapPMR(psMappingInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, - IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntUnmapPMROUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping, - PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); - if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) && - (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntUnmapPMR_exit; - } - - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntUnmapPMR_exit: - - return 0; -} - -static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); - return eError; -} - -static IMG_INT -PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, - IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); - - IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; - DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; - DEVMEMINT_RESERVATION *psReservationInt = NULL; - - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemIntReserveRangeOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemServerHeapInt, - hDevmemServerHeap, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); - if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntReserveRange_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemIntReserveRangeOUT->eError = - DevmemIntReserveRange(psDevmemServerHeapInt, - psDevmemIntReserveRangeIN->sAddress, - psDevmemIntReserveRangeIN->uiLength, &psReservationInt); - /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) - { - goto DevmemIntReserveRange_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntReserveRangeOUT-> - hReservation, - (void *)psReservationInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntReserveRangepsReservationIntRelease); - if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntReserveRange_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntReserveRange_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemServerHeapInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) - { - if (psReservationInt) - { - DevmemIntUnreserveRange(psReservationInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, - IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntUnreserveRangeOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntUnreserveRangeIN-> - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - if (unlikely - ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) - && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntUnreserveRange_exit; - } - - /* Release now we have destroyed handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntUnreserveRange_exit: - - return 0; -} - -static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, - "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, - "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psChangeSparseMemIN_UI8, - IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = - (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); - PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = - (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); - - IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; - DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; - IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; - PMR *psPMRInt = NULL; - IMG_UINT32 *ui32AllocPageIndicesInt = NULL; - IMG_UINT32 *ui32FreePageIndicesInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; - - if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) - { - psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto ChangeSparseMem_exit; - } - - if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) - { - psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto ChangeSparseMem_exit; - } - - if (ui64BufferSize > IMG_UINT32_MAX) - { - psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto ChangeSparseMem_exit; + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto ChangeSparseMem_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long)); @@ -2112,7 +1694,6 @@ PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -2162,199 +1743,34 @@ PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; goto ChangeSparseMem_exit; - } - } - - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psChangeSparseMemOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psSrvDevMemHeapInt, - hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); - if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto ChangeSparseMem_exit; - } - - /* Look up the address from the handle */ - psChangeSparseMemOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto ChangeSparseMem_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psChangeSparseMemOUT->eError = - DevmemIntChangeSparse(psSrvDevMemHeapInt, - psPMRInt, - psChangeSparseMemIN->ui32AllocPageCount, - ui32AllocPageIndicesInt, - psChangeSparseMemIN->ui32FreePageCount, - ui32FreePageIndicesInt, - psChangeSparseMemIN->ui32SparseFlags, - psChangeSparseMemIN->uiFlags, - psChangeSparseMemIN->sDevVAddr, - psChangeSparseMemIN->ui64CPUVAddr); - -ChangeSparseMem_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psSrvDevMemHeapInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psChangeSparseMemOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntMapPagesIN_UI8, - IMG_UINT8 * psDevmemIntMapPagesOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0); - - IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation; - DEVMEMINT_RESERVATION *psReservationInt = NULL; - IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR; - PMR *psPMRInt = NULL; - - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemIntMapPagesOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psReservationInt, - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPages_exit; - } - - /* Look up the address from the handle */ - psDevmemIntMapPagesOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPages_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemIntMapPagesOUT->eError = - DevmemIntMapPages(psReservationInt, - psPMRInt, - psDevmemIntMapPagesIN->ui32PageCount, - psDevmemIntMapPagesIN->ui32PhysicalPgOffset, - psDevmemIntMapPagesIN->uiFlags, psDevmemIntMapPagesIN->sDevVAddr); - -DevmemIntMapPages_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psReservationInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8, - IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8, - 0); - - IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation; - DEVMEMINT_RESERVATION *psReservationInt = NULL; + } + } /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemIntUnmapPagesOUT->eError = + psChangeSparseMemOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psReservationInt, hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)) + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntUnmapPages_exit; + goto ChangeSparseMem_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemIntUnmapPagesOUT->eError = - DevmemIntUnmapPages(psReservationInt, - psDevmemIntUnmapPagesIN->sDevVAddr, - psDevmemIntUnmapPagesIN->ui32PageCount); + psChangeSparseMemOUT->eError = + DevmemIntChangeSparse(psChangeSparseMemIN->ui32AllocPageCount, + ui32AllocPageIndicesInt, + psChangeSparseMemIN->ui32FreePageCount, + ui32FreePageIndicesInt, + psChangeSparseMemIN->ui32SparseFlags, psReservationInt); -DevmemIntUnmapPages_exit: +ChangeSparseMem_exit: /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); @@ -2368,6 +1784,15 @@ PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psChangeSparseMemOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + return 0; } @@ -2424,67 +1849,6 @@ PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, return 0; } -#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) - -static IMG_INT -PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8, - IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN = - (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *) - IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *) - IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0); - - IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx; - DEVMEMINT_CTX *psDevmemCtxInt = NULL; - - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemFlushDevSLCRangeOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemCtxInt, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemFlushDevSLCRange_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemFlushDevSLCRangeOUT->eError = - DevmemIntFlushDevSLCRange(psDevmemCtxInt, - psDevmemFlushDevSLCRangeIN->sAddress, - psDevmemFlushDevSLCRangeIN->uiSize, - psDevmemFlushDevSLCRangeIN->bInvalidate); - -DevmemFlushDevSLCRange_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemCtxInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -#else -#define PVRSRVBridgeDevmemFlushDevSLCRange NULL -#endif - #if defined(RGX_FEATURE_FBCDC) static IMG_INT @@ -2604,9 +1968,7 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -2632,7 +1994,6 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long)); @@ -2648,7 +2009,6 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -2704,11 +2064,7 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -2733,9 +2089,7 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -2759,7 +2113,6 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long)); @@ -2775,7 +2128,6 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -2786,111 +2138,324 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, } } } - - if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + { + puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); + } + + psHeapCfgHeapDetailsOUT->eError = + HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, + psHeapCfgHeapDetailsIN->ui32HeapIndex, + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, + puiHeapNameOutInt, + &psHeapCfgHeapDetailsOUT->sDevVAddrBase, + &psHeapCfgHeapDetailsOUT->uiHeapLength, + &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, + &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, + &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); + /* Exit early if bridged call fails */ + if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)) + { + goto HeapCfgHeapDetails_exit; + } + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapNameOutInt) && + ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, + puiHeapNameOutInt, + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapDetails_exit; + } + } + +HeapCfgHeapDetails_exit: + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, + IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, + 
CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntRegisterPFNotifyKMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntRegisterPFNotifyKM_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntRegisterPFNotifyKMOUT->eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, psDevmemIntRegisterPFNotifyKMIN->bRegister); + +DevmemIntRegisterPFNotifyKM_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, + "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8, + IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN = + (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT = + (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8, + 0); + + PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; + PHYS_HEAP_MEM_STATS_V1 *pasapPhysHeapMemStatsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + + ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V1)) + 0; + + if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysHeapGetMemInfo_exit; + } + + psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats = + psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats; + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto PhysHeapGetMemInfo_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysHeapGetMemInfo_exit; + } + } + } + + if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) + { + eaPhysHeapIDInt = + (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); + } + + /* Copy the data over */ + if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) + { + if (OSCopyFromUser + (NULL, eaPhysHeapIDInt, + (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID, + psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != + PVRSRV_OK) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysHeapGetMemInfo_exit; + } + } + if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) { - puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); + pasapPhysHeapMemStatsInt = + (PHYS_HEAP_MEM_STATS_V1 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V1); } - psHeapCfgHeapDetailsOUT->eError = - HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), - psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, - psHeapCfgHeapDetailsIN->ui32HeapIndex, - psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, - puiHeapNameOutInt, - &psHeapCfgHeapDetailsOUT->sDevVAddrBase, - &psHeapCfgHeapDetailsOUT->uiHeapLength, - &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, - &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, - &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); + psPhysHeapGetMemInfoOUT->eError = + PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection), + psPhysHeapGetMemInfoIN->ui32PhysHeapCount, + eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); /* Exit early if bridged call fails */ - if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)) + if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK)) { - goto HeapCfgHeapDetails_exit; + goto PhysHeapGetMemInfo_exit; } /* If dest ptr is non-null and we have data to copy */ - if ((puiHeapNameOutInt) && - ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) + if ((pasapPhysHeapMemStatsInt) && + ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V1)) > 0)) { if (unlikely (OSCopyToUser - (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, - puiHeapNameOutInt, - (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) + (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats, + pasapPhysHeapMemStatsInt, + (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V1))) != PVRSRV_OK)) { - psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto HeapCfgHeapDetails_exit; + 
goto PhysHeapGetMemInfo_exit; } } -HeapCfgHeapDetails_exit: +PhysHeapGetMemInfo_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK) + if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } static IMG_INT -PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, - IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8, + IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) - IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) - IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN = + (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *) + IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT = + (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *) + IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN); + + psGetDefaultPhysicalHeapOUT->eError = + PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection), + &psGetDefaultPhysicalHeapOUT->eHeap); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, + IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = + (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); - IMG_HANDLE hDevm = psDevmemIntRegisterPFNotifyKMIN->hDevm; - DEVMEMINT_CTX *psDevmInt = NULL; + IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemIntRegisterPFNotifyKMOUT->eError = + psDevmemGetFaultAddressOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmInt, - hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntRegisterPFNotifyKM_exit; + goto DevmemGetFaultAddress_exit; } /* Release now we have looked up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - psDevmemIntRegisterPFNotifyKMOUT->eError = - DevmemIntRegisterPFNotifyKM(psDevmInt, - psDevmemIntRegisterPFNotifyKMIN->ui32PID, - psDevmemIntRegisterPFNotifyKMIN->bRegister); + psDevmemGetFaultAddressOUT->eError = + DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); -DevmemIntRegisterPFNotifyKM_exit: +DevmemGetFaultAddress_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psDevmInt) + if (psDevmemCtxInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); @@ -2898,198 +2463,358 @@ PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + static IMG_INT -PVRSRVBridgeGetMaxPhysHeapCount(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psGetMaxPhysHeapCountIN_UI8, - IMG_UINT8 * psGetMaxPhysHeapCountOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgePVRSRVStatsUpdateOOMStat(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVStatsUpdateOOMStatIN_UI8, + IMG_UINT8 * psPVRSRVStatsUpdateOOMStatOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountIN = - (PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountOUT = - (PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountOUT_UI8, - 0); - - PVR_UNREFERENCED_PARAMETER(psGetMaxPhysHeapCountIN); - - psGetMaxPhysHeapCountOUT->eError = - PVRSRVGetMaxPhysHeapCountKM(psConnection, OSGetDevNode(psConnection), - &psGetMaxPhysHeapCountOUT->ui32PhysHeapCount); + PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatIN = + (PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *) + IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *) + IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatOUT_UI8, 0); + + psPVRSRVStatsUpdateOOMStatOUT->eError = + PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection), + psPVRSRVStatsUpdateOOMStatIN->ui32ui32StatType, + psPVRSRVStatsUpdateOOMStatIN->ui32pid); return 0; } -static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, - "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); +#else +#define PVRSRVBridgePVRSRVStatsUpdateOOMStat NULL +#endif -static IMG_INT -PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8, - IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8, - CONNECTION_DATA * psConnection) +static PVRSRV_ERROR _DevmemXIntReserveRangepsReservationIntRelease(void *pvData) { - PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN = - (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT = - (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8, - 0); + PVRSRV_ERROR eError; + eError = DevmemXIntUnreserveRange((DEVMEMXINT_RESERVATION *) pvData); + return eError; +} - PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; - PHYS_HEAP_MEM_STATS 
*pasapPhysHeapMemStatsInt = NULL; +static IMG_INT +PVRSRVBridgeDevmemXIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemXIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntReserveRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemXIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + - ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) + - 0; + /* Look up the address from the handle */ + psDevmemXIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) + psDevmemXIntReserveRangeOUT->eError = + DevmemXIntReserveRange(psDevmemServerHeapInt, + psDevmemXIntReserveRangeIN->sAddress, + psDevmemXIntReserveRangeIN->uiLength, &psReservationInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysHeapGetMemInfo_exit; + goto DevmemXIntReserveRange_exit; } - psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats = - psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats; + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); - if (ui64BufferSize > IMG_UINT32_MAX) + psDevmemXIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemXIntReserveRangeOUT-> + hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemXIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto PhysHeapGetMemInfo_exit; + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntReserveRange_exit; } - ui32BufferSize = (IMG_UINT32) ui64BufferSize; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; +DevmemXIntReserveRange_exit: - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN; + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); - if (!pArrayArgsBuffer) - { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto PhysHeapGetMemInfo_exit; - } + if (psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) + { + DevmemXIntUnreserveRange(psReservationInt); } } - if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemXIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntUnreserveRangeIN_UI8, + IMG_UINT8 * psDevmemXIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemXIntUnreserveRangeOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemXIntUnreserveRangeIN-> + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); + if (unlikely + ((psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_OK) + && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) { - eaPhysHeapIDInt = - (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemXIntUnreserveRangeOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntUnreserveRange_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemXIntUnreserveRange_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemXIntMapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntMapPagesIN_UI8, + IMG_UINT8 * psDevmemXIntMapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesOUT_UI8, + 0); + + IMG_HANDLE hReservation = psDevmemXIntMapPagesIN->hReservation; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemXIntMapPagesIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemXIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntMapPages_exit; } - /* Copy the data over */ - if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) + /* Look up the address from the handle */ + psDevmemXIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, eaPhysHeapIDInt, - (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID, - psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != - PVRSRV_OK) - { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto PhysHeapGetMemInfo_exit; - } + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntMapPages_exit; } - if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemXIntMapPagesOUT->eError = + DevmemXIntMapPages(psReservationInt, + psPMRInt, + psDevmemXIntMapPagesIN->ui32PageCount, + psDevmemXIntMapPagesIN->ui32PhysPageOffset, + psDevmemXIntMapPagesIN->uiFlags, + psDevmemXIntMapPagesIN->ui32VirtPageOffset); + +DevmemXIntMapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) { - pasapPhysHeapMemStatsInt = - (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); } - psPhysHeapGetMemInfoOUT->eError = - PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection), - psPhysHeapGetMemInfoIN->ui32PhysHeapCount, - eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); - /* Exit early if bridged call fails */ - if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK)) + /* Unreference the previously looked up handle */ + if (psPMRInt) { - goto PhysHeapGetMemInfo_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - /* If dest ptr is non-null and we have data to copy */ - if ((pasapPhysHeapMemStatsInt) && - ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) - { - if (unlikely - (OSCopyToUser - (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats, - pasapPhysHeapMemStatsInt, - (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != - PVRSRV_OK)) - { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; +} - goto PhysHeapGetMemInfo_exit; - } +static IMG_INT +PVRSRVBridgeDevmemXIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntUnmapPagesIN_UI8, + IMG_UINT8 * psDevmemXIntUnmapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemXIntUnmapPagesIN->hReservation; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemXIntUnmapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemXIntUnmapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntUnmapPages_exit; } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); -PhysHeapGetMemInfo_exit: + psDevmemXIntUnmapPagesOUT->eError = + DevmemXIntUnmapPages(psReservationInt, + psDevmemXIntUnmapPagesIN->ui32VirtPageOffset, + psDevmemXIntUnmapPagesIN->ui32PageCount); - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ +DevmemXIntUnmapPages_exit: -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); return 0; } static IMG_INT -PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8, - IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemXIntMapVRangeToBackingPage(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntMapVRangeToBackingPageIN_UI8, + IMG_UINT8 * psDevmemXIntMapVRangeToBackingPageOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN = - (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *) - IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0); - PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT = - (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *) - IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE *psDevmemXIntMapVRangeToBackingPageIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE *) + IMG_OFFSET_ADDR(psDevmemXIntMapVRangeToBackingPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE *psDevmemXIntMapVRangeToBackingPageOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE *) + IMG_OFFSET_ADDR(psDevmemXIntMapVRangeToBackingPageOUT_UI8, 0); - PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN); + IMG_HANDLE hReservation = psDevmemXIntMapVRangeToBackingPageIN->hReservation; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; - psGetDefaultPhysicalHeapOUT->eError = - PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection), - &psGetDefaultPhysicalHeapOUT->eHeap); + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemXIntMapVRangeToBackingPageOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemXIntMapVRangeToBackingPageOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntMapVRangeToBackingPage_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemXIntMapVRangeToBackingPageOUT->eError = + DevmemXIntMapVRangeToBackingPage(psReservationInt, + psDevmemXIntMapVRangeToBackingPageIN->ui32PageCount, + psDevmemXIntMapVRangeToBackingPageIN->uiFlags, + psDevmemXIntMapVRangeToBackingPageIN-> + ui32VirtPageOffset); + +DevmemXIntMapVRangeToBackingPage_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); return 0; } @@ -3098,54 +2823,53 @@ static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psGetHeapPhysMemUsageIN_UI8, - IMG_UINT8 * psGetHeapPhysMemUsageOUT_UI8, +PVRSRVBridgePhysHeapGetMemInfo2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysHeapGetMemInfo2IN_UI8, + IMG_UINT8 * psPhysHeapGetMemInfo2OUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageIN = - (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageIN_UI8, + PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2 *psPhysHeapGetMemInfo2IN = + (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2 *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfo2IN_UI8, 0); - PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageOUT = - (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageOUT_UI8, + PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2 *psPhysHeapGetMemInfo2OUT = + (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2 *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfo2OUT_UI8, 0); - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL; + PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; + PHYS_HEAP_MEM_STATS_V2 *pasapPhysHeapMemStatsInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psGetHeapPhysMemUsageIN->ui32PhysHeapCount * - sizeof(PHYS_HEAP_MEM_STATS)) + 0; + ((IMG_UINT64) psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + + ((IMG_UINT64) psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V2)) + 0; - if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST) + if (unlikely(psPhysHeapGetMemInfo2IN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto GetHeapPhysMemUsage_exit; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysHeapGetMemInfo2_exit; } - psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats = - psGetHeapPhysMemUsageIN->pasapPhysHeapMemStats; + psPhysHeapGetMemInfo2OUT->pasapPhysHeapMemStats = + psPhysHeapGetMemInfo2IN->pasapPhysHeapMemStats; if (ui64BufferSize > IMG_UINT32_MAX) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto GetHeapPhysMemUsage_exit; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto PhysHeapGetMemInfo2_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psGetHeapPhysMemUsageIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psPhysHeapGetMemInfo2IN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -3153,156 +2877,93 @@ PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsageIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfo2IN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto GetHeapPhysMemUsage_exit; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysHeapGetMemInfo2_exit; } } } - if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount != 0) + if (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount != 0) + { + eaPhysHeapIDInt = + (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); + } + + /* Copy the data over */ + if (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) + { + if (OSCopyFromUser + (NULL, eaPhysHeapIDInt, + (const void __user *)psPhysHeapGetMemInfo2IN->peaPhysHeapID, + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != + PVRSRV_OK) + { + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysHeapGetMemInfo2_exit; + } + } + if (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount != 0) { pasapPhysHeapMemStatsInt = - (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + (PHYS_HEAP_MEM_STATS_V2 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ui32NextOffset += - psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V2); } - psGetHeapPhysMemUsageOUT->eError = - PVRSRVGetHeapPhysMemUsageKM(psConnection, OSGetDevNode(psConnection), - psGetHeapPhysMemUsageIN->ui32PhysHeapCount, - pasapPhysHeapMemStatsInt); + psPhysHeapGetMemInfo2OUT->eError = + PVRSRVPhysHeapGetMemInfo2KM(psConnection, OSGetDevNode(psConnection), + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount, + eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); /* Exit early if bridged call fails */ - if (unlikely(psGetHeapPhysMemUsageOUT->eError != PVRSRV_OK)) + if (unlikely(psPhysHeapGetMemInfo2OUT->eError != PVRSRV_OK)) { - goto GetHeapPhysMemUsage_exit; + goto PhysHeapGetMemInfo2_exit; } /* If dest ptr is non-null and we have data to copy */ if ((pasapPhysHeapMemStatsInt) && - ((psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) + ((psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V2)) > 0)) { if (unlikely (OSCopyToUser - (NULL, (void __user *)psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats, + (NULL, (void __user *)psPhysHeapGetMemInfo2OUT->pasapPhysHeapMemStats, pasapPhysHeapMemStatsInt, - (psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != - PVRSRV_OK)) + (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V2))) != PVRSRV_OK)) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto GetHeapPhysMemUsage_exit; + goto PhysHeapGetMemInfo2_exit; } } -GetHeapPhysMemUsage_exit: +PhysHeapGetMemInfo2_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psGetHeapPhysMemUsageOUT->eError == PVRSRV_OK) 
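The array-argument staging seen in PVRSRVBridgePhysHeapGetMemInfo2 above (and repeated across the other generated handlers in this patch) first tries to reuse the unused tail of the fixed-size bridge input buffer and only falls back to OSAllocMemNoStats() when the arrays do not fit. A minimal sketch of that pattern follows, assuming the PVR_ALIGN, PVRSRV_MAX_BRIDGE_IN_SIZE and OSAllocMemNoStats definitions used elsewhere in the driver; the helper name GetArrayArgsBuffer is hypothetical and introduced only for illustration, it is not part of the generated bridge code.

/* Hypothetical helper illustrating the staging-buffer pattern used above. */
static IMG_BYTE *GetArrayArgsBuffer(void *psBridgeIn, IMG_UINT32 ui32InStructSize,
				    IMG_UINT32 ui32BufferSize, IMG_BOOL *pbHaveEnoughSpace)
{
	/* Skip past the fixed IN structure, word-aligned for safety. */
	IMG_UINT32 ui32Offset = PVR_ALIGN(ui32InStructSize, sizeof(unsigned long));
	IMG_UINT32 ui32Excess = (ui32Offset >= PVRSRV_MAX_BRIDGE_IN_SIZE) ?
	    0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32Offset;

	*pbHaveEnoughSpace = (ui32BufferSize <= ui32Excess);
	if (*pbHaveEnoughSpace)
	{
		/* Arrays fit in the remainder of the bridge input buffer. */
		return (IMG_BYTE *)psBridgeIn + ui32Offset;
	}

	/* Otherwise stage the arrays in a temporary allocation; the caller frees
	 * it only when bHaveEnoughSpace is false, mirroring the exit paths above. */
	return OSAllocMemNoStats(ui32BufferSize);
}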
+ if (psPhysHeapGetMemInfo2OUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static IMG_INT -PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, - IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = - (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) - IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) - IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); - - IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; - DEVMEMINT_CTX *psDevmemCtxInt = NULL; - - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemGetFaultAddressOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemCtxInt, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemGetFaultAddress_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemGetFaultAddressOUT->eError = - DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), - psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); - -DevmemGetFaultAddress_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemCtxInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - } - /* Release now we have cleaned up look up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - -static IMG_INT -PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8, - IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN = - (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT = - (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *) - IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0); - - psPVRSRVUpdateOOMStatsOUT->eError = - PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection), - psPVRSRVUpdateOOMStatsIN->ui32ui32StatType, - psPVRSRVUpdateOOMStatsIN->ui32pid); - - return 0; -} - -#else -#define PVRSRVBridgePVRSRVUpdateOOMStats NULL -#endif - /* *************************************************************************** * Server bridge dispatch related glue */ @@ -3317,124 +2978,179 @@ PVRSRV_ERROR InitMMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, - PVRSRVBridgePMRExportPMR, NULL); + PVRSRVBridgePMRExportPMR, NULL, sizeof(PVRSRV_BRIDGE_IN_PMREXPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMREXPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, - PVRSRVBridgePMRUnexportPMR, NULL); + PVRSRVBridgePMRUnexportPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID, - NULL); + NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRGETUID), + sizeof(PVRSRV_BRIDGE_OUT_PMRGETUID)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, - PVRSRVBridgePMRMakeLocalImportHandle, NULL); + PVRSRVBridgePMRMakeLocalImportHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, - PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); + PVRSRVBridgePMRUnmakeLocalImportHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, - PVRSRVBridgePMRImportPMR, NULL); + PVRSRVBridgePMRImportPMR, NULL, sizeof(PVRSRV_BRIDGE_IN_PMRIMPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRIMPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, - PVRSRVBridgePMRLocalImportPMR, NULL); + PVRSRVBridgePMRLocalImportPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, - PVRSRVBridgePMRUnrefPMR, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, - PVRSRVBridgePMRUnrefUnlockPMR, NULL); + PVRSRVBridgePMRUnrefPMR, NULL, sizeof(PVRSRV_BRIDGE_IN_PMRUNREFPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRUNREFPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, - PVRSRVBridgePhysmemNewRamBackedPMR, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, - PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE1, - PVRSRVBridgeDevmemCompatReserve1, NULL); - - 
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE2, - PVRSRVBridgeDevmemCompatReserve2, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE3, - PVRSRVBridgeDevmemCompatReserve3, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE4, - PVRSRVBridgeDevmemCompatReserve4, NULL); + PVRSRVBridgePhysmemNewRamBackedPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, - PVRSRVBridgeDevmemIntCtxCreate, NULL); + PVRSRVBridgeDevmemIntCtxCreate, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, - PVRSRVBridgeDevmemIntCtxDestroy, NULL); + PVRSRVBridgeDevmemIntCtxDestroy, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, - PVRSRVBridgeDevmemIntHeapCreate, NULL); + PVRSRVBridgeDevmemIntHeapCreate, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, - PVRSRVBridgeDevmemIntHeapDestroy, NULL); + PVRSRVBridgeDevmemIntHeapDestroy, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, - PVRSRVBridgeDevmemIntMapPMR, NULL); + PVRSRVBridgeDevmemIntMapPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, - PVRSRVBridgeDevmemIntUnmapPMR, NULL); + PVRSRVBridgeDevmemIntUnmapPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, - PVRSRVBridgeDevmemIntReserveRange, NULL); + PVRSRVBridgeDevmemIntReserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR, + PVRSRVBridgeDevmemIntReserveRangeAndMapPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, - PVRSRVBridgeDevmemIntUnreserveRange, NULL); + PVRSRVBridgeDevmemIntUnreserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, - PVRSRVBridgeChangeSparseMem, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, - PVRSRVBridgeDevmemIntMapPages, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, - PVRSRVBridgeDevmemIntUnmapPages, NULL); + PVRSRVBridgeChangeSparseMem, NULL, + sizeof(PVRSRV_BRIDGE_IN_CHANGESPARSEMEM), + sizeof(PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, - PVRSRVBridgeDevmemIsVDevAddrValid, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE, - 
PVRSRVBridgeDevmemFlushDevSLCRange, NULL); + PVRSRVBridgeDevmemIsVDevAddrValid, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, - PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); + PVRSRVBridgeDevmemInvalidateFBSCTable, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, - PVRSRVBridgeHeapCfgHeapConfigCount, NULL); + PVRSRVBridgeHeapCfgHeapConfigCount, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, - PVRSRVBridgeHeapCfgHeapCount, NULL); + PVRSRVBridgeHeapCfgHeapCount, NULL, + sizeof(PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT), + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, - PVRSRVBridgeHeapCfgHeapConfigName, NULL); + PVRSRVBridgeHeapCfgHeapConfigName, NULL, + sizeof(PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME), + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, - PVRSRVBridgeHeapCfgHeapDetails, NULL); + PVRSRVBridgeHeapCfgHeapDetails, NULL, + sizeof(PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS), + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, - PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT, - PVRSRVBridgeGetMaxPhysHeapCount, NULL); + PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO, - PVRSRVBridgePhysHeapGetMemInfo, NULL); + PVRSRVBridgePhysHeapGetMemInfo, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO), + sizeof(PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP, - PVRSRVBridgeGetDefaultPhysicalHeap, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE, - PVRSRVBridgeGetHeapPhysMemUsage, NULL); + PVRSRVBridgeGetDefaultPhysicalHeap, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, - PVRSRVBridgeDevmemGetFaultAddress, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS, - PVRSRVBridgePVRSRVUpdateOOMStats, NULL); + PVRSRVBridgeDevmemGetFaultAddress, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT, + PVRSRVBridgePVRSRVStatsUpdateOOMStat, NULL, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE, + PVRSRVBridgeDevmemXIntReserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE, + PVRSRVBridgeDevmemXIntUnreserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE), + 
sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES, + PVRSRVBridgeDevmemXIntMapPages, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES, + PVRSRVBridgeDevmemXIntUnmapPages, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE, + PVRSRVBridgeDevmemXIntMapVRangeToBackingPage, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO2, + PVRSRVBridgePhysHeapGetMemInfo2, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2), + sizeof(PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2)); return PVRSRV_OK; } @@ -3461,20 +3177,8 @@ void DeinitMMBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE1); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE2); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE3); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE4); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); @@ -3489,18 +3193,14 @@ void DeinitMMBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); @@ -3513,16 +3213,25 @@ void DeinitMMBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO2); } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mmextmem_bridge/server_mmextmem_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mmextmem_bridge/server_mmextmem_bridge.c index 954713f7691c..538d04cdf0e0 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/mmextmem_bridge/server_mmextmem_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/mmextmem_bridge/server_mmextmem_bridge.c @@ -148,7 +148,9 @@ PVRSRV_ERROR InitMMEXTMEMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_MMEXTMEM, PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM, - PVRSRVBridgePhysmemWrapExtMem, NULL); + PVRSRVBridgePhysmemWrapExtMem, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdump_bridge/server_pdump_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdump_bridge/server_pdump_bridge.c index 862343f31cac..6b3baf668710 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdump_bridge/server_pdump_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdump_bridge/server_pdump_bridge.c @@ -90,9 +90,7 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -115,7 +113,6 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), sizeof(unsigned long)); @@ -131,7 +128,6 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -242,11 +238,7 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -271,9 +263,7 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -295,7 +285,6 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long)); @@ -311,7 +300,6 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -358,11 +346,7 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -410,9 +394,7 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -434,7 +416,6 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN), sizeof(unsigned long)); @@ -450,7 +431,6 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -532,11 +512,7 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -556,16 +532,24 @@ PVRSRV_ERROR InitPDUMPBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR, - PVRSRVBridgePDumpImageDescriptor, NULL); + PVRSRVBridgePDumpImageDescriptor, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, - PVRSRVBridgePVRSRVPDumpComment, NULL); + PVRSRVBridgePVRSRVPDumpComment, NULL, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME, - PVRSRVBridgePVRSRVPDumpSetFrame, NULL); + PVRSRVBridgePVRSRVPDumpSetFrame, NULL, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR, - PVRSRVBridgePDumpDataDescriptor, NULL); + PVRSRVBridgePDumpDataDescriptor, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h index 8bda83aeb299..a2ba27b88b30 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h @@ -63,7 +63,8 @@ IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hB IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - IMG_UINT32 
ui32MaxParamFileSize); + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout); IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge, IMG_BOOL * pbpbIsLastCaptureFrame); diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c index 7ba92988a462..a2bc30360aea 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c @@ -74,14 +74,17 @@ IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hB IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - IMG_UINT32 ui32MaxParamFileSize) + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout) { PVRSRV_ERROR eError; eError = PDumpSetDefaultCaptureParamsKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32Mode, - ui32Start, ui32End, ui32Interval, ui32MaxParamFileSize); + ui32Start, + ui32End, + ui32Interval, ui32MaxParamFileSize, ui32AutoTermTimeout); return eError; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h index cacfb870510a..f710e4c06131 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h @@ -100,6 +100,7 @@ typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG /* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */ typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG { + IMG_UINT32 ui32AutoTermTimeout; IMG_UINT32 ui32End; IMG_UINT32 ui32Interval; IMG_UINT32 ui32MaxParamFileSize; diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c index 4edc0fc39a16..2d5578cf83f9 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c @@ -134,7 +134,9 @@ PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End, psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval, psPVRSRVPDumpSetDefaultCaptureParamsIN-> - ui32MaxParamFileSize); + ui32MaxParamFileSize, + psPVRSRVPDumpSetDefaultCaptureParamsIN-> + ui32AutoTermTimeout); return 0; } @@ -199,22 +201,28 @@ PVRSRV_ERROR InitPDUMPCTRLBridge(void) PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock), "OSLockCreate"); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE, - PVRSRVBridgePVRSRVPDumpGetState, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpGetState, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, - PVRSRVBridgePVRSRVPDumpGetFrame, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpGetFrame, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, 
PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, - PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, pPDUMPCTRLBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, - PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP, - PVRSRVBridgePVRSRVPDumpForceCaptureStop, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpForceCaptureStop, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c index b1906b385871..1358f75a7537 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c @@ -224,12 +224,12 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBrid { PVRSRV_ERROR eError; DEVMEMINT_CTX *psDevmemServerContextInt; - PVR_UNREFERENCED_PARAMETER(hBridge); psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; eError = - DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + DevmemIntPDumpSaveToFileVirtual(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemServerContextInt, sAddress, uiSize, ui32ArraySize, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c index 2563b7a0fe30..38e1a6610fd5 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c @@ -251,9 +251,7 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -275,7 +273,6 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long)); @@ -291,7 +288,6 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -368,11 +364,7 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -403,9 +395,7 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -437,7 +427,6 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long)); @@ -453,7 +442,6 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -562,11 +550,7 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -752,9 +736,7 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -779,7 +761,6 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long)); @@ -796,7 +777,6 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -852,7 +832,8 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, UnlockHandle(psConnection->psHandleBase); psDevmemIntPDumpSaveToFileVirtualOUT->eError = - DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + DevmemIntPDumpSaveToFileVirtual(psConnection, OSGetDevNode(psConnection), + psDevmemServerContextInt, psDevmemIntPDumpSaveToFileVirtualIN->sAddress, psDevmemIntPDumpSaveToFileVirtualIN->uiSize, psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize, @@ -880,11 +861,7 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -904,32 +881,49 @@ PVRSRV_ERROR InitPDUMPMMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, - PVRSRVBridgePMRPDumpLoadMem, NULL); + PVRSRVBridgePMRPDumpLoadMem, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, - PVRSRVBridgePMRPDumpLoadMemValue32, NULL); + PVRSRVBridgePMRPDumpLoadMemValue32, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, - PVRSRVBridgePMRPDumpLoadMemValue64, NULL); + PVRSRVBridgePMRPDumpLoadMemValue64, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, - PVRSRVBridgePMRPDumpSaveToFile, NULL); + PVRSRVBridgePMRPDumpSaveToFile, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, - PVRSRVBridgePMRPDumpSymbolicAddr, NULL); + PVRSRVBridgePMRPDumpSymbolicAddr, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, - PVRSRVBridgePMRPDumpPol32, NULL); + PVRSRVBridgePMRPDumpPol32, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPPOL32), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32, - PVRSRVBridgePMRPDumpCheck32, NULL); + PVRSRVBridgePMRPDumpCheck32, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, - PVRSRVBridgePMRPDumpCBP, NULL); + PVRSRVBridgePMRPDumpCBP, NULL, sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPCBP), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPCBP)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, - PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, NULL); + PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, NULL, + 
sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c index 2e8e50e639c1..9facf794011e 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c @@ -91,9 +91,7 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -111,7 +109,6 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); @@ -127,7 +124,6 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -221,17 +217,16 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psSDInt = NULL; /* Release now we have cleaned up creation handles. */ UnlockHandle(psConnection->psHandleBase); } - if (psSDInt) + else if (psSDInt) { TLServerCloseStreamKM(psSDInt); } + } /* Allocated space should be equal to the last updated offset */ @@ -240,11 +235,7 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -408,9 +399,7 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -437,7 +426,6 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long)); @@ -453,7 +441,6 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -523,11 +510,7 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -654,9 +637,7 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; @@ -677,7 +658,6 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); @@ -693,7 +673,6 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -763,11 +742,7 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -787,28 +762,42 @@ PVRSRV_ERROR InitPVRTLBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, - PVRSRVBridgeTLOpenStream, NULL); + PVRSRVBridgeTLOpenStream, NULL, sizeof(PVRSRV_BRIDGE_IN_TLOPENSTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLOPENSTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, - PVRSRVBridgeTLCloseStream, NULL); + PVRSRVBridgeTLCloseStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLCLOSESTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLCLOSESTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, - PVRSRVBridgeTLAcquireData, NULL); + PVRSRVBridgeTLAcquireData, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLACQUIREDATA), + sizeof(PVRSRV_BRIDGE_OUT_TLACQUIREDATA)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, - PVRSRVBridgeTLReleaseData, NULL); + PVRSRVBridgeTLReleaseData, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLRELEASEDATA), + sizeof(PVRSRV_BRIDGE_OUT_TLRELEASEDATA)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, - PVRSRVBridgeTLDiscoverStreams, NULL); + PVRSRVBridgeTLDiscoverStreams, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS), + sizeof(PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, - PVRSRVBridgeTLReserveStream, NULL); + PVRSRVBridgeTLReserveStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLRESERVESTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLRESERVESTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, - PVRSRVBridgeTLCommitStream, NULL); + PVRSRVBridgeTLCommitStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLCOMMITSTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM)); 
SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, - PVRSRVBridgeTLWriteData, NULL); + PVRSRVBridgeTLWriteData, NULL, sizeof(PVRSRV_BRIDGE_IN_TLWRITEDATA), + sizeof(PVRSRV_BRIDGE_OUT_TLWRITEDATA)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h index 70454e0a9fb0..ca5290a88266 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h @@ -68,12 +68,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Bridge in structure for RGXSetBreakpoint */ typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG { + IMG_UINT64 ui64TempSpillingAddr; IMG_HANDLE hPrivData; IMG_UINT32 eFWDataMaster; IMG_UINT32 ui32BreakpointAddr; IMG_UINT32 ui32DM; IMG_UINT32 ui32HandlerAddr; - IMG_UINT32 ui32TempSpillingAddr; } __packed PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT; /* Bridge out structure for RGXSetBreakpoint */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c index b909b3d82948..78fd2b405e92 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c @@ -100,7 +100,7 @@ PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry, PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt, psRGXSetBreakpointIN->eFWDataMaster, - psRGXSetBreakpointIN->ui32TempSpillingAddr, + psRGXSetBreakpointIN->ui64TempSpillingAddr, psRGXSetBreakpointIN->ui32BreakpointAddr, psRGXSetBreakpointIN->ui32HandlerAddr, psRGXSetBreakpointIN->ui32DM); @@ -316,23 +316,33 @@ PVRSRV_ERROR InitRGXBREAKPOINTBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT, - PVRSRVBridgeRGXSetBreakpoint, NULL); + PVRSRVBridgeRGXSetBreakpoint, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT, - PVRSRVBridgeRGXClearBreakpoint, NULL); + PVRSRVBridgeRGXClearBreakpoint, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT, - PVRSRVBridgeRGXEnableBreakpoint, NULL); + PVRSRVBridgeRGXEnableBreakpoint, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT), + sizeof(PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT, - PVRSRVBridgeRGXDisableBreakpoint, NULL); + PVRSRVBridgeRGXDisableBreakpoint, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS, - PVRSRVBridgeRGXOverallocateBPRegisters, NULL); + PVRSRVBridgeRGXOverallocateBPRegisters, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS), + 
sizeof(PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h index b526412445c2..aa1d2b73985a 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h @@ -58,12 +58,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0 #define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1 #define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXCMP_RGXSENDCANCELCMD PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5 #define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6 #define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7) +#define PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXCMP_RGXCDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXCMP_RGXCDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+11) /******************************************* RGXCreateComputeContext @@ -123,6 +127,24 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA; +/******************************************* + RGXSendCancelCmd + *******************************************/ + +/* Bridge in structure for RGXSendCancelCmd */ +typedef struct PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD_TAG +{ + IMG_HANDLE hComputeContext; + IMG_INT32 i32FirstIntJobRefToCancel; + IMG_INT32 i32LastIntJobRefToCancel; +} __packed PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD; + +/* Bridge out structure for RGXSendCancelCmd */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD; + /******************************************* RGXSetComputeContextPriority *******************************************/ @@ -157,11 +179,70 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG } __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; /******************************************* - RGXKickCDM2 + RGXSetComputeContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Input; + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Property; +} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY; + +/* Bridge out structure for RGXSetComputeContextProperty */ 
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY; + +/******************************************* + RGXGetLastDeviceError + *******************************************/ + +/* Bridge in structure for RGXGetLastDeviceError */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR; + +/* Bridge out structure for RGXGetLastDeviceError */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Error; +} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR; + +/******************************************* + RGXKickTimestampQuery *******************************************/ -/* Bridge in structure for RGXKickCDM2 */ -typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG +/* Bridge in structure for RGXKickTimestampQuery */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY_TAG +{ + IMG_HANDLE hComputeContext; + IMG_BYTE *pui8DMCmd; + IMG_CHAR *puiUpdateFenceName; + PVRSRV_FENCE hCheckFenceFd; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32CmdSize; + IMG_UINT32 ui32ExtJobRef; +} __packed PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY; + +/* Bridge out structure for RGXKickTimestampQuery */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY; + +/******************************************* + RGXKickCDM + *******************************************/ + +/* Bridge in structure for RGXKickCDM */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG { IMG_UINT64 ui64DeadlineInus; IMG_HANDLE hComputeContext; @@ -173,6 +254,7 @@ typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG IMG_HANDLE *phClientUpdateUFOSyncPrimBlock; IMG_HANDLE *phSyncPMRs; PVRSRV_FENCE hCheckFenceFd; + PVRSRV_FENCE hExportFenceToSignal; PVRSRV_TIMELINE hUpdateTimeline; IMG_UINT32 ui32ClientUpdateCount; IMG_UINT32 ui32CmdSize; @@ -181,49 +263,47 @@ typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG IMG_UINT32 ui32NumOfWorkitems; IMG_UINT32 ui32PDumpFlags; IMG_UINT32 ui32SyncPMRCount; -} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2; +} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM; -/* Bridge out structure for RGXKickCDM2 */ -typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG +/* Bridge out structure for RGXKickCDM */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG { PVRSRV_ERROR eError; PVRSRV_FENCE hUpdateFence; -} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2; + IMG_UINT32 ui32IntJobRef; +} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM; /******************************************* - RGXSetComputeContextProperty + RGXCDMGetSharedMemory *******************************************/ -/* Bridge in structure for RGXSetComputeContextProperty */ -typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG +/* Bridge in structure for RGXCDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY_TAG { - IMG_UINT64 ui64Input; - IMG_HANDLE hComputeContext; - IMG_UINT32 ui32Property; -} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY; + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY; -/* Bridge out structure for RGXSetComputeContextProperty */ -typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG +/* Bridge out structure for RGXCDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY_TAG { - IMG_UINT64 
ui64Output; + IMG_HANDLE hCLIPMRMem; PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY; +} __packed PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY; /******************************************* - RGXGetLastDeviceError + RGXCDMReleaseSharedMemory *******************************************/ -/* Bridge in structure for RGXGetLastDeviceError */ -typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG +/* Bridge in structure for RGXCDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY_TAG { - IMG_UINT32 ui32EmptyStructPlaceholder; -} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR; + IMG_HANDLE hPMRMem; +} __packed PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY; -/* Bridge out structure for RGXGetLastDeviceError */ -typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG +/* Bridge out structure for RGXCDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY_TAG { PVRSRV_ERROR eError; - IMG_UINT32 ui32Error; -} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR; +} __packed PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY; #endif /* COMMON_RGXCMP_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c index 55b772f6499f..8910cfb5ba6a 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c @@ -100,9 +100,7 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -148,7 +146,6 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long)); @@ -164,7 +161,6 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -307,11 +303,7 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -439,6 +431,73 @@ PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgeRGXSendCancelCmd(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSendCancelCmdIN_UI8, + IMG_UINT8 * psRGXSendCancelCmdOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD *psRGXSendCancelCmdIN = + (PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD *) IMG_OFFSET_ADDR(psRGXSendCancelCmdIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD *psRGXSendCancelCmdOUT = + (PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD *) IMG_OFFSET_ADDR(psRGXSendCancelCmdOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXSendCancelCmdIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXSendCancelCmdOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXSendCancelCmd_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSendCancelCmdOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSendCancelCmdOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSendCancelCmd_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSendCancelCmdOUT->eError = + PVRSRVRGXSendCancelCmdKM(psComputeContextInt, + psRGXSendCancelCmdIN->i32FirstIntJobRefToCancel, + psRGXSendCancelCmdIN->i32LastIntJobRefToCancel); + +RGXSendCancelCmd_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + static IMG_INT PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8, @@ -579,6 +638,296 @@ PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXSetComputeContextProperty_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, + psRGXSetComputeContextPropertyIN->ui32Property, + psRGXSetComputeContextPropertyIN->ui64Input, + &psRGXSetComputeContextPropertyOUT->ui64Output); + +RGXSetComputeContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, + IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) + IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) + IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXGetLastDeviceError_exit; + } + } + + PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); + + psRGXGetLastDeviceErrorOUT->eError = + PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), + &psRGXGetLastDeviceErrorOUT->ui32Error); + +RGXGetLastDeviceError_exit: + + return 0; +} + +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXKickTimestampQuery(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTimestampQueryIN_UI8, + IMG_UINT8 * psRGXKickTimestampQueryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryIN = + (PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *) + IMG_OFFSET_ADDR(psRGXKickTimestampQueryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryOUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *) + IMG_OFFSET_ADDR(psRGXKickTimestampQueryOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXKickTimestampQueryIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_BYTE *ui8DMCmdInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXKickTimestampQueryIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTimestampQuery_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXKickTimestampQuery_exit; + } + } + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXKickTimestampQuery_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for 
safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickTimestampQueryIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTimestampQueryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTimestampQuery_exit; + } + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTimestampQueryIN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTimestampQuery_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickTimestampQueryIN->ui32CmdSize != 0) + { + ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8DMCmdInt, (const void __user *)psRGXKickTimestampQueryIN->pui8DMCmd, + psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTimestampQuery_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickTimestampQueryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickTimestampQueryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTimestampQuery_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTimestampQueryOUT->eError = + PVRSRVRGXKickTimestampQueryKM(psComputeContextInt, + psRGXKickTimestampQueryIN->hCheckFenceFd, + psRGXKickTimestampQueryIN->hUpdateTimeline, + &psRGXKickTimestampQueryOUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTimestampQueryIN->ui32CmdSize, + ui8DMCmdInt, psRGXKickTimestampQueryIN->ui32ExtJobRef); + +RGXKickTimestampQuery_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXKickTimestampQueryOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, @@ -589,16 +938,16 @@ static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXKickCDM2IN_UI8, - IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickCDMIN_UI8, + IMG_UINT8 * psRGXKickCDMOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = - (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = - (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXKICKCDM *psRGXKickCDMIN = + (PVRSRV_BRIDGE_IN_RGXKICKCDM *) IMG_OFFSET_ADDR(psRGXKickCDMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKCDM *psRGXKickCDMOUT = + (PVRSRV_BRIDGE_OUT_RGXKICKCDM *) IMG_OFFSET_ADDR(psRGXKickCDMOUT_UI8, 0); - IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; + IMG_HANDLE hComputeContext = psRGXKickCDMIN->hComputeContext; RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; @@ -612,38 +961,36 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + ((IMG_UINT64) psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; - if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + if (unlikely(psRGXKickCDMIN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) { - 
psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM_exit; } - if (unlikely(psRGXKickCDM2IN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + if (unlikely(psRGXKickCDMIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM_exit; } - if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + if (unlikely(psRGXKickCDMIN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM_exit; } { @@ -654,26 +1001,25 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK)) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXKickCDM_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXKickCDMIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -681,88 +1027,87 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDMIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickCDM_exit; } } } - if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount != 0) { psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0, - psRGXKickCDM2IN->ui32ClientUpdateCount * + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)); ui32NextOffset += - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) { if (OSCopyFromUser (NULL, hClientUpdateUFOSyncPrimBlockInt2, - (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock, - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->phClientUpdateUFOSyncPrimBlock, + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount != 0) { ui32ClientUpdateOffsetInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser (NULL, ui32ClientUpdateOffsetInt, - (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset, - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->pui32ClientUpdateOffset, + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount != 0) { ui32ClientUpdateValueInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * 
sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser (NULL, ui32ClientUpdateValueInt, - (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue, - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->pui32ClientUpdateValue, + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } @@ -777,73 +1122,73 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { if (OSCopyFromUser (NULL, uiUpdateFenceNameInt, - (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName, + (const void __user *)psRGXKickCDMIN->puiUpdateFenceName, PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - 1] = '\0'; } - if (psRGXKickCDM2IN->ui32CmdSize != 0) + if (psRGXKickCDMIN->ui32CmdSize != 0) { ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE); + ui32NextOffset += psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + if (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0) { if (OSCopyFromUser - (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd, - psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDMIN->pui8DMCmd, + psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) + if (psRGXKickCDMIN->ui32SyncPMRCount != 0) { ui32SyncPMRFlagsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + ui32NextOffset += psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + if (psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser (NULL, ui32SyncPMRFlagsInt, - (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags, - psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->pui32SyncPMRFlags, + psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) + if (psRGXKickCDMIN->ui32SyncPMRCount != 0) { psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)); - ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *); + OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDMIN->ui32SyncPMRCount * sizeof(PMR *)); + 
ui32NextOffset += psRGXKickCDMIN->ui32SyncPMRCount * sizeof(PMR *); hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + ui32NextOffset += psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + if (psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) { if (OSCopyFromUser - (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs, - psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDMIN->phSyncPMRs, + psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } @@ -851,34 +1196,34 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psComputeContextInt, hComputeContext, PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); - if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32ClientUpdateCount; i++) { /* Look up the address from the handle */ - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **) &psClientUpdateUFOSyncPrimBlockInt[i], hClientUpdateUFOSyncPrimBlockInt2[i], PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); - if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } } @@ -886,46 +1231,47 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32SyncPMRCount; i++) { /* Look up the address from the handle */ - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psSyncPMRsInt[i], hSyncPMRsInt2[i], PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } } /* Release now we have looked up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVRGXKickCDMKM(psComputeContextInt, - psRGXKickCDM2IN->ui32ClientUpdateCount, + psRGXKickCDMIN->ui32ClientUpdateCount, psClientUpdateUFOSyncPrimBlockInt, ui32ClientUpdateOffsetInt, ui32ClientUpdateValueInt, - psRGXKickCDM2IN->hCheckFenceFd, - psRGXKickCDM2IN->hUpdateTimeline, - &psRGXKickCDM2OUT->hUpdateFence, + psRGXKickCDMIN->hCheckFenceFd, + psRGXKickCDMIN->hUpdateTimeline, + &psRGXKickCDMOUT->hUpdateFence, uiUpdateFenceNameInt, - psRGXKickCDM2IN->ui32CmdSize, + psRGXKickCDMIN->hExportFenceToSignal, + psRGXKickCDMIN->ui32CmdSize, ui8DMCmdInt, - psRGXKickCDM2IN->ui32PDumpFlags, - psRGXKickCDM2IN->ui32ExtJobRef, - psRGXKickCDM2IN->ui32SyncPMRCount, + psRGXKickCDMIN->ui32PDumpFlags, + psRGXKickCDMIN->ui32ExtJobRef, + psRGXKickCDMIN->ui32SyncPMRCount, ui32SyncPMRFlagsInt, psSyncPMRsInt, - psRGXKickCDM2IN->ui32NumOfWorkgroups, - psRGXKickCDM2IN->ui32NumOfWorkitems, - psRGXKickCDM2IN->ui64DeadlineInus); + psRGXKickCDMIN->ui32NumOfWorkgroups, + psRGXKickCDMIN->ui32NumOfWorkitems, + psRGXKickCDMIN->ui64DeadlineInus, &psRGXKickCDMOUT->ui32IntJobRef); -RGXKickCDM2_exit: +RGXKickCDM_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); @@ -942,7 +1288,7 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32ClientUpdateCount; i++) { /* Unreference the previously looked up handle */ @@ -960,7 +1306,7 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32SyncPMRCount; i++) { /* Unreference the previously looked up handle */ @@ -977,35 +1323,37 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXKickCDM2OUT->eError == PVRSRV_OK) + if (psRGXKickCDMOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } +static PVRSRV_ERROR _RGXCDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXCDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + static IMG_INT -PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, - IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCDMGetSharedMemoryIN_UI8, + IMG_UINT8 * psRGXCDMGetSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = - (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = - (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY *psRGXCDMGetSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMGetSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY 
*psRGXCDMGetSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMGetSharedMemoryOUT_UI8, 0); - IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; - RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + PMR *psCLIPMRMemInt = NULL; { PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); @@ -1015,65 +1363,68 @@ PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK)) { - psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + psRGXCDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; - goto RGXSetComputeContextProperty_exit; + goto RGXCDMGetSharedMemory_exit; } } - /* Lock over handle lookup. */ + PVR_UNREFERENCED_PARAMETER(psRGXCDMGetSharedMemoryIN); + + psRGXCDMGetSharedMemoryOUT->eError = + PVRSRVRGXCDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), + &psCLIPMRMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + goto RGXCDMGetSharedMemory_exit; + } + + /* Lock over handle creation. */ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXSetComputeContextPropertyOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psComputeContextInt, - hComputeContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); - if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + psRGXCDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCDMGetSharedMemoryOUT-> + hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCDMGetSharedMemorypsCLIPMRMemIntRelease); + if (unlikely(psRGXCDMGetSharedMemoryOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXSetComputeContextProperty_exit; + goto RGXCDMGetSharedMemory_exit; } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - psRGXSetComputeContextPropertyOUT->eError = - PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, - psRGXSetComputeContextPropertyIN->ui32Property, - psRGXSetComputeContextPropertyIN->ui64Input, - &psRGXSetComputeContextPropertyOUT->ui64Output); - -RGXSetComputeContextProperty_exit: + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); +RGXCDMGetSharedMemory_exit: - /* Unreference the previously looked up handle */ - if (psComputeContextInt) + if (psRGXCDMGetSharedMemoryOUT->eError != PVRSRV_OK) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hComputeContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + if (psCLIPMRMemInt) + { + PVRSRVRGXCDMReleaseSharedMemoryKM(psCLIPMRMemInt); + } } - /* Release now we have cleaned up look up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); return 0; } static IMG_INT -PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, - IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCDMReleaseSharedMemoryIN_UI8, + IMG_UINT8 * psRGXCDMReleaseSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = - (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) - IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = - (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) - IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY *psRGXCDMReleaseSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMReleaseSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY *psRGXCDMReleaseSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMReleaseSharedMemoryOUT_UI8, 0); { PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); @@ -1083,19 +1434,34 @@ PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK)) { - psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + psRGXCDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; - goto RGXGetLastDeviceError_exit; + goto RGXCDMReleaseSharedMemory_exit; } } - PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); - psRGXGetLastDeviceErrorOUT->eError = - PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), - &psRGXGetLastDeviceErrorOUT->ui32Error); + psRGXCDMReleaseSharedMemoryOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXCDMReleaseSharedMemoryIN->hPMRMem, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + if (unlikely((psRGXCDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) && + (psRGXCDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXCDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXCDMReleaseSharedMemoryOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXCDMReleaseSharedMemory_exit; + } -RGXGetLastDeviceError_exit: + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXCDMReleaseSharedMemory_exit: return 0; } @@ -1114,31 +1480,64 @@ PVRSRV_ERROR InitRGXCMPBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, - PVRSRVBridgeRGXCreateComputeContext, NULL); + PVRSRVBridgeRGXCreateComputeContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, - PVRSRVBridgeRGXDestroyComputeContext, NULL); + PVRSRVBridgeRGXDestroyComputeContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, - PVRSRVBridgeRGXFlushComputeData, NULL); + PVRSRVBridgeRGXFlushComputeData, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA), + sizeof(PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSENDCANCELCMD, + PVRSRVBridgeRGXSendCancelCmd, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD), + sizeof(PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, - PVRSRVBridgeRGXSetComputeContextPriority, NULL); + PVRSRVBridgeRGXSetComputeContextPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, - PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, - PVRSRVBridgeRGXKickCDM2, NULL); + PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE), + sizeof(PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, - PVRSRVBridgeRGXSetComputeContextProperty, NULL); + PVRSRVBridgeRGXSetComputeContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR, - PVRSRVBridgeRGXGetLastDeviceError, NULL); + PVRSRVBridgeRGXGetLastDeviceError, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY, + PVRSRVBridgeRGXKickTimestampQuery, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM, + PVRSRVBridgeRGXKickCDM, NULL, sizeof(PVRSRV_BRIDGE_IN_RGXKICKCDM), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKCDM)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCDMGETSHAREDMEMORY, + PVRSRVBridgeRGXCDMGetSharedMemory, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCDMRELEASESHAREDMEMORY, + PVRSRVBridgeRGXCDMReleaseSharedMemory, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY), + sizeof(PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY)); return PVRSRV_OK; } @@ -1156,17 +1555,26 @@ void DeinitRGXCMPBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, 
PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSENDCANCELCMD); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCDMGETSHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXCDMRELEASESHAREDMEMORY); + } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h index 227f01ae6b7b..534b8e9536ed 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h @@ -58,15 +58,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0 #define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0 #define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9 -#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9) +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWEROFF PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWERON PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE 
PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+14 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+15 +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+15) /******************************************* RGXFWDebugSetFWLog @@ -100,6 +106,54 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST; +/******************************************* + RGXFWDebugPowerOff + *******************************************/ + +/* Bridge in structure for RGXFWDebugPowerOff */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF; + +/* Bridge out structure for RGXFWDebugPowerOff */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF; + +/******************************************* + RGXFWDebugPowerOn + *******************************************/ + +/* Bridge in structure for RGXFWDebugPowerOn */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON; + +/* Bridge out structure for RGXFWDebugPowerOn */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON; + +/******************************************* + RGXFWDebugSetVzConnectionCooldownPeriodInSec + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetVzConnectionCooldownPeriodInSec */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC_TAG +{ + IMG_UINT32 ui32VzConne; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC; + +/* Bridge out structure for RGXFWDebugSetVzConnectionCooldownPeriodInSec */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC; + /******************************************* RGXFWDebugSetHCSDeadline *******************************************/ @@ -117,21 +171,71 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG } __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE; /******************************************* - RGXFWDebugSetOSidPriority + RGXFWDebugSetDriverPriority *******************************************/ -/* Bridge in structure for RGXFWDebugSetOSidPriority */ -typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG +/* Bridge in structure for RGXFWDebugSetDriverPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY_TAG { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; IMG_UINT32 ui32Priority; -} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY; -/* Bridge out structure for RGXFWDebugSetOSidPriority */ -typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG +/* Bridge out structure for RGXFWDebugSetDriverPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY_TAG { PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY; + +/******************************************* 
+ RGXFWDebugSetDriverTimeSlice + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetDriverTimeSlice */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE_TAG +{ + IMG_UINT32 ui32DriverID; + IMG_UINT32 ui32TSPercentage; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE; + +/* Bridge out structure for RGXFWDebugSetDriverTimeSlice */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE; + +/******************************************* + RGXFWDebugSetDriverTimeSliceInterval + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetDriverTimeSliceInterval */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL_TAG +{ + IMG_UINT32 ui32TSIntervalMs; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL; + +/* Bridge out structure for RGXFWDebugSetDriverTimeSliceInterval */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL; + +/******************************************* + RGXFWDebugSetDriverIsolationGroup + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetDriverIsolationGroup */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG +{ + IMG_UINT32 ui32DriverID; + IMG_UINT32 ui32IsolationGroup; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP; + +/* Bridge out structure for RGXFWDebugSetDriverIsolationGroup */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP; /******************************************* RGXFWDebugSetOSNewOnlineState @@ -140,8 +244,8 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG /* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG { + IMG_UINT32 ui32DriverID; IMG_UINT32 ui32OSNewState; - IMG_UINT32 ui32OSid; } __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; /* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ @@ -158,7 +262,7 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP_TAG { IMG_UINT64 ui64ui64GuestHeapBase; - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; } __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP; /* Bridge out structure for RGXFWDebugMapGuestHeap */ @@ -206,7 +310,7 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG /* Bridge in structure for RGXCurrentTime */ typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG { - IMG_UINT32 ui32EmptyStructPlaceholder; + IMG_UINT8 ui8TimerType; } __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME; /* Bridge out structure for RGXCurrentTime */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c index 37eb7b3497cd..e5646490aa2d 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c @@ -108,6 +108,71 @@ PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgeRGXFWDebugPowerOff(IMG_UINT32 
ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPowerOffIN_UI8, + IMG_UINT8 * psRGXFWDebugPowerOffOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF *psRGXFWDebugPowerOffIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOffIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF *psRGXFWDebugPowerOffOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOffOUT_UI8, + 0); + + PVR_UNREFERENCED_PARAMETER(psRGXFWDebugPowerOffIN); + + psRGXFWDebugPowerOffOUT->eError = + PVRSRVRGXFWDebugPowerOffKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugPowerOn(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPowerOnIN_UI8, + IMG_UINT8 * psRGXFWDebugPowerOnOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON *psRGXFWDebugPowerOnIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOnIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON *psRGXFWDebugPowerOnOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOnOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXFWDebugPowerOnIN); + + psRGXFWDebugPowerOnOUT->eError = + PVRSRVRGXFWDebugPowerOnKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetVzConnectionCooldownPeriodInSec(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC + *psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC *) + IMG_OFFSET_ADDR(psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC + *psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC *) + IMG_OFFSET_ADDR(psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT_UI8, 0); + + psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT->eError = + PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN-> + ui32VzConne); + + return 0; +} + static IMG_INT PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8, @@ -129,22 +194,94 @@ PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, } static IMG_INT -PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXFWDebugSetOSidPriorityIN_UI8, - IMG_UINT8 * psRGXFWDebugSetOSidPriorityOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXFWDebugSetDriverPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetDriverPriorityIN_UI8, + IMG_UINT8 * psRGXFWDebugSetDriverPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityOUT_UI8, 0); + + psRGXFWDebugSetDriverPriorityOUT->eError = + PVRSRVRGXFWDebugSetDriverPriorityKM(psConnection, 
OSGetDevNode(psConnection), + psRGXFWDebugSetDriverPriorityIN->ui32DriverID, + psRGXFWDebugSetDriverPriorityIN->ui32Priority); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetDriverTimeSlice(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetDriverTimeSliceIN_UI8, + IMG_UINT8 * psRGXFWDebugSetDriverTimeSliceOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityIN = - (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *) - IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityOUT = - (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *) - IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0); - - psRGXFWDebugSetOSidPriorityOUT->eError = - PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, OSGetDevNode(psConnection), - psRGXFWDebugSetOSidPriorityIN->ui32OSid, - psRGXFWDebugSetOSidPriorityIN->ui32Priority); + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE *psRGXFWDebugSetDriverTimeSliceIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE *psRGXFWDebugSetDriverTimeSliceOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceOUT_UI8, 0); + + psRGXFWDebugSetDriverTimeSliceOUT->eError = + PVRSRVRGXFWDebugSetDriverTimeSliceKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverTimeSliceIN->ui32DriverID, + psRGXFWDebugSetDriverTimeSliceIN-> + ui32TSPercentage); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetDriverTimeSliceInterval(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetDriverTimeSliceIntervalIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetDriverTimeSliceIntervalOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL + *psRGXFWDebugSetDriverTimeSliceIntervalIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIntervalIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL + *psRGXFWDebugSetDriverTimeSliceIntervalOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIntervalOUT_UI8, 0); + + psRGXFWDebugSetDriverTimeSliceIntervalOUT->eError = + PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverTimeSliceIntervalIN-> + ui32TSIntervalMs); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetDriverIsolationGroupIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetDriverIsolationGroupOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupOUT + = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupOUT_UI8, 0); + + psRGXFWDebugSetDriverIsolationGroupOUT->eError = + PVRSRVRGXFWDebugSetDriverIsolationGroupKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverIsolationGroupIN-> + ui32DriverID, + psRGXFWDebugSetDriverIsolationGroupIN-> + ui32IsolationGroup); return 0; } @@ -164,7 
+301,7 @@ PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, psRGXFWDebugSetOSNewOnlineStateOUT->eError = PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection), - psRGXFWDebugSetOSNewOnlineStateIN->ui32OSid, + psRGXFWDebugSetOSNewOnlineStateIN->ui32DriverID, psRGXFWDebugSetOSNewOnlineStateIN-> ui32OSNewState); @@ -186,7 +323,7 @@ PVRSRVBridgeRGXFWDebugMapGuestHeap(IMG_UINT32 ui32DispatchTableEntry, psRGXFWDebugMapGuestHeapOUT->eError = PVRSRVRGXFWDebugMapGuestHeapKM(psConnection, OSGetDevNode(psConnection), - psRGXFWDebugMapGuestHeapIN->ui32OSid, + psRGXFWDebugMapGuestHeapIN->ui32DriverID, psRGXFWDebugMapGuestHeapIN->ui64ui64GuestHeapBase); return 0; @@ -242,41 +379,14 @@ PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); - PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); - psRGXCurrentTimeOUT->eError = PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), - &psRGXCurrentTimeOUT->ui64Time); - - return 0; -} - -#if defined(SUPPORT_VALIDATION) - -static IMG_INT -PVRSRVBridgeRGXFWDebugInjectFault(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXFWDebugInjectFaultIN_UI8, - IMG_UINT8 * psRGXFWDebugInjectFaultOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultIN = - (PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *) - IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultOUT = - (PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *) - IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultOUT_UI8, 0); - - PVR_UNREFERENCED_PARAMETER(psRGXFWDebugInjectFaultIN); - - psRGXFWDebugInjectFaultOUT->eError = - PVRSRVRGXFWDebugInjectFaultKM(psConnection, OSGetDevNode(psConnection)); + psRGXCurrentTimeIN->ui8TimerType, &psRGXCurrentTimeOUT->ui64Time); return 0; } -#else #define PVRSRVBridgeRGXFWDebugInjectFault NULL -#endif /* *************************************************************************** * Server bridge dispatch related glue @@ -292,38 +402,89 @@ PVRSRV_ERROR InitRGXFWDBGBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, - PVRSRVBridgeRGXFWDebugSetFWLog, NULL); + PVRSRVBridgeRGXFWDebugSetFWLog, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, - PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); + PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWEROFF, + PVRSRVBridgeRGXFWDebugPowerOff, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWERON, + PVRSRVBridgeRGXFWDebugPowerOn, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC, + PVRSRVBridgeRGXFWDebugSetVzConnectionCooldownPeriodInSec, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC), + sizeof + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, - 
PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); + PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, - PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY, - PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL); + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY, + PVRSRVBridgeRGXFWDebugSetDriverPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE, + PVRSRVBridgeRGXFWDebugSetDriverTimeSlice, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL, + PVRSRVBridgeRGXFWDebugSetDriverTimeSliceInterval, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP, + PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, - PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); + PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP, - PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL); + PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, - PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); + PVRSRVBridgeRGXFWDebugPHRConfigure, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE, - PVRSRVBridgeRGXFWDebugWdgConfigure, NULL); + PVRSRVBridgeRGXFWDebugWdgConfigure, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, - PVRSRVBridgeRGXCurrentTime, NULL); + PVRSRVBridgeRGXCurrentTime, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCURRENTTIME), + sizeof(PVRSRV_BRIDGE_OUT_RGXCURRENTTIME)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT, - PVRSRVBridgeRGXFWDebugInjectFault, NULL); + PVRSRVBridgeRGXFWDebugInjectFault, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT)); return PVRSRV_OK; } @@ -339,11 +500,27 @@ void DeinitRGXFWDBGBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWEROFF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWERON); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + 
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, - PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY); + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h index 1804c250277f..d1d664f2d05f 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h @@ -55,16 +55,74 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_hwperf.h" #define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+6 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8 -#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8) +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXOPENHWPERFCLIENTSTREAM PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCLOSEHWPERFCLIENTSTREAM PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXWRITEHWPERFCLIENTEVENT PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+12 +#define 
PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+12) + +/******************************************* + RGXGetConfiguredHWPerfCounters + *******************************************/ + +/* Bridge in structure for RGXGetConfiguredHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +{ + RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; + IMG_UINT32 ui32BlockID; +} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS; + +/* Bridge out structure for RGXGetConfiguredHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +{ + RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS; + +/******************************************* + RGXGetEnabledHWPerfBlocks + *******************************************/ + +/* Bridge in structure for RGXGetEnabledHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG +{ + IMG_UINT32 *pui32EnabledBlockIDs; + IMG_UINT32 ui32ArrayLen; +} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS; + +/* Bridge out structure for RGXGetEnabledHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG +{ + IMG_UINT32 *pui32EnabledBlockIDs; + PVRSRV_ERROR eError; + IMG_UINT32 ui32BlockCount; +} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS; + +/******************************************* + RGXGetHWPerfTimeStamp + *******************************************/ + +/* Bridge in structure for RGXGetHWPerfTimeStamp */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP; + +/* Bridge out structure for RGXGetHWPerfTimeStamp */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP_TAG +{ + IMG_UINT64 ui64TimeStamp; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP; /******************************************* RGXCtrlHWPerf @@ -101,23 +159,6 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS; -/******************************************* - RGXConfigMuxHWPerfCounters - *******************************************/ - -/* Bridge in structure for RGXConfigMuxHWPerfCounters */ -typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS_TAG -{ - RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs; - IMG_UINT32 ui32ArrayLen; -} __packed PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS; - -/* Bridge out structure for RGXConfigMuxHWPerfCounters */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS; - /******************************************* RGXControlHWPerfBlocks *******************************************/ @@ -136,6 +177,23 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS; +/******************************************* + RGXConfigMuxHWPerfCounters + *******************************************/ + +/* Bridge in structure for RGXConfigMuxHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS_TAG +{ + RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs; + IMG_UINT32 ui32ArrayLen; +} __packed PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS; + +/* Bridge out structure for RGXConfigMuxHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS_TAG +{ + PVRSRV_ERROR eError; 
+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS; + /******************************************* RGXConfigCustomCounters *******************************************/ @@ -191,40 +249,54 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS_TAG } __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS; /******************************************* - RGXGetConfiguredHWPerfCounters + RGXOpenHWPerfClientStream *******************************************/ -/* Bridge in structure for RGXGetConfiguredHWPerfCounters */ -typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +/* Bridge in structure for RGXOpenHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM_TAG { - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; - IMG_UINT32 ui32BlockID; -} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS; + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM; -/* Bridge out structure for RGXGetConfiguredHWPerfCounters */ -typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +/* Bridge out structure for RGXOpenHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM_TAG { - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; + IMG_HANDLE hSD; PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS; +} __packed PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM; /******************************************* - RGXGetEnabledHWPerfBlocks + RGXCloseHWPerfClientStream *******************************************/ -/* Bridge in structure for RGXGetEnabledHWPerfBlocks */ -typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG +/* Bridge in structure for RGXCloseHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM_TAG { - IMG_UINT32 *pui32EnabledBlockIDs; - IMG_UINT32 ui32ArrayLen; -} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS; + IMG_HANDLE hSD; +} __packed PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM; -/* Bridge out structure for RGXGetEnabledHWPerfBlocks */ -typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG +/* Bridge out structure for RGXCloseHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM_TAG { - IMG_UINT32 *pui32EnabledBlockIDs; PVRSRV_ERROR eError; - IMG_UINT32 ui32BlockCount; -} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS; +} __packed PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM; + +/******************************************* + RGXWriteHWPerfClientEvent + *******************************************/ + +/* Bridge in structure for RGXWriteHWPerfClientEvent */ +typedef struct PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT_TAG +{ + IMG_HANDLE hSD; + IMG_BYTE *pui8Data; + IMG_UINT32 ui32Size; +} __packed PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT; + +/* Bridge out structure for RGXWriteHWPerfClientEvent */ +typedef struct PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT; #endif /* COMMON_RGXHWPERF_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c index 71938f6bac5e..fc8d05d6036a 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c @@ -66,6 +66,251 @@ CONNECTION WITH THE 
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * Server-side bridge entry points */ +static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8, + IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN = + (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT = + (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0); + + RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + + psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters = + psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters; + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXGetConfiguredHWPerfCounters_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXGetConfiguredHWPerfCountersOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXGetConfiguredHWPerfCounters_exit; + } + } + } + + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + { + psConfiguredCountersInt = + (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK); + } + + psRGXGetConfiguredHWPerfCountersOUT->eError = + PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), + psRGXGetConfiguredHWPerfCountersIN->ui32BlockID, + psConfiguredCountersInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK)) + { + goto RGXGetConfiguredHWPerfCounters_exit; + } + + /* If dest ptr is non-null and we have data to copy */ + if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters, + psConfiguredCountersInt, + (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK)) + { + psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXGetConfiguredHWPerfCounters_exit; + } + } + +RGXGetConfiguredHWPerfCounters_exit: + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0); + + IMG_UINT32 *pui32EnabledBlockIDsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0; + + psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs = + psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs; + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXGetEnabledHWPerfBlocks_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXGetEnabledHWPerfBlocks_exit; + } + } + } + + if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0) + { + pui32EnabledBlockIDsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32); + } + + psRGXGetEnabledHWPerfBlocksOUT->eError = + PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), + psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen, + &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount, + pui32EnabledBlockIDsInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK)) + { + goto RGXGetEnabledHWPerfBlocks_exit; + } + + /* If dest ptr is non-null and we have data to copy */ + if ((pui32EnabledBlockIDsInt) && + ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs, + pui32EnabledBlockIDsInt, + (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) != + PVRSRV_OK)) + { + psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXGetEnabledHWPerfBlocks_exit; + } + } + +RGXGetEnabledHWPerfBlocks_exit: + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetHWPerfTimeStamp(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetHWPerfTimeStampIN_UI8, + IMG_UINT8 * psRGXGetHWPerfTimeStampOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP *psRGXGetHWPerfTimeStampIN = + (PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP *) + IMG_OFFSET_ADDR(psRGXGetHWPerfTimeStampIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP *psRGXGetHWPerfTimeStampOUT = + (PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP *) + IMG_OFFSET_ADDR(psRGXGetHWPerfTimeStampOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfTimeStampIN); + + psRGXGetHWPerfTimeStampOUT->eError = + PVRSRVRGXGetHWPerfTimeStampKM(psConnection, OSGetDevNode(psConnection), + &psRGXGetHWPerfTimeStampOUT->ui64TimeStamp); + + return 0; +} + static IMG_INT PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, @@ -110,51 +355,47 @@ static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXConfigMuxHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXConfigMuxHWPerfCountersIN_UI8, - IMG_UINT8 * psRGXConfigMuxHWPerfCountersOUT_UI8, - CONNECTION_DATA * 
psConnection) +PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersIN = - (PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *) - IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersOUT = - (PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *) - IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0); - RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigsInt = NULL; + IMG_UINT16 *ui16BlockIDsInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * - sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0; + ((IMG_UINT64) psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0; - if (unlikely(psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) { - psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXConfigMuxHWPerfCounters_exit; + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXControlHWPerfBlocks_exit; } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXConfigMuxHWPerfCounters_exit; + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXControlHWPerfBlocks_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXConfigMuxHWPerfCountersIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -162,68 +403,57 @@ PVRSRVBridgeRGXConfigMuxHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = - (IMG_BYTE *) (void *)psRGXConfigMuxHWPerfCountersIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXConfigMuxHWPerfCountersOUT->eError = - PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXConfigMuxHWPerfCounters_exit; + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXControlHWPerfBlocks_exit; } } } - if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen != 0) + if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0) { - psBlockConfigsInt = - (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, - ui32NextOffset); - ui32NextOffset += - psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * - sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK); + ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16); } /* Copy the data over */ - if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK) > 0) + if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0) { if (OSCopyFromUser - (NULL, psBlockConfigsInt, - (const void __user *)psRGXConfigMuxHWPerfCountersIN->psBlockConfigs, - psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * - sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) != PVRSRV_OK) + (NULL, ui16BlockIDsInt, + (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs, + psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK) { - psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXConfigMuxHWPerfCounters_exit; + goto RGXControlHWPerfBlocks_exit; } } - psRGXConfigMuxHWPerfCountersOUT->eError = - PVRSRVRGXConfigMuxHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), - psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen, - psBlockConfigsInt); + psRGXControlHWPerfBlocksOUT->eError = + PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), + psRGXControlHWPerfBlocksIN->bEnable, + psRGXControlHWPerfBlocksIN->ui32ArrayLen, + ui16BlockIDsInt); -RGXConfigMuxHWPerfCounters_exit: +RGXControlHWPerfBlocks_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXConfigMuxHWPerfCountersOUT->eError == PVRSRV_OK) + if (psRGXControlHWPerfBlocksOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -233,50 +463,48 @@ static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8, - IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXConfigMuxHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXConfigMuxHWPerfCountersIN_UI8, + IMG_UINT8 * psRGXConfigMuxHWPerfCountersOUT_UI8, + 
CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN = - (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT = - (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersIN = + (PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersOUT = + (PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersOUT_UI8, 0); - IMG_UINT16 *ui16BlockIDsInt = NULL; + RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigsInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0; + ((IMG_UINT64) psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0; - if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + if (unlikely(psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) { - psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXControlHWPerfBlocks_exit; + psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXConfigMuxHWPerfCounters_exit; } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXControlHWPerfBlocks_exit; + psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXConfigMuxHWPerfCounters_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXConfigMuxHWPerfCountersIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -284,62 +512,63 @@ PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN; + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXConfigMuxHWPerfCountersIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXControlHWPerfBlocks_exit; + psRGXConfigMuxHWPerfCountersOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXConfigMuxHWPerfCounters_exit; } } } - if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0) + if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen != 0) { - ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16); + psBlockConfigsInt = + (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, + ui32NextOffset); + ui32NextOffset += + psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK); } /* Copy the data over */ - if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0) + if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK) > 0) { if (OSCopyFromUser - (NULL, ui16BlockIDsInt, - (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs, - psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK) + (NULL, psBlockConfigsInt, + (const void __user *)psRGXConfigMuxHWPerfCountersIN->psBlockConfigs, + psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) != PVRSRV_OK) { - psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXControlHWPerfBlocks_exit; + goto RGXConfigMuxHWPerfCounters_exit; } } - psRGXControlHWPerfBlocksOUT->eError = - PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), - psRGXControlHWPerfBlocksIN->bEnable, - psRGXControlHWPerfBlocksIN->ui32ArrayLen, - ui16BlockIDsInt); + psRGXConfigMuxHWPerfCountersOUT->eError = + PVRSRVRGXConfigMuxHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), + psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen, + psBlockConfigsInt); -RGXControlHWPerfBlocks_exit: +RGXConfigMuxHWPerfCounters_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXControlHWPerfBlocksOUT->eError == PVRSRV_OK) + if (psRGXConfigMuxHWPerfCountersOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -365,9 +594,7 @@ PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -391,7 +618,6 @@ PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long)); @@ -407,7 +633,6 @@ PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -456,11 +681,7 @@ PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -486,9 +707,7 @@ PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -511,7 +730,6 @@ PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long)); @@ -527,7 +745,6 @@ PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -576,11 +793,7 @@ PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -607,9 +820,7 @@ PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0; @@ -628,7 +839,6 @@ PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfMuxCountersIN), @@ -646,7 +856,6 @@ PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -702,178 +911,162 @@ PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX"); +static PVRSRV_ERROR _RGXOpenHWPerfClientStreampsSDIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXCloseHWPerfClientStreamKM((HWPERF_STREAM_DESC *) pvData); + return eError; +} static IMG_INT -PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8, - IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXOpenHWPerfClientStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXOpenHWPerfClientStreamIN_UI8, + IMG_UINT8 * psRGXOpenHWPerfClientStreamOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN = - (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *) - IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT = - (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *) - IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM *psRGXOpenHWPerfClientStreamIN = + (PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXOpenHWPerfClientStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM *psRGXOpenHWPerfClientStreamOUT = + (PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXOpenHWPerfClientStreamOUT_UI8, 0); - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + HWPERF_STREAM_DESC *psSDInt = NULL; - psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters = - psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters; + PVR_UNREFERENCED_PARAMETER(psRGXOpenHWPerfClientStreamIN); - if (ui64BufferSize > IMG_UINT32_MAX) + psRGXOpenHWPerfClientStreamOUT->eError = + PVRSRVRGXOpenHWPerfClientStreamKM(psConnection, OSGetDevNode(psConnection), &psSDInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXOpenHWPerfClientStreamOUT->eError != PVRSRV_OK)) { - psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXGetConfiguredHWPerfCounters_exit; + goto RGXOpenHWPerfClientStream_exit; } - ui32BufferSize = (IMG_UINT32) ui64BufferSize; - - if (ui32BufferSize != 0) + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXOpenHWPerfClientStreamOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXOpenHWPerfClientStreamOUT->hSD, (void *)psSDInt, + PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXOpenHWPerfClientStreampsSDIntRelease); + if (unlikely(psRGXOpenHWPerfClientStreamOUT->eError != PVRSRV_OK)) { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = - (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN; - - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - - if (!pArrayArgsBuffer) - { - psRGXGetConfiguredHWPerfCountersOUT->eError = - PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXGetConfiguredHWPerfCounters_exit; - } - } + UnlockHandle(psConnection->psHandleBase); + goto RGXOpenHWPerfClientStream_exit; } - if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) - { - psConfiguredCountersInt = - (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK); - } + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - psRGXGetConfiguredHWPerfCountersOUT->eError = - PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), - psRGXGetConfiguredHWPerfCountersIN->ui32BlockID, - psConfiguredCountersInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK)) - { - goto RGXGetConfiguredHWPerfCounters_exit; - } +RGXOpenHWPerfClientStream_exit: - /* If dest ptr is non-null and we have data to copy */ - if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0)) + if (psRGXOpenHWPerfClientStreamOUT->eError != PVRSRV_OK) { - if (unlikely - (OSCopyToUser - (NULL, - (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters, - psConfiguredCountersInt, - (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK)) + if (psSDInt) { - psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXGetConfiguredHWPerfCounters_exit; + PVRSRVRGXCloseHWPerfClientStreamKM(psSDInt); } } -RGXGetConfiguredHWPerfCounters_exit: + return 0; +} - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ +static IMG_INT +PVRSRVBridgeRGXCloseHWPerfClientStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCloseHWPerfClientStreamIN_UI8, + IMG_UINT8 * psRGXCloseHWPerfClientStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM *psRGXCloseHWPerfClientStreamIN = + (PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXCloseHWPerfClientStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM *psRGXCloseHWPerfClientStreamOUT = + (PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM *) + 
IMG_OFFSET_ADDR(psRGXCloseHWPerfClientStreamOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXCloseHWPerfClientStreamOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXCloseHWPerfClientStreamIN->hSD, + PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD); + if (unlikely((psRGXCloseHWPerfClientStreamOUT->eError != PVRSRV_OK) && + (psRGXCloseHWPerfClientStreamOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXCloseHWPerfClientStreamOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXCloseHWPerfClientStreamOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXCloseHWPerfClientStream_exit; + } -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCloseHWPerfClientStream_exit: return 0; } +static_assert(PVRSRVTL_MAX_PACKET_SIZE <= IMG_UINT32_MAX, + "PVRSRVTL_MAX_PACKET_SIZE must not be larger than IMG_UINT32_MAX"); + static IMG_INT -PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8, - IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8, +PVRSRVBridgeRGXWriteHWPerfClientEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXWriteHWPerfClientEventIN_UI8, + IMG_UINT8 * psRGXWriteHWPerfClientEventOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN = - (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT = - (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT *psRGXWriteHWPerfClientEventIN = + (PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT *) + IMG_OFFSET_ADDR(psRGXWriteHWPerfClientEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT *psRGXWriteHWPerfClientEventOUT = + (PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT *) + IMG_OFFSET_ADDR(psRGXWriteHWPerfClientEventOUT_UI8, 0); - IMG_UINT32 *pui32EnabledBlockIDsInt = NULL; + IMG_HANDLE hSD = psRGXWriteHWPerfClientEventIN->hSD; + HWPERF_STREAM_DESC *psSDInt = NULL; + IMG_BYTE *ui8DataInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0; + ((IMG_UINT64) psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE)) + 0; - psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs = - psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs; + if (unlikely(psRGXWriteHWPerfClientEventIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) + { + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXWriteHWPerfClientEvent_exit; + } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXGetEnabledHWPerfBlocks_exit; + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXWriteHWPerfClientEvent_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if 
!defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXWriteHWPerfClientEventIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -881,71 +1074,82 @@ PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXWriteHWPerfClientEventIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXGetEnabledHWPerfBlocks_exit; + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXWriteHWPerfClientEvent_exit; } } } - if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0) - { - pui32EnabledBlockIDsInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32); - } - - psRGXGetEnabledHWPerfBlocksOUT->eError = - PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), - psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen, - &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount, - pui32EnabledBlockIDsInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK)) + if (psRGXWriteHWPerfClientEventIN->ui32Size != 0) { - goto RGXGetEnabledHWPerfBlocks_exit; + ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE); } - /* If dest ptr is non-null and we have data to copy */ - if ((pui32EnabledBlockIDsInt) && - ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0)) + /* Copy the data over */ + if (psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE) > 0) { - if (unlikely - (OSCopyToUser - (NULL, (void __user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs, - pui32EnabledBlockIDsInt, - (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) != - PVRSRV_OK)) + if (OSCopyFromUser + (NULL, ui8DataInt, (const void __user *)psRGXWriteHWPerfClientEventIN->pui8Data, + psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) { - psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXGetEnabledHWPerfBlocks_exit; + goto RGXWriteHWPerfClientEvent_exit; } } -RGXGetEnabledHWPerfBlocks_exit: + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXWriteHWPerfClientEventOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD, IMG_TRUE); + if (unlikely(psRGXWriteHWPerfClientEventOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXWriteHWPerfClientEvent_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXWriteHWPerfClientEventOUT->eError = + PVRSRVRGXWriteHWPerfClientEventKM(psSDInt, + psRGXWriteHWPerfClientEventIN->ui32Size, ui8DataInt); + +RGXWriteHWPerfClientEvent_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK) + if (psRGXWriteHWPerfClientEventOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -964,40 +1168,79 @@ void DeinitRGXHWPERFBridge(void); PVRSRV_ERROR InitRGXHWPERFBridge(void) { + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS, + PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS), + sizeof(PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS, + PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS), + sizeof(PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP, + PVRSRVBridgeRGXGetHWPerfTimeStamp, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP)); + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, - PVRSRVBridgeRGXCtrlHWPerf, NULL); + PVRSRVBridgeRGXCtrlHWPerf, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCTRLHWPERF), + sizeof(PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, - PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL); + PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS, - PVRSRVBridgeRGXConfigMuxHWPerfCounters, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS, + PVRSRVBridgeRGXControlHWPerfBlocks, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS), + sizeof(PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS, - PVRSRVBridgeRGXControlHWPerfBlocks, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS, + PVRSRVBridgeRGXConfigMuxHWPerfCounters, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS), + sizeof(PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, - PVRSRVBridgeRGXConfigCustomCounters, NULL); + PVRSRVBridgeRGXConfigCustomCounters, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS), + sizeof(PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS, - PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL); + 
PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS), + sizeof(PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS, - PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters, NULL); + PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS), + sizeof(PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS, - PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXOPENHWPERFCLIENTSTREAM, + PVRSRVBridgeRGXOpenHWPerfClientStream, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS, - PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXCLOSEHWPERFCLIENTSTREAM, + PVRSRVBridgeRGXCloseHWPerfClientStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM), + sizeof(PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXWRITEHWPERFCLIENTEVENT, + PVRSRVBridgeRGXWriteHWPerfClientEvent, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT), + sizeof(PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT)); return PVRSRV_OK; } @@ -1008,16 +1251,25 @@ PVRSRV_ERROR InitRGXHWPERFBridge(void) void DeinitRGXHWPERFBridge(void) { + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS); + PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS); + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS); @@ -1029,9 +1281,12 @@ void DeinitRGXHWPERFBridge(void) PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS); + PVRSRV_BRIDGE_RGXHWPERF_RGXOPENHWPERFCLIENTSTREAM); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS); + PVRSRV_BRIDGE_RGXHWPERF_RGXCLOSEHWPERFCLIENTSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXWRITEHWPERFCLIENTEVENT); } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c index 233e0fe3b4c2..8d943cd536e2 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c @@ -61,6 +61,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 
THE SOFTWARE. #include +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) + /* *************************************************************************** * Server-side bridge entry points */ @@ -228,9 +230,7 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -257,7 +257,6 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); @@ -273,7 +272,6 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -461,11 +459,7 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -532,6 +526,9 @@ PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, * Server bridge dispatch related glue */ +#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */ + +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) PVRSRV_ERROR InitRGXKICKSYNCBridge(void); void DeinitRGXKICKSYNCBridge(void); @@ -543,18 +540,25 @@ PVRSRV_ERROR InitRGXKICKSYNCBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, - PVRSRVBridgeRGXCreateKickSyncContext, NULL); + PVRSRVBridgeRGXCreateKickSyncContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, - PVRSRVBridgeRGXDestroyKickSyncContext, NULL); + PVRSRVBridgeRGXDestroyKickSyncContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2, - PVRSRVBridgeRGXKickSync2, NULL); + PVRSRVBridgeRGXKickSync2, NULL, sizeof(PVRSRV_BRIDGE_IN_RGXKICKSYNC2), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKSYNC2)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY, - PVRSRVBridgeRGXSetKickSyncContextProperty, NULL); + PVRSRVBridgeRGXSetKickSyncContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY)); return PVRSRV_OK; } @@ -577,3 +581,13 @@ void DeinitRGXKICKSYNCBridge(void) PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY); } +#else /* SUPPORT_RGXKICKSYNC_BRIDGE */ +/* This bridge is conditional on SUPPORT_RGXKICKSYNC_BRIDGE - when not defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXKICKSYNCBridge() \ + PVRSRV_OK + +#define DeinitRGXKICKSYNCBridge() + +#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c index ed34b211c135..7ba5126053dc 100644 --- 
a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c @@ -76,19 +76,9 @@ IMG_INTERNAL PVRSRV_ERROR BridgePDumpSignatureBuffer(IMG_HANDLE hBridge, IMG_UIN IMG_INTERNAL PVRSRV_ERROR BridgePDumpComputeCRCSignatureCheck(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags) { -#if defined(SUPPORT_VALIDATION) - PVRSRV_ERROR eError; - - eError = - PVRSRVPDumpComputeCRCSignatureCheckKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - ui32PDumpFlags); - - return eError; -#else PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); return PVRSRV_ERROR_NOT_IMPLEMENTED; -#endif } IMG_INTERNAL PVRSRV_ERROR BridgePDumpCRCSignatureCheck(IMG_HANDLE hBridge, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c index e9a5d42422ce..58ed4ebb037f 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c @@ -102,32 +102,7 @@ PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry, return 0; } -#if defined(SUPPORT_VALIDATION) - -static IMG_INT -PVRSRVBridgePDumpComputeCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPDumpComputeCRCSignatureCheckIN_UI8, - IMG_UINT8 * psPDumpComputeCRCSignatureCheckOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckIN = - (PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *) - IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckOUT = - (PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *) - IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckOUT_UI8, 0); - - psPDumpComputeCRCSignatureCheckOUT->eError = - PVRSRVPDumpComputeCRCSignatureCheckKM(psConnection, OSGetDevNode(psConnection), - psPDumpComputeCRCSignatureCheckIN-> - ui32PDumpFlags); - - return 0; -} - -#else #define PVRSRVBridgePDumpComputeCRCSignatureCheck NULL -#endif static IMG_INT PVRSRVBridgePDumpCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry, @@ -203,25 +178,37 @@ PVRSRV_ERROR InitRGXPDUMPBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, - PVRSRVBridgePDumpTraceBuffer, NULL); + PVRSRVBridgePDumpTraceBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, - PVRSRVBridgePDumpSignatureBuffer, NULL); + PVRSRVBridgePDumpSignatureBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK, - PVRSRVBridgePDumpComputeCRCSignatureCheck, NULL); + PVRSRVBridgePDumpComputeCRCSignatureCheck, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK, - PVRSRVBridgePDumpCRCSignatureCheck, NULL); + PVRSRVBridgePDumpCRCSignatureCheck, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK)); 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND, - PVRSRVBridgePDumpValCheckPreCommand, NULL); + PVRSRVBridgePDumpValCheckPreCommand, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND, - PVRSRVBridgePDumpValCheckPostCommand, NULL); + PVRSRVBridgePDumpValCheckPostCommand, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c index 4cdcb127299b..548401470d09 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c @@ -184,23 +184,30 @@ PVRSRV_ERROR InitRGXREGCONFIGBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE, - PVRSRVBridgeRGXSetRegConfigType, NULL); + PVRSRVBridgeRGXSetRegConfigType, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, - PVRSRVBridgeRGXAddRegconfig, NULL); + PVRSRVBridgeRGXAddRegconfig, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXADDREGCONFIG), + sizeof(PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG, - PVRSRVBridgeRGXClearRegConfig, NULL); + PVRSRVBridgeRGXClearRegConfig, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, - PVRSRVBridgeRGXEnableRegConfig, NULL); + PVRSRVBridgeRGXEnableRegConfig, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG, - PVRSRVBridgeRGXDisableRegConfig, NULL); + PVRSRVBridgeRGXDisableRegConfig, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h index 49cae125f3f2..cf215caeddff 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h @@ -57,64 +57,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pvrsrv_sync_km.h" #define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 -#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 -#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 -#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 -#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 -#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13) - -/******************************************* - RGXCreateHWRTDataSet - *******************************************/ - -/* Bridge in structure for RGXCreateHWRTDataSet */ -typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG -{ - IMG_UINT64 ui64FlippedMultiSampleCtl; - IMG_UINT64 ui64MultiSampleCtl; - IMG_DEV_VIRTADDR *psMacrotileArrayDevVAddr; - IMG_DEV_VIRTADDR *psPMMlistDevVAddr; - IMG_DEV_VIRTADDR *psRTCDevVAddr; - IMG_DEV_VIRTADDR *psRgnHeaderDevVAddr; - IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; - IMG_DEV_VIRTADDR *psVHeapTableDevVAddr; - IMG_HANDLE *phKmHwRTDataSet; - IMG_HANDLE *phapsFreeLists; - IMG_UINT32 ui32ISPMergeLowerX; - IMG_UINT32 ui32ISPMergeLowerY; - IMG_UINT32 ui32ISPMergeScaleX; - IMG_UINT32 ui32ISPMergeScaleY; - IMG_UINT32 ui32ISPMergeUpperX; - IMG_UINT32 ui32ISPMergeUpperY; - IMG_UINT32 ui32ISPMtileSize; - IMG_UINT32 ui32MTileStride; - IMG_UINT32 ui32PPPScreen; - IMG_UINT32 ui32RgnHeaderSize; - IMG_UINT32 ui32TEAA; - IMG_UINT32 ui32TEMTILE1; - IMG_UINT32 ui32TEMTILE2; - IMG_UINT32 ui32TEScreen; - IMG_UINT32 ui32TPCSize; - IMG_UINT32 ui32TPCStride; - IMG_UINT16 ui16MaxRTs; -} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; - -/* Bridge out structure for RGXCreateHWRTDataSet */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG -{ - IMG_HANDLE *phKmHwRTDataSet; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 +#define 
PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15 +#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15) /******************************************* RGXDestroyHWRTDataSet @@ -200,32 +159,6 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; -/******************************************* - RGXCreateFreeList - *******************************************/ - -/* Bridge in structure for RGXCreateFreeList */ -typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG -{ - IMG_DEV_VIRTADDR spsFreeListDevVAddr; - IMG_DEVMEM_OFFSET_T uiPMROffset; - IMG_HANDLE hMemCtxPrivData; - IMG_HANDLE hsFreeListPMR; - IMG_HANDLE hsGlobalFreeList; - IMG_UINT32 ui32GrowFLPages; - IMG_UINT32 ui32GrowParamThreshold; - IMG_UINT32 ui32InitFLPages; - IMG_UINT32 ui32MaxFLPages; - IMG_BOOL bbFreeListCheck; -} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; - -/* Bridge out structure for RGXCreateFreeList */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG -{ - IMG_HANDLE hCleanupCookie; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; - /******************************************* RGXDestroyFreeList *******************************************/ @@ -242,35 +175,6 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; -/******************************************* - RGXCreateRenderContext - *******************************************/ - -/* Bridge in structure for RGXCreateRenderContext */ -typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG -{ - IMG_DEV_VIRTADDR sVDMCallStackAddr; - IMG_UINT64 ui64RobustnessAddress; - IMG_HANDLE hPrivData; - IMG_BYTE *pui8FrameworkCmd; - IMG_BYTE *pui8StaticRenderContextState; - IMG_INT32 i32Priority; - IMG_UINT32 ui32ContextFlags; - IMG_UINT32 ui32FrameworkCmdSize; - IMG_UINT32 ui32Max3DDeadlineMS; - IMG_UINT32 ui32MaxTADeadlineMS; - IMG_UINT32 ui32PackedCCBSizeU8888; - IMG_UINT32 ui32StaticRenderContextStateSize; - IMG_UINT32 ui32ui32CallStackDepth; -} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; - -/* Bridge out structure for RGXCreateRenderContext */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG -{ - IMG_HANDLE hRenderContext; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; - /******************************************* RGXDestroyRenderContext *******************************************/ @@ -287,6 +191,25 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; +/******************************************* + RGXSendZSStoreDisable + *******************************************/ + +/* Bridge in structure for RGXSendZSStoreDisable */ +typedef struct PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE_TAG +{ + IMG_HANDLE hRenderContext; + IMG_INT32 i32ExtJobRefToDisableZSStore; + IMG_BOOL bDisableDepthStore; + IMG_BOOL 
bDisableStencilStore; +} __packed PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE; + +/* Bridge out structure for RGXSendZSStoreDisable */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE; + /******************************************* RGXSetRenderContextPriority *******************************************/ @@ -401,4 +324,143 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; +/******************************************* + RGXCreateHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG +{ + IMG_UINT64 ui64FlippedMultiSampleCtl; + IMG_UINT64 ui64MultiSampleCtl; + IMG_DEV_VIRTADDR *psMacrotileArrayDevVAddr; + IMG_DEV_VIRTADDR *psPMMlistDevVAddr; + IMG_DEV_VIRTADDR *psRTCDevVAddr; + IMG_DEV_VIRTADDR *psRgnHeaderDevVAddr; + IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; + IMG_DEV_VIRTADDR *psVHeapTableDevVAddr; + IMG_HANDLE *phKmHwRTDataSet; + IMG_HANDLE *phapsFreeLists; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMtileSize; + IMG_UINT32 ui32MTileStride; + IMG_UINT32 ui32PPPScreen; + IMG_UINT32 ui32RgnHeaderSize; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TPCStride; + IMG_UINT16 ui16MaxRTs; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; + +/* Bridge out structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG +{ + IMG_HANDLE *phKmHwRTDataSet; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; + +/******************************************* + RGXCreateFreeList + *******************************************/ + +/* Bridge in structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hFreeListReservation; + IMG_HANDLE hMemCtxPrivData; + IMG_HANDLE hsGlobalFreeList; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32GrowParamThreshold; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32MaxFLPages; + IMG_BOOL bbFreeListCheck; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; + +/* Bridge out structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; + +/******************************************* + RGXCreateRenderContext + *******************************************/ + +/* Bridge in structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG +{ + IMG_DEV_VIRTADDR sVDMCallStackAddr; + IMG_UINT64 ui64RobustnessAddress; + IMG_HANDLE hPrivData; + IMG_BYTE *pui8FrameworkCmd; + IMG_BYTE *pui8StaticRenderContextState; + IMG_INT32 i32Priority; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32FrameworkCmdSize; + IMG_UINT32 ui32Max3DDeadlineMS; + IMG_UINT32 ui32MaxTADeadlineMS; + IMG_UINT32 ui32PackedCCBSizeU8888; + IMG_UINT32 ui32StaticRenderContextStateSize; + IMG_UINT32 ui32ui32CallStackDepth; +} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; + +/* Bridge out structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG +{ + IMG_HANDLE 
hRenderContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; + +/******************************************* + RGXCreateHWRTDataSet2 + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2_TAG +{ + IMG_UINT64 ui64FlippedMultiSampleCtl; + IMG_UINT64 ui64MultiSampleCtl; + IMG_HANDLE hPMMlistsReservation; + IMG_DEV_VIRTADDR *psMacrotileArrayDevVAddr; + IMG_DEV_VIRTADDR *psRTCDevVAddr; + IMG_DEV_VIRTADDR *psRgnHeaderDevVAddr; + IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; + IMG_DEV_VIRTADDR *psVHeapTableDevVAddr; + IMG_HANDLE *phKmHwRTDataSet; + IMG_HANDLE *phapsFreeLists; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMtileSize; + IMG_UINT32 ui32MTileStride; + IMG_UINT32 ui32PPPScreen; + IMG_UINT32 ui32RgnHeaderSize; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TPCStride; + IMG_UINT16 ui16MaxRTs; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2; + +/* Bridge out structure for RGXCreateHWRTDataSet2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2_TAG +{ + IMG_HANDLE *phKmHwRTDataSet; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2; + #endif /* COMMON_RGXTA3D_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c index 2194f9a9b296..121ec9714cd3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c @@ -65,1015 +65,1569 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* Server-side bridge entry points */ -static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); - return eError; -} - -static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); - static IMG_INT -PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, - IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = - (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) - IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); - IMG_DEV_VIRTADDR *sVHeapTableDevVAddrInt = NULL; - IMG_DEV_VIRTADDR *sPMMlistDevVAddrInt = NULL; - RGX_FREELIST **psapsFreeListsInt = NULL; - IMG_HANDLE *hapsFreeListsInt2 = NULL; - IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; - IMG_DEV_VIRTADDR *sMacrotileArrayDevVAddrInt = NULL; - IMG_DEV_VIRTADDR *sRgnHeaderDevVAddrInt = NULL; - IMG_DEV_VIRTADDR *sRTCDevVAddrInt = NULL; - RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; - IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif + psRGXDestroyHWRTDataSetOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyHWRTDataSetIN-> + hKmHwRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) + && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyHWRTDataSet_exit; + } - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); - psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet; +RGXDestroyHWRTDataSet_exit: - if (ui64BufferSize > IMG_UINT32_MAX) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXCreateHWRTDataSet_exit; - } + return 0; +} - ui32BufferSize = (IMG_UINT32) ui64BufferSize; +static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); + return eError; +} - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; +static IMG_INT +PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateZSBufferIN_UI8, + IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; + IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; + PMR *psPMRInt = NULL; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - if (!pArrayArgsBuffer) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXCreateHWRTDataSet_exit; - } - } + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; } + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) { - sVHeapTableDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + psRGXCreateZSBufferOUT->eError = + RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), + psReservationInt, + psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, sVHeapTableDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psVHeapTableDevVAddr, - RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; - } + goto RGXCreateZSBuffer_exit; } + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateZSBufferOUT-> + hsZSBufferKM, + (void *)pssZSBufferKMInt, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateZSBufferpssZSBufferKMIntRelease); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) { - sPMMlistDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; } - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) - { - if (OSCopyFromUser - (NULL, sPMMlistDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psPMMlistDevVAddr, - RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - goto RGXCreateHWRTDataSet_exit; - } +RGXCreateZSBuffer_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } + /* Unreference the previously looked up handle */ + if (psPMRInt) { - psapsFreeListsInt = - (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psapsFreeListsInt, 0, - RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); - ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); - hapsFreeListsInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) + if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) { - if (OSCopyFromUser - (NULL, hapsFreeListsInt2, - (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, - RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) + if (pssZSBufferKMInt) { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; + RGXDestroyZSBufferKM(pssZSBufferKMInt); } } - { - sTailPtrsDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); - } - - /* Copy the data over */ - if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) - { - if (OSCopyFromUser - (NULL, sTailPtrsDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr, - RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; +} - goto RGXCreateHWRTDataSet_exit; - } - } +static IMG_INT +PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, + IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, + 0); - { - sMacrotileArrayDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); - } + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + psRGXDestroyZSBufferOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyZSBufferIN-> + hsZSBufferMemDesc, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + if (unlikely + ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) + && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) { - if (OSCopyFromUser - (NULL, sMacrotileArrayDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psMacrotileArrayDevVAddr, - RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; - } + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyZSBuffer_exit; } - { - sRgnHeaderDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); - } + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) - { - if (OSCopyFromUser - (NULL, sRgnHeaderDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psRgnHeaderDevVAddr, - RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; +RGXDestroyZSBuffer_exit: - goto RGXCreateHWRTDataSet_exit; - } - } + return 0; +} - { - sRTCDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); - } +static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); + return eError; +} - /* Copy the data over */ - if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) - { - if (OSCopyFromUser - (NULL, sRTCDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psRTCDevVAddr, - RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; +static IMG_INT +PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, + 0); - goto RGXCreateHWRTDataSet_exit; - } - } - if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) - { - psKmHwRTDataSetInt = - (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psKmHwRTDataSetInt, 0, - RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); - hKmHwRTDataSetInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); - } + IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + RGX_POPULATION *pssPopulationInt = NULL; /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); + /* Look up the address from the handle */ + psRGXPopulateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssZSBufferKMInt, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) { - IMG_UINT32 i; - - for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) - { - /* Look up the address from the handle */ - psRGXCreateHWRTDataSetOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psapsFreeListsInt[i], - hapsFreeListsInt2[i], - PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); - if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateHWRTDataSet_exit; - } - } + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; } /* Release now we have looked up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - psRGXCreateHWRTDataSetOUT->eError = - RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), - sVHeapTableDevVAddrInt, - sPMMlistDevVAddrInt, - psapsFreeListsInt, - psRGXCreateHWRTDataSetIN->ui32PPPScreen, - psRGXCreateHWRTDataSetIN->ui64MultiSampleCtl, - psRGXCreateHWRTDataSetIN->ui64FlippedMultiSampleCtl, - psRGXCreateHWRTDataSetIN->ui32TPCStride, - sTailPtrsDevVAddrInt, - psRGXCreateHWRTDataSetIN->ui32TPCSize, - psRGXCreateHWRTDataSetIN->ui32TEScreen, - psRGXCreateHWRTDataSetIN->ui32TEAA, - psRGXCreateHWRTDataSetIN->ui32TEMTILE1, - psRGXCreateHWRTDataSetIN->ui32TEMTILE2, - psRGXCreateHWRTDataSetIN->ui32MTileStride, - psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, - psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, - psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, - psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, - psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, - psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, - sMacrotileArrayDevVAddrInt, - sRgnHeaderDevVAddrInt, - sRTCDevVAddrInt, - psRGXCreateHWRTDataSetIN->ui32RgnHeaderSize, - psRGXCreateHWRTDataSetIN->ui32ISPMtileSize, - psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt); + psRGXPopulateZSBufferOUT->eError = + RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) { - goto RGXCreateHWRTDataSet_exit; + goto RGXPopulateZSBuffer_exit; } /* Lock over handle creation. */ LockHandle(psConnection->psHandleBase); - if (hKmHwRTDataSetInt2) + + psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXPopulateZSBufferOUT-> + hsPopulation, + (void *)pssPopulationInt, + PVRSRV_HANDLE_TYPE_RGX_POPULATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXPopulateZSBufferpssPopulationIntRelease); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) { - IMG_UINT32 i; + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } - for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) - { + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - psRGXCreateHWRTDataSetOUT->eError = - PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &hKmHwRTDataSetInt2[i], - (void *)psKmHwRTDataSetInt[i], - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); - if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateHWRTDataSet_exit; - } +RGXPopulateZSBuffer_exit: - } + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pssZSBufferKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); } - /* Release now we have created handles. */ + /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - /* If dest ptr is non-null and we have data to copy */ - if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) + if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) { - if (unlikely - (OSCopyToUser - (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet, - hKmHwRTDataSetInt2, - (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) + if (pssPopulationInt) { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; + RGXUnpopulateZSBufferKM(pssPopulationInt); } } -RGXCreateHWRTDataSet_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); + return 0; +} - if (hapsFreeListsInt2) - { - IMG_UINT32 i; - - for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) - { +static IMG_INT +PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); - /* Unreference the previously looked up handle */ - if (psapsFreeListsInt && psapsFreeListsInt[i]) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hapsFreeListsInt2[i], - PVRSRV_HANDLE_TYPE_RGX_FREELIST); - } - } - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); - if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) + psRGXUnpopulateZSBufferOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, + PVRSRV_HANDLE_TYPE_RGX_POPULATION); + if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && + (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) { - { - IMG_UINT32 i; - - if (hKmHwRTDataSetInt2) - { - for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) - { - if (hKmHwRTDataSetInt2[i]) - { - RGXDestroyHWRTDataSet(hKmHwRTDataSetInt2[i]); - } - } - } - } + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXUnpopulateZSBuffer_exit; } - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); +RGXUnpopulateZSBuffer_exit: return 0; } static IMG_INT -PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, - IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyFreeListIN_UI8, + IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) - IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) - IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, + 0); /* Lock over handle destruction. */ LockHandle(psConnection->psHandleBase); - psRGXDestroyHWRTDataSetOUT->eError = + psRGXDestroyFreeListOUT->eError = PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyHWRTDataSetIN-> - hKmHwRTDataSet, - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); - if (unlikely - ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) - && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && + (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) { PVR_DPF((PVR_DBG_ERROR, "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); + __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyHWRTDataSet_exit; + goto RGXDestroyFreeList_exit; } /* Release now we have destroyed handles. 
*/ UnlockHandle(psConnection->psHandleBase); -RGXDestroyHWRTDataSet_exit: +RGXDestroyFreeList_exit: return 0; } -static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); - return eError; -} - static IMG_INT -PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateZSBufferIN_UI8, - IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, + IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); - - IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; - DEVMEMINT_RESERVATION *psReservationInt = NULL; - IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; - PMR *psPMRInt = NULL; - RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); - /* Lock over handle lookup. */ + /* Lock over handle destruction. */ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXCreateZSBufferOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psReservationInt, - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + psRGXDestroyRenderContextOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyRenderContextIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + if (unlikely + ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXCreateZSBuffer_exit; + goto RGXDestroyRenderContext_exit; } - /* Look up the address from the handle */ - psRGXCreateZSBufferOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateZSBuffer_exit; - } - /* Release now we have looked up handles. */ + /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); - psRGXCreateZSBufferOUT->eError = - RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), - psReservationInt, - psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) - { - goto RGXCreateZSBuffer_exit; - } +RGXDestroyRenderContext_exit: - /* Lock over handle creation. 
*/ + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSendZSStoreDisable(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSendZSStoreDisableIN_UI8, + IMG_UINT8 * psRGXSendZSStoreDisableOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE *psRGXSendZSStoreDisableIN = + (PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE *) + IMG_OFFSET_ADDR(psRGXSendZSStoreDisableIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE *psRGXSendZSStoreDisableOUT = + (PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE *) + IMG_OFFSET_ADDR(psRGXSendZSStoreDisableOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSendZSStoreDisableIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); - psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateZSBufferOUT-> - hsZSBufferKM, - (void *)pssZSBufferKMInt, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateZSBufferpssZSBufferKMIntRelease); - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + /* Look up the address from the handle */ + psRGXSendZSStoreDisableOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSendZSStoreDisableOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXCreateZSBuffer_exit; + goto RGXSendZSStoreDisable_exit; } - - /* Release now we have created handles. */ + /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); -RGXCreateZSBuffer_exit: + psRGXSendZSStoreDisableOUT->eError = + PVRSRVRGXSendZSStoreDisableKM(psConnection, OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSendZSStoreDisableIN->bDisableDepthStore, + psRGXSendZSStoreDisableIN->bDisableStencilStore, + psRGXSendZSStoreDisableIN->i32ExtJobRefToDisableZSStore); + +RGXSendZSStoreDisable_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psReservationInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) + if (psRenderContextInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) - { - if (pssZSBufferKMInt) - { - RGXDestroyZSBufferKM(pssZSBufferKMInt); - } - } - return 0; } static IMG_INT -PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, - IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, + IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, - 0); + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); - /* Lock over handle destruction. */ + IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); - psRGXDestroyZSBufferOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyZSBufferIN-> - hsZSBufferMemDesc, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); - if (unlikely - ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) - && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + /* Look up the address from the handle */ + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyZSBuffer_exit; + goto RGXSetRenderContextPriority_exit; } - - /* Release now we have destroyed handles. */ + /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); -RGXDestroyZSBuffer_exit: + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSetRenderContextPriorityIN->i32Priority); - return 0; -} +RGXSetRenderContextPriority_exit: -static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); - return eError; + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; } static IMG_INT -PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, - IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXRenderContextStalledIN_UI8, + IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, - 0); + PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = + (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = + (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); - IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; - RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; - RGX_POPULATION *pssPopulationInt = NULL; + IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psRGXPopulateZSBufferOUT->eError = + psRGXRenderContextStalledOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssZSBufferKMInt, - hsZSBufferKM, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); - if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXPopulateZSBuffer_exit; + goto RGXRenderContextStalled_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psRGXPopulateZSBufferOUT->eError = - RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) - { - goto RGXPopulateZSBuffer_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXPopulateZSBufferOUT-> - hsPopulation, - (void *)pssPopulationInt, - PVRSRV_HANDLE_TYPE_RGX_POPULATION, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXPopulateZSBufferpssPopulationIntRelease); - if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXPopulateZSBuffer_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); + psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); -RGXPopulateZSBuffer_exit: +RGXRenderContextStalled_exit: /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (pssZSBufferKMInt) + if (psRenderContextInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); - if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) - { - if (pssPopulationInt) - { - RGXUnpopulateZSBufferKM(pssPopulationInt); - } - } - return 0; } +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); + static IMG_INT -PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, - IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTA3D2IN_UI8, + IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) - IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) - IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = + (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; + IMG_HANDLE hPRFenceUFOSyncPrimBlock = 
psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_CHAR *uiUpdateFenceName3DInt = NULL; + IMG_BYTE *ui8TACmdInt = NULL; + IMG_BYTE *ui83DPRCmdInt = NULL; + IMG_BYTE *ui83DCmdInt = NULL; + IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; + IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; + RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; + IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; + RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXKickTA3D2_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTA3D2_exit; + } + } + } + + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + psClientTAFenceSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAFenceSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAFenceSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if 
(psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + psClientTAUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + psClient3DUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClient3DUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClient3DUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += 
psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + { + uiUpdateFenceName3DInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceName3DInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + { + ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, + psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) + { + ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if 
(psRGXKickTA3D2IN->ui323DCmdSize != 0) + { + ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psSyncPMRsInt, 0, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + /* Lock over handle lookup. 
 */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickTA3D2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+	if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickTA3D2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						       (void **)&psClientTAFenceSyncPrimBlockInt[i],
+						       hClientTAFenceSyncPrimBlockInt2[i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						       (void **)
+						       &psClientTAUpdateSyncPrimBlockInt[i],
+						       hClientTAUpdateSyncPrimBlockInt2[i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickTA3D2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						       (void **)
+						       &psClient3DUpdateSyncPrimBlockInt[i],
+						       hClient3DUpdateSyncPrimBlockInt2[i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickTA3D2_exit;
+			}
+		}
+	}
+
+	/* Look up the address from the handle */
+	psRGXKickTA3D2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPRFenceUFOSyncPrimBlockInt,
+				       hPRFenceUFOSyncPrimBlock,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+	if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickTA3D2_exit;
+	}
+
+	if (psRGXKickTA3D2IN->hKMHWRTDataSet)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psKMHWRTDataSetInt,
+					       hKMHWRTDataSet,
+					       PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hZSBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psZSBufferInt,
+					       hZSBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+			UnlockHandle(psConnection->psHandleBase);
+			goto RGXKickTA3D2_exit;
+		}
+	}
+
+	if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+	{
+		/* Look up the address from the handle */
+		psRGXKickTA3D2OUT->eError =
+		    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+					       (void **)&psMSAAScratchBufferInt,
+					       hMSAAScratchBuffer,
+					       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+		if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+		{
+
UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTA3D2OUT->eError = + PVRSRVRGXKickTA3DKM(psRenderContextInt, + psRGXKickTA3D2IN->ui32ClientTAFenceCount, + psClientTAFenceSyncPrimBlockInt, + ui32ClientTAFenceSyncOffsetInt, + ui32ClientTAFenceValueInt, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount, + psClientTAUpdateSyncPrimBlockInt, + ui32ClientTAUpdateSyncOffsetInt, + ui32ClientTAUpdateValueInt, + psRGXKickTA3D2IN->ui32Client3DUpdateCount, + psClient3DUpdateSyncPrimBlockInt, + ui32Client3DUpdateSyncOffsetInt, + ui32Client3DUpdateValueInt, + psPRFenceUFOSyncPrimBlockInt, + psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset, + psRGXKickTA3D2IN->ui32PRFenceValue, + psRGXKickTA3D2IN->hCheckFence, + psRGXKickTA3D2IN->hUpdateTimeline, + &psRGXKickTA3D2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTA3D2IN->hCheckFence3D, + psRGXKickTA3D2IN->hUpdateTimeline3D, + &psRGXKickTA3D2OUT->hUpdateFence3D, + uiUpdateFenceName3DInt, + psRGXKickTA3D2IN->ui32TACmdSize, + ui8TACmdInt, + psRGXKickTA3D2IN->ui323DPRCmdSize, + ui83DPRCmdInt, + psRGXKickTA3D2IN->ui323DCmdSize, + ui83DCmdInt, + psRGXKickTA3D2IN->ui32ExtJobRef, + psRGXKickTA3D2IN->bbKickTA, + psRGXKickTA3D2IN->bbKickPR, + psRGXKickTA3D2IN->bbKick3D, + psRGXKickTA3D2IN->bbAbort, + psRGXKickTA3D2IN->ui32PDumpFlags, + psKMHWRTDataSetInt, + psZSBufferInt, + psMSAAScratchBufferInt, + psRGXKickTA3D2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXKickTA3D2IN->ui32RenderTargetSize, + psRGXKickTA3D2IN->ui32NumberOfDrawCalls, + psRGXKickTA3D2IN->ui32NumberOfIndices, + psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); - /* Lock over handle destruction. */ +RGXKickTA3D2_exit: + + /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); - psRGXUnpopulateZSBufferOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, - PVRSRV_HANDLE_TYPE_RGX_POPULATION); - if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && - (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + /* Unreference the previously looked up handle */ + if (psRenderContextInt) { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto RGXUnpopulateZSBuffer_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } - /* Release now we have destroyed handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - -RGXUnpopulateZSBuffer_exit: - - return 0; -} + if (hClientTAFenceSyncPrimBlockInt2) + { + IMG_UINT32 i; -static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); - return eError; -} + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { -static IMG_INT -PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateFreeListIN_UI8, - IMG_UINT8 * psRGXCreateFreeListOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = - (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); + /* Unreference the previously looked up handle */ + if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientTAFenceSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } - IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; - IMG_HANDLE hMemCtxPrivDataInt = NULL; - IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; - RGX_FREELIST *pssGlobalFreeListInt = NULL; - IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; - PMR *pssFreeListPMRInt = NULL; - RGX_FREELIST *psCleanupCookieInt = NULL; + if (hClientTAUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&hMemCtxPrivDataInt, - hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + /* Unreference the previously looked up handle */ + if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientTAUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } } - if (psRGXCreateFreeListIN->hsGlobalFreeList) + if (hClient3DUpdateSyncPrimBlockInt2) { - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssGlobalFreeListInt, - hsGlobalFreeList, - PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + + /* Unreference the previously looked up handle */ + if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClient3DUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } } } - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssFreeListPMRInt, - hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + 
/* Unreference the previously looked up handle */ + if (psPRFenceUFOSyncPrimBlockInt) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - psRGXCreateFreeListOUT->eError = - RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), - hMemCtxPrivDataInt, - psRGXCreateFreeListIN->ui32MaxFLPages, - psRGXCreateFreeListIN->ui32InitFLPages, - psRGXCreateFreeListIN->ui32GrowFLPages, - psRGXCreateFreeListIN->ui32GrowParamThreshold, - pssGlobalFreeListInt, - psRGXCreateFreeListIN->bbFreeListCheck, - psRGXCreateFreeListIN->spsFreeListDevVAddr, - pssFreeListPMRInt, - psRGXCreateFreeListIN->uiPMROffset, &psCleanupCookieInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + if (psRGXKickTA3D2IN->hKMHWRTDataSet) { - goto RGXCreateFreeList_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateFreeListOUT-> - hCleanupCookie, - (void *)psCleanupCookieInt, - PVRSRV_HANDLE_TYPE_RGX_FREELIST, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateFreeListpsCleanupCookieIntRelease); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + /* Unreference the previously looked up handle */ + if (psKMHWRTDataSetInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + } } - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -RGXCreateFreeList_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (hMemCtxPrivDataInt) + if (psRGXKickTA3D2IN->hZSBuffer) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + + /* Unreference the previously looked up handle */ + if (psZSBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } } - if (psRGXCreateFreeListIN->hsGlobalFreeList) + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) { /* Unreference the previously looked up handle */ - if (pssGlobalFreeListInt) + if (psMSAAScratchBufferInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsGlobalFreeList, - PVRSRV_HANDLE_TYPE_RGX_FREELIST); + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); } } - /* Unreference the previously looked up handle */ - if (pssFreeListPMRInt) + if (hSyncPMRsInt2) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); + IMG_UINT32 i; - if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) - { - if (psCleanupCookieInt) + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) { - RGXDestroyFreeList(psCleanupCookieInt); + + /* Unreference the previously looked up handle */ + if (psSyncPMRsInt && psSyncPMRsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } } } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXKickTA3D2OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static IMG_INT -PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyFreeListIN_UI8, - IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, - 0); +static IMG_INT +PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, + psRGXSetRenderContextPropertyIN->ui32Property, + psRGXSetRenderContextPropertyIN->ui64Input, + &psRGXSetRenderContextPropertyOUT->ui64Output); + +RGXSetRenderContextProperty_exit: - /* Lock over handle destruction. */ + /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); - psRGXDestroyFreeListOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, - PVRSRV_HANDLE_TYPE_RGX_FREELIST); - if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && - (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) + /* Unreference the previously looked up handle */ + if (psRenderContextInt) { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyFreeList_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } - - /* Release now we have destroyed handles. */ + /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); -RGXDestroyFreeList_exit: - return 0; } -static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); return eError; } -static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateRenderContextIN_UI8, - IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = - (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, + 0); + 
PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); - IMG_BYTE *ui8FrameworkCmdInt = NULL; - IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; - IMG_HANDLE hPrivDataInt = NULL; - IMG_BYTE *ui8StaticRenderContextStateInt = NULL; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + IMG_DEV_VIRTADDR *sVHeapTableDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sPMMlistDevVAddrInt = NULL; + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sMacrotileArrayDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sRgnHeaderDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sRTCDevVAddrInt = NULL; + RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; + IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * - sizeof(IMG_BYTE)) + 0; - - if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) - { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXCreateRenderContext_exit; - } + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; - if (unlikely - (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > - RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) - { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXCreateRenderContext_exit; - } + psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet; if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXCreateRenderContext_exit; + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXCreateHWRTDataSet_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -1081,462 +1635,606 @@ PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXCreateRenderContext_exit; + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet_exit; } } } - if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) { - ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); + sVHeapTableDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); } /* Copy the data over */ - if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { if (OSCopyFromUser - (NULL, ui8FrameworkCmdInt, - (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, - psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != - PVRSRV_OK) + (NULL, sVHeapTableDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psVHeapTableDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + { + sPMMlistDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + } + + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + { + if (OSCopyFromUser + (NULL, sPMMlistDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psPMMlistDevVAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + { + psapsFreeListsInt = + (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psapsFreeListsInt, 0, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + { + sTailPtrsDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); + } + + /* Copy the data over */ + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + { + if (OSCopyFromUser + (NULL, sTailPtrsDevVAddrInt, + (const void __user 
*)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + { + sMacrotileArrayDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + } + + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + { + if (OSCopyFromUser + (NULL, sMacrotileArrayDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psMacrotileArrayDevVAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + { + sRgnHeaderDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + } + + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + { + if (OSCopyFromUser + (NULL, sRgnHeaderDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psRgnHeaderDevVAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXCreateRenderContext_exit; + goto RGXCreateHWRTDataSet_exit; } } - if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) + { - ui8StaticRenderContextStateInt = - (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); + sRTCDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); } /* Copy the data over */ - if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { if (OSCopyFromUser - (NULL, ui8StaticRenderContextStateInt, - (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, - psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * - sizeof(IMG_BYTE)) != PVRSRV_OK) + (NULL, sRTCDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psRTCDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXCreateRenderContext_exit; + goto RGXCreateHWRTDataSet_exit; } } + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + { + psKmHwRTDataSetInt = + (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psKmHwRTDataSetInt, 0, + RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); + hKmHwRTDataSetInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); + } /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXCreateRenderContextOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&hPrivDataInt, - hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); - if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateRenderContext_exit; + IMG_UINT32 i; + + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) + { + /* Look up the address from the handle */ + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psRGXCreateRenderContextOUT->eError = - PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), - psRGXCreateRenderContextIN->i32Priority, - psRGXCreateRenderContextIN->sVDMCallStackAddr, - psRGXCreateRenderContextIN->ui32ui32CallStackDepth, - psRGXCreateRenderContextIN->ui32FrameworkCmdSize, - ui8FrameworkCmdInt, - hPrivDataInt, - psRGXCreateRenderContextIN-> - ui32StaticRenderContextStateSize, - ui8StaticRenderContextStateInt, - psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, - psRGXCreateRenderContextIN->ui32ContextFlags, - psRGXCreateRenderContextIN->ui64RobustnessAddress, - psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, - psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, - &psRenderContextInt); + psRGXCreateHWRTDataSetOUT->eError = + RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), + sVHeapTableDevVAddrInt, + sPMMlistDevVAddrInt, + psapsFreeListsInt, + psRGXCreateHWRTDataSetIN->ui32PPPScreen, + psRGXCreateHWRTDataSetIN->ui64MultiSampleCtl, + psRGXCreateHWRTDataSetIN->ui64FlippedMultiSampleCtl, + psRGXCreateHWRTDataSetIN->ui32TPCStride, + sTailPtrsDevVAddrInt, + psRGXCreateHWRTDataSetIN->ui32TPCSize, + psRGXCreateHWRTDataSetIN->ui32TEScreen, + psRGXCreateHWRTDataSetIN->ui32TEAA, + psRGXCreateHWRTDataSetIN->ui32TEMTILE1, + psRGXCreateHWRTDataSetIN->ui32TEMTILE2, + psRGXCreateHWRTDataSetIN->ui32MTileStride, + psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, + psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, + psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, + sMacrotileArrayDevVAddrInt, + sRgnHeaderDevVAddrInt, + sRTCDevVAddrInt, + psRGXCreateHWRTDataSetIN->ui32RgnHeaderSize, + psRGXCreateHWRTDataSetIN->ui32ISPMtileSize, + psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt); /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) { - goto RGXCreateRenderContext_exit; + goto RGXCreateHWRTDataSet_exit; } /* Lock over handle creation. 
*/ LockHandle(psConnection->psHandleBase); - - psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateRenderContextOUT-> - hRenderContext, - (void *)psRenderContextInt, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateRenderContextpsRenderContextIntRelease); - if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + if (hKmHwRTDataSetInt2) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateRenderContext_exit; - } + IMG_UINT32 i; + + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) + { + + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &hKmHwRTDataSetInt2[i], + (void *)psKmHwRTDataSetInt[i], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + IMG_UINT32 j; + /* Ensure the remaining handles are set to NULL. hKmHwRTDataSetInt2[i] was + * zeroed when calling PVRSRVAllocHandleUnlocked, so we start at the next + * element. If it was the last iteration, the loop doesn't run. + */ + for (j = i + 1; j < RGXMKIF_NUM_RTDATAS; j++) + { + hKmHwRTDataSetInt2[j] = NULL; + } + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } + } /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); -RGXCreateRenderContext_exit: + /* If dest ptr is non-null and we have data to copy */ + if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet, + hKmHwRTDataSetInt2, + (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + +RGXCreateHWRTDataSet_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); - /* Unreference the previously looked up handle */ - if (hPrivDataInt) + if (hapsFreeListsInt2) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + IMG_UINT32 i; + + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) + { + + /* Unreference the previously looked up handle */ + if (psapsFreeListsInt && psapsFreeListsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); - if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) + if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) { - if (psRenderContextInt) + if (hKmHwRTDataSetInt2) { - PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); - } - } + PVRSRV_ERROR eError; - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ + /* Lock over handle creation cleanup. 
*/ + LockHandle(psConnection->psHandleBase); -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); + { + IMG_UINT32 idx; + for (idx = 0; idx < RGXMKIF_NUM_RTDATAS; idx++) + { + if (hKmHwRTDataSetInt2[idx]) + { - return 0; -} + eError = + PVRSRVDestroyHandleUnlocked(psConnection-> + psHandleBase, + hKmHwRTDataSetInt2 + [idx], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); -static IMG_INT -PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, - IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); + } + else if (psKmHwRTDataSetInt[idx]) + { + /* Free/Destroy/Release the resource */ + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[idx]); + } + } + } - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + else if (psKmHwRTDataSetInt) + { + IMG_UINT32 i; + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) + { + if (psKmHwRTDataSetInt[i]) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[i]); + } + } + } - psRGXDestroyRenderContextOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyRenderContextIN-> - hCleanupCookie, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); - if (unlikely - ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) - && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyRenderContext_exit; } - /* Release now we have destroyed handles. 
*/ - UnlockHandle(psConnection->psHandleBase); + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ -RGXDestroyRenderContext_exit: + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); return 0; } +static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); + return eError; +} + static IMG_INT -PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, - IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateFreeListIN_UI8, + IMG_UINT8 * psRGXCreateFreeListOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = - (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = - (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = + (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); - IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; + IMG_HANDLE hMemCtxPrivDataInt = NULL; + IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; + RGX_FREELIST *pssGlobalFreeListInt = NULL; + IMG_HANDLE hFreeListReservation = psRGXCreateFreeListIN->hFreeListReservation; + DEVMEMINT_RESERVATION *psFreeListReservationInt = NULL; + RGX_FREELIST *psCleanupCookieInt = NULL; /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psRGXSetRenderContextPriorityOUT->eError = + psRGXCreateFreeListOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) + (void **)&hMemCtxPrivDataInt, + hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXSetRenderContextPriority_exit; + goto RGXCreateFreeList_exit; } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - psRGXSetRenderContextPriorityOUT->eError = - PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), - psRenderContextInt, - psRGXSetRenderContextPriorityIN->i32Priority); - -RGXSetRenderContextPriority_exit: - - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssGlobalFreeListInt, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + } - /* Unreference the previously looked up handle */ - if (psRenderContextInt) + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psFreeListReservationInt, + hFreeListReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; } - /* Release now we have cleaned up look up handles. */ + /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - return 0; -} - -static IMG_INT -PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXRenderContextStalledIN_UI8, - IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = - (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) - IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = - (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) - IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); - - IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + psRGXCreateFreeListOUT->eError = + RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), + hMemCtxPrivDataInt, + psRGXCreateFreeListIN->ui32MaxFLPages, + psRGXCreateFreeListIN->ui32InitFLPages, + psRGXCreateFreeListIN->ui32GrowFLPages, + psRGXCreateFreeListIN->ui32GrowParamThreshold, + pssGlobalFreeListInt, + psRGXCreateFreeListIN->bbFreeListCheck, + psFreeListReservationInt, &psCleanupCookieInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + goto RGXCreateFreeList_exit; + } - /* Lock over handle lookup. */ + /* Lock over handle creation. */ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXRenderContextStalledOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) + psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateFreeListOUT-> + hCleanupCookie, + (void *)psCleanupCookieInt, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateFreeListpsCleanupCookieIntRelease); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXRenderContextStalled_exit; + goto RGXCreateFreeList_exit; } - /* Release now we have looked up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); -RGXRenderContextStalled_exit: +RGXCreateFreeList_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psRenderContextInt) + if (hMemCtxPrivDataInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, - "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, - "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXKickTA3D2IN_UI8, - IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = - (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = - (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); - - IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; - SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; - IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; - IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; - IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; - SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; - IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; - IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; - IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; - SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; - IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; - IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; - IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; - IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; - SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; - IMG_CHAR *uiUpdateFenceNameInt = NULL; - IMG_CHAR *uiUpdateFenceName3DInt = NULL; - IMG_BYTE *ui8TACmdInt = NULL; - IMG_BYTE *ui83DPRCmdInt = NULL; - IMG_BYTE *ui83DCmdInt = NULL; - IMG_HANDLE hKMHWRTDataSet = 
psRGXKickTA3D2IN->hKMHWRTDataSet; - RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; - IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; - RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; - IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; - RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; - IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; - PMR **psSyncPMRsInt = NULL; - IMG_HANDLE *hSyncPMRsInt2 = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + - ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; - if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) + if (psRGXCreateFreeListIN->hsGlobalFreeList) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + + /* Unreference the previously looked up handle */ + if (pssGlobalFreeListInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } } - if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) + /* Unreference the previously looked up handle */ + if (psFreeListReservationInt) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hFreeListReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) + if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + if (psCleanupCookieInt) + { + RGXDestroyFreeList(psCleanupCookieInt); + } } - if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; - } + return 0; +} - if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; - } +static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); + return eError; +} - if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) +static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateRenderContextIN_UI8, + IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); + + IMG_BYTE *ui8FrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *ui8StaticRenderContextStateInt = NULL; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; } - if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + if (unlikely + (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > + RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXKickTA3D2_exit; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXCreateRenderContext_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if 
!defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -1544,764 +2242,622 @@ PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXKickTA3D2_exit; - } - } - } - - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) - { - psClientTAFenceSyncPrimBlockInt = - (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)); - ui32NextOffset += - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); - hClientTAFenceSyncPrimBlockInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) - { - if (OSCopyFromUser - (NULL, hClientTAFenceSyncPrimBlockInt2, - (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) - { - ui32ClientTAFenceSyncOffsetInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAFenceSyncOffsetInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) - { - ui32ClientTAFenceValueInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAFenceValueInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) - { - psClientTAUpdateSyncPrimBlockInt = - (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0, - 
psRGXKickTA3D2IN->ui32ClientTAUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)); - ui32NextOffset += - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); - hClientTAUpdateSyncPrimBlockInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) - { - if (OSCopyFromUser - (NULL, hClientTAUpdateSyncPrimBlockInt2, - (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) - { - ui32ClientTAUpdateSyncOffsetInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAUpdateSyncOffsetInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) - { - ui32ClientTAUpdateValueInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAUpdateValueInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) - { - psClient3DUpdateSyncPrimBlockInt = - (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)); - ui32NextOffset += - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); - hClient3DUpdateSyncPrimBlockInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) - { - if (OSCopyFromUser - (NULL, hClient3DUpdateSyncPrimBlockInt2, - (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateRenderContext_exit; + } } } - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) { - ui32Client3DUpdateSyncOffsetInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + 
ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); } /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) { if (OSCopyFromUser - (NULL, ui32Client3DUpdateSyncOffsetInt, - (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (NULL, ui8FrameworkCmdInt, + (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + goto RGXCreateRenderContext_exit; } } - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) { - ui32Client3DUpdateValueInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + ui8StaticRenderContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); } /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) { if (OSCopyFromUser - (NULL, ui32Client3DUpdateValueInt, - (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (NULL, ui8StaticRenderContextStateInt, + (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, + psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) != PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + goto RGXCreateRenderContext_exit; } } - { - uiUpdateFenceNameInt = - (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); - } + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + /* Look up the address from the handle */ + psRGXCreateRenderContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, uiUpdateFenceNameInt, - (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, - PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } - ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - - 1] = '\0'; + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + psRGXCreateRenderContextOUT->eError = + PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), + psRGXCreateRenderContextIN->i32Priority, + psRGXCreateRenderContextIN->sVDMCallStackAddr, + psRGXCreateRenderContextIN->ui32ui32CallStackDepth, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize, + ui8FrameworkCmdInt, + hPrivDataInt, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize, + ui8StaticRenderContextStateInt, + psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, + psRGXCreateRenderContextIN->ui32ContextFlags, + psRGXCreateRenderContextIN->ui64RobustnessAddress, + psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, + psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, + &psRenderContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - uiUpdateFenceName3DInt = - (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + goto RGXCreateRenderContext_exit; } - /* Copy the data over */ - if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) - { - if (OSCopyFromUser - (NULL, uiUpdateFenceName3DInt, - (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, - PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - - 1] = '\0'; - } - if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateRenderContextOUT-> + hRenderContext, + (void *)psRenderContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateRenderContextpsRenderContextIntRelease); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) - { - if (OSCopyFromUser - (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, - psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) +RGXCreateRenderContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) { - ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) + if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) { - if (OSCopyFromUser - (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, - psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + if (psRenderContextInt) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; + PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); } } - if (psRGXKickTA3D2IN->ui323DCmdSize != 0) + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RGXCreateHWRTDataSet2psKmHwRTDataSetIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXCreateHWRTDataSet2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSet2IN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSet2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2 *psRGXCreateHWRTDataSet2IN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2 *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSet2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2 *psRGXCreateHWRTDataSet2OUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2 *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSet2OUT_UI8, 0); + + IMG_DEV_VIRTADDR *sVHeapTableDevVAddrInt = NULL; + IMG_HANDLE hPMMlistsReservation = psRGXCreateHWRTDataSet2IN->hPMMlistsReservation; + DEVMEMINT_RESERVATION *psPMMlistsReservationInt = NULL; + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sMacrotileArrayDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sRgnHeaderDevVAddrInt = NULL; + IMG_DEV_VIRTADDR *sRTCDevVAddrInt = NULL; + RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; + IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * 
sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; + + psRGXCreateHWRTDataSet2OUT->phKmHwRTDataSet = psRGXCreateHWRTDataSet2IN->phKmHwRTDataSet; + + if (ui64BufferSize > IMG_UINT32_MAX) { - ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXCreateHWRTDataSet2_exit; } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) { - if (OSCopyFromUser - (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, - psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSet2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSet2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - goto RGXKickTA3D2_exit; + if (!pArrayArgsBuffer) + { + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet2_exit; + } } } - if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { - ui32SyncPMRFlagsInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + sVHeapTableDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); } /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { if (OSCopyFromUser - (NULL, ui32SyncPMRFlagsInt, - (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, - psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (NULL, sVHeapTableDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSet2IN->psVHeapTableDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + goto RGXCreateHWRTDataSet2_exit; } } - if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { - psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psSyncPMRsInt, 0, - psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)); - ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); - hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + psapsFreeListsInt = + (RGX_FREELIST **) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psapsFreeListsInt, 0, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); } /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) { if (OSCopyFromUser - (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, - psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSet2IN->phapsFreeLists, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + goto RGXCreateHWRTDataSet2_exit; } } - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) - { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psClientTAFenceSyncPrimBlockInt[i], - hClientTAFenceSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, - IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } + sTailPtrsDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); } + /* Copy the data over */ + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + if (OSCopyFromUser + (NULL, sTailPtrsDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSet2IN->psTailPtrsDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **) - &psClientTAUpdateSyncPrimBlockInt[i], - hClientTAUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, - IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } - } - - { - IMG_UINT32 i; + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) - { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **) - &psClient3DUpdateSyncPrimBlockInt[i], - hClient3DUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, - IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } + goto 
RGXCreateHWRTDataSet2_exit; } } - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPRFenceUFOSyncPrimBlockInt, - hPRFenceUFOSyncPrimBlock, - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; + sMacrotileArrayDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); } - if (psRGXKickTA3D2IN->hKMHWRTDataSet) + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psKMHWRTDataSetInt, - hKMHWRTDataSet, - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + if (OSCopyFromUser + (NULL, sMacrotileArrayDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSet2IN->psMacrotileArrayDevVAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - if (psRGXKickTA3D2IN->hZSBuffer) - { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psZSBufferInt, - hZSBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; + goto RGXCreateHWRTDataSet2_exit; } } - if (psRGXKickTA3D2IN->hMSAAScratchBuffer) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psMSAAScratchBufferInt, - hMSAAScratchBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } + sRgnHeaderDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); } + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + if (OSCopyFromUser + (NULL, sRgnHeaderDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSet2IN->psRgnHeaderDevVAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psSyncPMRsInt[i], - hSyncPMRsInt2[i], - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } - } - /* Release now we have looked up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - psRGXKickTA3D2OUT->eError = - PVRSRVRGXKickTA3DKM(psRenderContextInt, - psRGXKickTA3D2IN->ui32ClientTAFenceCount, - psClientTAFenceSyncPrimBlockInt, - ui32ClientTAFenceSyncOffsetInt, - ui32ClientTAFenceValueInt, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount, - psClientTAUpdateSyncPrimBlockInt, - ui32ClientTAUpdateSyncOffsetInt, - ui32ClientTAUpdateValueInt, - psRGXKickTA3D2IN->ui32Client3DUpdateCount, - psClient3DUpdateSyncPrimBlockInt, - ui32Client3DUpdateSyncOffsetInt, - ui32Client3DUpdateValueInt, - psPRFenceUFOSyncPrimBlockInt, - psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset, - psRGXKickTA3D2IN->ui32PRFenceValue, - psRGXKickTA3D2IN->hCheckFence, - psRGXKickTA3D2IN->hUpdateTimeline, - &psRGXKickTA3D2OUT->hUpdateFence, - uiUpdateFenceNameInt, - psRGXKickTA3D2IN->hCheckFence3D, - psRGXKickTA3D2IN->hUpdateTimeline3D, - &psRGXKickTA3D2OUT->hUpdateFence3D, - uiUpdateFenceName3DInt, - psRGXKickTA3D2IN->ui32TACmdSize, - ui8TACmdInt, - psRGXKickTA3D2IN->ui323DPRCmdSize, - ui83DPRCmdInt, - psRGXKickTA3D2IN->ui323DCmdSize, - ui83DCmdInt, - psRGXKickTA3D2IN->ui32ExtJobRef, - psRGXKickTA3D2IN->bbKickTA, - psRGXKickTA3D2IN->bbKickPR, - psRGXKickTA3D2IN->bbKick3D, - psRGXKickTA3D2IN->bbAbort, - psRGXKickTA3D2IN->ui32PDumpFlags, - psKMHWRTDataSetInt, - psZSBufferInt, - psMSAAScratchBufferInt, - psRGXKickTA3D2IN->ui32SyncPMRCount, - ui32SyncPMRFlagsInt, - psSyncPMRsInt, - psRGXKickTA3D2IN->ui32RenderTargetSize, - psRGXKickTA3D2IN->ui32NumberOfDrawCalls, - psRGXKickTA3D2IN->ui32NumberOfIndices, - psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -RGXKickTA3D2_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; + } + } - /* Unreference the previously looked up handle */ - if (psRenderContextInt) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + sRTCDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); } - if (hClientTAFenceSyncPrimBlockInt2) + /* Copy the data over */ + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + if (OSCopyFromUser + (NULL, sRTCDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSet2IN->psRTCDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - /* Unreference the previously looked up handle */ - if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i]) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hClientTAFenceSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); - } + goto RGXCreateHWRTDataSet2_exit; } } - - if (hClientTAUpdateSyncPrimBlockInt2) + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) { - IMG_UINT32 i; + psKmHwRTDataSetInt = + (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psKmHwRTDataSetInt, 0, + RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); + hKmHwRTDataSetInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); + } - for (i = 0; 
i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) - { + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - /* Unreference the previously looked up handle */ - if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i]) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hClientTAUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); - } - } + /* Look up the address from the handle */ + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMMlistsReservationInt, + hPMMlistsReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; } - if (hClient3DUpdateSyncPrimBlockInt2) { IMG_UINT32 i; - for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) { - - /* Unreference the previously looked up handle */ - if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i]) + /* Look up the address from the handle */ + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hClient3DUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; } } } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Unreference the previously looked up handle */ - if (psPRFenceUFOSyncPrimBlockInt) + psRGXCreateHWRTDataSet2OUT->eError = + RGXCreateHWRTDataSet2(psConnection, OSGetDevNode(psConnection), + sVHeapTableDevVAddrInt, + psPMMlistsReservationInt, + psapsFreeListsInt, + psRGXCreateHWRTDataSet2IN->ui32PPPScreen, + psRGXCreateHWRTDataSet2IN->ui64MultiSampleCtl, + psRGXCreateHWRTDataSet2IN->ui64FlippedMultiSampleCtl, + psRGXCreateHWRTDataSet2IN->ui32TPCStride, + sTailPtrsDevVAddrInt, + psRGXCreateHWRTDataSet2IN->ui32TPCSize, + psRGXCreateHWRTDataSet2IN->ui32TEScreen, + psRGXCreateHWRTDataSet2IN->ui32TEAA, + psRGXCreateHWRTDataSet2IN->ui32TEMTILE1, + psRGXCreateHWRTDataSet2IN->ui32TEMTILE2, + psRGXCreateHWRTDataSet2IN->ui32MTileStride, + psRGXCreateHWRTDataSet2IN->ui32ISPMergeLowerX, + psRGXCreateHWRTDataSet2IN->ui32ISPMergeLowerY, + psRGXCreateHWRTDataSet2IN->ui32ISPMergeUpperX, + psRGXCreateHWRTDataSet2IN->ui32ISPMergeUpperY, + psRGXCreateHWRTDataSet2IN->ui32ISPMergeScaleX, + psRGXCreateHWRTDataSet2IN->ui32ISPMergeScaleY, + sMacrotileArrayDevVAddrInt, + sRgnHeaderDevVAddrInt, + sRTCDevVAddrInt, + psRGXCreateHWRTDataSet2IN->ui32RgnHeaderSize, + psRGXCreateHWRTDataSet2IN->ui32ISPMtileSize, + psRGXCreateHWRTDataSet2IN->ui16MaxRTs, psKmHwRTDataSetInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPRFenceUFOSyncPrimBlock, - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + goto RGXCreateHWRTDataSet2_exit; } - if (psRGXKickTA3D2IN->hKMHWRTDataSet) + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + if (hKmHwRTDataSetInt2) { + IMG_UINT32 i; - /* Unreference the previously looked up handle */ - if (psKMHWRTDataSetInt) + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hKMHWRTDataSet, - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &hKmHwRTDataSetInt2[i], + (void *)psKmHwRTDataSetInt[i], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSet2psKmHwRTDataSetIntRelease); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) + { + IMG_UINT32 j; + /* Ensure the remaining handles are set to NULL. hKmHwRTDataSetInt2[i] was + * zeroed when calling PVRSRVAllocHandleUnlocked, so we start at the next + * element. If it was the last iteration, the loop doesn't run. + */ + for (j = i + 1; j < RGXMKIF_NUM_RTDATAS; j++) + { + hKmHwRTDataSetInt2[j] = NULL; + } + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; + } + } } + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - if (psRGXKickTA3D2IN->hZSBuffer) + /* If dest ptr is non-null and we have data to copy */ + if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) { - - /* Unreference the previously looked up handle */ - if (psZSBufferInt) + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psRGXCreateHWRTDataSet2OUT->phKmHwRTDataSet, + hKmHwRTDataSetInt2, + (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hZSBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet2_exit; } } - if (psRGXKickTA3D2IN->hMSAAScratchBuffer) - { +RGXCreateHWRTDataSet2_exit: - /* Unreference the previously looked up handle */ - if (psMSAAScratchBufferInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMSAAScratchBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); - } + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMMlistsReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMMlistsReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } - if (hSyncPMRsInt2) + if (hapsFreeListsInt2) { IMG_UINT32 i; - for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) { /* Unreference the previously looked up handle */ - if (psSyncPMRsInt && psSyncPMRsInt[i]) + if (psapsFreeListsInt && psapsFreeListsInt[i]) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hSyncPMRsInt2[i], - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); } } } /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXKickTA3D2OUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ + if (psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK) + { + if (hKmHwRTDataSetInt2) + { + PVRSRV_ERROR eError; -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); - return 0; -} + { + IMG_UINT32 idx; + for (idx = 0; idx < RGXMKIF_NUM_RTDATAS; idx++) + { + if (hKmHwRTDataSetInt2[idx]) + { -static IMG_INT -PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8, - IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN = - (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT = - (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + eError = + PVRSRVDestroyHandleUnlocked(psConnection-> + psHandleBase, + hKmHwRTDataSetInt2 + [idx], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); - IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + } + else if (psKmHwRTDataSetInt[idx]) + { + /* Free/Destroy/Release the resource */ + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[idx]); + } + } + } - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXSetRenderContextPropertyOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXSetRenderContextProperty_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); + } - psRGXSetRenderContextPropertyOUT->eError = - PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, - psRGXSetRenderContextPropertyIN->ui32Property, - psRGXSetRenderContextPropertyIN->ui64Input, - &psRGXSetRenderContextPropertyOUT->ui64Output); + else if (psKmHwRTDataSetInt) + { + IMG_UINT32 i; + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) + { + if (psKmHwRTDataSetInt[i]) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[i]); + } + } + } -RGXSetRenderContextProperty_exit: + } - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXCreateHWRTDataSet2OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ - /* Unreference the previously looked up handle */ - if (psRenderContextInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); return 0; } @@ -2319,49 +2875,86 @@ void DeinitRGXTA3DBridge(void); PVRSRV_ERROR InitRGXTA3DBridge(void) { - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, - PVRSRVBridgeRGXCreateHWRTDataSet, NULL); - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, - PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); + PVRSRVBridgeRGXDestroyHWRTDataSet, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, - PVRSRVBridgeRGXCreateZSBuffer, NULL); + PVRSRVBridgeRGXCreateZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, - PVRSRVBridgeRGXDestroyZSBuffer, NULL); + PVRSRVBridgeRGXDestroyZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, - PVRSRVBridgeRGXPopulateZSBuffer, NULL); + PVRSRVBridgeRGXPopulateZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, - PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, - PVRSRVBridgeRGXCreateFreeList, NULL); + PVRSRVBridgeRGXUnpopulateZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, - PVRSRVBridgeRGXDestroyFreeList, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, - PVRSRVBridgeRGXCreateRenderContext, NULL); + PVRSRVBridgeRGXDestroyFreeList, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, - PVRSRVBridgeRGXDestroyRenderContext, NULL); + PVRSRVBridgeRGXDestroyRenderContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE, + PVRSRVBridgeRGXSendZSStoreDisable, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE), + sizeof(PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, - PVRSRVBridgeRGXSetRenderContextPriority, NULL); + PVRSRVBridgeRGXSetRenderContextPriority, NULL, + 
sizeof(PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, - PVRSRVBridgeRGXRenderContextStalled, NULL); + PVRSRVBridgeRGXRenderContextStalled, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED), + sizeof(PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, - PVRSRVBridgeRGXKickTA3D2, NULL); + PVRSRVBridgeRGXKickTA3D2, NULL, sizeof(PVRSRV_BRIDGE_IN_RGXKICKTA3D2), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKTA3D2)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, - PVRSRVBridgeRGXSetRenderContextProperty, NULL); + PVRSRVBridgeRGXSetRenderContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, + PVRSRVBridgeRGXCreateHWRTDataSet, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, + PVRSRVBridgeRGXCreateFreeList, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEFREELIST), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, + PVRSRVBridgeRGXCreateRenderContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET2, + PVRSRVBridgeRGXCreateHWRTDataSet2, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2)); return PVRSRV_OK; } @@ -2372,8 +2965,6 @@ PVRSRV_ERROR InitRGXTA3DBridge(void) void DeinitRGXTA3DBridge(void) { - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); @@ -2384,16 +2975,13 @@ void DeinitRGXTA3DBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, - PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); @@ -2405,4 +2993,13 @@ void DeinitRGXTA3DBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET2); + } diff --git 
a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c index 99e6239cf7a9..4af3cfc35cba 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c @@ -136,15 +136,19 @@ PVRSRV_ERROR InitRGXTIMERQUERYBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY, - PVRSRVBridgeRGXBeginTimerQuery, NULL); + PVRSRVBridgeRGXBeginTimerQuery, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY), + sizeof(PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY, - PVRSRVBridgeRGXEndTimerQuery, NULL); + PVRSRVBridgeRGXEndTimerQuery, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer, - NULL); + NULL, sizeof(PVRSRV_BRIDGE_IN_RGXQUERYTIMER), + sizeof(PVRSRV_BRIDGE_OUT_RGXQUERYTIMER)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h index 9d8397fc90bf..f20fca015dc0 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h @@ -63,7 +63,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 #define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 #define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER3 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+8) /******************************************* RGXTDMCreateTransferContext @@ -78,6 +79,7 @@ typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG IMG_INT32 i32Priority; IMG_UINT32 ui32ContextFlags; IMG_UINT32 ui32FrameworkCmdSize; + IMG_UINT32 ui32MaxDeadlineMS; IMG_UINT32 ui32PackedCCBSizeU88; } __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; @@ -186,7 +188,6 @@ typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG { IMG_HANDLE hCLIPMRMem; - IMG_HANDLE hUSCPMRMem; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; @@ -225,4 +226,39 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; +/******************************************* + RGXTDMSubmitTransfer3 + *******************************************/ + +/* Bridge in structure for RGXTDMSubmitTransfer3 */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3_TAG +{ + IMG_UINT64 ui64DeadlineInus; + IMG_HANDLE hTransferContext; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_UINT32 *pui32UpdateSyncOffset; + IMG_UINT32 *pui32UpdateValue; + IMG_UINT8 *pui8FWCommand; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phSyncPMRs; + 
IMG_HANDLE *phUpdateUFOSyncPrimBlock; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_FENCE hExportFenceToSignal; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + IMG_UINT32 ui32ClientUpdateCount; + IMG_UINT32 ui32CommandSize; + IMG_UINT32 ui32ExternalJobReference; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32SyncPMRCount; +} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3; + +/* Bridge out structure for RGXTDMSubmitTransfer3 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3; + #endif /* COMMON_RGXTQ2_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c index 7dcd29a5ba54..1129b824d850 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c @@ -97,9 +97,7 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -136,7 +134,6 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long)); @@ -153,7 +150,6 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -214,7 +210,9 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, ui32PackedCCBSizeU88, psRGXTDMCreateTransferContextIN->ui32ContextFlags, psRGXTDMCreateTransferContextIN-> - ui64RobustnessAddress, &psTransferContextInt); + ui64RobustnessAddress, + psRGXTDMCreateTransferContextIN->ui32MaxDeadlineMS, + &psTransferContextInt); /* Exit early if bridged call fails */ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) { @@ -269,11 +267,7 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -512,9 +506,7 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -572,7 +564,6 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long)); @@ -588,7 +579,6 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -891,11 +881,7 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -908,13 +894,6 @@ static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) return eError; } -static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); - return eError; -} - static IMG_INT PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, @@ -929,7 +908,6 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); PMR *psCLIPMRMemInt = NULL; - PMR *psUSCPMRMemInt = NULL; { PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); @@ -949,7 +927,7 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, psRGXTDMGetSharedMemoryOUT->eError = PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), - &psCLIPMRMemInt, &psUSCPMRMemInt); + &psCLIPMRMemInt); /* Exit early if bridged call fails */ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) { @@ -973,20 +951,6 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, goto RGXTDMGetSharedMemory_exit; } - psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXTDMGetSharedMemoryOUT-> - hUSCPMRMem, - (void *)psUSCPMRMemInt, - PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); - if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXTDMGetSharedMemory_exit; - } - /* Release now we have created handles. 
*/ UnlockHandle(psConnection->psHandleBase); @@ -998,10 +962,6 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, { PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); } - if (psUSCPMRMemInt) - { - PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); - } } return 0; @@ -1134,6 +1094,424 @@ PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXTDMSubmitTransfer3(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSubmitTransfer3IN_UI8, + IMG_UINT8 * psRGXTDMSubmitTransfer3OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3 *psRGXTDMSubmitTransfer3IN = + (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer3IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3 *psRGXTDMSubmitTransfer3OUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer3OUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer3IN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT8 *ui8FWCommandInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer3_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer3IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer3_exit; + } + + if (unlikely(psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto 
RGXTDMSubmitTransfer3_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMSubmitTransfer3_exit; + } + } + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXTDMSubmitTransfer3_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer3IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer3IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMSubmitTransfer3_exit; + } + } + } + + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount != 0) + { + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psUpdateUFOSyncPrimBlockInt, 0, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXTDMSubmitTransfer3IN->phUpdateUFOSyncPrimBlock, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount != 0) + { + ui32UpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateSyncOffsetInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui32UpdateSyncOffset, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if 
(psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui32UpdateValue, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXTDMSubmitTransfer3IN->ui32CommandSize != 0) + { + ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) + { + if (OSCopyFromUser + (NULL, ui8FWCommandInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui8FWCommand, + psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui32SyncPMRFlags, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psSyncPMRsInt, 0, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *)); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXTDMSubmitTransfer3IN->phSyncPMRs, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer3_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psUpdateUFOSyncPrimBlockInt[i], + hUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer3_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer3_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVRGXTDMSubmitTransfer3KM(psTransferContextInt, + psRGXTDMSubmitTransfer3IN->ui32PDumpFlags, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXTDMSubmitTransfer3IN->hCheckFenceFD, + psRGXTDMSubmitTransfer3IN->hUpdateTimeline, + &psRGXTDMSubmitTransfer3OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXTDMSubmitTransfer3IN->hExportFenceToSignal, + psRGXTDMSubmitTransfer3IN->ui32CommandSize, + ui8FWCommandInt, + psRGXTDMSubmitTransfer3IN->ui32ExternalJobReference, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXTDMSubmitTransfer3IN->ui32Characteristic1, + psRGXTDMSubmitTransfer3IN->ui32Characteristic2, + psRGXTDMSubmitTransfer3IN->ui64DeadlineInus); + +RGXTDMSubmitTransfer3_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psSyncPMRsInt && psSyncPMRsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXTDMSubmitTransfer3OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + /* *************************************************************************** * Server bridge dispatch related glue */ @@ -1149,32 +1527,52 @@ PVRSRV_ERROR InitRGXTQ2Bridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, - PVRSRVBridgeRGXTDMCreateTransferContext, NULL); + PVRSRVBridgeRGXTDMCreateTransferContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, - PVRSRVBridgeRGXTDMDestroyTransferContext, NULL); + PVRSRVBridgeRGXTDMDestroyTransferContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, - PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL); + PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, - PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL); + PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2, - PVRSRVBridgeRGXTDMSubmitTransfer2, NULL); + PVRSRVBridgeRGXTDMSubmitTransfer2, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, - PVRSRVBridgeRGXTDMGetSharedMemory, NULL); + PVRSRVBridgeRGXTDMGetSharedMemory, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, - PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL); + PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY, - PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL); + PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER3, + PVRSRVBridgeRGXTDMSubmitTransfer3, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3)); return PVRSRV_OK; } @@ -1207,4 +1605,6 @@ void DeinitRGXTQ2Bridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER3); + } diff --git 
a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h index 4f71182a39e7..31d90dc0f993 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h @@ -59,8 +59,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1 #define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2 #define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4) +#define PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER3 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+7) /******************************************* RGXCreateTransferContext @@ -81,9 +84,7 @@ typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG /* Bridge out structure for RGXCreateTransferContext */ typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG { - IMG_HANDLE hCLIPMRMem; IMG_HANDLE hTransferContext; - IMG_HANDLE hUSCPMRMem; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT; @@ -154,6 +155,39 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG PVRSRV_FENCE h3DUpdateFence; } __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2; +/******************************************* + RGXTQGetSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTQGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY; + +/* Bridge out structure for RGXTQGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY_TAG +{ + IMG_HANDLE hCLIPMRMem; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY; + +/******************************************* + RGXTQReleaseSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTQReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY_TAG +{ + IMG_HANDLE hPMRMem; +} __packed PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY; + +/* Bridge out structure for RGXTQReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY; + /******************************************* RGXSetTransferContextProperty *******************************************/ @@ -173,4 +207,39 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY; +/******************************************* + RGXSubmitTransfer3 + *******************************************/ + +/* Bridge in structure for RGXSubmitTransfer3 */ +typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER3_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 *pui32ClientUpdateCount; + 
IMG_UINT32 *pui32CommandSize; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_UINT32 *pui32TQPrepareFlags; + IMG_UINT32 **pui32UpdateSyncOffset; + IMG_UINT32 **pui32UpdateValue; + IMG_UINT8 **pui8FWCommand; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phSyncPMRs; + IMG_HANDLE **phUpdateUFOSyncPrimBlock; + PVRSRV_TIMELINE h2DUpdateTimeline; + PVRSRV_TIMELINE h3DUpdateTimeline; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_FENCE hExportFenceToSignal; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32PrepareCount; + IMG_UINT32 ui32SyncPMRCount; +} __packed PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER3; + +/* Bridge out structure for RGXSubmitTransfer3 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER3_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE h2DUpdateFence; + PVRSRV_FENCE h3DUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER3; + #endif /* COMMON_RGXTQ_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c index 778de5ce1335..f50c1184676a 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c @@ -95,14 +95,10 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData; IMG_HANDLE hPrivDataInt = NULL; RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; - PMR *psCLIPMRMemInt = NULL; - PMR *psUSCPMRMemInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -114,8 +110,6 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, goto RGXCreateTransferContext_exit; } - psRGXCreateTransferContextOUT->hTransferContext = NULL; - if (ui64BufferSize > IMG_UINT32_MAX) { psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; @@ -126,7 +120,6 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long)); @@ -142,7 +135,6 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -201,8 +193,7 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, psRGXCreateTransferContextIN->ui32PackedCCBSizeU8888, psRGXCreateTransferContextIN->ui32ContextFlags, psRGXCreateTransferContextIN->ui64RobustnessAddress, - &psTransferContextInt, - &psCLIPMRMemInt, &psUSCPMRMemInt); + &psTransferContextInt); /* Exit early if bridged call fails */ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) { @@ -226,32 +217,6 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, goto RGXCreateTransferContext_exit; } - psRGXCreateTransferContextOUT->eError = - PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateTransferContextOUT->hCLIPMRMem, - (void *)psCLIPMRMemInt, - PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - psRGXCreateTransferContextOUT->hTransferContext); - if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateTransferContext_exit; - } - - psRGXCreateTransferContextOUT->eError = - PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateTransferContextOUT->hUSCPMRMem, - (void *)psUSCPMRMemInt, - PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - psRGXCreateTransferContextOUT->hTransferContext); - if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateTransferContext_exit; - } - /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); @@ -271,34 +236,6 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK) { - if (psRGXCreateTransferContextOUT->hTransferContext) - { - PVRSRV_ERROR eError; - - /* Lock over handle creation cleanup. */ - LockHandle(psConnection->psHandleBase); - - eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) - psRGXCreateTransferContextOUT-> - hTransferContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); - if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(eError))); - } - /* Releasing the handle should free/destroy/release the resource. - * This should never fail... */ - PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - - /* Avoid freeing/destroying/releasing the resource a second time below */ - psTransferContextInt = NULL; - /* Release now we have cleaned up creation handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - } - if (psTransferContextInt) { PVRSRVRGXDestroyTransferContextKM(psTransferContextInt); @@ -311,11 +248,7 @@ PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -457,9 +390,7 @@ PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; IMG_BYTE *pArrayArgsBuffer2 = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -505,7 +436,6 @@ PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), sizeof(unsigned long)); @@ -521,7 +451,6 @@ PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -1079,11 +1008,7 @@ PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); /* Allocated space should be equal to the last updated offset */ @@ -1098,6 +1023,110 @@ PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static PVRSRV_ERROR _RGXTQGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTQReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTQGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTQGetSharedMemoryIN_UI8, + IMG_UINT8 * psRGXTQGetSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY *psRGXTQGetSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY *) IMG_OFFSET_ADDR(psRGXTQGetSharedMemoryIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY *psRGXTQGetSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTQGetSharedMemoryOUT_UI8, 0); + + PMR *psCLIPMRMemInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psRGXTQGetSharedMemoryIN); + + psRGXTQGetSharedMemoryOUT->eError = + PVRSRVRGXTQGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), &psCLIPMRMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + goto RGXTQGetSharedMemory_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXTQGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTQGetSharedMemoryOUT-> + hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTQGetSharedMemorypsCLIPMRMemIntRelease); + if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTQGetSharedMemory_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTQGetSharedMemory_exit: + + if (psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK) + { + if (psCLIPMRMemInt) + { + PVRSRVRGXTQReleaseSharedMemoryKM(psCLIPMRMemInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTQReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTQReleaseSharedMemoryIN_UI8, + IMG_UINT8 * psRGXTQReleaseSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY *psRGXTQReleaseSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTQReleaseSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY *psRGXTQReleaseSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTQReleaseSharedMemoryOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXTQReleaseSharedMemoryOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXTQReleaseSharedMemoryIN->hPMRMem, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + if (unlikely((psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_OK) && + (psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXTQReleaseSharedMemoryOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTQReleaseSharedMemory_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXTQReleaseSharedMemory_exit: + + return 0; +} + static IMG_INT PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXSetTransferContextPropertyIN_UI8, @@ -1155,59 +1184,762 @@ PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, return 0; } -/* *************************************************************************** - * Server bridge dispatch related glue - */ +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -#endif /* SUPPORT_RGXTQ_BRIDGE */ +static IMG_INT +PVRSRVBridgeRGXSubmitTransfer3(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSubmitTransfer3IN_UI8, + IMG_UINT8 * psRGXSubmitTransfer3OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER3 *psRGXSubmitTransfer3IN = + (PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER3 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer3IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER3 *psRGXSubmitTransfer3OUT = + (PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER3 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer3OUT_UI8, + 0); -#if defined(SUPPORT_RGXTQ_BRIDGE) -PVRSRV_ERROR InitRGXTQBridge(void); -void DeinitRGXTQBridge(void); + IMG_HANDLE hTransferContext = psRGXSubmitTransfer3IN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + IMG_UINT32 *ui32ClientUpdateCountInt = NULL; + SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 **ui32UpdateSyncOffsetInt 
= NULL; + IMG_UINT32 **ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT32 *ui32CommandSizeInt = NULL; + IMG_UINT8 **ui8FWCommandInt = NULL; + IMG_UINT32 *ui32TQPrepareFlagsInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; -/* - * Register all RGXTQ functions with services - */ -PVRSRV_ERROR InitRGXTQBridge(void) -{ + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BYTE *pArrayArgsBuffer2 = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, - PVRSRVBridgeRGXCreateTransferContext, NULL); + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + IMG_UINT32 ui32BufferSize2 = 0; + IMG_UINT32 ui32NextOffset2 = 0; - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, - PVRSRVBridgeRGXDestroyTransferContext, NULL); + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, - PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, - PVRSRVBridgeRGXSetTransferContextPriority, NULL); + ui64BufferSize += + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * + sizeof(SYNC_PRIMITIVE_BLOCK **)); + ui64BufferSize += + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_HANDLE **)); + ui64BufferSize += + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32 *)); + ui64BufferSize += + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32 *)); + ui64BufferSize += + ((IMG_UINT64) psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT8 *)); + } - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2, - PVRSRVBridgeRGXSubmitTransfer2, NULL); + if (unlikely(psRGXSubmitTransfer3IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer3_exit; + } - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, - PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY, - PVRSRVBridgeRGXSetTransferContextProperty, NULL); + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXSubmitTransfer3_exit; + } - return PVRSRV_OK; -} + ui32BufferSize = (IMG_UINT32) ui64BufferSize; -/* - * Unregister all rgxtq functions with services - */ -void DeinitRGXTQBridge(void) -{ + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXSubmitTransfer3IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT); + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXSubmitTransfer3IN; - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT); + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, - PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY); + if (!pArrayArgsBuffer) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXSubmitTransfer3_exit; + } + } + } - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2); + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + ui32ClientUpdateCountInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32); + } - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, - PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY); + /* Copy the data over */ + if (psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateCountInt, + (const void __user *)psRGXSubmitTransfer3IN->pui32ClientUpdateCount, + psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */ + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK ***) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **); + /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */ + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_HANDLE); + } + + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */ + ui32UpdateSyncOffsetInt = + (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32 *); + } + + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */ + ui32UpdateValueInt = + (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32 *); + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXSubmitTransfer3IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + ((IMG_CHAR *) 
uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + ui32CommandSizeInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32CommandSizeInt, + (const void __user *)psRGXSubmitTransfer3IN->pui32CommandSize, + psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */ + ui8FWCommandInt = (IMG_UINT8 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT8 *); + } + + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + ui32TQPrepareFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32TQPrepareFlagsInt, + (const void __user *)psRGXSubmitTransfer3IN->pui32TQPrepareFlags, + psRGXSubmitTransfer3IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + if (psRGXSubmitTransfer3IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXSubmitTransfer3IN->pui32SyncPMRFlags, + psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + if (psRGXSubmitTransfer3IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psSyncPMRsInt, 0, + psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *)); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, (const void __user *)psRGXSubmitTransfer3IN->phSyncPMRs, + psRGXSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + ui64BufferSize = 0; + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + ui64BufferSize += + ((IMG_UINT64) ui32ClientUpdateCountInt[i] * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui64BufferSize += + ((IMG_UINT64) 
ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *)); + ui64BufferSize += + ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)); + ui64BufferSize += + ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)); + ui64BufferSize += ((IMG_UINT64) ui32CommandSizeInt[i] * sizeof(IMG_UINT8)); + } + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXSubmitTransfer3_exit; + } + ui32BufferSize2 = (IMG_UINT32) ui64BufferSize; + } + + if (ui32BufferSize2 != 0) + { + pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2); + + if (!pArrayArgsBuffer2) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXSubmitTransfer3_exit; + } + } + + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNCS) + { + psRGXSubmitTransfer3OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer3_exit; + } + + /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */ + psUpdateUFOSyncPrimBlockInt[i] = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer2, + ui32NextOffset2); + OSCachedMemSet(psUpdateUFOSyncPrimBlockInt[i], 0, + ui32ClientUpdateCountInt[i] * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset2 += + ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *); + /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */ + hUpdateUFOSyncPrimBlockInt2[i] = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE); + } + } + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */ + ui32UpdateSyncOffsetInt[i] = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + } + } + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */ + ui32UpdateValueInt[i] = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + } + } + if (psRGXSubmitTransfer3IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + if (ui32CommandSizeInt[i] > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE) + { + psRGXSubmitTransfer3OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer3_exit; + } + + /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */ + ui8FWCommandInt[i] = + (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8); + } + } + + { + IMG_UINT32 i; + IMG_HANDLE **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + 
(NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer3IN-> + phUpdateUFOSyncPrimBlock[i], sizeof(IMG_HANDLE **)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0) + { + if (OSCopyFromUser + (NULL, (hUpdateUFOSyncPrimBlockInt2[i]), + (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != + PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT32 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer3IN->pui32UpdateSyncOffset[i], + sizeof(IMG_UINT32 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) + { + if (OSCopyFromUser + (NULL, (ui32UpdateSyncOffsetInt[i]), (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != + PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT32 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer3IN->pui32UpdateValue[i], + sizeof(IMG_UINT32 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) + { + if (OSCopyFromUser + (NULL, (ui32UpdateValueInt[i]), (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != + PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT8 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer3IN->pui8FWCommand[i], + sizeof(IMG_UINT8 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + + /* Copy the data over */ + if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0) + { + if (OSCopyFromUser + (NULL, (ui8FWCommandInt[i]), (const void __user *)psPtr, + (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK) + { + psRGXSubmitTransfer3OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer3_exit; + } + } + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer3_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + IMG_UINT32 j; + for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) + { + /* Look up the address from the handle */ + psRGXSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psUpdateUFOSyncPrimBlockInt[i][j], + hUpdateUFOSyncPrimBlockInt2[i][j], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer3_exit; + } + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer3IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer3_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSubmitTransfer3OUT->eError = + PVRSRVRGXSubmitTransfer3KM(psTransferContextInt, + psRGXSubmitTransfer3IN->ui32PrepareCount, + ui32ClientUpdateCountInt, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXSubmitTransfer3IN->hCheckFenceFD, + psRGXSubmitTransfer3IN->h2DUpdateTimeline, + &psRGXSubmitTransfer3OUT->h2DUpdateFence, + psRGXSubmitTransfer3IN->h3DUpdateTimeline, + &psRGXSubmitTransfer3OUT->h3DUpdateFence, + uiUpdateFenceNameInt, + psRGXSubmitTransfer3IN->hExportFenceToSignal, + ui32CommandSizeInt, + ui8FWCommandInt, + ui32TQPrepareFlagsInt, + psRGXSubmitTransfer3IN->ui32ExtJobRef, + psRGXSubmitTransfer3IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, psSyncPMRsInt); + +RGXSubmitTransfer3_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer3IN->ui32PrepareCount; i++) + { + IMG_UINT32 j; + for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) + { + + /* Unreference the previously looked up handle */ + if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i] + && psUpdateUFOSyncPrimBlockInt[i][j]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hUpdateUFOSyncPrimBlockInt2[i] + [j], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer3IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psSyncPMRsInt && psSyncPMRsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXSubmitTransfer3OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXSubmitTransfer3OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (pArrayArgsBuffer2) + OSFreeMemNoStats(pArrayArgsBuffer2); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* SUPPORT_RGXTQ_BRIDGE */ + +#if defined(SUPPORT_RGXTQ_BRIDGE) +PVRSRV_ERROR InitRGXTQBridge(void); +void DeinitRGXTQBridge(void); + +/* + * Register all RGXTQ functions with services + */ +PVRSRV_ERROR InitRGXTQBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, + PVRSRVBridgeRGXCreateTransferContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, + PVRSRVBridgeRGXDestroyTransferContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, + PVRSRVBridgeRGXSetTransferContextPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2, + PVRSRVBridgeRGXSubmitTransfer2, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2), + sizeof(PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY, + PVRSRVBridgeRGXTQGetSharedMemory, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY, + PVRSRVBridgeRGXTQReleaseSharedMemory, NULL, + 
sizeof(PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY, + PVRSRVBridgeRGXSetTransferContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER3, + PVRSRVBridgeRGXSubmitTransfer3, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER3), + sizeof(PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER3)); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtq functions with services + */ +void DeinitRGXTQBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER3); } #else /* SUPPORT_RGXTQ_BRIDGE */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_bridge.h index b3c42e6f496e..c419e21e9d07 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_bridge.h @@ -62,8 +62,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, const IMG_CHAR * puiTextB, IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size, - IMG_BOOL bIsImport, - IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle); + IMG_UINT64 ui64Flags, IMG_HANDLE * phRIHandle); IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, IMG_UINT32 ui32TextBSize, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_direct_bridge.c index 74eaf18e4aeb..de037ee78be5 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/client_ri_direct_bridge.c @@ -70,21 +70,24 @@ IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, const IMG_CHAR * puiTextB, IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size, - IMG_BOOL bIsImport, - IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle) + IMG_UINT64 ui64Flags, IMG_HANDLE * phRIHandle) { PVRSRV_ERROR eError; PMR *psPMRHandleInt; RI_HANDLE psRIHandleInt = NULL; - PVR_UNREFERENCED_PARAMETER(hBridge); psPMRHandleInt = (PMR *) hPMRHandle; eError = - RIWriteMEMDESCEntryKM(psPMRHandleInt, + RIWriteMEMDESCEntryKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psPMRHandleInt, ui32TextBSize, - puiTextB, - ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt); + puiTextB, ui64Offset, ui64Size, ui64Flags, &psRIHandleInt); + + if (eError != PVRSRV_OK) + { + return eError; + } *phRIHandle = psRIHandleInt; return eError; diff --git 
a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/common_ri_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/common_ri_bridge.h index 967ed5395518..baa41adccdfe 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/common_ri_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/common_ri_bridge.h @@ -88,13 +88,12 @@ typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG /* Bridge in structure for RIWriteMEMDESCEntry */ typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG { + IMG_UINT64 ui64Flags; IMG_UINT64 ui64Offset; IMG_UINT64 ui64Size; IMG_HANDLE hPMRHandle; const IMG_CHAR *puiTextB; IMG_UINT32 ui32TextBSize; - IMG_BOOL bIsImport; - IMG_BOOL bIsSuballoc; } __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY; /* Bridge out structure for RIWriteMEMDESCEntry */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/server_ri_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/server_ri_bridge.c index 73806ccea9bb..f8a54d70ef0c 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/server_ri_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/ri_bridge/server_ri_bridge.c @@ -143,9 +143,7 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -167,7 +165,6 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long)); @@ -183,7 +180,6 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -234,13 +230,13 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, UnlockHandle(psConnection->psHandleBase); psRIWriteMEMDESCEntryOUT->eError = - RIWriteMEMDESCEntryKM(psPMRHandleInt, + RIWriteMEMDESCEntryKM(psConnection, OSGetDevNode(psConnection), + psPMRHandleInt, psRIWriteMEMDESCEntryIN->ui32TextBSize, uiTextBInt, psRIWriteMEMDESCEntryIN->ui64Offset, psRIWriteMEMDESCEntryIN->ui64Size, - psRIWriteMEMDESCEntryIN->bIsImport, - psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt); + psRIWriteMEMDESCEntryIN->ui64Flags, &psRIHandleInt); /* Exit early if bridged call fails */ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) { @@ -295,11 +291,7 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -333,9 +325,7 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -357,7 +347,6 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for 
safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long)); @@ -373,7 +362,6 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -455,11 +443,7 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -705,31 +689,47 @@ PVRSRV_ERROR InitRIBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, - PVRSRVBridgeRIWritePMREntry, NULL); + PVRSRVBridgeRIWritePMREntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, - PVRSRVBridgeRIWriteMEMDESCEntry, NULL); + PVRSRVBridgeRIWriteMEMDESCEntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, - PVRSRVBridgeRIWriteProcListEntry, NULL); + PVRSRVBridgeRIWriteProcListEntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, - PVRSRVBridgeRIUpdateMEMDESCAddr, NULL); + PVRSRVBridgeRIUpdateMEMDESCAddr, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR), + sizeof(PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, - PVRSRVBridgeRIDeleteMEMDESCEntry, NULL); + PVRSRVBridgeRIDeleteMEMDESCEntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList, - NULL); + NULL, + sizeof(PVRSRV_BRIDGE_IN_RIDUMPLIST), + sizeof(PVRSRV_BRIDGE_OUT_RIDUMPLIST)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll, - NULL); + NULL, 0, sizeof(PVRSRV_BRIDGE_OUT_RIDUMPALL)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, - PVRSRVBridgeRIDumpProcess, NULL); + PVRSRVBridgeRIDumpProcess, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIDUMPPROCESS), + sizeof(PVRSRV_BRIDGE_OUT_RIDUMPPROCESS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, - PVRSRVBridgeRIWritePMREntryWithOwner, NULL); + PVRSRVBridgeRIWritePMREntryWithOwner, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/common_srvcore_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/common_srvcore_bridge.h index 9a3a3d186bb4..603ba5b1d89e 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/common_srvcore_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/common_srvcore_bridge.h @@ -320,7 +320,7 @@ typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG /* Bridge in structure for FindProcessMemStats */ typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG { - IMG_UINT32 *pui32MemStatsArray; + IMG_UINT64 *pui64MemStatsArray; IMG_UINT32 
ui32ArrSize; IMG_UINT32 ui32PID; IMG_BOOL bbAllProcessStats; @@ -329,7 +329,7 @@ typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG /* Bridge out structure for FindProcessMemStats */ typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG { - IMG_UINT32 *pui32MemStatsArray; + IMG_UINT64 *pui64MemStatsArray; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/server_srvcore_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/server_srvcore_bridge.c index d0dfdad448f0..d40e53b244f8 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/server_srvcore_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/srvcore_bridge/server_srvcore_bridge.c @@ -457,9 +457,7 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -481,7 +479,6 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long)); @@ -497,7 +494,6 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -542,11 +538,7 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -587,9 +579,7 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -613,7 +603,6 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long)); @@ -629,7 +618,6 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -679,11 +667,7 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -758,17 +742,15 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8, 0); - IMG_UINT32 *pui32MemStatsArrayInt = NULL; + IMG_UINT64 *pui64MemStatsArrayInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0; + ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) + 0; if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT) { @@ -778,7 +760,7 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, PVR_UNREFERENCED_PARAMETER(psConnection); - psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray; + psFindProcessMemStatsOUT->pui64MemStatsArray = psFindProcessMemStatsIN->pui64MemStatsArray; if (ui64BufferSize > IMG_UINT32_MAX) { @@ -790,7 +772,6 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long)); @@ -806,7 +787,6 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -820,16 +800,16 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, if (psFindProcessMemStatsIN->ui32ArrSize != 0) { - pui32MemStatsArrayInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32); + pui64MemStatsArrayInt = + (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64); } psFindProcessMemStatsOUT->eError = PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID, psFindProcessMemStatsIN->ui32ArrSize, psFindProcessMemStatsIN->bbAllProcessStats, - pui32MemStatsArrayInt); + pui64MemStatsArrayInt); /* Exit early if bridged call fails */ if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK)) { @@ -837,14 +817,14 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, } /* If dest ptr is non-null and we have data to copy */ - if ((pui32MemStatsArrayInt) && - ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)) + if ((pui64MemStatsArrayInt) && + ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) > 0)) { if (unlikely (OSCopyToUser - (NULL, (void __user *)psFindProcessMemStatsOUT->pui32MemStatsArray, - pui32MemStatsArrayInt, - (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK)) + (NULL, (void __user *)psFindProcessMemStatsOUT->pui64MemStatsArray, + pui64MemStatsArrayInt, + (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64))) != PVRSRV_OK)) { psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; @@ -860,11 +840,7 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -978,55 +954,82 @@ PVRSRV_ERROR InitSRVCOREBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, - PVRSRVBridgeConnect, NULL); + PVRSRVBridgeConnect, NULL, sizeof(PVRSRV_BRIDGE_IN_CONNECT), + sizeof(PVRSRV_BRIDGE_OUT_CONNECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, - PVRSRVBridgeDisconnect, NULL); + PVRSRVBridgeDisconnect, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_DISCONNECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, - PVRSRVBridgeAcquireGlobalEventObject, NULL); + PVRSRVBridgeAcquireGlobalEventObject, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, - PVRSRVBridgeReleaseGlobalEventObject, NULL); + PVRSRVBridgeReleaseGlobalEventObject, NULL, + sizeof(PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT), + sizeof(PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, - PVRSRVBridgeEventObjectOpen, NULL); + PVRSRVBridgeEventObjectOpen, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, 
PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, - PVRSRVBridgeEventObjectWait, NULL); + PVRSRVBridgeEventObjectWait, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, - PVRSRVBridgeEventObjectClose, NULL); + PVRSRVBridgeEventObjectClose, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, - PVRSRVBridgeDumpDebugInfo, NULL); + PVRSRVBridgeDumpDebugInfo, NULL, + sizeof(PVRSRV_BRIDGE_IN_DUMPDEBUGINFO), + sizeof(PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, - PVRSRVBridgeGetDevClockSpeed, NULL); + PVRSRVBridgeGetDevClockSpeed, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, - PVRSRVBridgeHWOpTimeout, NULL); + PVRSRVBridgeHWOpTimeout, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_HWOPTIMEOUT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, - PVRSRVBridgeAlignmentCheck, NULL); + PVRSRVBridgeAlignmentCheck, NULL, + sizeof(PVRSRV_BRIDGE_IN_ALIGNMENTCHECK), + sizeof(PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, - PVRSRVBridgeGetDeviceStatus, NULL); + PVRSRVBridgeGetDeviceStatus, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_GETDEVICESTATUS)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, - PVRSRVBridgeGetMultiCoreInfo, NULL); + PVRSRVBridgeGetMultiCoreInfo, NULL, + sizeof(PVRSRV_BRIDGE_IN_GETMULTICOREINFO), + sizeof(PVRSRV_BRIDGE_OUT_GETMULTICOREINFO)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, - PVRSRVBridgeEventObjectWaitTimeout, NULL); + PVRSRVBridgeEventObjectWaitTimeout, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, - PVRSRVBridgeFindProcessMemStats, NULL); + PVRSRVBridgeFindProcessMemStats, NULL, + sizeof(PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS), + sizeof(PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, - PVRSRVBridgeAcquireInfoPage, NULL); + PVRSRVBridgeAcquireInfoPage, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, - PVRSRVBridgeReleaseInfoPage, NULL); + PVRSRVBridgeReleaseInfoPage, NULL, + sizeof(PVRSRV_BRIDGE_IN_RELEASEINFOPAGE), + sizeof(PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/sync_bridge/server_sync_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/sync_bridge/server_sync_bridge.c index 4788fc1fb723..a0d9210086c9 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/sync_bridge/server_sync_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/sync_bridge/server_sync_bridge.c @@ -167,17 +167,16 @@ PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... 
*/ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psSyncHandleInt = NULL; /* Release now we have cleaned up creation handles. */ UnlockHandle(psConnection->psHandleBase); } - if (psSyncHandleInt) + else if (psSyncHandleInt) { PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); } + } return 0; @@ -527,9 +526,7 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -551,7 +548,6 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long)); @@ -567,7 +563,6 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -615,11 +610,7 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -683,35 +674,53 @@ PVRSRV_ERROR InitSYNCBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, - PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); + PVRSRVBridgeAllocSyncPrimitiveBlock, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, - PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); + PVRSRVBridgeFreeSyncPrimitiveBlock, NULL, + sizeof(PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK), + sizeof(PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, - PVRSRVBridgeSyncPrimSet, NULL); + PVRSRVBridgeSyncPrimSet, NULL, sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMSET), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMSET)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, - PVRSRVBridgeSyncPrimPDump, NULL); + PVRSRVBridgeSyncPrimPDump, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, - PVRSRVBridgeSyncPrimPDumpValue, NULL); + PVRSRVBridgeSyncPrimPDumpValue, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, - PVRSRVBridgeSyncPrimPDumpPol, NULL); + PVRSRVBridgeSyncPrimPDumpPol, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, - PVRSRVBridgeSyncPrimPDumpCBP, NULL); + PVRSRVBridgeSyncPrimPDumpCBP, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, - PVRSRVBridgeSyncAllocEvent, NULL); + PVRSRVBridgeSyncAllocEvent, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCALLOCEVENT), + sizeof(PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT)); 
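	/*
	 * The registrations above and below follow the pattern applied to the
	 * bridge init functions throughout this patch: SetDispatchTableEntry()
	 * now also takes the sizes of the entry's bridge IN and OUT packets
	 * (entries with no IN packet, e.g. RIDumpAll or Disconnect, pass 0).
	 * The updated prototype lives in the bridge support headers, which are
	 * not part of this diff; registering the sizes presumably lets the
	 * dispatcher check ioctl payload lengths per entry, but that is an
	 * assumption based only on the added arguments. Sketch of the old and
	 * new call shapes, using the SYNCALLOCEVENT entry above:
	 *
	 *   old: SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
	 *                              PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT,
	 *                              PVRSRVBridgeSyncAllocEvent, NULL);
	 *
	 *   new: SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
	 *                              PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT,
	 *                              PVRSRVBridgeSyncAllocEvent, NULL,
	 *                              sizeof(PVRSRV_BRIDGE_IN_SYNCALLOCEVENT),
	 *                              sizeof(PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT));
	 */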
SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, - PVRSRVBridgeSyncFreeEvent, NULL); + PVRSRVBridgeSyncFreeEvent, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFREEEVENT), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFREEEVENT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, - PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL); + PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL), + sizeof(PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c index 800a9dd4c189..68e650d89602 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c @@ -103,9 +103,7 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -128,7 +126,6 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbTimelineCreatePVRIN), sizeof(unsigned long)); @@ -144,7 +141,6 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -226,11 +222,7 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -391,9 +383,7 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -415,7 +405,6 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbFenceMergeIN), sizeof(unsigned long)); @@ -431,7 +420,6 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -556,11 +544,7 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -678,9 +662,7 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -717,7 +699,6 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbFenceDumpIN), sizeof(unsigned long)); @@ -733,7 +714,6 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -853,11 +833,7 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -891,9 +867,7 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -915,7 +889,6 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbTimelineCreateSWIN), sizeof(unsigned long)); @@ -931,7 +904,6 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -1013,11 +985,7 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -1053,9 +1021,7 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -1077,7 +1043,6 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbFenceCreateSWIN), sizeof(unsigned long)); @@ -1093,7 +1058,6 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -1204,11 +1168,7 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -1429,16 +1389,15 @@ PVRSRVBridgeSyncFbFenceExportInsecure(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psExportInt = NULL; /* Release now we have cleaned up creation handles. */ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); } - if (psExportInt) + else if (psExportInt) { SyncFbFenceExportDestroyInsecure(psExportInt); } + } return 0; @@ -1846,66 +1805,98 @@ PVRSRV_ERROR InitSYNCFALLBACKBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR, - PVRSRVBridgeSyncFbTimelineCreatePVR, NULL); + PVRSRVBridgeSyncFbTimelineCreatePVR, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE, - PVRSRVBridgeSyncFbTimelineRelease, NULL); + PVRSRVBridgeSyncFbTimelineRelease, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP, - PVRSRVBridgeSyncFbFenceDup, NULL); + PVRSRVBridgeSyncFbFenceDup, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE, - PVRSRVBridgeSyncFbFenceMerge, NULL); + PVRSRVBridgeSyncFbFenceMerge, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE, - PVRSRVBridgeSyncFbFenceRelease, NULL); + PVRSRVBridgeSyncFbFenceRelease, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT, - PVRSRVBridgeSyncFbFenceWait, NULL); + PVRSRVBridgeSyncFbFenceWait, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP, - PVRSRVBridgeSyncFbFenceDump, NULL); + PVRSRVBridgeSyncFbFenceDump, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW, - PVRSRVBridgeSyncFbTimelineCreateSW, NULL); + PVRSRVBridgeSyncFbTimelineCreateSW, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW, - 
PVRSRVBridgeSyncFbFenceCreateSW, NULL); + PVRSRVBridgeSyncFbFenceCreateSW, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW, - PVRSRVBridgeSyncFbTimelineAdvanceSW, NULL); + PVRSRVBridgeSyncFbTimelineAdvanceSW, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE, - PVRSRVBridgeSyncFbFenceExportInsecure, NULL); + PVRSRVBridgeSyncFbFenceExportInsecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE, - PVRSRVBridgeSyncFbFenceExportDestroyInsecure, NULL); + PVRSRVBridgeSyncFbFenceExportDestroyInsecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE, - PVRSRVBridgeSyncFbFenceImportInsecure, NULL); + PVRSRVBridgeSyncFbFenceImportInsecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE, - PVRSRVBridgeSyncFbFenceExportSecure, NULL); + PVRSRVBridgeSyncFbFenceExportSecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE, - PVRSRVBridgeSyncFbFenceExportDestroySecure, NULL); + PVRSRVBridgeSyncFbFenceExportDestroySecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE, - PVRSRVBridgeSyncFbFenceImportSecure, NULL); + PVRSRVBridgeSyncFbFenceImportSecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/rogue/synctracking_bridge/server_synctracking_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/rogue/synctracking_bridge/server_synctracking_bridge.c index adc8ab487a12..06757e5ef8d3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/rogue/synctracking_bridge/server_synctracking_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/rogue/synctracking_bridge/server_synctracking_bridge.c @@ -132,9 +132,7 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -156,7 +154,6 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long)); @@ -172,7 +169,6 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -285,11 +281,7 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -310,10 +302,14 @@ PVRSRV_ERROR InitSYNCTRACKINGBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, - PVRSRVBridgeSyncRecordRemoveByHandle, NULL); + PVRSRVBridgeSyncRecordRemoveByHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, - PVRSRVBridgeSyncRecordAdd, NULL); + PVRSRVBridgeSyncRecordAdd, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCRECORDADD), + sizeof(PVRSRV_BRIDGE_OUT_SYNCRECORDADD)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cache_bridge/server_cache_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cache_bridge/server_cache_bridge.c index b79d314f0e85..b2f5267cc597 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cache_bridge/server_cache_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cache_bridge/server_cache_bridge.c @@ -87,9 +87,7 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -116,7 +114,6 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); @@ -132,7 +129,6 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -301,11 +297,7 @@ PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -431,13 +423,16 @@ PVRSRV_ERROR InitCACHEBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, - PVRSRVBridgeCacheOpQueue, NULL); + PVRSRVBridgeCacheOpQueue, NULL, sizeof(PVRSRV_BRIDGE_IN_CACHEOPQUEUE), + sizeof(PVRSRV_BRIDGE_OUT_CACHEOPQUEUE)); SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, - PVRSRVBridgeCacheOpExec, NULL); + PVRSRVBridgeCacheOpExec, NULL, sizeof(PVRSRV_BRIDGE_IN_CACHEOPEXEC), + sizeof(PVRSRV_BRIDGE_OUT_CACHEOPEXEC)); SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, - PVRSRVBridgeCacheOpLog, NULL); + PVRSRVBridgeCacheOpLog, NULL, sizeof(PVRSRV_BRIDGE_IN_CACHEOPLOG), + sizeof(PVRSRV_BRIDGE_OUT_CACHEOPLOG)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cmm_bridge/server_cmm_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cmm_bridge/server_cmm_bridge.c index b95f8589ae99..cc658c793d3b 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cmm_bridge/server_cmm_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/cmm_bridge/server_cmm_bridge.c @@ -340,17 +340,16 @@ PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psContextInt = NULL; /* Release now we have cleaned up creation handles. 
*/ UnlockHandle(psConnection->psHandleBase); } - if (psContextInt) + else if (psContextInt) { DevmemIntCtxDestroy(psContextInt); } + } return 0; @@ -373,13 +372,19 @@ PVRSRV_ERROR InitCMMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, - PVRSRVBridgeDevmemIntExportCtx, NULL); + PVRSRVBridgeDevmemIntExportCtx, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX)); SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, - PVRSRVBridgeDevmemIntUnexportCtx, NULL); + PVRSRVBridgeDevmemIntUnexportCtx, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX)); SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, - PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); + PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c index 03eb654bf195..72c6a86a0ec8 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/devicememhistory_bridge/server_devicememhistory_bridge.c @@ -90,9 +90,7 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -107,7 +105,6 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long)); @@ -123,7 +120,6 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -200,11 +196,7 @@ PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -232,9 +224,7 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -249,7 +239,6 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long)); @@ -265,7 +254,6 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -342,11 +330,7 @@ PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -372,9 +356,7 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -389,7 +371,6 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long)); @@ -405,7 +386,6 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -455,11 +435,7 @@ PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -485,9 +461,7 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; @@ -502,7 +476,6 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long)); @@ -519,7 +492,6 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -571,11 +543,7 @@ PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -609,9 +577,7 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -646,7 +612,6 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long)); @@ -663,7 +628,6 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -791,11 +755,7 @@ PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -819,24 +779,33 @@ PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, - PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, - PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, - PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, - PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock); + PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, - PVRSRVBridgeDevicememHistorySparseChange, - pDEVICEMEMHISTORYBridgeLock); + 
PVRSRVBridgeDevicememHistorySparseChange, pDEVICEMEMHISTORYBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/di_bridge/server_di_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/di_bridge/server_di_bridge.c index 3c40afbd676a..154bccdaae5e 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/di_bridge/server_di_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/di_bridge/server_di_bridge.c @@ -90,9 +90,7 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -112,7 +110,6 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long)); @@ -128,7 +125,6 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -190,10 +186,36 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, if (psDICreateContextOUT->eError != PVRSRV_OK) { - if (psContextInt) + if (psDICreateContextOUT->hContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDICreateContextOUT-> + hContext, + PVRSRV_HANDLE_TYPE_DI_CONTEXT); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + else if (psContextInt) { DIDestroyContextKM(psContextInt); } + } /* Allocated space should be equal to the last updated offset */ @@ -202,11 +224,7 @@ PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -266,9 +284,7 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0; @@ -283,7 +299,6 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long)); @@ -299,7 +314,6 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -370,11 +384,7 @@ PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -402,9 +412,7 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -427,7 +435,6 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long)); @@ -443,7 +450,6 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -534,11 +540,7 @@ PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -606,19 +608,27 @@ PVRSRV_ERROR InitDIBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT, - PVRSRVBridgeDICreateContext, NULL); + PVRSRVBridgeDICreateContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_DICREATECONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_DICREATECONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT, - PVRSRVBridgeDIDestroyContext, NULL); + PVRSRVBridgeDIDestroyContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY, - PVRSRVBridgeDIReadEntry, NULL); + PVRSRVBridgeDIReadEntry, NULL, sizeof(PVRSRV_BRIDGE_IN_DIREADENTRY), + sizeof(PVRSRV_BRIDGE_OUT_DIREADENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY, - PVRSRVBridgeDIWriteEntry, NULL); + PVRSRVBridgeDIWriteEntry, NULL, sizeof(PVRSRV_BRIDGE_IN_DIWRITEENTRY), + sizeof(PVRSRV_BRIDGE_OUT_DIWRITEENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES, - PVRSRVBridgeDIListAllEntries, NULL); + PVRSRVBridgeDIListAllEntries, NULL, + sizeof(PVRSRV_BRIDGE_IN_DILISTALLENTRIES), + sizeof(PVRSRV_BRIDGE_OUT_DILISTALLENTRIES)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dma_bridge/server_dma_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dma_bridge/server_dma_bridge.c index 29d45c71380c..7f2c5dd495e3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dma_bridge/server_dma_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dma_bridge/server_dma_bridge.c @@ -85,9 +85,7 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) 
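	/* bHaveEnoughSpace is set further down when the variable-length array
	 * arguments fit into the unused tail of the fixed-size bridge input
	 * buffer (the PVR_ALIGN()/PVRSRV_MAX_BRIDGE_IN_SIZE check); in that
	 * case pArrayArgsBuffer points into the input buffer, otherwise it is
	 * taken from OSAllocMemNoStats() and only then freed at the end of the
	 * handler. The INTEGRITY_OS conditionals deleted here (and in the other
	 * bridge handlers in this patch) had forced the heap-allocation path on
	 * that OS; the in-buffer reuse path is now always compiled in. */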
IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -113,7 +111,6 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDmaTransferIN), sizeof(unsigned long)); @@ -129,7 +126,6 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -280,11 +276,7 @@ PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -311,9 +303,7 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -337,7 +327,6 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDmaSparseMappingTableIN), sizeof(unsigned long)); @@ -353,7 +342,6 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -432,11 +420,7 @@ PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -476,13 +460,17 @@ PVRSRV_ERROR InitDMABridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER, - PVRSRVBridgeDmaTransfer, NULL); + PVRSRVBridgeDmaTransfer, NULL, sizeof(PVRSRV_BRIDGE_IN_DMATRANSFER), + sizeof(PVRSRV_BRIDGE_OUT_DMATRANSFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE, - PVRSRVBridgeDmaSparseMappingTable, NULL); + PVRSRVBridgeDmaSparseMappingTable, NULL, + sizeof(PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE), + sizeof(PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS, - PVRSRVBridgeDmaDeviceParams, NULL); + PVRSRVBridgeDmaDeviceParams, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h index 7547d9f76297..cc631e3e1116 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/common_dmabuf_bridge.h @@ -55,8 +55,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
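/*
 * The hunk below removes the PhysmemImportDmaBufLocked command: its slot
 * (CMD_FIRST+1) is taken over by PhysmemExportDmaBuf, a new
 * PhysmemExportGemHandle command is added at CMD_FIRST+2, and CMD_LAST
 * stays at CMD_FIRST+3. Minimal user-side sketch of the new GEM-handle
 * export packets declared further down in this header; the transport
 * helper is a hypothetical stand-in for the generated client stub, which
 * is not part of the hunks shown here.
 */
extern PVRSRV_ERROR SubmitDmaBufBridgeSketch(IMG_UINT32 ui32CmdID,
					     void *pvIn, IMG_UINT32 ui32InSize,
					     void *pvOut, IMG_UINT32 ui32OutSize);

static IMG_UINT32 SketchPhysmemExportGemHandle(IMG_HANDLE hPMR)
{
	PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE sIn;
	PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE sOut;

	sIn.hPMR = hPMR;
	sOut.eError = PVRSRV_OK;
	sOut.ui32Handle = 0;

	/* hypothetical transport call; the real stub would be generated in the
	 * client dmabuf bridge sources */
	if (SubmitDmaBufBridgeSketch(PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE,
				     &sIn, sizeof(sIn), &sOut, sizeof(sOut)) != PVRSRV_OK ||
	    sOut.eError != PVRSRV_OK)
	{
		return 0;	/* GEM handle 0 is never valid, so it signals failure */
	}

	return sOut.ui32Handle;
}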
#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 #define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 -#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 -#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 #define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3 #define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3) @@ -82,28 +82,6 @@ typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; -/******************************************* - PhysmemImportDmaBufLocked - *******************************************/ - -/* Bridge in structure for PhysmemImportDmaBufLocked */ -typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED_TAG -{ - const IMG_CHAR *puiName; - IMG_INT ifd; - IMG_UINT32 ui32NameSize; - PVRSRV_MEMALLOCFLAGS_T uiFlags; -} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED; - -/* Bridge out structure for PhysmemImportDmaBufLocked */ -typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED_TAG -{ - IMG_DEVMEM_ALIGN_T uiAlign; - IMG_DEVMEM_SIZE_T uiSize; - IMG_HANDLE hPMRPtr; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED; - /******************************************* PhysmemExportDmaBuf *******************************************/ @@ -121,6 +99,23 @@ typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG IMG_INT iFd; } __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; +/******************************************* + PhysmemExportGemHandle + *******************************************/ + +/* Bridge in structure for PhysmemExportGemHandle */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE; + +/* Bridge out structure for PhysmemExportGemHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Handle; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE; + /******************************************* PhysmemImportSparseDmaBuf *******************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c index 4e784db6ea23..89aa1477f55a 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/dmabuf_bridge/server_dmabuf_bridge.c @@ -94,9 +94,7 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -118,7 +116,6 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long)); @@ -134,7 +131,6 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -219,175 +215,7 @@ PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} - -static PVRSRV_ERROR _PhysmemImportDmaBufLockedpsPMRPtrIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = PMRUnrefUnlockPMR((PMR *) pvData); - return eError; -} - -static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, - "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgePhysmemImportDmaBufLocked(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPhysmemImportDmaBufLockedIN_UI8, - IMG_UINT8 * psPhysmemImportDmaBufLockedOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedIN = - (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *) - IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedOUT = - (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *) - IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedOUT_UI8, 0); - - IMG_CHAR *uiNameInt = NULL; - PMR *psPMRPtrInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; - - if (unlikely(psPhysmemImportDmaBufLockedIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysmemImportDmaBufLocked_exit; - } - - if (ui64BufferSize > IMG_UINT32_MAX) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto PhysmemImportDmaBufLocked_exit; - } - - ui32BufferSize = (IMG_UINT32) ui64BufferSize; - - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufLockedIN; - - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - - if (!pArrayArgsBuffer) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto PhysmemImportDmaBufLocked_exit; - } - } - } - - if (psPhysmemImportDmaBufLockedIN->ui32NameSize != 0) - { - uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR); - } - - /* Copy the data over */ - if (psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR) > 0) - { - if (OSCopyFromUser - (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufLockedIN->puiName, - psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) - { - psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto PhysmemImportDmaBufLocked_exit; - } - ((IMG_CHAR *) - uiNameInt)[(psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = - '\0'; - } - - psPhysmemImportDmaBufLockedOUT->eError = - PhysmemImportDmaBufLocked(psConnection, OSGetDevNode(psConnection), - psPhysmemImportDmaBufLockedIN->ifd, - psPhysmemImportDmaBufLockedIN->uiFlags, - psPhysmemImportDmaBufLockedIN->ui32NameSize, - uiNameInt, - &psPMRPtrInt, - &psPhysmemImportDmaBufLockedOUT->uiSize, - &psPhysmemImportDmaBufLockedOUT->uiAlign); - /* Exit early if bridged call fails */ - if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) - { - goto PhysmemImportDmaBufLocked_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psPhysmemImportDmaBufLockedOUT->eError = - PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psPhysmemImportDmaBufLockedOUT->hPMRPtr, (void *)psPMRPtrInt, - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _PhysmemImportDmaBufLockedpsPMRPtrIntRelease); - if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto PhysmemImportDmaBufLocked_exit; - } - - /* Release now we have created handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - -PhysmemImportDmaBufLocked_exit: - - if (psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK) - { - if (psPMRPtrInt) - { - LockHandle(KERNEL_HANDLE_BASE); - PMRUnrefUnlockPMR(psPMRPtrInt); - UnlockHandle(KERNEL_HANDLE_BASE); - } - } - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psPhysmemImportDmaBufLockedOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -446,6 +274,59 @@ PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgePhysmemExportGemHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemExportGemHandleIN_UI8, + IMG_UINT8 * psPhysmemExportGemHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE *psPhysmemExportGemHandleIN = + (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE *) + IMG_OFFSET_ADDR(psPhysmemExportGemHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE *psPhysmemExportGemHandleOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE *) + IMG_OFFSET_ADDR(psPhysmemExportGemHandleOUT_UI8, 0); + + IMG_HANDLE hPMR = psPhysmemExportGemHandleIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPhysmemExportGemHandleOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psPhysmemExportGemHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemExportGemHandle_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPhysmemExportGemHandleOUT->eError = + PhysmemExportGemHandle(psConnection, OSGetDevNode(psConnection), + psPMRInt, &psPhysmemExportGemHandleOUT->ui32Handle); + +PhysmemExportGemHandle_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -477,9 +358,7 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -509,7 +388,6 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long)); @@ -525,7 +403,6 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -637,11 +514,7 @@ PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -661,16 +534,24 @@ PVRSRV_ERROR InitDMABUFBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, - PVRSRVBridgePhysmemImportDmaBuf, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED, - PVRSRVBridgePhysmemImportDmaBufLocked, NULL); + PVRSRVBridgePhysmemImportDmaBuf, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, - PVRSRVBridgePhysmemExportDmaBuf, NULL); + PVRSRVBridgePhysmemExportDmaBuf, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE, + PVRSRVBridgePhysmemExportGemHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMEXPORTGEMHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTGEMHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, - PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); + PVRSRVBridgePhysmemImportSparseDmaBuf, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF)); return PVRSRV_OK; } @@ -683,11 +564,10 @@ void DeinitDMABUFBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, - PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTGEMHANDLE); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h index b3514eaba9b8..2c5d18843120 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_bridge.h @@ -61,11 +61,4 @@ IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, IMG_UINT32 ui32EnablePID, IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode); -IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, - IMG_UINT32 ui32PID, - IMG_UINT32 ui32TID, - IMG_UINT64 ui64TimeStamp, - IMG_UINT32 ui32SF, - IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args); - #endif /* CLIENT_HTBUFFER_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c index 
9c5833116075..3e6295cdfdf3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/client_htbuffer_direct_bridge.c @@ -68,18 +68,3 @@ IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, return eError; } - -IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, - IMG_UINT32 ui32PID, - IMG_UINT32 ui32TID, - IMG_UINT64 ui64TimeStamp, - IMG_UINT32 ui32SF, - IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args) -{ - PVRSRV_ERROR eError; - PVR_UNREFERENCED_PARAMETER(hBridge); - - eError = HTBLogKM(ui32PID, ui32TID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args); - - return eError; -} diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h index 69a406b253d9..32be202186dc 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/common_htbuffer_bridge.h @@ -56,8 +56,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 #define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 -#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1 -#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1) +#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0) /******************************************* HTBControl @@ -80,25 +79,4 @@ typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_HTBCONTROL; -/******************************************* - HTBLog - *******************************************/ - -/* Bridge in structure for HTBLog */ -typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG -{ - IMG_UINT64 ui64TimeStamp; - IMG_UINT32 *pui32Args; - IMG_UINT32 ui32NumArgs; - IMG_UINT32 ui32PID; - IMG_UINT32 ui32SF; - IMG_UINT32 ui32TID; -} __packed PVRSRV_BRIDGE_IN_HTBLOG; - -/* Bridge out structure for HTBLog */ -typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_HTBLOG; - #endif /* COMMON_HTBUFFER_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c index dd81d914b2f2..9b730d28e5b4 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/htbuffer_bridge/server_htbuffer_bridge.c @@ -86,9 +86,7 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -112,7 +110,6 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); @@ -128,7 +125,6 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -176,124 +172,7 @@ PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} - -static_assert(HTB_LOG_MAX_PARAMS <= IMG_UINT32_MAX, - "HTB_LOG_MAX_PARAMS must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psHTBLogIN_UI8, - IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN = - (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0); - PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT = - (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0); - - IMG_UINT32 *ui32ArgsInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0; - - if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS)) - { - psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto HTBLog_exit; - } - - PVR_UNREFERENCED_PARAMETER(psConnection); - - if (ui64BufferSize > IMG_UINT32_MAX) - { - psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto HTBLog_exit; - } - - ui32BufferSize = (IMG_UINT32) ui64BufferSize; - - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBLogIN; - - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - - if (!pArrayArgsBuffer) - { - psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto HTBLog_exit; - } - } - } - - if (psHTBLogIN->ui32NumArgs != 0) - { - ui32ArgsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ArgsInt, (const void __user *)psHTBLogIN->pui32Args, - psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto HTBLog_exit; - } - } - - psHTBLogOUT->eError = - HTBLogKM(psHTBLogIN->ui32PID, - psHTBLogIN->ui32TID, - psHTBLogIN->ui64TimeStamp, - psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt); - -HTBLog_exit: - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psHTBLogOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -319,10 +198,9 @@ PVRSRV_ERROR InitHTBUFFERBridge(void) PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate"); SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, - PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); - - SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, - PVRSRVBridgeHTBLog, pHTBUFFERBridgeLock); + PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_HTBCONTROL), + sizeof(PVRSRV_BRIDGE_OUT_HTBCONTROL)); return PVRSRV_OK; } @@ -336,8 +214,6 @@ void DeinitHTBUFFERBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG); - } #else /* EXCLUDE_HTBUFFER_BRIDGE */ /* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_bridge.h index 94262864083c..d5ba6dd74da3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_bridge.h @@ -85,8 +85,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); -IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); - IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32NumPhysChunks, @@ -101,30 +99,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags, PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); -IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 * 
pui32MappingTable, - IMG_UINT32 ui32Log2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32AnnotationLength, - const IMG_CHAR * puiAnnotation, - IMG_PID ui32PID, - IMG_HANDLE * phPMRPtr, - IMG_UINT32 ui32PDumpFlags, - PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve1(IMG_HANDLE hBridge, IMG_HANDLE hPMR); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve2(IMG_HANDLE hBridge, IMG_HANDLE hPMR); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve3(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve4(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR); - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, IMG_BOOL bbKernelMemoryCtx, IMG_HANDLE * phDevMemServerContext, @@ -138,66 +112,45 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT32 ui32HeapConfigIndex, IMG_UINT32 ui32HeapIndex, - IMG_DEV_VIRTADDR sHeapBaseAddr, - IMG_DEVMEM_SIZE_T uiHeapLength, - IMG_UINT32 ui32Log2DataPageSize, IMG_HANDLE * phDevmemHeapPtr); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemServerHeap, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - IMG_HANDLE * phMapping); + IMG_HANDLE hReservation, IMG_HANDLE hPMR); -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping); +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hReservation); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, IMG_HANDLE hDevmemServerHeap, IMG_DEV_VIRTADDR sAddress, IMG_DEVMEM_SIZE_T uiLength, + PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_HANDLE * phReservation); +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRangeAndMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE * phReservation); + IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation); IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, - IMG_HANDLE hSrvDevMemHeap, - IMG_HANDLE hPMR, IMG_UINT32 ui32AllocPageCount, IMG_UINT32 * pui32AllocPageIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 * pui32FreePageIndices, IMG_UINT32 ui32SparseFlags, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_UINT64 ui64CPUVAddr); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - IMG_UINT32 ui32PageCount, - IMG_UINT32 ui32PhysicalPgOffset, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr); - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_UINT32 ui32PageCount); + IMG_HANDLE hReservation); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_DEV_VIRTADDR sAddress); -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemCtx, - IMG_DEV_VIRTADDR sAddress, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate); - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT64 ui64FBSCEntries); @@ -226,29 +179,56 @@ IMG_INTERNAL PVRSRV_ERROR 
BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, IMG_UINT32 * pui32Log2ImportAlignmentOut); IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, - IMG_HANDLE hDevm, - IMG_UINT32 ui32PID, IMG_BOOL bRegister); - -IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, - IMG_UINT32 * pui32PhysHeapCount); + IMG_HANDLE hDevmemCtx, + IMG_BOOL bRegister); IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, IMG_UINT32 ui32PhysHeapCount, PVRSRV_PHYS_HEAP * peaPhysHeapID, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); + PHYS_HEAP_MEM_STATS_V1 * pasapPhysHeapMemStats); IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, PVRSRV_PHYS_HEAP * peHeap); -IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge, - IMG_UINT32 ui32PhysHeapCount, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_DEV_VIRTADDR * psFaultAddress); -IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, - IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid); +IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge, + IMG_UINT32 ui32ui32StatType, + IMG_PID ui32pid); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE * phReservation); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hReservation); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysPageOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32VirtPageOffset, + IMG_UINT32 ui32PageCount); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapVRangeToBackingPage(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32PageCount, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset); + +IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo2(IMG_HANDLE hBridge, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP * peaPhysHeapID, + PHYS_HEAP_MEM_STATS_V2 * pasapPhysHeapMemStats); #endif /* CLIENT_MM_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_direct_bridge.c index 701af68241d0..c1810e959ca8 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/client_mm_direct_bridge.c @@ -67,6 +67,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, IMG_UINT32 * pui32Log2Contig, IMG_UINT64 * pui64Password) { +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR eError; PMR *psPMRInt; PMR_EXPORT *psPMRExportInt = NULL; @@ -78,10 +79,21 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, *phPMRExport = psPMRExportInt; return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hPMR); + PVR_UNREFERENCED_PARAMETER(phPMRExport); + PVR_UNREFERENCED_PARAMETER(pui64Size); + PVR_UNREFERENCED_PARAMETER(pui32Log2Contig); + PVR_UNREFERENCED_PARAMETER(pui64Password); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif } IMG_INTERNAL 
PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport) { +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR eError; PMR_EXPORT *psPMRExportInt; PVR_UNREFERENCED_PARAMETER(hBridge); @@ -91,6 +103,12 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hP eError = PMRUnexportPMR(psPMRExportInt); return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hPMRExport); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif } IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, @@ -142,6 +160,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, IMG_UINT64 ui64uiSize, IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR) { +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR eError; PMR_EXPORT *psPMRExportInt; PMR *psPMRInt = NULL; @@ -155,6 +174,15 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, *phPMR = psPMRInt; return eError; +#else + PVR_UNREFERENCED_PARAMETER(hPMRExport); + PVR_UNREFERENCED_PARAMETER(ui64uiPassword); + PVR_UNREFERENCED_PARAMETER(ui64uiSize); + PVR_UNREFERENCED_PARAMETER(ui32uiLog2Contig); + PVR_UNREFERENCED_PARAMETER(phPMR); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif } IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, @@ -189,19 +217,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psPMRInt = (PMR *) hPMR; - - eError = PMRUnrefUnlockPMR(psPMRInt); - - return eError; -} - IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32NumPhysChunks, @@ -235,97 +250,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 * pui32MappingTable, - IMG_UINT32 ui32Log2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32AnnotationLength, - const IMG_CHAR * puiAnnotation, - IMG_PID ui32PID, - IMG_HANDLE * phPMRPtr, - IMG_UINT32 ui32PDumpFlags, - PVRSRV_MEMALLOCFLAGS_T * puiOutFlags) -{ - PVRSRV_ERROR eError; - PMR *psPMRPtrInt = NULL; - - eError = - PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - pui32MappingTable, - ui32Log2PageSize, - uiFlags, - ui32AnnotationLength, - puiAnnotation, - ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags); - - *phPMRPtr = psPMRPtrInt; - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve1(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve1(psPMRInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve2(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve2(psPMRInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve3(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMappingInt; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - 
psMappingInt = (DEVMEMINT_MAPPING *) hMapping; - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve3(psMappingInt, psPMRInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemCompatReserve4(IMG_HANDLE hBridge, - IMG_HANDLE hMapping, IMG_HANDLE hPMR) -{ - PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMappingInt; - PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psMappingInt = (DEVMEMINT_MAPPING *) hMapping; - psPMRInt = (PMR *) hPMR; - - eError = DevmemCompatReserve4(psMappingInt, psPMRInt); - - return eError; -} - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, IMG_BOOL bbKernelMemoryCtx, IMG_HANDLE * phDevMemServerContext, @@ -364,9 +288,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT32 ui32HeapConfigIndex, IMG_UINT32 ui32HeapIndex, - IMG_DEV_VIRTADDR sHeapBaseAddr, - IMG_DEVMEM_SIZE_T uiHeapLength, - IMG_UINT32 ui32Log2DataPageSize, IMG_HANDLE * phDevmemHeapPtr) { PVRSRV_ERROR eError; @@ -378,10 +299,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, eError = DevmemIntHeapCreate(psDevmemCtxInt, - ui32HeapConfigIndex, - ui32HeapIndex, - sHeapBaseAddr, - uiHeapLength, ui32Log2DataPageSize, &psDevmemHeapPtrInt); + ui32HeapConfigIndex, ui32HeapIndex, &psDevmemHeapPtrInt); *phDevmemHeapPtr = psDevmemHeapPtrInt; return eError; @@ -401,40 +319,30 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HAN } IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemServerHeap, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - IMG_HANDLE * phMapping) + IMG_HANDLE hReservation, IMG_HANDLE hPMR) { PVRSRV_ERROR eError; - DEVMEMINT_HEAP *psDevmemServerHeapInt; DEVMEMINT_RESERVATION *psReservationInt; PMR *psPMRInt; - DEVMEMINT_MAPPING *psMappingInt = NULL; PVR_UNREFERENCED_PARAMETER(hBridge); - psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; psPMRInt = (PMR *) hPMR; - eError = - DevmemIntMapPMR(psDevmemServerHeapInt, - psReservationInt, psPMRInt, uiMapFlags, &psMappingInt); + eError = DevmemIntMapPMR(psReservationInt, psPMRInt); - *phMapping = psMappingInt; return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping) +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hReservation) { PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMappingInt; + DEVMEMINT_RESERVATION *psReservationInt; PVR_UNREFERENCED_PARAMETER(hBridge); - psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - eError = DevmemIntUnmapPMR(psMappingInt); + eError = DevmemIntUnmapPMR(psReservationInt); return eError; } @@ -443,93 +351,69 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, IMG_HANDLE hDevmemServerHeap, IMG_DEV_VIRTADDR sAddress, IMG_DEVMEM_SIZE_T uiLength, + PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_HANDLE * phReservation) { PVRSRV_ERROR eError; DEVMEMINT_HEAP *psDevmemServerHeapInt; DEVMEMINT_RESERVATION *psReservationInt = NULL; - PVR_UNREFERENCED_PARAMETER(hBridge); psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; eError = - DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); + DevmemIntReserveRange(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemServerHeapInt, + sAddress, uiLength, uiFlags, &psReservationInt); 
*phReservation = psReservationInt; return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) -{ - PVRSRV_ERROR eError; - DEVMEMINT_RESERVATION *psReservationInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - - eError = DevmemIntUnreserveRange(psReservationInt); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, - IMG_HANDLE hSrvDevMemHeap, - IMG_HANDLE hPMR, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 * pui32AllocPageIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 * pui32FreePageIndices, - IMG_UINT32 ui32SparseFlags, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr) +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRangeAndMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE * phReservation) { PVRSRV_ERROR eError; - DEVMEMINT_HEAP *psSrvDevMemHeapInt; + DEVMEMINT_HEAP *psDevmemServerHeapInt; PMR *psPMRInt; - PVR_UNREFERENCED_PARAMETER(hBridge); + DEVMEMINT_RESERVATION *psReservationInt = NULL; - psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; psPMRInt = (PMR *) hPMR; eError = - DevmemIntChangeSparse(psSrvDevMemHeapInt, - psPMRInt, - ui32AllocPageCount, - pui32AllocPageIndices, - ui32FreePageCount, - pui32FreePageIndices, - ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr); + DevmemIntReserveRangeAndMapPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemServerHeapInt, + sAddress, + uiLength, psPMRInt, uiFlags, &psReservationInt); + *phReservation = psReservationInt; return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_HANDLE hPMR, - IMG_UINT32 ui32PageCount, - IMG_UINT32 ui32PhysicalPgOffset, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddr) +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) { PVRSRV_ERROR eError; DEVMEMINT_RESERVATION *psReservationInt; - PMR *psPMRInt; PVR_UNREFERENCED_PARAMETER(hBridge); psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - psPMRInt = (PMR *) hPMR; - eError = - DevmemIntMapPages(psReservationInt, - psPMRInt, ui32PageCount, ui32PhysicalPgOffset, uiFlags, sDevVAddr); + eError = DevmemIntUnreserveRange(psReservationInt); return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, - IMG_HANDLE hReservation, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_UINT32 ui32PageCount) +IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32SparseFlags, IMG_HANDLE hReservation) { PVRSRV_ERROR eError; DEVMEMINT_RESERVATION *psReservationInt; @@ -537,7 +421,11 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; - eError = DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount); + eError = + DevmemIntChangeSparse(ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, ui32SparseFlags, psReservationInt); return eError; } @@ -558,33 +446,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, 
return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, - IMG_HANDLE hDevmemCtx, - IMG_DEV_VIRTADDR sAddress, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate) -{ -#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) - PVRSRV_ERROR eError; - DEVMEMINT_CTX *psDevmemCtxInt; - PVR_UNREFERENCED_PARAMETER(hBridge); - - psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; - - eError = DevmemIntFlushDevSLCRange(psDevmemCtxInt, sAddress, uiSize, bInvalidate); - - return eError; -#else - PVR_UNREFERENCED_PARAMETER(hBridge); - PVR_UNREFERENCED_PARAMETER(hDevmemCtx); - PVR_UNREFERENCED_PARAMETER(sAddress); - PVR_UNREFERENCED_PARAMETER(uiSize); - PVR_UNREFERENCED_PARAMETER(bInvalidate); - - return PVRSRV_ERROR_NOT_IMPLEMENTED; -#endif -} - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_UINT64 ui64FBSCEntries) @@ -675,28 +536,16 @@ IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, } IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, - IMG_HANDLE hDevm, - IMG_UINT32 ui32PID, IMG_BOOL bRegister) + IMG_HANDLE hDevmemCtx, + IMG_BOOL bRegister) { PVRSRV_ERROR eError; - DEVMEMINT_CTX *psDevmInt; + DEVMEMINT_CTX *psDevmemCtxInt; PVR_UNREFERENCED_PARAMETER(hBridge); - psDevmInt = (DEVMEMINT_CTX *) hDevm; - - eError = DevmemIntRegisterPFNotifyKM(psDevmInt, ui32PID, bRegister); - - return eError; -} - -IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, - IMG_UINT32 * pui32PhysHeapCount) -{ - PVRSRV_ERROR eError; + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; - eError = - PVRSRVGetMaxPhysHeapCountKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - pui32PhysHeapCount); + eError = DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, bRegister); return eError; } @@ -704,7 +553,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, IMG_UINT32 ui32PhysHeapCount, PVRSRV_PHYS_HEAP * peaPhysHeapID, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) + PHYS_HEAP_MEM_STATS_V1 * pasapPhysHeapMemStats) { PVRSRV_ERROR eError; @@ -726,19 +575,6 @@ IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge, - IMG_UINT32 ui32PhysHeapCount, - PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) -{ - PVRSRV_ERROR eError; - - eError = - PVRSRVGetHeapPhysMemUsageKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - ui32PhysHeapCount, pasapPhysHeapMemStats); - - return eError; -} - IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, IMG_HANDLE hDevmemCtx, IMG_DEV_VIRTADDR * psFaultAddress) @@ -755,8 +591,9 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, return eError; } -IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, - IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid) +IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge, + IMG_UINT32 ui32ui32StatType, + IMG_PID ui32pid) { #if defined(PVRSRV_ENABLE_PROCESS_STATS) PVRSRV_ERROR eError; @@ -773,3 +610,110 @@ IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, return PVRSRV_ERROR_NOT_IMPLEMENTED; #endif } + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE * phReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP 
*psDevmemServerHeapInt; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + + eError = + DevmemXIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); + + *phReservation = psReservationInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hReservation) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + + eError = DevmemXIntUnreserveRange(psReservationInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysPageOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemXIntMapPages(psReservationInt, + psPMRInt, + ui32PageCount, ui32PhysPageOffset, uiFlags, ui32VirtPageOffset); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32VirtPageOffset, + IMG_UINT32 ui32PageCount) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + + eError = DevmemXIntUnmapPages(psReservationInt, ui32VirtPageOffset, ui32PageCount); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapVRangeToBackingPage(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_UINT32 ui32PageCount, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32VirtPageOffset) +{ + PVRSRV_ERROR eError; + DEVMEMXINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; + + eError = + DevmemXIntMapVRangeToBackingPage(psReservationInt, + ui32PageCount, uiFlags, ui32VirtPageOffset); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo2(IMG_HANDLE hBridge, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP * peaPhysHeapID, + PHYS_HEAP_MEM_STATS_V2 * pasapPhysHeapMemStats) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVPhysHeapGetMemInfo2KM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32PhysHeapCount, peaPhysHeapID, pasapPhysHeapMemStats); + + return eError; +} diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/common_mm_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/common_mm_bridge.h index 56cacdffdf18..3b3c8c2dc215 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/common_mm_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/common_mm_bridge.h @@ -64,39 +64,35 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5 #define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6 #define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7 -#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 -#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9 -#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE1 PVRSRV_BRIDGE_MM_CMD_FIRST+11 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE2 PVRSRV_BRIDGE_MM_CMD_FIRST+12 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE3 PVRSRV_BRIDGE_MM_CMD_FIRST+13 -#define PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE4 PVRSRV_BRIDGE_MM_CMD_FIRST+14 -#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15 -#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16 -#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17 -#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18 -#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19 -#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20 -#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21 -#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22 -#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23 -#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24 -#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25 -#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26 -#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27 -#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31 -#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32 -#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33 -#define PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+34 -#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+35 -#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+36 -#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE PVRSRV_BRIDGE_MM_CMD_FIRST+37 -#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+38 -#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+39 -#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+39) +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+9 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+10 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+11 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+12 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+13 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+14 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+15 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+16 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+17 +#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM 
PVRSRV_BRIDGE_MM_CMD_FIRST+18 +#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+19 +#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+20 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+21 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+22 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+23 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+24 +#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+25 +#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+26 +#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+27 +#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+28 +#define PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT PVRSRV_BRIDGE_MM_CMD_FIRST+29 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+30 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+31 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+32 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+33 +#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE PVRSRV_BRIDGE_MM_CMD_FIRST+34 +#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO2 PVRSRV_BRIDGE_MM_CMD_FIRST+35 +#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+35) /******************************************* PMRExportPMR @@ -239,22 +235,6 @@ typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR; -/******************************************* - PMRUnrefUnlockPMR - *******************************************/ - -/* Bridge in structure for PMRUnrefUnlockPMR */ -typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG -{ - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; - -/* Bridge out structure for PMRUnrefUnlockPMR */ -typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; - /******************************************* PhysmemNewRamBackedPMR *******************************************/ @@ -282,99 +262,6 @@ typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG PVRSRV_MEMALLOCFLAGS_T uiOutFlags; } __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; -/******************************************* - PhysmemNewRamBackedLockedPMR - *******************************************/ - -/* Bridge in structure for PhysmemNewRamBackedLockedPMR */ -typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG -{ - IMG_DEVMEM_SIZE_T uiSize; - IMG_UINT32 *pui32MappingTable; - const IMG_CHAR *puiAnnotation; - IMG_UINT32 ui32AnnotationLength; - IMG_UINT32 ui32Log2PageSize; - IMG_UINT32 ui32NumPhysChunks; - IMG_UINT32 ui32NumVirtChunks; - IMG_UINT32 ui32PDumpFlags; - IMG_PID ui32PID; - PVRSRV_MEMALLOCFLAGS_T uiFlags; -} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR; - -/* Bridge out structure for PhysmemNewRamBackedLockedPMR */ -typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG -{ - IMG_HANDLE hPMRPtr; - PVRSRV_ERROR eError; - PVRSRV_MEMALLOCFLAGS_T uiOutFlags; -} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR; - -/******************************************* - DevmemCompatReserve1 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve1 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1_TAG -{ - IMG_HANDLE hPMR; -} __packed 
PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1; - -/* Bridge out structure for DevmemCompatReserve1 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1; - -/******************************************* - DevmemCompatReserve2 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve2 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2_TAG -{ - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2; - -/* Bridge out structure for DevmemCompatReserve2 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2; - -/******************************************* - DevmemCompatReserve3 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve3 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3_TAG -{ - IMG_HANDLE hMapping; - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3; - -/* Bridge out structure for DevmemCompatReserve3 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3; - -/******************************************* - DevmemCompatReserve4 - *******************************************/ - -/* Bridge in structure for DevmemCompatReserve4 */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4_TAG -{ - IMG_HANDLE hMapping; - IMG_HANDLE hPMR; -} __packed PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4; - -/* Bridge out structure for DevmemCompatReserve4 */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4; - /******************************************* DevmemIntCtxCreate *******************************************/ @@ -417,12 +304,9 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG /* Bridge in structure for DevmemIntHeapCreate */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG { - IMG_DEV_VIRTADDR sHeapBaseAddr; - IMG_DEVMEM_SIZE_T uiHeapLength; IMG_HANDLE hDevmemCtx; IMG_UINT32 ui32HeapConfigIndex; IMG_UINT32 ui32HeapIndex; - IMG_UINT32 ui32Log2DataPageSize; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; /* Bridge out structure for DevmemIntHeapCreate */ @@ -455,16 +339,13 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG /* Bridge in structure for DevmemIntMapPMR */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG { - IMG_HANDLE hDevmemServerHeap; IMG_HANDLE hPMR; IMG_HANDLE hReservation; - PVRSRV_MEMALLOCFLAGS_T uiMapFlags; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; /* Bridge out structure for DevmemIntMapPMR */ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG { - IMG_HANDLE hMapping; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; @@ -475,7 +356,7 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG /* Bridge in structure for DevmemIntUnmapPMR */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG { - IMG_HANDLE hMapping; + IMG_HANDLE hReservation; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; /* Bridge out structure for DevmemIntUnmapPMR */ @@ -494,6 +375,7 @@ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG IMG_DEV_VIRTADDR sAddress; IMG_DEVMEM_SIZE_T uiLength; IMG_HANDLE hDevmemServerHeap; + PVRSRV_MEMALLOCFLAGS_T uiFlags; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; /* Bridge out structure for DevmemIntReserveRange */ @@ -503,6 +385,27 @@ typedef struct 
PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; +/******************************************* + DevmemIntReserveRangeAndMapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntReserveRangeAndMapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; + IMG_HANDLE hDevmemServerHeap; + IMG_HANDLE hPMR; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR; + +/* Bridge out structure for DevmemIntReserveRangeAndMapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR; + /******************************************* DevmemIntUnreserveRange *******************************************/ @@ -526,16 +429,12 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG /* Bridge in structure for ChangeSparseMem */ typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG { - IMG_DEV_VIRTADDR sDevVAddr; - IMG_UINT64 ui64CPUVAddr; - IMG_HANDLE hPMR; - IMG_HANDLE hSrvDevMemHeap; + IMG_HANDLE hReservation; IMG_UINT32 *pui32AllocPageIndices; IMG_UINT32 *pui32FreePageIndices; IMG_UINT32 ui32AllocPageCount; IMG_UINT32 ui32FreePageCount; IMG_UINT32 ui32SparseFlags; - PVRSRV_MEMALLOCFLAGS_T uiFlags; } __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; /* Bridge out structure for ChangeSparseMem */ @@ -544,45 +443,6 @@ typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; -/******************************************* - DevmemIntMapPages - *******************************************/ - -/* Bridge in structure for DevmemIntMapPages */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG -{ - IMG_DEV_VIRTADDR sDevVAddr; - IMG_HANDLE hPMR; - IMG_HANDLE hReservation; - IMG_UINT32 ui32PageCount; - IMG_UINT32 ui32PhysicalPgOffset; - PVRSRV_MEMALLOCFLAGS_T uiFlags; -} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES; - -/* Bridge out structure for DevmemIntMapPages */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES; - -/******************************************* - DevmemIntUnmapPages - *******************************************/ - -/* Bridge in structure for DevmemIntUnmapPages */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG -{ - IMG_DEV_VIRTADDR sDevVAddr; - IMG_HANDLE hReservation; - IMG_UINT32 ui32PageCount; -} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES; - -/* Bridge out structure for DevmemIntUnmapPages */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES; - /******************************************* DevmemIsVDevAddrValid *******************************************/ @@ -600,25 +460,6 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; -/******************************************* - DevmemFlushDevSLCRange - *******************************************/ - -/* Bridge in structure for DevmemFlushDevSLCRange */ -typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG -{ - IMG_DEV_VIRTADDR sAddress; - IMG_DEVMEM_SIZE_T uiSize; - IMG_HANDLE hDevmemCtx; - IMG_BOOL bInvalidate; -} __packed PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE; - -/* Bridge out 
structure for DevmemFlushDevSLCRange */ -typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE; - /******************************************* DevmemInvalidateFBSCTable *******************************************/ @@ -721,8 +562,7 @@ typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG /* Bridge in structure for DevmemIntRegisterPFNotifyKM */ typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG { - IMG_HANDLE hDevm; - IMG_UINT32 ui32PID; + IMG_HANDLE hDevmemCtx; IMG_BOOL bRegister; } __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; @@ -732,23 +572,6 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; -/******************************************* - GetMaxPhysHeapCount - *******************************************/ - -/* Bridge in structure for GetMaxPhysHeapCount */ -typedef struct PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT_TAG -{ - IMG_UINT32 ui32EmptyStructPlaceholder; -} __packed PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT; - -/* Bridge out structure for GetMaxPhysHeapCount */ -typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG -{ - PVRSRV_ERROR eError; - IMG_UINT32 ui32PhysHeapCount; -} __packed PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT; - /******************************************* PhysHeapGetMemInfo *******************************************/ @@ -756,7 +579,7 @@ typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG /* Bridge in structure for PhysHeapGetMemInfo */ typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG { - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; + PHYS_HEAP_MEM_STATS_V1 *pasapPhysHeapMemStats; PVRSRV_PHYS_HEAP *peaPhysHeapID; IMG_UINT32 ui32PhysHeapCount; } __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO; @@ -764,7 +587,7 @@ typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG /* Bridge out structure for PhysHeapGetMemInfo */ typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO_TAG { - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; + PHYS_HEAP_MEM_STATS_V1 *pasapPhysHeapMemStats; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO; @@ -785,24 +608,6 @@ typedef struct PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP_TAG PVRSRV_PHYS_HEAP eHeap; } __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP; -/******************************************* - GetHeapPhysMemUsage - *******************************************/ - -/* Bridge in structure for GetHeapPhysMemUsage */ -typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE_TAG -{ - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; - IMG_UINT32 ui32PhysHeapCount; -} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE; - -/* Bridge out structure for GetHeapPhysMemUsage */ -typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE_TAG -{ - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE; - /******************************************* DevmemGetFaultAddress *******************************************/ @@ -821,20 +626,132 @@ typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG } __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; /******************************************* - PVRSRVUpdateOOMStats + PVRSRVStatsUpdateOOMStat *******************************************/ -/* Bridge in structure for PVRSRVUpdateOOMStats */ -typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG +/* Bridge in structure for PVRSRVStatsUpdateOOMStat */ +typedef struct 
PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT_TAG { IMG_PID ui32pid; IMG_UINT32 ui32ui32StatType; -} __packed PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS; +} __packed PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT; + +/* Bridge out structure for PVRSRVStatsUpdateOOMStat */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT; + +/******************************************* + DevmemXIntReserveRange + *******************************************/ + +/* Bridge in structure for DevmemXIntReserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; + IMG_HANDLE hDevmemServerHeap; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE; + +/* Bridge out structure for DevmemXIntReserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE; + +/******************************************* + DevmemXIntUnreserveRange + *******************************************/ + +/* Bridge in structure for DevmemXIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE; + +/* Bridge out structure for DevmemXIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE; + +/******************************************* + DevmemXIntMapPages + *******************************************/ + +/* Bridge in structure for DevmemXIntMapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES_TAG +{ + IMG_HANDLE hPMR; + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32PhysPageOffset; + IMG_UINT32 ui32VirtPageOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES; + +/* Bridge out structure for DevmemXIntMapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES; + +/******************************************* + DevmemXIntUnmapPages + *******************************************/ + +/* Bridge in structure for DevmemXIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES_TAG +{ + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32VirtPageOffset; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES; + +/* Bridge out structure for DevmemXIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES; + +/******************************************* + DevmemXIntMapVRangeToBackingPage + *******************************************/ + +/* Bridge in structure for DevmemXIntMapVRangeToBackingPage */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE_TAG +{ + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32VirtPageOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE; + +/* Bridge out structure for DevmemXIntMapVRangeToBackingPage */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE; + +/******************************************* + PhysHeapGetMemInfo2 + *******************************************/ + +/* Bridge 
in structure for PhysHeapGetMemInfo2 */ +typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2_TAG +{ + PHYS_HEAP_MEM_STATS_V2 *pasapPhysHeapMemStats; + PVRSRV_PHYS_HEAP *peaPhysHeapID; + IMG_UINT32 ui32PhysHeapCount; +} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2; -/* Bridge out structure for PVRSRVUpdateOOMStats */ -typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG +/* Bridge out structure for PhysHeapGetMemInfo2 */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2_TAG { + PHYS_HEAP_MEM_STATS_V2 *pasapPhysHeapMemStats; PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS; +} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2; #endif /* COMMON_MM_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/server_mm_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/server_mm_bridge.c index a983224d0bdd..a79a92f5e9d1 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/server_mm_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mm_bridge/server_mm_bridge.c @@ -68,17 +68,20 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include +#if defined(SUPPORT_INSECURE_EXPORT) static PVRSRV_ERROR ReleasePMRExport(void *pvData) { PVR_UNREFERENCED_PARAMETER(pvData); return PVRSRV_OK; } +#endif /* *************************************************************************** * Server-side bridge entry points */ +#if defined(SUPPORT_INSECURE_EXPORT) static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -238,23 +241,28 @@ PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psPMRExportInt = NULL; /* Release now we have cleaned up creation handles. 
*/ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); } - if (psPMRExportInt) + else if (psPMRExportInt) { LockHandle(KERNEL_HANDLE_BASE); PMRUnexportPMR(psPMRExportInt); UnlockHandle(KERNEL_HANDLE_BASE); } + } return 0; } +#else +#define PVRSRVBridgePMRExportPMR NULL +#endif + +#if defined(SUPPORT_INSECURE_EXPORT) + static IMG_INT PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psPMRUnexportPMRIN_UI8, @@ -346,6 +354,10 @@ PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#else +#define PVRSRVBridgePMRUnexportPMR NULL +#endif + static IMG_INT PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psPMRGetUIDIN_UI8, @@ -527,6 +539,7 @@ PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#if defined(SUPPORT_INSECURE_EXPORT) static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -622,6 +635,10 @@ PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#else +#define PVRSRVBridgePMRImportPMR NULL +#endif + static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -751,42 +768,6 @@ PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, return 0; } -static IMG_INT -PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, - IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = - (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = - (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psPMRUnrefUnlockPMROUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR, - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) && - (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto PMRUnrefUnlockPMR_exit; - } - - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); - -PMRUnrefUnlockPMR_exit: - - return 0; -} - static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) { PVRSRV_ERROR eError; @@ -818,9 +799,7 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -850,7 +829,6 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long)); @@ -866,7 +844,6 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -983,385 +960,351 @@ PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void *pvData) +static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = PMRUnrefUnlockPMR((PMR *) pvData); + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); return eError; } -static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, - "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, - "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); - static IMG_INT -PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPhysmemNewRamBackedLockedPMRIN_UI8, - IMG_UINT8 * psPhysmemNewRamBackedLockedPMROUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, + IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN = - (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) - IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT = - (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) - IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0); - - IMG_UINT32 *ui32MappingTableInt = NULL; - IMG_CHAR *uiAnnotationInt = NULL; - PMR *psPMRPtrInt = NULL; + PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, + 0); - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * - sizeof(IMG_UINT32)) + - ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * - sizeof(IMG_CHAR)) + 0; + psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; - if (unlikely - (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) + psDevmemIntCtxCreateOUT->eError = + DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), + psDevmemIntCtxCreateIN->bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) { - psPhysmemNewRamBackedLockedPMROUT->eError = 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysmemNewRamBackedLockedPMR_exit; + goto DevmemIntCtxCreate_exit; } - if (unlikely - (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + (void *) + psDevMemServerContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntCtxCreatepsDevMemServerContextIntRelease); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysmemNewRamBackedLockedPMR_exit; + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; } - if (ui64BufferSize > IMG_UINT32_MAX) + psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hPrivData, + (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psDevmemIntCtxCreateOUT-> + hDevMemServerContext); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto PhysmemNewRamBackedLockedPMR_exit; + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; } - ui32BufferSize = (IMG_UINT32) ui64BufferSize; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; +DevmemIntCtxCreate_exit: - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) + if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemIntCtxCreateOUT->hDevMemServerContext) { - IMG_BYTE *pInputBuffer = - (IMG_BYTE *) (void *)psPhysmemNewRamBackedLockedPMRIN; + PVRSRV_ERROR eError; - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); - if (!pArrayArgsBuffer) + eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) { - psPhysmemNewRamBackedLockedPMROUT->eError = - PVRSRV_ERROR_OUT_OF_MEMORY; - goto PhysmemNewRamBackedLockedPMR_exit; + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); } - } - } - - if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0) - { - ui32MappingTableInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32); - } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... 
*/ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Copy the data over */ - if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32MappingTableInt, - (const void __user *)psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, - psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != - PVRSRV_OK) - { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); - goto PhysmemNewRamBackedLockedPMR_exit; } - } - if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0) - { - uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); - } - /* Copy the data over */ - if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) - { - if (OSCopyFromUser - (NULL, uiAnnotationInt, - (const void __user *)psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, - psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != - PVRSRV_OK) + else if (psDevMemServerContextInt) { - psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto PhysmemNewRamBackedLockedPMR_exit; + DevmemIntCtxDestroy(psDevMemServerContextInt); } - ((IMG_CHAR *) - uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * - sizeof(IMG_CHAR)) - 1] = '\0'; - } - psPhysmemNewRamBackedLockedPMROUT->eError = - PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevNode(psConnection), - psPhysmemNewRamBackedLockedPMRIN->uiSize, - psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks, - psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks, - ui32MappingTableInt, - psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize, - psPhysmemNewRamBackedLockedPMRIN->uiFlags, - psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength, - uiAnnotationInt, - psPhysmemNewRamBackedLockedPMRIN->ui32PID, - &psPMRPtrInt, - psPhysmemNewRamBackedLockedPMRIN->ui32PDumpFlags, - &psPhysmemNewRamBackedLockedPMROUT->uiOutFlags); - /* Exit early if bridged call fails */ - if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) - { - goto PhysmemNewRamBackedLockedPMR_exit; } - /* Lock over handle creation. */ + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, + IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, + 0); + + /* Lock over handle destruction. 
*/ LockHandle(psConnection->psHandleBase); - psPhysmemNewRamBackedLockedPMROUT->eError = - PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr, - (void *)psPMRPtrInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease); - if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + psDevmemIntCtxDestroyOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntCtxDestroyIN-> + hDevmemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto PhysmemNewRamBackedLockedPMR_exit; + goto DevmemIntCtxDestroy_exit; } - /* Release now we have created handles. */ + /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); -PhysmemNewRamBackedLockedPMR_exit: - - if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK) - { - if (psPMRPtrInt) - { - LockHandle(KERNEL_HANDLE_BASE); - PMRUnrefUnlockPMR(psPMRPtrInt); - UnlockHandle(KERNEL_HANDLE_BASE); - } - } - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psPhysmemNewRamBackedLockedPMROUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); +DevmemIntCtxDestroy_exit: return 0; } +static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + return eError; +} + static IMG_INT -PVRSRVBridgeDevmemCompatReserve1(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve1IN_UI8, - IMG_UINT8 * psDevmemCompatReserve1OUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, + IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1 *psDevmemCompatReserve1IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE1 *) IMG_OFFSET_ADDR(psDevmemCompatReserve1IN_UI8, + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1 *psDevmemCompatReserve1OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE1 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve1OUT_UI8, 0); - IMG_HANDLE hPMR = psDevmemCompatReserve1IN->hPMR; - PMR *psPMRInt = NULL; + IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemCompatReserve1OUT->eError = + psDevmemIntHeapCreateOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve1OUT->eError != PVRSRV_OK)) + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve1_exit; + goto DevmemIntHeapCreate_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemCompatReserve1OUT->eError = DevmemCompatReserve1(psPMRInt); - -DevmemCompatReserve1_exit: + psDevmemIntHeapCreateOUT->eError = + DevmemIntHeapCreate(psDevmemCtxInt, + psDevmemIntHeapCreateIN->ui32HeapConfigIndex, + psDevmemIntHeapCreateIN->ui32HeapIndex, &psDevmemHeapPtrInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntHeapCreate_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntHeapCreateOUT-> + hDevmemHeapPtr, + (void *)psDevmemHeapPtrInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapCreate_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psPMRInt) + if (psDevmemCtxInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); + if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemHeapPtrInt) + { + DevmemIntHeapDestroy(psDevmemHeapPtrInt); + } + } + return 0; } static IMG_INT -PVRSRVBridgeDevmemCompatReserve2(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve2IN_UI8, - IMG_UINT8 * psDevmemCompatReserve2OUT_UI8, +PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, + IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2 *psDevmemCompatReserve2IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE2 *) IMG_OFFSET_ADDR(psDevmemCompatReserve2IN_UI8, + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2 *psDevmemCompatReserve2OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE2 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve2OUT_UI8, 0); - - IMG_HANDLE hPMR = psDevmemCompatReserve2IN->hPMR; - PMR *psPMRInt = NULL; + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); - /* Lock over handle lookup. */ + /* Lock over handle destruction. 
*/ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psDevmemCompatReserve2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve2OUT->eError != PVRSRV_OK)) + psDevmemIntHeapDestroyOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && + (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve2_exit; + goto DevmemIntHeapDestroy_exit; } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemCompatReserve2OUT->eError = DevmemCompatReserve2(psPMRInt); -DevmemCompatReserve2_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ + /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); +DevmemIntHeapDestroy_exit: + return 0; } static IMG_INT -PVRSRVBridgeDevmemCompatReserve3(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve3IN_UI8, - IMG_UINT8 * psDevmemCompatReserve3OUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3 *psDevmemCompatReserve3IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE3 *) IMG_OFFSET_ADDR(psDevmemCompatReserve3IN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3 *psDevmemCompatReserve3OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE3 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve3OUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); - IMG_HANDLE hMapping = psDevmemCompatReserve3IN->hMapping; - DEVMEMINT_MAPPING *psMappingInt = NULL; - IMG_HANDLE hPMR = psDevmemCompatReserve3IN->hPMR; + IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; PMR *psPMRInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemCompatReserve3OUT->eError = + psDevmemIntMapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psMappingInt, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); - if (unlikely(psDevmemCompatReserve3OUT->eError != PVRSRV_OK)) + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve3_exit; + goto DevmemIntMapPMR_exit; } /* Look up the address from the handle */ - psDevmemCompatReserve3OUT->eError = + psDevmemIntMapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psPMRInt, hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve3OUT->eError != PVRSRV_OK)) + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve3_exit; + goto DevmemIntMapPMR_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemCompatReserve3OUT->eError = DevmemCompatReserve3(psMappingInt, psPMRInt); + psDevmemIntMapPMROUT->eError = DevmemIntMapPMR(psReservationInt, psPMRInt); -DevmemCompatReserve3_exit: +DevmemIntMapPMR_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psMappingInt) + if (psReservationInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } /* Unreference the previously looked up handle */ @@ -1377,69 +1320,48 @@ PVRSRVBridgeDevmemCompatReserve3(IMG_UINT32 ui32DispatchTableEntry, } static IMG_INT -PVRSRVBridgeDevmemCompatReserve4(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemCompatReserve4IN_UI8, - IMG_UINT8 * psDevmemCompatReserve4OUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4 *psDevmemCompatReserve4IN = - (PVRSRV_BRIDGE_IN_DEVMEMCOMPATRESERVE4 *) IMG_OFFSET_ADDR(psDevmemCompatReserve4IN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4 *psDevmemCompatReserve4OUT = - (PVRSRV_BRIDGE_OUT_DEVMEMCOMPATRESERVE4 *) - IMG_OFFSET_ADDR(psDevmemCompatReserve4OUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); - IMG_HANDLE hMapping = psDevmemCompatReserve4IN->hMapping; - DEVMEMINT_MAPPING *psMappingInt = NULL; - IMG_HANDLE hPMR = psDevmemCompatReserve4IN->hPMR; - PMR *psPMRInt = NULL; + IMG_HANDLE hReservation = psDevmemIntUnmapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemCompatReserve4OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psMappingInt, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); - if (unlikely(psDevmemCompatReserve4OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve4_exit; - } - - /* Look up the address from the handle */ - psDevmemCompatReserve4OUT->eError = + psDevmemIntUnmapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemCompatReserve4OUT->eError != PVRSRV_OK)) + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntUnmapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemCompatReserve4_exit; + goto DevmemIntUnmapPMR_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemCompatReserve4OUT->eError = DevmemCompatReserve4(psMappingInt, psPMRInt); + psDevmemIntUnmapPMROUT->eError = DevmemIntUnmapPMR(psReservationInt); -DevmemCompatReserve4_exit: +DevmemIntUnmapPMR_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psMappingInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) + if (psReservationInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); @@ -1447,249 +1369,218 @@ PVRSRVBridgeDevmemCompatReserve4(IMG_UINT32 ui32DispatchTableEntry, return 0; } -static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) +static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); return eError; } static IMG_INT -PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, - IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, - 0); + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); - DEVMEMINT_CTX *psDevMemServerContextInt = NULL; - IMG_HANDLE hPrivDataInt = NULL; + IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; - psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - psDevmemIntCtxCreateOUT->eError = - DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), - psDevmemIntCtxCreateIN->bbKernelMemoryCtx, - &psDevMemServerContextInt, - &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Look up the address from the handle */ + psDevmemIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + DevmemIntReserveRange(psConnection, OSGetDevNode(psConnection), + psDevmemServerHeapInt, + psDevmemIntReserveRangeIN->sAddress, + psDevmemIntReserveRangeIN->uiLength, + psDevmemIntReserveRangeIN->uiFlags, &psReservationInt); /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) { - goto DevmemIntCtxCreate_exit; + goto DevmemIntReserveRange_exit; } /* Lock over handle creation. 
*/ LockHandle(psConnection->psHandleBase); - psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntCtxCreateOUT-> - hDevMemServerContext, - (void *) - psDevMemServerContextInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntCtxCreatepsDevMemServerContextIntRelease); - if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntCtxCreate_exit; - } - - psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntCtxCreateOUT-> - hPrivData, - (void *)hPrivDataInt, - PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeOUT-> + hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - psDevmemIntCtxCreateOUT-> - hDevMemServerContext); - if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntCtxCreate_exit; + goto DevmemIntReserveRange_exit; } /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); -DevmemIntCtxCreate_exit: +DevmemIntReserveRange_exit: - if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) { - if (psDevmemIntCtxCreateOUT->hDevMemServerContext) + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) { - PVRSRV_ERROR eError; - - /* Lock over handle creation cleanup. */ - LockHandle(psConnection->psHandleBase); - - eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntCtxCreateOUT-> - hDevMemServerContext, - PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(eError))); - } - /* Releasing the handle should free/destroy/release the resource. - * This should never fail... */ - PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - - /* Avoid freeing/destroying/releasing the resource a second time below */ - psDevMemServerContextInt = NULL; - /* Release now we have cleaned up creation handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - } - - if (psDevMemServerContextInt) - { - DevmemIntCtxDestroy(psDevMemServerContextInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, - IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, - 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntCtxDestroyOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntCtxDestroyIN-> - hDevmemServerContext, - PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - if (unlikely - ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) - && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntCtxDestroy_exit; - } - - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntCtxDestroy_exit: + DevmemIntUnreserveRange(psReservationInt); + } + } return 0; } -static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +static PVRSRV_ERROR _DevmemIntReserveRangeAndMapPMRpsReservationIntRelease(void *pvData) { PVRSRV_ERROR eError; - eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); return eError; } static IMG_INT -PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, - IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntReserveRangeAndMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeAndMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeAndMapPMROUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, - 0); - - IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; - DEVMEMINT_CTX *psDevmemCtxInt = NULL; - DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR *psDevmemIntReserveRangeAndMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeAndMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR *psDevmemIntReserveRangeAndMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeAndMapPMROUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeAndMapPMRIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + IMG_HANDLE hPMR = psDevmemIntReserveRangeAndMapPMRIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemIntHeapCreateOUT->eError = + psDevmemIntReserveRangeAndMapPMROUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemCtxInt, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntHeapCreate_exit; + goto DevmemIntReserveRangeAndMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntReserveRangeAndMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRangeAndMapPMR_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemIntHeapCreateOUT->eError = - DevmemIntHeapCreate(psDevmemCtxInt, - psDevmemIntHeapCreateIN->ui32HeapConfigIndex, - psDevmemIntHeapCreateIN->ui32HeapIndex, - psDevmemIntHeapCreateIN->sHeapBaseAddr, - psDevmemIntHeapCreateIN->uiHeapLength, - psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt); + psDevmemIntReserveRangeAndMapPMROUT->eError = + DevmemIntReserveRangeAndMapPMR(psConnection, OSGetDevNode(psConnection), + psDevmemServerHeapInt, + psDevmemIntReserveRangeAndMapPMRIN->sAddress, + psDevmemIntReserveRangeAndMapPMRIN->uiLength, + psPMRInt, + psDevmemIntReserveRangeAndMapPMRIN->uiFlags, + &psReservationInt); /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) { - goto DevmemIntHeapCreate_exit; + goto DevmemIntReserveRangeAndMapPMR_exit; } /* Lock over handle creation. */ LockHandle(psConnection->psHandleBase); - psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntHeapCreateOUT-> - hDevmemHeapPtr, - (void *)psDevmemHeapPtrInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); - if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + psDevmemIntReserveRangeAndMapPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeAndMapPMROUT->hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangeAndMapPMRpsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntHeapCreate_exit; + goto DevmemIntReserveRangeAndMapPMR_exit; } /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); -DevmemIntHeapCreate_exit: +DevmemIntReserveRangeAndMapPMR_exit: /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psDevmemCtxInt) + if (psDevmemServerHeapInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); - if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + if (psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK) { - if (psDevmemHeapPtrInt) + if (psReservationInt) { - DevmemIntHeapDestroy(psDevmemHeapPtrInt); + DevmemIntUnreserveRange(psReservationInt); } } @@ -1697,406 +1588,97 @@ PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, } static IMG_INT -PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, - IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) - IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); /* Lock over handle destruction. */ LockHandle(psConnection->psHandleBase); - psDevmemIntHeapDestroyOUT->eError = + psDevmemIntUnreserveRangeOUT->eError = PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && - (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + (IMG_HANDLE) psDevmemIntUnreserveRangeIN-> + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + if (unlikely + ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) + && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) { PVR_DPF((PVR_DBG_ERROR, "%s: %s", - __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); + __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto DevmemIntHeapDestroy_exit; + goto DevmemIntUnreserveRange_exit; } /* Release now we have destroyed handles. 
*/ UnlockHandle(psConnection->psHandleBase); -DevmemIntHeapDestroy_exit: +DevmemIntUnreserveRange_exit: return 0; } -static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); - return eError; -} +static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, + "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); +static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, + "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntMapPMRIN_UI8, - IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) +PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psChangeSparseMemIN_UI8, + IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); + PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = + (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = + (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); - IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; - DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; - IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + IMG_HANDLE hReservation = psChangeSparseMemIN->hReservation; DEVMEMINT_RESERVATION *psReservationInt = NULL; - IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; - PMR *psPMRInt = NULL; - DEVMEMINT_MAPPING *psMappingInt = NULL; - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; - /* Look up the address from the handle */ - psDevmemIntMapPMROUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemServerHeapInt, - hDevmemServerHeap, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; } - /* Look up the address from the handle */ - psDevmemIntMapPMROUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psReservationInt, - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; } - /* Look up the address from the handle */ - psDevmemIntMapPMROUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + if (ui64BufferSize > IMG_UINT32_MAX) { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemIntMapPMROUT->eError = - DevmemIntMapPMR(psDevmemServerHeapInt, - psReservationInt, - psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); - /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) - { - goto DevmemIntMapPMR_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntMapPMROUT->hMapping, - (void *)psMappingInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntMapPMRpsMappingIntRelease); - if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPMR_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntMapPMR_exit: - - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemServerHeapInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - } - - /* Unreference the previously looked up handle */ - if (psReservationInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) - { - if (psMappingInt) - { - DevmemIntUnmapPMR(psMappingInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, - IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntUnmapPMROUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping, - PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); - if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) && - (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntUnmapPMR_exit; - } - - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntUnmapPMR_exit: - - return 0; -} - -static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); - return eError; -} - -static IMG_INT -PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, - IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); - - IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; - DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; - DEVMEMINT_RESERVATION *psReservationInt = NULL; - - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemIntReserveRangeOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemServerHeapInt, - hDevmemServerHeap, - PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); - if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntReserveRange_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemIntReserveRangeOUT->eError = - DevmemIntReserveRange(psDevmemServerHeapInt, - psDevmemIntReserveRangeIN->sAddress, - psDevmemIntReserveRangeIN->uiLength, &psReservationInt); - /* Exit early if bridged call fails */ - if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) - { - goto DevmemIntReserveRange_exit; - } - - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psDevmemIntReserveRangeOUT-> - hReservation, - (void *)psReservationInt, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _DevmemIntReserveRangepsReservationIntRelease); - if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntReserveRange_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntReserveRange_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemServerHeapInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) - { - if (psReservationInt) - { - DevmemIntUnreserveRange(psReservationInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, - IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) - IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psDevmemIntUnreserveRangeOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psDevmemIntUnreserveRangeIN-> - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - if (unlikely - ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) - && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntUnreserveRange_exit; - } - - /* Release now we have destroyed handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - -DevmemIntUnreserveRange_exit: - - return 0; -} - -static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, - "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, - "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psChangeSparseMemIN_UI8, - IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = - (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); - PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = - (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); - - IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; - DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; - IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; - PMR *psPMRInt = NULL; - IMG_UINT32 *ui32AllocPageIndicesInt = NULL; - IMG_UINT32 *ui32FreePageIndicesInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; - - if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) - { - psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto ChangeSparseMem_exit; - } - - if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) - { - psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto ChangeSparseMem_exit; - } - - if (ui64BufferSize > IMG_UINT32_MAX) - { - psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto ChangeSparseMem_exit; + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto ChangeSparseMem_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long)); @@ -2112,7 +1694,6 @@ PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -2162,199 +1743,34 @@ PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; goto ChangeSparseMem_exit; - } - } - - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psChangeSparseMemOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psSrvDevMemHeapInt, - hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); - if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto ChangeSparseMem_exit; - } - - /* Look up the address from the handle */ - psChangeSparseMemOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto ChangeSparseMem_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psChangeSparseMemOUT->eError = - DevmemIntChangeSparse(psSrvDevMemHeapInt, - psPMRInt, - psChangeSparseMemIN->ui32AllocPageCount, - ui32AllocPageIndicesInt, - psChangeSparseMemIN->ui32FreePageCount, - ui32FreePageIndicesInt, - psChangeSparseMemIN->ui32SparseFlags, - psChangeSparseMemIN->uiFlags, - psChangeSparseMemIN->sDevVAddr, - psChangeSparseMemIN->ui64CPUVAddr); - -ChangeSparseMem_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psSrvDevMemHeapInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psChangeSparseMemOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntMapPagesIN_UI8, - IMG_UINT8 * psDevmemIntMapPagesOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0); - - IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation; - DEVMEMINT_RESERVATION *psReservationInt = NULL; - IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR; - PMR *psPMRInt = NULL; - - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemIntMapPagesOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psReservationInt, - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPages_exit; - } - - /* Look up the address from the handle */ - psDevmemIntMapPagesOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemIntMapPages_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemIntMapPagesOUT->eError = - DevmemIntMapPages(psReservationInt, - psPMRInt, - psDevmemIntMapPagesIN->ui32PageCount, - psDevmemIntMapPagesIN->ui32PhysicalPgOffset, - psDevmemIntMapPagesIN->uiFlags, psDevmemIntMapPagesIN->sDevVAddr); - -DevmemIntMapPages_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psReservationInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); - } - - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -static IMG_INT -PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8, - IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8, - 0); - - IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation; - DEVMEMINT_RESERVATION *psReservationInt = NULL; + } + } /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemIntUnmapPagesOUT->eError = + psChangeSparseMemOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psReservationInt, hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)) + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntUnmapPages_exit; + goto ChangeSparseMem_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psDevmemIntUnmapPagesOUT->eError = - DevmemIntUnmapPages(psReservationInt, - psDevmemIntUnmapPagesIN->sDevVAddr, - psDevmemIntUnmapPagesIN->ui32PageCount); + psChangeSparseMemOUT->eError = + DevmemIntChangeSparse(psChangeSparseMemIN->ui32AllocPageCount, + ui32AllocPageIndicesInt, + psChangeSparseMemIN->ui32FreePageCount, + ui32FreePageIndicesInt, + psChangeSparseMemIN->ui32SparseFlags, psReservationInt); -DevmemIntUnmapPages_exit: +ChangeSparseMem_exit: /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); @@ -2368,6 +1784,15 @@ PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psChangeSparseMemOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + return 0; } @@ -2424,67 +1849,6 @@ PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, return 0; } -#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) - -static IMG_INT -PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8, - IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN = - (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *) - IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *) - IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0); - - IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx; - DEVMEMINT_CTX *psDevmemCtxInt = NULL; - - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemFlushDevSLCRangeOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemCtxInt, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemFlushDevSLCRange_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemFlushDevSLCRangeOUT->eError = - DevmemIntFlushDevSLCRange(psDevmemCtxInt, - psDevmemFlushDevSLCRangeIN->sAddress, - psDevmemFlushDevSLCRangeIN->uiSize, - psDevmemFlushDevSLCRangeIN->bInvalidate); - -DevmemFlushDevSLCRange_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemCtxInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -#else -#define PVRSRVBridgeDevmemFlushDevSLCRange NULL -#endif - #if defined(RGX_FEATURE_FBCDC) static IMG_INT @@ -2604,9 +1968,7 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -2632,7 +1994,6 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long)); @@ -2648,7 +2009,6 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -2704,11 +2064,7 @@ PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -2733,9 +2089,7 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -2759,7 +2113,6 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long)); @@ -2775,7 +2128,6 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -2786,111 +2138,324 @@ PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, } } } - - if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + { + puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); + } + + psHeapCfgHeapDetailsOUT->eError = + HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, + psHeapCfgHeapDetailsIN->ui32HeapIndex, + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, + puiHeapNameOutInt, + &psHeapCfgHeapDetailsOUT->sDevVAddrBase, + &psHeapCfgHeapDetailsOUT->uiHeapLength, + &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, + &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, + &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); + /* Exit early if bridged call fails */ + if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)) + { + goto HeapCfgHeapDetails_exit; + } + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapNameOutInt) && + ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, + puiHeapNameOutInt, + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapDetails_exit; + } + } + +HeapCfgHeapDetails_exit: + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, + IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, + 
CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntRegisterPFNotifyKMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntRegisterPFNotifyKM_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntRegisterPFNotifyKMOUT->eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, psDevmemIntRegisterPFNotifyKMIN->bRegister); + +DevmemIntRegisterPFNotifyKM_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, + "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8, + IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN = + (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT = + (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8, + 0); + + PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; + PHYS_HEAP_MEM_STATS_V1 *pasapPhysHeapMemStatsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + + ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V1)) + 0; + + if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysHeapGetMemInfo_exit; + } + + psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats = + psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats; + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto PhysHeapGetMemInfo_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysHeapGetMemInfo_exit; + } + } + } + + if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) + { + eaPhysHeapIDInt = + (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); + } + + /* Copy the data over */ + if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) + { + if (OSCopyFromUser + (NULL, eaPhysHeapIDInt, + (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID, + psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != + PVRSRV_OK) + { + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysHeapGetMemInfo_exit; + } + } + if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) { - puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); + pasapPhysHeapMemStatsInt = + (PHYS_HEAP_MEM_STATS_V1 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V1); } - psHeapCfgHeapDetailsOUT->eError = - HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), - psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, - psHeapCfgHeapDetailsIN->ui32HeapIndex, - psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, - puiHeapNameOutInt, - &psHeapCfgHeapDetailsOUT->sDevVAddrBase, - &psHeapCfgHeapDetailsOUT->uiHeapLength, - &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, - &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, - &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); + psPhysHeapGetMemInfoOUT->eError = + PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection), + psPhysHeapGetMemInfoIN->ui32PhysHeapCount, + eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); /* Exit early if bridged call fails */ - if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)) + if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK)) { - goto HeapCfgHeapDetails_exit; + goto PhysHeapGetMemInfo_exit; } /* If dest ptr is non-null and we have data to copy */ - if ((puiHeapNameOutInt) && - ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) + if ((pasapPhysHeapMemStatsInt) && + ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V1)) > 0)) { if (unlikely (OSCopyToUser - (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, - puiHeapNameOutInt, - (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) + (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats, + pasapPhysHeapMemStatsInt, + (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V1))) != PVRSRV_OK)) { - psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto HeapCfgHeapDetails_exit; + 
goto PhysHeapGetMemInfo_exit; } } -HeapCfgHeapDetails_exit: +PhysHeapGetMemInfo_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK) + if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } static IMG_INT -PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, - IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8, + IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = - (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) - IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) - IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN = + (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *) + IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT = + (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *) + IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN); + + psGetDefaultPhysicalHeapOUT->eError = + PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection), + &psGetDefaultPhysicalHeapOUT->eHeap); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, + IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = + (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); - IMG_HANDLE hDevm = psDevmemIntRegisterPFNotifyKMIN->hDevm; - DEVMEMINT_CTX *psDevmInt = NULL; + IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psDevmemIntRegisterPFNotifyKMOUT->eError = + psDevmemGetFaultAddressOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmInt, - hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto DevmemIntRegisterPFNotifyKM_exit; + goto DevmemGetFaultAddress_exit; } /* Release now we have looked up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - psDevmemIntRegisterPFNotifyKMOUT->eError = - DevmemIntRegisterPFNotifyKM(psDevmInt, - psDevmemIntRegisterPFNotifyKMIN->ui32PID, - psDevmemIntRegisterPFNotifyKMIN->bRegister); + psDevmemGetFaultAddressOUT->eError = + DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); -DevmemIntRegisterPFNotifyKM_exit: +DevmemGetFaultAddress_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (psDevmInt) + if (psDevmemCtxInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); @@ -2898,198 +2463,358 @@ PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, return 0; } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + static IMG_INT -PVRSRVBridgeGetMaxPhysHeapCount(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psGetMaxPhysHeapCountIN_UI8, - IMG_UINT8 * psGetMaxPhysHeapCountOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgePVRSRVStatsUpdateOOMStat(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVStatsUpdateOOMStatIN_UI8, + IMG_UINT8 * psPVRSRVStatsUpdateOOMStatOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountIN = - (PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountOUT = - (PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountOUT_UI8, - 0); - - PVR_UNREFERENCED_PARAMETER(psGetMaxPhysHeapCountIN); - - psGetMaxPhysHeapCountOUT->eError = - PVRSRVGetMaxPhysHeapCountKM(psConnection, OSGetDevNode(psConnection), - &psGetMaxPhysHeapCountOUT->ui32PhysHeapCount); + PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatIN = + (PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *) + IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *) + IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatOUT_UI8, 0); + + psPVRSRVStatsUpdateOOMStatOUT->eError = + PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection), + psPVRSRVStatsUpdateOOMStatIN->ui32ui32StatType, + psPVRSRVStatsUpdateOOMStatIN->ui32pid); return 0; } -static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, - "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); +#else +#define PVRSRVBridgePVRSRVStatsUpdateOOMStat NULL +#endif -static IMG_INT -PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8, - IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8, - CONNECTION_DATA * psConnection) +static PVRSRV_ERROR _DevmemXIntReserveRangepsReservationIntRelease(void *pvData) { - PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN = - (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT = - (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8, - 0); + PVRSRV_ERROR eError; + eError = DevmemXIntUnreserveRange((DEVMEMXINT_RESERVATION *) pvData); + return eError; +} - PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; - PHYS_HEAP_MEM_STATS 
*pasapPhysHeapMemStatsInt = NULL; +static IMG_INT +PVRSRVBridgeDevmemXIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemXIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntReserveRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemXIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + - ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) + - 0; + /* Look up the address from the handle */ + psDevmemXIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) + psDevmemXIntReserveRangeOUT->eError = + DevmemXIntReserveRange(psDevmemServerHeapInt, + psDevmemXIntReserveRangeIN->sAddress, + psDevmemXIntReserveRangeIN->uiLength, &psReservationInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto PhysHeapGetMemInfo_exit; + goto DevmemXIntReserveRange_exit; } - psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats = - psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats; + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); - if (ui64BufferSize > IMG_UINT32_MAX) + psDevmemXIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemXIntReserveRangeOUT-> + hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemXIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto PhysHeapGetMemInfo_exit; + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntReserveRange_exit; } - ui32BufferSize = (IMG_UINT32) ui64BufferSize; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; +DevmemXIntReserveRange_exit: - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN; + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); - if (!pArrayArgsBuffer) - { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto PhysHeapGetMemInfo_exit; - } + if (psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) + { + DevmemXIntUnreserveRange(psReservationInt); } } - if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemXIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntUnreserveRangeIN_UI8, + IMG_UINT8 * psDevmemXIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemXIntUnreserveRangeOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemXIntUnreserveRangeIN-> + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); + if (unlikely + ((psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_OK) + && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) { - eaPhysHeapIDInt = - (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemXIntUnreserveRangeOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntUnreserveRange_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemXIntUnreserveRange_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemXIntMapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntMapPagesIN_UI8, + IMG_UINT8 * psDevmemXIntMapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesOUT_UI8, + 0); + + IMG_HANDLE hReservation = psDevmemXIntMapPagesIN->hReservation; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemXIntMapPagesIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemXIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntMapPages_exit; } - /* Copy the data over */ - if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) + /* Look up the address from the handle */ + psDevmemXIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, eaPhysHeapIDInt, - (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID, - psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != - PVRSRV_OK) - { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto PhysHeapGetMemInfo_exit; - } + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntMapPages_exit; } - if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemXIntMapPagesOUT->eError = + DevmemXIntMapPages(psReservationInt, + psPMRInt, + psDevmemXIntMapPagesIN->ui32PageCount, + psDevmemXIntMapPagesIN->ui32PhysPageOffset, + psDevmemXIntMapPagesIN->uiFlags, + psDevmemXIntMapPagesIN->ui32VirtPageOffset); + +DevmemXIntMapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) { - pasapPhysHeapMemStatsInt = - (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); } - psPhysHeapGetMemInfoOUT->eError = - PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection), - psPhysHeapGetMemInfoIN->ui32PhysHeapCount, - eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); - /* Exit early if bridged call fails */ - if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK)) + /* Unreference the previously looked up handle */ + if (psPMRInt) { - goto PhysHeapGetMemInfo_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - /* If dest ptr is non-null and we have data to copy */ - if ((pasapPhysHeapMemStatsInt) && - ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) - { - if (unlikely - (OSCopyToUser - (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats, - pasapPhysHeapMemStatsInt, - (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != - PVRSRV_OK)) - { - psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; +} - goto PhysHeapGetMemInfo_exit; - } +static IMG_INT +PVRSRVBridgeDevmemXIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntUnmapPagesIN_UI8, + IMG_UINT8 * psDevmemXIntUnmapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *) + IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemXIntUnmapPagesIN->hReservation; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemXIntUnmapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemXIntUnmapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntUnmapPages_exit; } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); -PhysHeapGetMemInfo_exit: + psDevmemXIntUnmapPagesOUT->eError = + DevmemXIntUnmapPages(psReservationInt, + psDevmemXIntUnmapPagesIN->ui32VirtPageOffset, + psDevmemXIntUnmapPagesIN->ui32PageCount); - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ +DevmemXIntUnmapPages_exit: -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); return 0; } static IMG_INT -PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8, - IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeDevmemXIntMapVRangeToBackingPage(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemXIntMapVRangeToBackingPageIN_UI8, + IMG_UINT8 * psDevmemXIntMapVRangeToBackingPageOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN = - (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *) - IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0); - PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT = - (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *) - IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0); + PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE *psDevmemXIntMapVRangeToBackingPageIN = + (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE *) + IMG_OFFSET_ADDR(psDevmemXIntMapVRangeToBackingPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE *psDevmemXIntMapVRangeToBackingPageOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE *) + IMG_OFFSET_ADDR(psDevmemXIntMapVRangeToBackingPageOUT_UI8, 0); - PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN); + IMG_HANDLE hReservation = psDevmemXIntMapVRangeToBackingPageIN->hReservation; + DEVMEMXINT_RESERVATION *psReservationInt = NULL; - psGetDefaultPhysicalHeapOUT->eError = - PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection), - &psGetDefaultPhysicalHeapOUT->eHeap); + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemXIntMapVRangeToBackingPageOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemXIntMapVRangeToBackingPageOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemXIntMapVRangeToBackingPage_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemXIntMapVRangeToBackingPageOUT->eError = + DevmemXIntMapVRangeToBackingPage(psReservationInt, + psDevmemXIntMapVRangeToBackingPageIN->ui32PageCount, + psDevmemXIntMapVRangeToBackingPageIN->uiFlags, + psDevmemXIntMapVRangeToBackingPageIN-> + ui32VirtPageOffset); + +DevmemXIntMapVRangeToBackingPage_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); return 0; } @@ -3098,54 +2823,53 @@ static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psGetHeapPhysMemUsageIN_UI8, - IMG_UINT8 * psGetHeapPhysMemUsageOUT_UI8, +PVRSRVBridgePhysHeapGetMemInfo2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysHeapGetMemInfo2IN_UI8, + IMG_UINT8 * psPhysHeapGetMemInfo2OUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageIN = - (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageIN_UI8, + PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2 *psPhysHeapGetMemInfo2IN = + (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2 *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfo2IN_UI8, 0); - PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageOUT = - (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageOUT_UI8, + PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2 *psPhysHeapGetMemInfo2OUT = + (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2 *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfo2OUT_UI8, 0); - PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL; + PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; + PHYS_HEAP_MEM_STATS_V2 *pasapPhysHeapMemStatsInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psGetHeapPhysMemUsageIN->ui32PhysHeapCount * - sizeof(PHYS_HEAP_MEM_STATS)) + 0; + ((IMG_UINT64) psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + + ((IMG_UINT64) psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V2)) + 0; - if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST) + if (unlikely(psPhysHeapGetMemInfo2IN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto GetHeapPhysMemUsage_exit; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysHeapGetMemInfo2_exit; } - psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats = - psGetHeapPhysMemUsageIN->pasapPhysHeapMemStats; + psPhysHeapGetMemInfo2OUT->pasapPhysHeapMemStats = + psPhysHeapGetMemInfo2IN->pasapPhysHeapMemStats; if (ui64BufferSize > IMG_UINT32_MAX) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto GetHeapPhysMemUsage_exit; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto PhysHeapGetMemInfo2_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psGetHeapPhysMemUsageIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psPhysHeapGetMemInfo2IN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -3153,156 +2877,93 @@ PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsageIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfo2IN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto GetHeapPhysMemUsage_exit; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysHeapGetMemInfo2_exit; } } } - if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount != 0) + if (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount != 0) + { + eaPhysHeapIDInt = + (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); + } + + /* Copy the data over */ + if (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) + { + if (OSCopyFromUser + (NULL, eaPhysHeapIDInt, + (const void __user *)psPhysHeapGetMemInfo2IN->peaPhysHeapID, + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != + PVRSRV_OK) + { + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysHeapGetMemInfo2_exit; + } + } + if (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount != 0) { pasapPhysHeapMemStatsInt = - (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + (PHYS_HEAP_MEM_STATS_V2 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ui32NextOffset += - psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V2); } - psGetHeapPhysMemUsageOUT->eError = - PVRSRVGetHeapPhysMemUsageKM(psConnection, OSGetDevNode(psConnection), - psGetHeapPhysMemUsageIN->ui32PhysHeapCount, - pasapPhysHeapMemStatsInt); + psPhysHeapGetMemInfo2OUT->eError = + PVRSRVPhysHeapGetMemInfo2KM(psConnection, OSGetDevNode(psConnection), + psPhysHeapGetMemInfo2IN->ui32PhysHeapCount, + eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); /* Exit early if bridged call fails */ - if (unlikely(psGetHeapPhysMemUsageOUT->eError != PVRSRV_OK)) + if (unlikely(psPhysHeapGetMemInfo2OUT->eError != PVRSRV_OK)) { - goto GetHeapPhysMemUsage_exit; + goto PhysHeapGetMemInfo2_exit; } /* If dest ptr is non-null and we have data to copy */ if ((pasapPhysHeapMemStatsInt) && - ((psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) + ((psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_V2)) > 0)) { if (unlikely (OSCopyToUser - (NULL, (void __user *)psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats, + (NULL, (void __user *)psPhysHeapGetMemInfo2OUT->pasapPhysHeapMemStats, pasapPhysHeapMemStatsInt, - (psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != - PVRSRV_OK)) + (psPhysHeapGetMemInfo2IN->ui32PhysHeapCount * + sizeof(PHYS_HEAP_MEM_STATS_V2))) != PVRSRV_OK)) { - psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psPhysHeapGetMemInfo2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto GetHeapPhysMemUsage_exit; + goto PhysHeapGetMemInfo2_exit; } } -GetHeapPhysMemUsage_exit: +PhysHeapGetMemInfo2_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psGetHeapPhysMemUsageOUT->eError == PVRSRV_OK) 
+ if (psPhysHeapGetMemInfo2OUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static IMG_INT -PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, - IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = - (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) - IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); - PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = - (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) - IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); - - IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; - DEVMEMINT_CTX *psDevmemCtxInt = NULL; - - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psDevmemGetFaultAddressOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psDevmemCtxInt, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); - if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto DevmemGetFaultAddress_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psDevmemGetFaultAddressOUT->eError = - DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), - psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); - -DevmemGetFaultAddress_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psDevmemCtxInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); - } - /* Release now we have cleaned up look up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - return 0; -} - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - -static IMG_INT -PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8, - IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN = - (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT = - (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *) - IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0); - - psPVRSRVUpdateOOMStatsOUT->eError = - PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection), - psPVRSRVUpdateOOMStatsIN->ui32ui32StatType, - psPVRSRVUpdateOOMStatsIN->ui32pid); - - return 0; -} - -#else -#define PVRSRVBridgePVRSRVUpdateOOMStats NULL -#endif - /* *************************************************************************** * Server bridge dispatch related glue */ @@ -3317,124 +2978,179 @@ PVRSRV_ERROR InitMMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, - PVRSRVBridgePMRExportPMR, NULL); + PVRSRVBridgePMRExportPMR, NULL, sizeof(PVRSRV_BRIDGE_IN_PMREXPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMREXPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, - PVRSRVBridgePMRUnexportPMR, NULL); + PVRSRVBridgePMRUnexportPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID, - NULL); + NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRGETUID), + sizeof(PVRSRV_BRIDGE_OUT_PMRGETUID)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, - PVRSRVBridgePMRMakeLocalImportHandle, NULL); + PVRSRVBridgePMRMakeLocalImportHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, - PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); + PVRSRVBridgePMRUnmakeLocalImportHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, - PVRSRVBridgePMRImportPMR, NULL); + PVRSRVBridgePMRImportPMR, NULL, sizeof(PVRSRV_BRIDGE_IN_PMRIMPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRIMPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, - PVRSRVBridgePMRLocalImportPMR, NULL); + PVRSRVBridgePMRLocalImportPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, - PVRSRVBridgePMRUnrefPMR, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, - PVRSRVBridgePMRUnrefUnlockPMR, NULL); + PVRSRVBridgePMRUnrefPMR, NULL, sizeof(PVRSRV_BRIDGE_IN_PMRUNREFPMR), + sizeof(PVRSRV_BRIDGE_OUT_PMRUNREFPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, - PVRSRVBridgePhysmemNewRamBackedPMR, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, - PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE1, - PVRSRVBridgeDevmemCompatReserve1, NULL); - - 
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE2, - PVRSRVBridgeDevmemCompatReserve2, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE3, - PVRSRVBridgeDevmemCompatReserve3, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE4, - PVRSRVBridgeDevmemCompatReserve4, NULL); + PVRSRVBridgePhysmemNewRamBackedPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, - PVRSRVBridgeDevmemIntCtxCreate, NULL); + PVRSRVBridgeDevmemIntCtxCreate, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, - PVRSRVBridgeDevmemIntCtxDestroy, NULL); + PVRSRVBridgeDevmemIntCtxDestroy, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, - PVRSRVBridgeDevmemIntHeapCreate, NULL); + PVRSRVBridgeDevmemIntHeapCreate, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, - PVRSRVBridgeDevmemIntHeapDestroy, NULL); + PVRSRVBridgeDevmemIntHeapDestroy, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, - PVRSRVBridgeDevmemIntMapPMR, NULL); + PVRSRVBridgeDevmemIntMapPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, - PVRSRVBridgeDevmemIntUnmapPMR, NULL); + PVRSRVBridgeDevmemIntUnmapPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, - PVRSRVBridgeDevmemIntReserveRange, NULL); + PVRSRVBridgeDevmemIntReserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR, + PVRSRVBridgeDevmemIntReserveRangeAndMapPMR, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, - PVRSRVBridgeDevmemIntUnreserveRange, NULL); + PVRSRVBridgeDevmemIntUnreserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, - PVRSRVBridgeChangeSparseMem, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, - PVRSRVBridgeDevmemIntMapPages, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, - PVRSRVBridgeDevmemIntUnmapPages, NULL); + PVRSRVBridgeChangeSparseMem, NULL, + sizeof(PVRSRV_BRIDGE_IN_CHANGESPARSEMEM), + sizeof(PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, - PVRSRVBridgeDevmemIsVDevAddrValid, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE, - 
PVRSRVBridgeDevmemFlushDevSLCRange, NULL); + PVRSRVBridgeDevmemIsVDevAddrValid, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, - PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); + PVRSRVBridgeDevmemInvalidateFBSCTable, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, - PVRSRVBridgeHeapCfgHeapConfigCount, NULL); + PVRSRVBridgeHeapCfgHeapConfigCount, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, - PVRSRVBridgeHeapCfgHeapCount, NULL); + PVRSRVBridgeHeapCfgHeapCount, NULL, + sizeof(PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT), + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, - PVRSRVBridgeHeapCfgHeapConfigName, NULL); + PVRSRVBridgeHeapCfgHeapConfigName, NULL, + sizeof(PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME), + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, - PVRSRVBridgeHeapCfgHeapDetails, NULL); + PVRSRVBridgeHeapCfgHeapDetails, NULL, + sizeof(PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS), + sizeof(PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, - PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT, - PVRSRVBridgeGetMaxPhysHeapCount, NULL); + PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO, - PVRSRVBridgePhysHeapGetMemInfo, NULL); + PVRSRVBridgePhysHeapGetMemInfo, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO), + sizeof(PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP, - PVRSRVBridgeGetDefaultPhysicalHeap, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE, - PVRSRVBridgeGetHeapPhysMemUsage, NULL); + PVRSRVBridgeGetDefaultPhysicalHeap, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, - PVRSRVBridgeDevmemGetFaultAddress, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS, - PVRSRVBridgePVRSRVUpdateOOMStats, NULL); + PVRSRVBridgeDevmemGetFaultAddress, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT, + PVRSRVBridgePVRSRVStatsUpdateOOMStat, NULL, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE, + PVRSRVBridgeDevmemXIntReserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE, + PVRSRVBridgeDevmemXIntUnreserveRange, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE), + 
sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES, + PVRSRVBridgeDevmemXIntMapPages, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES, + PVRSRVBridgeDevmemXIntUnmapPages, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE, + PVRSRVBridgeDevmemXIntMapVRangeToBackingPage, NULL, + sizeof(PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO2, + PVRSRVBridgePhysHeapGetMemInfo2, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO2), + sizeof(PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO2)); return PVRSRV_OK; } @@ -3461,20 +3177,8 @@ void DeinitMMBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE1); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE2); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE3); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMCOMPATRESERVE4); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); @@ -3489,18 +3193,14 @@ void DeinitMMBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES); - - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); @@ -3513,16 +3213,25 @@ void DeinitMMBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO); UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, + PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO2); } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/common_mmextmem_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/common_mmextmem_bridge.h new file mode 100644 index 000000000000..f52637853325 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/common_mmextmem_bridge.h @@ -0,0 +1,80 @@ +/******************************************************************************* +@File +@Title Common bridge header for mmextmem +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for mmextmem +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_MMEXTMEM_BRIDGE_H +#define COMMON_MMEXTMEM_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_MMEXTMEM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM PVRSRV_BRIDGE_MMEXTMEM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST (PVRSRV_BRIDGE_MMEXTMEM_CMD_FIRST+0) + +/******************************************* + PhysmemWrapExtMem + *******************************************/ + +/* Bridge in structure for PhysmemWrapExtMem */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM_TAG +{ + IMG_UINT64 ui64CpuVAddr; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM; + +/* Bridge out structure for PhysmemWrapExtMem */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM; + +#endif /* COMMON_MMEXTMEM_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/server_mmextmem_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/server_mmextmem_bridge.c new file mode 100644 index 000000000000..538d04cdf0e0 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/mmextmem_bridge/server_mmextmem_bridge.c @@ -0,0 +1,166 @@ +/******************************************************************************* +@File +@Title Server bridge for mmextmem +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for mmextmem +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_server.h" +#include "pmr.h" +#include "devicemem_heapcfg.h" +#include "physmem.h" +#include "physmem_extmem.h" + +#include "common_mmextmem_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _PhysmemWrapExtMempsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemWrapExtMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemWrapExtMemIN_UI8, + IMG_UINT8 * psPhysmemWrapExtMemOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM *psPhysmemWrapExtMemIN = + (PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM *) IMG_OFFSET_ADDR(psPhysmemWrapExtMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM *psPhysmemWrapExtMemOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM *) IMG_OFFSET_ADDR(psPhysmemWrapExtMemOUT_UI8, 0); + + PMR *psPMRPtrInt = NULL; + + psPhysmemWrapExtMemOUT->eError = + PhysmemWrapExtMem(psConnection, OSGetDevNode(psConnection), + psPhysmemWrapExtMemIN->uiSize, + psPhysmemWrapExtMemIN->ui64CpuVAddr, + psPhysmemWrapExtMemIN->uiFlags, &psPMRPtrInt); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemWrapExtMemOUT->eError != PVRSRV_OK)) + { + goto PhysmemWrapExtMem_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemWrapExtMemOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemWrapExtMemOUT->hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemWrapExtMempsPMRPtrIntRelease); + if (unlikely(psPhysmemWrapExtMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemWrapExtMem_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemWrapExtMem_exit: + + if (psPhysmemWrapExtMemOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitMMEXTMEMBridge(void); +void DeinitMMEXTMEMBridge(void); + +/* + * Register all MMEXTMEM functions with services + */ +PVRSRV_ERROR InitMMEXTMEMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_MMEXTMEM, PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM, + PVRSRVBridgePhysmemWrapExtMem, NULL, + sizeof(PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM), + sizeof(PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM)); + + return PVRSRV_OK; +} + +/* + * Unregister all mmextmem functions with services + */ +void DeinitMMEXTMEMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MMEXTMEM, PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM); + +} diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdump_bridge/server_pdump_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdump_bridge/server_pdump_bridge.c index 862343f31cac..6b3baf668710 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdump_bridge/server_pdump_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdump_bridge/server_pdump_bridge.c @@ -90,9 +90,7 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -115,7 +113,6 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), sizeof(unsigned long)); @@ -131,7 +128,6 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -242,11 +238,7 @@ PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -271,9 +263,7 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -295,7 +285,6 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long)); @@ -311,7 +300,6 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -358,11 +346,7 @@ PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -410,9 +394,7 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -434,7 +416,6 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN), sizeof(unsigned long)); @@ -450,7 +431,6 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -532,11 +512,7 @@ PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -556,16 +532,24 @@ PVRSRV_ERROR InitPDUMPBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR, - PVRSRVBridgePDumpImageDescriptor, NULL); + PVRSRVBridgePDumpImageDescriptor, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, - PVRSRVBridgePVRSRVPDumpComment, NULL); + PVRSRVBridgePVRSRVPDumpComment, NULL, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME, - PVRSRVBridgePVRSRVPDumpSetFrame, NULL); + PVRSRVBridgePVRSRVPDumpSetFrame, NULL, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR, - PVRSRVBridgePDumpDataDescriptor, NULL); + PVRSRVBridgePDumpDataDescriptor, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h index 8bda83aeb299..a2ba27b88b30 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_bridge.h @@ -63,7 +63,8 @@ IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hB IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - 
IMG_UINT32 ui32MaxParamFileSize); + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout); IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge, IMG_BOOL * pbpbIsLastCaptureFrame); diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c index 7ba92988a462..a2bc30360aea 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c @@ -74,14 +74,17 @@ IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hB IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - IMG_UINT32 ui32MaxParamFileSize) + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout) { PVRSRV_ERROR eError; eError = PDumpSetDefaultCaptureParamsKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32Mode, - ui32Start, ui32End, ui32Interval, ui32MaxParamFileSize); + ui32Start, + ui32End, + ui32Interval, ui32MaxParamFileSize, ui32AutoTermTimeout); return eError; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h index cacfb870510a..f710e4c06131 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/common_pdumpctrl_bridge.h @@ -100,6 +100,7 @@ typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG /* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */ typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG { + IMG_UINT32 ui32AutoTermTimeout; IMG_UINT32 ui32End; IMG_UINT32 ui32Interval; IMG_UINT32 ui32MaxParamFileSize; diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c index 4edc0fc39a16..2d5578cf83f9 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpctrl_bridge/server_pdumpctrl_bridge.c @@ -134,7 +134,9 @@ PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End, psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval, psPVRSRVPDumpSetDefaultCaptureParamsIN-> - ui32MaxParamFileSize); + ui32MaxParamFileSize, + psPVRSRVPDumpSetDefaultCaptureParamsIN-> + ui32AutoTermTimeout); return 0; } @@ -199,22 +201,28 @@ PVRSRV_ERROR InitPDUMPCTRLBridge(void) PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock), "OSLockCreate"); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE, - PVRSRVBridgePVRSRVPDumpGetState, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpGetState, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, - PVRSRVBridgePVRSRVPDumpGetFrame, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpGetFrame, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, 
PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, - PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, pPDUMPCTRLBridgeLock, + sizeof(PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS), + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, - PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP, - PVRSRVBridgePVRSRVPDumpForceCaptureStop, pPDUMPCTRLBridgeLock); + PVRSRVBridgePVRSRVPDumpForceCaptureStop, pPDUMPCTRLBridgeLock, 0, + sizeof(PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c index b1906b385871..1358f75a7537 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/client_pdumpmm_direct_bridge.c @@ -224,12 +224,12 @@ IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBrid { PVRSRV_ERROR eError; DEVMEMINT_CTX *psDevmemServerContextInt; - PVR_UNREFERENCED_PARAMETER(hBridge); psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; eError = - DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + DevmemIntPDumpSaveToFileVirtual(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemServerContextInt, sAddress, uiSize, ui32ArraySize, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c index 2563b7a0fe30..38e1a6610fd5 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pdumpmm_bridge/server_pdumpmm_bridge.c @@ -251,9 +251,7 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -275,7 +273,6 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long)); @@ -291,7 +288,6 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -368,11 +364,7 @@ PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -403,9 +395,7 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -437,7 +427,6 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long)); @@ -453,7 +442,6 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -562,11 +550,7 @@ PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -752,9 +736,7 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -779,7 +761,6 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long)); @@ -796,7 +777,6 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -852,7 +832,8 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, UnlockHandle(psConnection->psHandleBase); psDevmemIntPDumpSaveToFileVirtualOUT->eError = - DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt, + DevmemIntPDumpSaveToFileVirtual(psConnection, OSGetDevNode(psConnection), + psDevmemServerContextInt, psDevmemIntPDumpSaveToFileVirtualIN->sAddress, psDevmemIntPDumpSaveToFileVirtualIN->uiSize, psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize, @@ -880,11 +861,7 @@ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -904,32 +881,49 @@ PVRSRV_ERROR InitPDUMPMMBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, - PVRSRVBridgePMRPDumpLoadMem, NULL); + PVRSRVBridgePMRPDumpLoadMem, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, - PVRSRVBridgePMRPDumpLoadMemValue32, NULL); + PVRSRVBridgePMRPDumpLoadMemValue32, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, - PVRSRVBridgePMRPDumpLoadMemValue64, NULL); + PVRSRVBridgePMRPDumpLoadMemValue64, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, - PVRSRVBridgePMRPDumpSaveToFile, NULL); + PVRSRVBridgePMRPDumpSaveToFile, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, - PVRSRVBridgePMRPDumpSymbolicAddr, NULL); + PVRSRVBridgePMRPDumpSymbolicAddr, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, - PVRSRVBridgePMRPDumpPol32, NULL); + PVRSRVBridgePMRPDumpPol32, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPPOL32), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32, - PVRSRVBridgePMRPDumpCheck32, NULL); + PVRSRVBridgePMRPDumpCheck32, NULL, + sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, - PVRSRVBridgePMRPDumpCBP, NULL); + PVRSRVBridgePMRPDumpCBP, NULL, sizeof(PVRSRV_BRIDGE_IN_PMRPDUMPCBP), + sizeof(PVRSRV_BRIDGE_OUT_PMRPDUMPCBP)); SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, - PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, NULL); + PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, NULL, + 
sizeof(PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL), + sizeof(PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c index 2e8e50e639c1..9facf794011e 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/pvrtl_bridge/server_pvrtl_bridge.c @@ -91,9 +91,7 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -111,7 +109,6 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); @@ -127,7 +124,6 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -221,17 +217,16 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psSDInt = NULL; /* Release now we have cleaned up creation handles. */ UnlockHandle(psConnection->psHandleBase); } - if (psSDInt) + else if (psSDInt) { TLServerCloseStreamKM(psSDInt); } + } /* Allocated space should be equal to the last updated offset */ @@ -240,11 +235,7 @@ PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -408,9 +399,7 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -437,7 +426,6 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long)); @@ -453,7 +441,6 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -523,11 +510,7 @@ PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -654,9 +637,7 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; @@ -677,7 +658,6 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); @@ -693,7 +673,6 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -763,11 +742,7 @@ PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -787,28 +762,42 @@ PVRSRV_ERROR InitPVRTLBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, - PVRSRVBridgeTLOpenStream, NULL); + PVRSRVBridgeTLOpenStream, NULL, sizeof(PVRSRV_BRIDGE_IN_TLOPENSTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLOPENSTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, - PVRSRVBridgeTLCloseStream, NULL); + PVRSRVBridgeTLCloseStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLCLOSESTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLCLOSESTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, - PVRSRVBridgeTLAcquireData, NULL); + PVRSRVBridgeTLAcquireData, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLACQUIREDATA), + sizeof(PVRSRV_BRIDGE_OUT_TLACQUIREDATA)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, - PVRSRVBridgeTLReleaseData, NULL); + PVRSRVBridgeTLReleaseData, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLRELEASEDATA), + sizeof(PVRSRV_BRIDGE_OUT_TLRELEASEDATA)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, - PVRSRVBridgeTLDiscoverStreams, NULL); + PVRSRVBridgeTLDiscoverStreams, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS), + sizeof(PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, - PVRSRVBridgeTLReserveStream, NULL); + PVRSRVBridgeTLReserveStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLRESERVESTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLRESERVESTREAM)); SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, - PVRSRVBridgeTLCommitStream, NULL); + PVRSRVBridgeTLCommitStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_TLCOMMITSTREAM), + sizeof(PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM)); 
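/*
 * Illustrative aside, not part of the patch: throughout this release every
 * SetDispatchTableEntry() call gains two trailing arguments — the sizes of the
 * bridge IN and OUT structures for that entry (e.g. sizeof(PVRSRV_BRIDGE_IN_TLWRITEDATA)
 * and sizeof(PVRSRV_BRIDGE_OUT_TLWRITEDATA)) — presumably so the dispatcher can
 * validate message sizes before a handler runs. The dispatcher itself is not in
 * this diff; the sketch below only shows how such per-entry sizes could be used.
 * BRIDGE_ENTRY_EXAMPLE and ExampleDispatchCheck() are invented names for the
 * illustration and do not exist in the driver.
 */
#include "img_defs.h"
#include "pvrsrv_error.h"
#include "connection_server.h"

/* Handler signature as used by the generated bridge functions in this file. */
typedef IMG_INT (*PFN_BRIDGE_EXAMPLE)(IMG_UINT32 ui32DispatchTableEntry,
                                      IMG_UINT8 *psBridgeIn,
                                      IMG_UINT8 *psBridgeOut,
                                      CONNECTION_DATA *psConnection);

/* Hypothetical per-entry record holding the sizes passed to SetDispatchTableEntry(). */
typedef struct BRIDGE_ENTRY_EXAMPLE_TAG
{
	PFN_BRIDGE_EXAMPLE pfnHandler;  /* e.g. PVRSRVBridgeTLWriteData               */
	IMG_UINT32         ui32InSize;  /* e.g. sizeof(PVRSRV_BRIDGE_IN_TLWRITEDATA)  */
	IMG_UINT32         ui32OutSize; /* e.g. sizeof(PVRSRV_BRIDGE_OUT_TLWRITEDATA) */
} BRIDGE_ENTRY_EXAMPLE;

/* Reject a call whose user-supplied buffers are smaller than the sizes
 * registered for this entry, before the handler ever touches them. */
static PVRSRV_ERROR ExampleDispatchCheck(const BRIDGE_ENTRY_EXAMPLE *psEntry,
                                         IMG_UINT32 ui32InBufferSize,
                                         IMG_UINT32 ui32OutBufferSize)
{
	if ((ui32InBufferSize < psEntry->ui32InSize) ||
	    (ui32OutBufferSize < psEntry->ui32OutSize))
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	return PVRSRV_OK;
}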
 	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA,
-			      PVRSRVBridgeTLWriteData, NULL);
+			      PVRSRVBridgeTLWriteData, NULL, sizeof(PVRSRV_BRIDGE_IN_TLWRITEDATA),
+			      sizeof(PVRSRV_BRIDGE_OUT_TLWRITEDATA));

 	return PVRSRV_OK;
 }
diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h
index b526412445c2..aa1d2b73985a 100644
--- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h
+++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/common_rgxcmp_bridge.h
@@ -58,12 +58,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
 #define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
 #define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
-#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
-#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
-#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXSENDCANCELCMD PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
 #define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
 #define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
-#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXCMP_RGXCDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXCMP_RGXCDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+11
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+11)

 /*******************************************
 RGXCreateComputeContext
@@ -123,6 +127,24 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
 	PVRSRV_ERROR eError;
 } __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;

+/*******************************************
+ RGXSendCancelCmd
+ *******************************************/
+
+/* Bridge in structure for RGXSendCancelCmd */
+typedef struct PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_INT32 i32FirstIntJobRefToCancel;
+	IMG_INT32 i32LastIntJobRefToCancel;
+} __packed PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD;
+
+/* Bridge out structure for RGXSendCancelCmd */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD;
+
 /*******************************************
 RGXSetComputeContextPriority
 *******************************************/
@@ -157,11 +179,70 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
 } __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;

 /*******************************************
- RGXKickCDM2
+ RGXSetComputeContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+{
+	IMG_UINT64 ui64Input;
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetComputeContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+{
+	IMG_UINT64 ui64Output;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY;
+
+/*******************************************
+ RGXGetLastDeviceError
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastDeviceError */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG
+{
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR;
+
+/* Bridge out structure for RGXGetLastDeviceError */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32Error;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR;
+
+/*******************************************
+ RGXKickTimestampQuery
 *******************************************/

-/* Bridge in structure for RGXKickCDM2 */
-typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
+/* Bridge in structure for RGXKickTimestampQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_BYTE *pui8DMCmd;
+	IMG_CHAR *puiUpdateFenceName;
+	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_UINT32 ui32CmdSize;
+	IMG_UINT32 ui32ExtJobRef;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY;
+
+/* Bridge out structure for RGXKickTimestampQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_FENCE hUpdateFence;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY;
+
+/*******************************************
+ RGXKickCDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG
 {
 	IMG_UINT64 ui64DeadlineInus;
 	IMG_HANDLE hComputeContext;
@@ -173,6 +254,7 @@ typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
 	IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
 	IMG_HANDLE *phSyncPMRs;
 	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_FENCE hExportFenceToSignal;
 	PVRSRV_TIMELINE hUpdateTimeline;
 	IMG_UINT32 ui32ClientUpdateCount;
 	IMG_UINT32 ui32CmdSize;
@@ -181,49 +263,47 @@ typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
 	IMG_UINT32 ui32NumOfWorkitems;
 	IMG_UINT32 ui32PDumpFlags;
 	IMG_UINT32 ui32SyncPMRCount;
-} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM;

-/* Bridge out structure for RGXKickCDM2 */
-typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG
+/* Bridge out structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG
 {
 	PVRSRV_ERROR eError;
 	PVRSRV_FENCE hUpdateFence;
-} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2;
+	IMG_UINT32 ui32IntJobRef;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM;

 /*******************************************
- RGXSetComputeContextProperty
+ RGXCDMGetSharedMemory
 *******************************************/

-/* Bridge in structure for RGXSetComputeContextProperty */
-typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+/* Bridge in structure for RGXCDMGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY_TAG
 {
-	IMG_UINT64 ui64Input;
-	IMG_HANDLE hComputeContext;
-	IMG_UINT32 ui32Property;
-} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY;
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY;

-/* Bridge out structure for RGXSetComputeContextProperty */
-typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+/* Bridge out structure for RGXCDMGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY_TAG
 {
-	IMG_UINT64 ui64Output;
+	IMG_HANDLE hCLIPMRMem;
 	PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY;
+} __packed PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY;

 /*******************************************
- RGXGetLastDeviceError
+ RGXCDMReleaseSharedMemory
 *******************************************/

-/* Bridge in structure for RGXGetLastDeviceError */
-typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG
+/* Bridge in structure for RGXCDMReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY_TAG
 {
-	IMG_UINT32 ui32EmptyStructPlaceholder;
-} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR;
+	IMG_HANDLE hPMRMem;
+} __packed PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY;

-/* Bridge out structure for RGXGetLastDeviceError */
-typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG
+/* Bridge out structure for RGXCDMReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY_TAG
 {
 	PVRSRV_ERROR eError;
-	IMG_UINT32 ui32Error;
-} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR;
+} __packed PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY;
 #endif /* COMMON_RGXCMP_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c
index 55b772f6499f..8910cfb5ba6a 100644
--- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c
+++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxcmp_bridge/server_rgxcmp_bridge.c
@@ -100,9 +100,7 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,

 	IMG_UINT32 ui32NextOffset = 0;
 	IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
 	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif

 	IMG_UINT32 ui32BufferSize = 0;
 	IMG_UINT64 ui64BufferSize =
@@ -148,7 +146,6 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,

 	if (ui32BufferSize != 0)
 	{
-#if !defined(INTEGRITY_OS)
 		/* Try to use remainder of input buffer for copies if possible, word-aligned for
 		   safety.
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long)); @@ -164,7 +161,6 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -307,11 +303,7 @@ PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -439,6 +431,73 @@ PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgeRGXSendCancelCmd(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSendCancelCmdIN_UI8, + IMG_UINT8 * psRGXSendCancelCmdOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD *psRGXSendCancelCmdIN = + (PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD *) IMG_OFFSET_ADDR(psRGXSendCancelCmdIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD *psRGXSendCancelCmdOUT = + (PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD *) IMG_OFFSET_ADDR(psRGXSendCancelCmdOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXSendCancelCmdIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXSendCancelCmdOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXSendCancelCmd_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSendCancelCmdOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSendCancelCmdOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSendCancelCmd_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSendCancelCmdOUT->eError = + PVRSRVRGXSendCancelCmdKM(psComputeContextInt, + psRGXSendCancelCmdIN->i32FirstIntJobRefToCancel, + psRGXSendCancelCmdIN->i32LastIntJobRefToCancel); + +RGXSendCancelCmd_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + static IMG_INT PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8, @@ -579,6 +638,296 @@ PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static IMG_INT +PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXSetComputeContextProperty_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, + psRGXSetComputeContextPropertyIN->ui32Property, + psRGXSetComputeContextPropertyIN->ui64Input, + &psRGXSetComputeContextPropertyOUT->ui64Output); + +RGXSetComputeContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, + IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) + IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) + IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXGetLastDeviceError_exit; + } + } + + PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); + + psRGXGetLastDeviceErrorOUT->eError = + PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), + &psRGXGetLastDeviceErrorOUT->ui32Error); + +RGXGetLastDeviceError_exit: + + return 0; +} + +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXKickTimestampQuery(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTimestampQueryIN_UI8, + IMG_UINT8 * psRGXKickTimestampQueryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryIN = + (PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *) + IMG_OFFSET_ADDR(psRGXKickTimestampQueryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryOUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *) + IMG_OFFSET_ADDR(psRGXKickTimestampQueryOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXKickTimestampQueryIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_BYTE *ui8DMCmdInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXKickTimestampQueryIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTimestampQuery_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_COMPUTE_BIT_MASK)) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXKickTimestampQuery_exit; + } + } + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXKickTimestampQuery_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for 
safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickTimestampQueryIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTimestampQueryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTimestampQuery_exit; + } + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTimestampQueryIN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTimestampQuery_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickTimestampQueryIN->ui32CmdSize != 0) + { + ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8DMCmdInt, (const void __user *)psRGXKickTimestampQueryIN->pui8DMCmd, + psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTimestampQuery_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickTimestampQueryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickTimestampQueryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTimestampQuery_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTimestampQueryOUT->eError = + PVRSRVRGXKickTimestampQueryKM(psComputeContextInt, + psRGXKickTimestampQueryIN->hCheckFenceFd, + psRGXKickTimestampQueryIN->hUpdateTimeline, + &psRGXKickTimestampQueryOUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTimestampQueryIN->ui32CmdSize, + ui8DMCmdInt, psRGXKickTimestampQueryIN->ui32ExtJobRef); + +RGXKickTimestampQuery_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXKickTimestampQueryOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, @@ -589,16 +938,16 @@ static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXKickCDM2IN_UI8, - IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickCDMIN_UI8, + IMG_UINT8 * psRGXKickCDMOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = - (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = - (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXKICKCDM *psRGXKickCDMIN = + (PVRSRV_BRIDGE_IN_RGXKICKCDM *) IMG_OFFSET_ADDR(psRGXKickCDMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKCDM *psRGXKickCDMOUT = + (PVRSRV_BRIDGE_OUT_RGXKICKCDM *) IMG_OFFSET_ADDR(psRGXKickCDMOUT_UI8, 0); - IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; + IMG_HANDLE hComputeContext = psRGXKickCDMIN->hComputeContext; RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; @@ -612,38 +961,36 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) + - ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + ((IMG_UINT64) psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; - if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + if (unlikely(psRGXKickCDMIN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) { - 
psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM_exit; } - if (unlikely(psRGXKickCDM2IN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + if (unlikely(psRGXKickCDMIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM_exit; } - if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + if (unlikely(psRGXKickCDMIN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM_exit; } { @@ -654,26 +1001,25 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK)) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXKickCDM_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXKickCDMIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -681,88 +1027,87 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDMIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXKickCDM2_exit; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickCDM_exit; } } } - if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount != 0) { psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0, - psRGXKickCDM2IN->ui32ClientUpdateCount * + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)); ui32NextOffset += - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) { if (OSCopyFromUser (NULL, hClientUpdateUFOSyncPrimBlockInt2, - (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock, - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->phClientUpdateUFOSyncPrimBlock, + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount != 0) { ui32ClientUpdateOffsetInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser (NULL, ui32ClientUpdateOffsetInt, - (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset, - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->pui32ClientUpdateOffset, + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount != 0) { ui32ClientUpdateValueInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * 
sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser (NULL, ui32ClientUpdateValueInt, - (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue, - psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->pui32ClientUpdateValue, + psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } @@ -777,73 +1122,73 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { if (OSCopyFromUser (NULL, uiUpdateFenceNameInt, - (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName, + (const void __user *)psRGXKickCDMIN->puiUpdateFenceName, PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - 1] = '\0'; } - if (psRGXKickCDM2IN->ui32CmdSize != 0) + if (psRGXKickCDMIN->ui32CmdSize != 0) { ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE); + ui32NextOffset += psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + if (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0) { if (OSCopyFromUser - (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd, - psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDMIN->pui8DMCmd, + psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) + if (psRGXKickCDMIN->ui32SyncPMRCount != 0) { ui32SyncPMRFlagsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + ui32NextOffset += psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + if (psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser (NULL, ui32SyncPMRFlagsInt, - (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags, - psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + (const void __user *)psRGXKickCDMIN->pui32SyncPMRFlags, + psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } - if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) + if (psRGXKickCDMIN->ui32SyncPMRCount != 0) { psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)); - ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *); + OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDMIN->ui32SyncPMRCount * sizeof(PMR *)); + 
ui32NextOffset += psRGXKickCDMIN->ui32SyncPMRCount * sizeof(PMR *); hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + ui32NextOffset += psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE); } /* Copy the data over */ - if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + if (psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) { if (OSCopyFromUser - (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs, - psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDMIN->phSyncPMRs, + psRGXKickCDMIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } @@ -851,34 +1196,34 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psComputeContextInt, hComputeContext, PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); - if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32ClientUpdateCount; i++) { /* Look up the address from the handle */ - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **) &psClientUpdateUFOSyncPrimBlockInt[i], hClientUpdateUFOSyncPrimBlockInt2[i], PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); - if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } } @@ -886,46 +1231,47 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32SyncPMRCount; i++) { /* Look up the address from the handle */ - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, (void **)&psSyncPMRsInt[i], hSyncPMRsInt2[i], PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickCDMOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXKickCDM2_exit; + goto RGXKickCDM_exit; } } } /* Release now we have looked up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - psRGXKickCDM2OUT->eError = + psRGXKickCDMOUT->eError = PVRSRVRGXKickCDMKM(psComputeContextInt, - psRGXKickCDM2IN->ui32ClientUpdateCount, + psRGXKickCDMIN->ui32ClientUpdateCount, psClientUpdateUFOSyncPrimBlockInt, ui32ClientUpdateOffsetInt, ui32ClientUpdateValueInt, - psRGXKickCDM2IN->hCheckFenceFd, - psRGXKickCDM2IN->hUpdateTimeline, - &psRGXKickCDM2OUT->hUpdateFence, + psRGXKickCDMIN->hCheckFenceFd, + psRGXKickCDMIN->hUpdateTimeline, + &psRGXKickCDMOUT->hUpdateFence, uiUpdateFenceNameInt, - psRGXKickCDM2IN->ui32CmdSize, + psRGXKickCDMIN->hExportFenceToSignal, + psRGXKickCDMIN->ui32CmdSize, ui8DMCmdInt, - psRGXKickCDM2IN->ui32PDumpFlags, - psRGXKickCDM2IN->ui32ExtJobRef, - psRGXKickCDM2IN->ui32SyncPMRCount, + psRGXKickCDMIN->ui32PDumpFlags, + psRGXKickCDMIN->ui32ExtJobRef, + psRGXKickCDMIN->ui32SyncPMRCount, ui32SyncPMRFlagsInt, psSyncPMRsInt, - psRGXKickCDM2IN->ui32NumOfWorkgroups, - psRGXKickCDM2IN->ui32NumOfWorkitems, - psRGXKickCDM2IN->ui64DeadlineInus); + psRGXKickCDMIN->ui32NumOfWorkgroups, + psRGXKickCDMIN->ui32NumOfWorkitems, + psRGXKickCDMIN->ui64DeadlineInus, &psRGXKickCDMOUT->ui32IntJobRef); -RGXKickCDM2_exit: +RGXKickCDM_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); @@ -942,7 +1288,7 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32ClientUpdateCount; i++) { /* Unreference the previously looked up handle */ @@ -960,7 +1306,7 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, { IMG_UINT32 i; - for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) + for (i = 0; i < psRGXKickCDMIN->ui32SyncPMRCount; i++) { /* Unreference the previously looked up handle */ @@ -977,35 +1323,37 @@ PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXKickCDM2OUT->eError == PVRSRV_OK) + if (psRGXKickCDMOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } +static PVRSRV_ERROR _RGXCDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXCDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + static IMG_INT -PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, - IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCDMGetSharedMemoryIN_UI8, + IMG_UINT8 * psRGXCDMGetSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = - (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = - (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY *psRGXCDMGetSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXCDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMGetSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY 
*psRGXCDMGetSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMGetSharedMemoryOUT_UI8, 0); - IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; - RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + PMR *psCLIPMRMemInt = NULL; { PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); @@ -1015,65 +1363,68 @@ PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK)) { - psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + psRGXCDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; - goto RGXSetComputeContextProperty_exit; + goto RGXCDMGetSharedMemory_exit; } } - /* Lock over handle lookup. */ + PVR_UNREFERENCED_PARAMETER(psRGXCDMGetSharedMemoryIN); + + psRGXCDMGetSharedMemoryOUT->eError = + PVRSRVRGXCDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), + &psCLIPMRMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + goto RGXCDMGetSharedMemory_exit; + } + + /* Lock over handle creation. */ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXSetComputeContextPropertyOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psComputeContextInt, - hComputeContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); - if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + psRGXCDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCDMGetSharedMemoryOUT-> + hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCDMGetSharedMemorypsCLIPMRMemIntRelease); + if (unlikely(psRGXCDMGetSharedMemoryOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXSetComputeContextProperty_exit; + goto RGXCDMGetSharedMemory_exit; } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - psRGXSetComputeContextPropertyOUT->eError = - PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, - psRGXSetComputeContextPropertyIN->ui32Property, - psRGXSetComputeContextPropertyIN->ui64Input, - &psRGXSetComputeContextPropertyOUT->ui64Output); - -RGXSetComputeContextProperty_exit: + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); +RGXCDMGetSharedMemory_exit: - /* Unreference the previously looked up handle */ - if (psComputeContextInt) + if (psRGXCDMGetSharedMemoryOUT->eError != PVRSRV_OK) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hComputeContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + if (psCLIPMRMemInt) + { + PVRSRVRGXCDMReleaseSharedMemoryKM(psCLIPMRMemInt); + } } - /* Release now we have cleaned up look up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); return 0; } static IMG_INT -PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, - IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCDMReleaseSharedMemoryIN_UI8, + IMG_UINT8 * psRGXCDMReleaseSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = - (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) - IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = - (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) - IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY *psRGXCDMReleaseSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMReleaseSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY *psRGXCDMReleaseSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXCDMReleaseSharedMemoryOUT_UI8, 0); { PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); @@ -1083,19 +1434,34 @@ PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK)) { - psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + psRGXCDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; - goto RGXGetLastDeviceError_exit; + goto RGXCDMReleaseSharedMemory_exit; } } - PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); - psRGXGetLastDeviceErrorOUT->eError = - PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), - &psRGXGetLastDeviceErrorOUT->ui32Error); + psRGXCDMReleaseSharedMemoryOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXCDMReleaseSharedMemoryIN->hPMRMem, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + if (unlikely((psRGXCDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) && + (psRGXCDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXCDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXCDMReleaseSharedMemoryOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXCDMReleaseSharedMemory_exit; + } -RGXGetLastDeviceError_exit: + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXCDMReleaseSharedMemory_exit: return 0; } @@ -1114,31 +1480,64 @@ PVRSRV_ERROR InitRGXCMPBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, - PVRSRVBridgeRGXCreateComputeContext, NULL); + PVRSRVBridgeRGXCreateComputeContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, - PVRSRVBridgeRGXDestroyComputeContext, NULL); + PVRSRVBridgeRGXDestroyComputeContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, - PVRSRVBridgeRGXFlushComputeData, NULL); + PVRSRVBridgeRGXFlushComputeData, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA), + sizeof(PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSENDCANCELCMD, + PVRSRVBridgeRGXSendCancelCmd, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSENDCANCELCMD), + sizeof(PVRSRV_BRIDGE_OUT_RGXSENDCANCELCMD)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, - PVRSRVBridgeRGXSetComputeContextPriority, NULL); + PVRSRVBridgeRGXSetComputeContextPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, - PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, - PVRSRVBridgeRGXKickCDM2, NULL); + PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE), + sizeof(PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, - PVRSRVBridgeRGXSetComputeContextProperty, NULL); + PVRSRVBridgeRGXSetComputeContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR, - PVRSRVBridgeRGXGetLastDeviceError, NULL); + PVRSRVBridgeRGXGetLastDeviceError, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY, + PVRSRVBridgeRGXKickTimestampQuery, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM, + PVRSRVBridgeRGXKickCDM, NULL, sizeof(PVRSRV_BRIDGE_IN_RGXKICKCDM), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKCDM)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCDMGETSHAREDMEMORY, + PVRSRVBridgeRGXCDMGetSharedMemory, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXCDMGETSHAREDMEMORY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCDMRELEASESHAREDMEMORY, + PVRSRVBridgeRGXCDMReleaseSharedMemory, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCDMRELEASESHAREDMEMORY), + sizeof(PVRSRV_BRIDGE_OUT_RGXCDMRELEASESHAREDMEMORY)); return PVRSRV_OK; } @@ -1156,17 +1555,26 @@ void DeinitRGXCMPBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, 
PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSENDCANCELCMD); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCDMGETSHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXCDMRELEASESHAREDMEMORY); + } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h index 227f01ae6b7b..534b8e9536ed 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h @@ -58,15 +58,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0 #define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0 #define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8 -#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9 -#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9) +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWEROFF PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWERON PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+11 +#define 
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+14 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+15 +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+15) /******************************************* RGXFWDebugSetFWLog @@ -100,6 +106,54 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST; +/******************************************* + RGXFWDebugPowerOff + *******************************************/ + +/* Bridge in structure for RGXFWDebugPowerOff */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF; + +/* Bridge out structure for RGXFWDebugPowerOff */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF; + +/******************************************* + RGXFWDebugPowerOn + *******************************************/ + +/* Bridge in structure for RGXFWDebugPowerOn */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON; + +/* Bridge out structure for RGXFWDebugPowerOn */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON; + +/******************************************* + RGXFWDebugSetVzConnectionCooldownPeriodInSec + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetVzConnectionCooldownPeriodInSec */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC_TAG +{ + IMG_UINT32 ui32VzConne; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC; + +/* Bridge out structure for RGXFWDebugSetVzConnectionCooldownPeriodInSec */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC; + /******************************************* RGXFWDebugSetHCSDeadline *******************************************/ @@ -117,21 +171,71 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG } __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE; /******************************************* - RGXFWDebugSetOSidPriority + RGXFWDebugSetDriverPriority *******************************************/ -/* Bridge in structure for RGXFWDebugSetOSidPriority */ -typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG +/* Bridge in structure for RGXFWDebugSetDriverPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY_TAG { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; IMG_UINT32 ui32Priority; -} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY; -/* Bridge out structure for RGXFWDebugSetOSidPriority */ -typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG +/* Bridge out structure for RGXFWDebugSetDriverPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY_TAG { PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY; + 
+/******************************************* + RGXFWDebugSetDriverTimeSlice + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetDriverTimeSlice */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE_TAG +{ + IMG_UINT32 ui32DriverID; + IMG_UINT32 ui32TSPercentage; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE; + +/* Bridge out structure for RGXFWDebugSetDriverTimeSlice */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE; + +/******************************************* + RGXFWDebugSetDriverTimeSliceInterval + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetDriverTimeSliceInterval */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL_TAG +{ + IMG_UINT32 ui32TSIntervalMs; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL; + +/* Bridge out structure for RGXFWDebugSetDriverTimeSliceInterval */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL; + +/******************************************* + RGXFWDebugSetDriverIsolationGroup + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetDriverIsolationGroup */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG +{ + IMG_UINT32 ui32DriverID; + IMG_UINT32 ui32IsolationGroup; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP; + +/* Bridge out structure for RGXFWDebugSetDriverIsolationGroup */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP; /******************************************* RGXFWDebugSetOSNewOnlineState @@ -140,8 +244,8 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG /* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG { + IMG_UINT32 ui32DriverID; IMG_UINT32 ui32OSNewState; - IMG_UINT32 ui32OSid; } __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; /* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ @@ -158,7 +262,7 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP_TAG { IMG_UINT64 ui64ui64GuestHeapBase; - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; } __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP; /* Bridge out structure for RGXFWDebugMapGuestHeap */ @@ -206,7 +310,7 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG /* Bridge in structure for RGXCurrentTime */ typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG { - IMG_UINT32 ui32EmptyStructPlaceholder; + IMG_UINT8 ui8TimerType; } __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME; /* Bridge out structure for RGXCurrentTime */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c index 37eb7b3497cd..e5646490aa2d 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c @@ -108,6 +108,71 @@ PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static 
IMG_INT +PVRSRVBridgeRGXFWDebugPowerOff(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPowerOffIN_UI8, + IMG_UINT8 * psRGXFWDebugPowerOffOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF *psRGXFWDebugPowerOffIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWEROFF *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOffIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF *psRGXFWDebugPowerOffOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOffOUT_UI8, + 0); + + PVR_UNREFERENCED_PARAMETER(psRGXFWDebugPowerOffIN); + + psRGXFWDebugPowerOffOUT->eError = + PVRSRVRGXFWDebugPowerOffKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugPowerOn(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPowerOnIN_UI8, + IMG_UINT8 * psRGXFWDebugPowerOnOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON *psRGXFWDebugPowerOnIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPOWERON *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOnIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON *psRGXFWDebugPowerOnOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON *) IMG_OFFSET_ADDR(psRGXFWDebugPowerOnOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXFWDebugPowerOnIN); + + psRGXFWDebugPowerOnOUT->eError = + PVRSRVRGXFWDebugPowerOnKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetVzConnectionCooldownPeriodInSec(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC + *psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC *) + IMG_OFFSET_ADDR(psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC + *psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC *) + IMG_OFFSET_ADDR(psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT_UI8, 0); + + psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT->eError = + PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM(psConnection, + OSGetDevNode(psConnection), + psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN-> + ui32VzConne); + + return 0; +} + static IMG_INT PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8, @@ -129,22 +194,94 @@ PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, } static IMG_INT -PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXFWDebugSetOSidPriorityIN_UI8, - IMG_UINT8 * psRGXFWDebugSetOSidPriorityOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXFWDebugSetDriverPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetDriverPriorityIN_UI8, + IMG_UINT8 * psRGXFWDebugSetDriverPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityOUT_UI8, 0); + + psRGXFWDebugSetDriverPriorityOUT->eError = + 
PVRSRVRGXFWDebugSetDriverPriorityKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverPriorityIN->ui32DriverID, + psRGXFWDebugSetDriverPriorityIN->ui32Priority); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetDriverTimeSlice(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetDriverTimeSliceIN_UI8, + IMG_UINT8 * psRGXFWDebugSetDriverTimeSliceOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityIN = - (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *) - IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityOUT = - (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *) - IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0); - - psRGXFWDebugSetOSidPriorityOUT->eError = - PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, OSGetDevNode(psConnection), - psRGXFWDebugSetOSidPriorityIN->ui32OSid, - psRGXFWDebugSetOSidPriorityIN->ui32Priority); + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE *psRGXFWDebugSetDriverTimeSliceIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE *psRGXFWDebugSetDriverTimeSliceOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceOUT_UI8, 0); + + psRGXFWDebugSetDriverTimeSliceOUT->eError = + PVRSRVRGXFWDebugSetDriverTimeSliceKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverTimeSliceIN->ui32DriverID, + psRGXFWDebugSetDriverTimeSliceIN-> + ui32TSPercentage); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetDriverTimeSliceInterval(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * + psRGXFWDebugSetDriverTimeSliceIntervalIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetDriverTimeSliceIntervalOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL + *psRGXFWDebugSetDriverTimeSliceIntervalIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIntervalIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL + *psRGXFWDebugSetDriverTimeSliceIntervalOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIntervalOUT_UI8, 0); + + psRGXFWDebugSetDriverTimeSliceIntervalOUT->eError = + PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverTimeSliceIntervalIN-> + ui32TSIntervalMs); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetDriverIsolationGroupIN_UI8, + IMG_UINT8 * + psRGXFWDebugSetDriverIsolationGroupOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupOUT + = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *) + IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupOUT_UI8, 0); + + psRGXFWDebugSetDriverIsolationGroupOUT->eError = + PVRSRVRGXFWDebugSetDriverIsolationGroupKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetDriverIsolationGroupIN-> + ui32DriverID, + psRGXFWDebugSetDriverIsolationGroupIN-> 
+ ui32IsolationGroup); return 0; } @@ -164,7 +301,7 @@ PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, psRGXFWDebugSetOSNewOnlineStateOUT->eError = PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection), - psRGXFWDebugSetOSNewOnlineStateIN->ui32OSid, + psRGXFWDebugSetOSNewOnlineStateIN->ui32DriverID, psRGXFWDebugSetOSNewOnlineStateIN-> ui32OSNewState); @@ -186,7 +323,7 @@ PVRSRVBridgeRGXFWDebugMapGuestHeap(IMG_UINT32 ui32DispatchTableEntry, psRGXFWDebugMapGuestHeapOUT->eError = PVRSRVRGXFWDebugMapGuestHeapKM(psConnection, OSGetDevNode(psConnection), - psRGXFWDebugMapGuestHeapIN->ui32OSid, + psRGXFWDebugMapGuestHeapIN->ui32DriverID, psRGXFWDebugMapGuestHeapIN->ui64ui64GuestHeapBase); return 0; @@ -242,41 +379,14 @@ PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); - PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); - psRGXCurrentTimeOUT->eError = PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), - &psRGXCurrentTimeOUT->ui64Time); - - return 0; -} - -#if defined(SUPPORT_VALIDATION) - -static IMG_INT -PVRSRVBridgeRGXFWDebugInjectFault(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXFWDebugInjectFaultIN_UI8, - IMG_UINT8 * psRGXFWDebugInjectFaultOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultIN = - (PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *) - IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultOUT = - (PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *) - IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultOUT_UI8, 0); - - PVR_UNREFERENCED_PARAMETER(psRGXFWDebugInjectFaultIN); - - psRGXFWDebugInjectFaultOUT->eError = - PVRSRVRGXFWDebugInjectFaultKM(psConnection, OSGetDevNode(psConnection)); + psRGXCurrentTimeIN->ui8TimerType, &psRGXCurrentTimeOUT->ui64Time); return 0; } -#else #define PVRSRVBridgeRGXFWDebugInjectFault NULL -#endif /* *************************************************************************** * Server bridge dispatch related glue @@ -292,38 +402,89 @@ PVRSRV_ERROR InitRGXFWDBGBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, - PVRSRVBridgeRGXFWDebugSetFWLog, NULL); + PVRSRVBridgeRGXFWDebugSetFWLog, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, - PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); + PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWEROFF, + PVRSRVBridgeRGXFWDebugPowerOff, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWEROFF)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWERON, + PVRSRVBridgeRGXFWDebugPowerOn, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGPOWERON)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC, + PVRSRVBridgeRGXFWDebugSetVzConnectionCooldownPeriodInSec, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC), + sizeof + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, 
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, - PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); + PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, - PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY, - PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL); + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY, + PVRSRVBridgeRGXFWDebugSetDriverPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE, + PVRSRVBridgeRGXFWDebugSetDriverTimeSlice, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL, + PVRSRVBridgeRGXFWDebugSetDriverTimeSliceInterval, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP, + PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, - PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); + PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP, - PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL); + PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, - PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); + PVRSRVBridgeRGXFWDebugPHRConfigure, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE, - PVRSRVBridgeRGXFWDebugWdgConfigure, NULL); + PVRSRVBridgeRGXFWDebugWdgConfigure, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE), + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, - PVRSRVBridgeRGXCurrentTime, NULL); + PVRSRVBridgeRGXCurrentTime, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCURRENTTIME), + sizeof(PVRSRV_BRIDGE_OUT_RGXCURRENTTIME)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT, - PVRSRVBridgeRGXFWDebugInjectFault, NULL); + PVRSRVBridgeRGXFWDebugInjectFault, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT)); return PVRSRV_OK; } @@ -339,11 +500,27 @@ void DeinitRGXFWDBGBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWEROFF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPOWERON); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, - PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY); + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h index 48f6247fef8f..7940464c7177 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/common_rgxhwperf_bridge.h @@ -55,13 +55,71 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_hwperf.h" #define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5 -#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5) +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXOPENHWPERFCLIENTSTREAM PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCLOSEHWPERFCLIENTSTREAM PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXWRITEHWPERFCLIENTEVENT PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+9) + +/******************************************* + RGXGetConfiguredHWPerfCounters + *******************************************/ + +/* Bridge in structure for RGXGetConfiguredHWPerfCounters */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +{ + RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; + IMG_UINT32 ui32BlockID; +} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS; + +/* Bridge out structure for RGXGetConfiguredHWPerfCounters */ 
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +{ + RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS; + +/******************************************* + RGXGetEnabledHWPerfBlocks + *******************************************/ + +/* Bridge in structure for RGXGetEnabledHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG +{ + IMG_UINT32 *pui32EnabledBlockIDs; + IMG_UINT32 ui32ArrayLen; +} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS; + +/* Bridge out structure for RGXGetEnabledHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG +{ + IMG_UINT32 *pui32EnabledBlockIDs; + PVRSRV_ERROR eError; + IMG_UINT32 ui32BlockCount; +} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS; + +/******************************************* + RGXGetHWPerfTimeStamp + *******************************************/ + +/* Bridge in structure for RGXGetHWPerfTimeStamp */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP; + +/* Bridge out structure for RGXGetHWPerfTimeStamp */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP_TAG +{ + IMG_UINT64 ui64TimeStamp; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP; /******************************************* RGXCtrlHWPerf @@ -81,24 +139,6 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF; -/******************************************* - RGXConfigureHWPerfBlocks - *******************************************/ - -/* Bridge in structure for RGXConfigureHWPerfBlocks */ -typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG -{ - RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; - IMG_UINT32 ui32ArrayLen; - IMG_UINT32 ui32CtrlWord; -} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS; - -/* Bridge out structure for RGXConfigureHWPerfBlocks */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG -{ - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS; - /******************************************* RGXGetHWPerfBvncFeatureFlags *******************************************/ @@ -135,40 +175,72 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG } __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS; /******************************************* - RGXGetConfiguredHWPerfCounters + RGXConfigureHWPerfBlocks *******************************************/ -/* Bridge in structure for RGXGetConfiguredHWPerfCounters */ -typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +/* Bridge in structure for RGXConfigureHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG { - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; - IMG_UINT32 ui32BlockID; -} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS; + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; + IMG_UINT32 ui32ArrayLen; + IMG_UINT32 ui32CtrlWord; +} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS; -/* Bridge out structure for RGXGetConfiguredHWPerfCounters */ -typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG +/* Bridge out structure for RGXConfigureHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG { - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS; +} __packed 
PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS; /******************************************* - RGXGetEnabledHWPerfBlocks + RGXOpenHWPerfClientStream *******************************************/ -/* Bridge in structure for RGXGetEnabledHWPerfBlocks */ -typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG +/* Bridge in structure for RGXOpenHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM_TAG { - IMG_UINT32 *pui32EnabledBlockIDs; - IMG_UINT32 ui32ArrayLen; -} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS; + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM; -/* Bridge out structure for RGXGetEnabledHWPerfBlocks */ -typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG +/* Bridge out structure for RGXOpenHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM_TAG { - IMG_UINT32 *pui32EnabledBlockIDs; + IMG_HANDLE hSD; PVRSRV_ERROR eError; - IMG_UINT32 ui32BlockCount; -} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS; +} __packed PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM; + +/******************************************* + RGXCloseHWPerfClientStream + *******************************************/ + +/* Bridge in structure for RGXCloseHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM_TAG +{ + IMG_HANDLE hSD; +} __packed PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM; + +/* Bridge out structure for RGXCloseHWPerfClientStream */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM; + +/******************************************* + RGXWriteHWPerfClientEvent + *******************************************/ + +/* Bridge in structure for RGXWriteHWPerfClientEvent */ +typedef struct PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT_TAG +{ + IMG_HANDLE hSD; + IMG_BYTE *pui8Data; + IMG_UINT32 ui32Size; +} __packed PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT; + +/* Bridge out structure for RGXWriteHWPerfClientEvent */ +typedef struct PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT; #endif /* COMMON_RGXHWPERF_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c index 1cb51ba4cdf0..97aa4f234367 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxhwperf_bridge/server_rgxhwperf_bridge.c @@ -66,73 +66,157 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* Server-side bridge entry points */ +static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX"); + static IMG_INT -PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, - IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8, + IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = - (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = - (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN = + (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT = + (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *) + IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0); - psRGXCtrlHWPerfOUT->eError = - PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), - psRGXCtrlHWPerfIN->ui32StreamId, - psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask); + RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + + psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters = + psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters; + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXGetConfiguredHWPerfCounters_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXGetConfiguredHWPerfCountersOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXGetConfiguredHWPerfCounters_exit; + } + } + } + + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + { + psConfiguredCountersInt = + (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK); + } + + psRGXGetConfiguredHWPerfCountersOUT->eError = + PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), + psRGXGetConfiguredHWPerfCountersIN->ui32BlockID, + psConfiguredCountersInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK)) + { + goto RGXGetConfiguredHWPerfCounters_exit; + } + + /* If dest ptr is non-null and we have data to copy */ + if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, + (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters, + psConfiguredCountersInt, + (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK)) + { + psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXGetConfiguredHWPerfCounters_exit; + } + } + +RGXGetConfiguredHWPerfCounters_exit: + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3 <= IMG_UINT32_MAX, - "RGXFWIF_HWPERF_CTRL_BLKS_MAX+3 must not be larger than IMG_UINT32_MAX"); - static IMG_INT -PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8, - IMG_UINT8 * psRGXConfigureHWPerfBlocksOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN = - (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT = - (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0); - RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; + IMG_UINT32 *pui32EnabledBlockIDsInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; 
-#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * - sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0; - if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3)) - { - psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXConfigureHWPerfBlocks_exit; - } + psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs = + psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs; if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXConfigureHWPerfBlocks_exit; + psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXGetEnabledHWPerfBlocks_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -140,70 +224,111 @@ PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXConfigureHWPerfBlocks_exit; + psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXGetEnabledHWPerfBlocks_exit; } } } - if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0) + if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0) { - psBlockConfigsInt = - (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK); + pui32EnabledBlockIDsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32); } - /* Copy the data over */ - if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) + psRGXGetEnabledHWPerfBlocksOUT->eError = + PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), + psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen, + &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount, + pui32EnabledBlockIDsInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, psBlockConfigsInt, - (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs, - psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * - sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) + goto RGXGetEnabledHWPerfBlocks_exit; + } + + /* If dest ptr is non-null and we have data to copy */ + if ((pui32EnabledBlockIDsInt) && + ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void 
__user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs, + pui32EnabledBlockIDsInt, + (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) != + PVRSRV_OK)) { - psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXConfigureHWPerfBlocks_exit; + goto RGXGetEnabledHWPerfBlocks_exit; } } - psRGXConfigureHWPerfBlocksOUT->eError = - PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), - psRGXConfigureHWPerfBlocksIN->ui32CtrlWord, - psRGXConfigureHWPerfBlocksIN->ui32ArrayLen, - psBlockConfigsInt); - -RGXConfigureHWPerfBlocks_exit: +RGXGetEnabledHWPerfBlocks_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXConfigureHWPerfBlocksOUT->eError == PVRSRV_OK) + if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } +static IMG_INT +PVRSRVBridgeRGXGetHWPerfTimeStamp(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetHWPerfTimeStampIN_UI8, + IMG_UINT8 * psRGXGetHWPerfTimeStampOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP *psRGXGetHWPerfTimeStampIN = + (PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP *) + IMG_OFFSET_ADDR(psRGXGetHWPerfTimeStampIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP *psRGXGetHWPerfTimeStampOUT = + (PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP *) + IMG_OFFSET_ADDR(psRGXGetHWPerfTimeStampOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfTimeStampIN); + + psRGXGetHWPerfTimeStampOUT->eError = + PVRSRVRGXGetHWPerfTimeStampKM(psConnection, OSGetDevNode(psConnection), + &psRGXGetHWPerfTimeStampOUT->ui64TimeStamp); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, + IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = + (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = + (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); + + psRGXCtrlHWPerfOUT->eError = + PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), + psRGXCtrlHWPerfIN->ui32StreamId, + psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask); + + return 0; +} + static IMG_INT PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsIN_UI8, @@ -246,9 +371,7 @@ PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -270,7 +393,6 @@ PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long)); @@ -286,7 +408,6 @@ PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -332,59 +453,58 @@ PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3 <= IMG_UINT32_MAX, + "RGXFWIF_HWPERF_CTRL_BLKS_MAX+3 must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8, - IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXConfigureHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN = - (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *) - IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT = - (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *) - IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0); - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL; + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; - psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters = - psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters; + if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3)) + { + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXConfigureHWPerfBlocks_exit; + } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXGetConfiguredHWPerfCounters_exit; + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXConfigureHWPerfBlocks_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -392,118 +512,215 @@ PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = - (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXGetConfiguredHWPerfCountersOUT->eError = - PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXGetConfiguredHWPerfCounters_exit; + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXConfigureHWPerfBlocks_exit; } } } - if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0) { - psConfiguredCountersInt = + psBlockConfigsInt = (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK); - } - - psRGXGetConfiguredHWPerfCountersOUT->eError = - PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), - psRGXGetConfiguredHWPerfCountersIN->ui32BlockID, - psConfiguredCountersInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK)) - { - goto RGXGetConfiguredHWPerfCounters_exit; + ui32NextOffset += + psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK); } - /* If dest ptr is non-null and we have data to copy */ - if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0)) + /* Copy the data over */ + if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) { - if (unlikely - (OSCopyToUser - (NULL, - (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters, - psConfiguredCountersInt, - (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK)) + if (OSCopyFromUser + (NULL, psBlockConfigsInt, + (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs, + psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) { - psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXGetConfiguredHWPerfCounters_exit; + goto RGXConfigureHWPerfBlocks_exit; } } -RGXGetConfiguredHWPerfCounters_exit: + psRGXConfigureHWPerfBlocksOUT->eError = + PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), + psRGXConfigureHWPerfBlocksIN->ui32CtrlWord, + psRGXConfigureHWPerfBlocksIN->ui32ArrayLen, + psBlockConfigsInt); + +RGXConfigureHWPerfBlocks_exit: /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK) + if (psRGXConfigureHWPerfBlocksOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; } +static PVRSRV_ERROR 
_RGXOpenHWPerfClientStreampsSDIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXCloseHWPerfClientStreamKM((HWPERF_STREAM_DESC *) pvData); + return eError; +} + static IMG_INT -PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8, - IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8, +PVRSRVBridgeRGXOpenHWPerfClientStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXOpenHWPerfClientStreamIN_UI8, + IMG_UINT8 * psRGXOpenHWPerfClientStreamOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN = - (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT = - (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *) - IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM *psRGXOpenHWPerfClientStreamIN = + (PVRSRV_BRIDGE_IN_RGXOPENHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXOpenHWPerfClientStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM *psRGXOpenHWPerfClientStreamOUT = + (PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXOpenHWPerfClientStreamOUT_UI8, 0); - IMG_UINT32 *pui32EnabledBlockIDsInt = NULL; + HWPERF_STREAM_DESC *psSDInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psRGXOpenHWPerfClientStreamIN); + + psRGXOpenHWPerfClientStreamOUT->eError = + PVRSRVRGXOpenHWPerfClientStreamKM(psConnection, OSGetDevNode(psConnection), &psSDInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXOpenHWPerfClientStreamOUT->eError != PVRSRV_OK)) + { + goto RGXOpenHWPerfClientStream_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXOpenHWPerfClientStreamOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXOpenHWPerfClientStreamOUT->hSD, (void *)psSDInt, + PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXOpenHWPerfClientStreampsSDIntRelease); + if (unlikely(psRGXOpenHWPerfClientStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXOpenHWPerfClientStream_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXOpenHWPerfClientStream_exit: + + if (psRGXOpenHWPerfClientStreamOUT->eError != PVRSRV_OK) + { + if (psSDInt) + { + PVRSRVRGXCloseHWPerfClientStreamKM(psSDInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXCloseHWPerfClientStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCloseHWPerfClientStreamIN_UI8, + IMG_UINT8 * psRGXCloseHWPerfClientStreamOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM *psRGXCloseHWPerfClientStreamIN = + (PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXCloseHWPerfClientStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM *psRGXCloseHWPerfClientStreamOUT = + (PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM *) + IMG_OFFSET_ADDR(psRGXCloseHWPerfClientStreamOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCloseHWPerfClientStreamOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXCloseHWPerfClientStreamIN->hSD, + PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD); + if (unlikely((psRGXCloseHWPerfClientStreamOUT->eError != PVRSRV_OK) && + (psRGXCloseHWPerfClientStreamOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXCloseHWPerfClientStreamOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXCloseHWPerfClientStreamOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXCloseHWPerfClientStream_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCloseHWPerfClientStream_exit: + + return 0; +} + +static_assert(PVRSRVTL_MAX_PACKET_SIZE <= IMG_UINT32_MAX, + "PVRSRVTL_MAX_PACKET_SIZE must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXWriteHWPerfClientEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXWriteHWPerfClientEventIN_UI8, + IMG_UINT8 * psRGXWriteHWPerfClientEventOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT *psRGXWriteHWPerfClientEventIN = + (PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT *) + IMG_OFFSET_ADDR(psRGXWriteHWPerfClientEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT *psRGXWriteHWPerfClientEventOUT = + (PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT *) + IMG_OFFSET_ADDR(psRGXWriteHWPerfClientEventOUT_UI8, 0); + + IMG_HANDLE hSD = psRGXWriteHWPerfClientEventIN->hSD; + HWPERF_STREAM_DESC *psSDInt = NULL; + IMG_BYTE *ui8DataInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0; + ((IMG_UINT64) psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE)) + 0; - psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs = - psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs; + if (unlikely(psRGXWriteHWPerfClientEventIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) + { + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXWriteHWPerfClientEvent_exit; + } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXGetEnabledHWPerfBlocks_exit; + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXWriteHWPerfClientEvent_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXWriteHWPerfClientEventIN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -511,71 +728,82 @@ PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXWriteHWPerfClientEventIN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXGetEnabledHWPerfBlocks_exit; + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXWriteHWPerfClientEvent_exit; } } } - if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0) + if (psRGXWriteHWPerfClientEventIN->ui32Size != 0) { - pui32EnabledBlockIDsInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32); + ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE); } - psRGXGetEnabledHWPerfBlocksOUT->eError = - PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), - psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen, - &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount, - pui32EnabledBlockIDsInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK)) - { - goto RGXGetEnabledHWPerfBlocks_exit; - } - - /* If dest ptr is non-null and we have data to copy */ - if ((pui32EnabledBlockIDsInt) && - ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0)) + /* Copy the data over */ + if (psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE) > 0) { - if (unlikely - (OSCopyToUser - (NULL, (void __user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs, - pui32EnabledBlockIDsInt, - (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) != - PVRSRV_OK)) + if (OSCopyFromUser + (NULL, ui8DataInt, (const void __user *)psRGXWriteHWPerfClientEventIN->pui8Data, + psRGXWriteHWPerfClientEventIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) { - psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXWriteHWPerfClientEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXGetEnabledHWPerfBlocks_exit; + goto RGXWriteHWPerfClientEvent_exit; } } -RGXGetEnabledHWPerfBlocks_exit: + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXWriteHWPerfClientEventOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD, IMG_TRUE); + if (unlikely(psRGXWriteHWPerfClientEventOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXWriteHWPerfClientEvent_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXWriteHWPerfClientEventOUT->eError = + PVRSRVRGXWriteHWPerfClientEventKM(psSDInt, + psRGXWriteHWPerfClientEventIN->ui32Size, ui8DataInt); + +RGXWriteHWPerfClientEvent_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); /* Allocated space should be equal to the last updated offset */ #ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK) + if (psRGXWriteHWPerfClientEventOUT->eError == PVRSRV_OK) PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -594,28 +822,61 @@ void DeinitRGXHWPERFBridge(void); PVRSRV_ERROR InitRGXHWPERFBridge(void) { - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, - PVRSRVBridgeRGXCtrlHWPerf, NULL); + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS, + PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS), + sizeof(PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS, - PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS, + PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS), + sizeof(PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP, + PVRSRVBridgeRGXGetHWPerfTimeStamp, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, + PVRSRVBridgeRGXCtrlHWPerf, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCTRLHWPERF), + sizeof(PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, - PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL); + PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS, - PVRSRVBridgeRGXControlHWPerfBlocks, NULL); + PVRSRVBridgeRGXControlHWPerfBlocks, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS), + sizeof(PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS, - PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS, + PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS), + sizeof(PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS, - PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL); + PVRSRV_BRIDGE_RGXHWPERF_RGXOPENHWPERFCLIENTSTREAM, + PVRSRVBridgeRGXOpenHWPerfClientStream, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXOPENHWPERFCLIENTSTREAM)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCLOSEHWPERFCLIENTSTREAM, + PVRSRVBridgeRGXCloseHWPerfClientStream, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCLOSEHWPERFCLIENTSTREAM), + sizeof(PVRSRV_BRIDGE_OUT_RGXCLOSEHWPERFCLIENTSTREAM)); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXWRITEHWPERFCLIENTEVENT, + PVRSRVBridgeRGXWriteHWPerfClientEvent, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXWRITEHWPERFCLIENTEVENT), + sizeof(PVRSRV_BRIDGE_OUT_RGXWRITEHWPERFCLIENTEVENT)); return PVRSRV_OK; } @@ -626,10 +887,16 @@ PVRSRV_ERROR InitRGXHWPERFBridge(void) void DeinitRGXHWPERFBridge(void) { - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS); + PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS); @@ -638,9 +905,15 @@ void DeinitRGXHWPERFBridge(void) PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS); + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, - PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS); + PVRSRV_BRIDGE_RGXHWPERF_RGXOPENHWPERFCLIENTSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCLOSEHWPERFCLIENTSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXWRITEHWPERFCLIENTEVENT); } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c index 233e0fe3b4c2..8d943cd536e2 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxkicksync_bridge/server_rgxkicksync_bridge.c @@ -61,6 +61,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) + /* *************************************************************************** * Server-side bridge entry points */ @@ -228,9 +230,7 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -257,7 +257,6 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); @@ -273,7 +272,6 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -461,11 +459,7 @@ PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -532,6 +526,9 @@ PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, * Server bridge dispatch related glue */ +#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */ + +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) PVRSRV_ERROR InitRGXKICKSYNCBridge(void); void DeinitRGXKICKSYNCBridge(void); @@ -543,18 +540,25 @@ PVRSRV_ERROR InitRGXKICKSYNCBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, - PVRSRVBridgeRGXCreateKickSyncContext, NULL); + PVRSRVBridgeRGXCreateKickSyncContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, - PVRSRVBridgeRGXDestroyKickSyncContext, NULL); + PVRSRVBridgeRGXDestroyKickSyncContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2, - PVRSRVBridgeRGXKickSync2, NULL); + PVRSRVBridgeRGXKickSync2, NULL, sizeof(PVRSRV_BRIDGE_IN_RGXKICKSYNC2), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKSYNC2)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY, - PVRSRVBridgeRGXSetKickSyncContextProperty, NULL); + PVRSRVBridgeRGXSetKickSyncContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY)); return PVRSRV_OK; } @@ -577,3 +581,13 @@ void DeinitRGXKICKSYNCBridge(void) PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY); } +#else /* SUPPORT_RGXKICKSYNC_BRIDGE */ +/* This bridge is conditional on SUPPORT_RGXKICKSYNC_BRIDGE - when not defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXKICKSYNCBridge() \ + PVRSRV_OK + +#define DeinitRGXKICKSYNCBridge() + +#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c index ed34b211c135..7ba5126053dc 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/client_rgxpdump_direct_bridge.c @@ -76,19 +76,9 @@ IMG_INTERNAL PVRSRV_ERROR BridgePDumpSignatureBuffer(IMG_HANDLE hBridge, IMG_UIN IMG_INTERNAL PVRSRV_ERROR BridgePDumpComputeCRCSignatureCheck(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags) { -#if defined(SUPPORT_VALIDATION) - PVRSRV_ERROR eError; - - eError = - PVRSRVPDumpComputeCRCSignatureCheckKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), - ui32PDumpFlags); - - return eError; -#else PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); return 
PVRSRV_ERROR_NOT_IMPLEMENTED; -#endif } IMG_INTERNAL PVRSRV_ERROR BridgePDumpCRCSignatureCheck(IMG_HANDLE hBridge, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c index e9a5d42422ce..58ed4ebb037f 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxpdump_bridge/server_rgxpdump_bridge.c @@ -102,32 +102,7 @@ PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry, return 0; } -#if defined(SUPPORT_VALIDATION) - -static IMG_INT -PVRSRVBridgePDumpComputeCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psPDumpComputeCRCSignatureCheckIN_UI8, - IMG_UINT8 * psPDumpComputeCRCSignatureCheckOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckIN = - (PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *) - IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckIN_UI8, 0); - PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckOUT = - (PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *) - IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckOUT_UI8, 0); - - psPDumpComputeCRCSignatureCheckOUT->eError = - PVRSRVPDumpComputeCRCSignatureCheckKM(psConnection, OSGetDevNode(psConnection), - psPDumpComputeCRCSignatureCheckIN-> - ui32PDumpFlags); - - return 0; -} - -#else #define PVRSRVBridgePDumpComputeCRCSignatureCheck NULL -#endif static IMG_INT PVRSRVBridgePDumpCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry, @@ -203,25 +178,37 @@ PVRSRV_ERROR InitRGXPDUMPBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, - PVRSRVBridgePDumpTraceBuffer, NULL); + PVRSRVBridgePDumpTraceBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, - PVRSRVBridgePDumpSignatureBuffer, NULL); + PVRSRVBridgePDumpSignatureBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK, - PVRSRVBridgePDumpComputeCRCSignatureCheck, NULL); + PVRSRVBridgePDumpComputeCRCSignatureCheck, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK, - PVRSRVBridgePDumpCRCSignatureCheck, NULL); + PVRSRVBridgePDumpCRCSignatureCheck, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND, - PVRSRVBridgePDumpValCheckPreCommand, NULL); + PVRSRVBridgePDumpValCheckPreCommand, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND, - PVRSRVBridgePDumpValCheckPostCommand, NULL); + PVRSRVBridgePDumpValCheckPostCommand, NULL, + sizeof(PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND), + sizeof(PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND)); return PVRSRV_OK; } diff --git 
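A note on the pattern repeated across these generated bridge files: every SetDispatchTableEntry() call now passes the sizes of the bridge IN and OUT structures alongside the handler, and conditionally built bridges such as RGXKICKSYNC above reduce their Init/Deinit entry points to no-op macros when SUPPORT_RGXKICKSYNC_BRIDGE is not defined. The SetDispatchTableEntry() prototype itself is not part of this hunk, so the fragment below is only a sketch under that assumption, inferred from the call sites: an entry that records the expected payload sizes lets the ioctl dispatcher reject undersized user buffers before the handler runs.

/* Hypothetical sketch only: the real PVR services dispatch table is not
 * shown in this patch. It illustrates why the generated Init*Bridge()
 * functions now pass sizeof(IN)/sizeof(OUT) for every entry. */
#include <stddef.h>
#include <stdint.h>

typedef int (*BRIDGE_FN)(uint32_t cmd, uint8_t *in, uint8_t *out, void *conn);

struct bridge_entry {
    BRIDGE_FN pfn;      /* e.g. PVRSRVBridgeRGXKickSync2 */
    size_t    in_size;  /* sizeof(PVRSRV_BRIDGE_IN_...)  */
    size_t    out_size; /* sizeof(PVRSRV_BRIDGE_OUT_...) */
};

#define BRIDGE_GROUPS 64
#define BRIDGE_CMDS   32
static struct bridge_entry table[BRIDGE_GROUPS][BRIDGE_CMDS];

static void set_dispatch_entry(unsigned group, unsigned cmd, BRIDGE_FN pfn,
                               size_t in_size, size_t out_size)
{
    if (group >= BRIDGE_GROUPS || cmd >= BRIDGE_CMDS)
        return;
    table[group][cmd].pfn      = pfn;
    table[group][cmd].in_size  = in_size;
    table[group][cmd].out_size = out_size;
}

/* The dispatcher can then bounds-check the user payload up front. */
static int dispatch(unsigned group, unsigned cmd,
                    uint8_t *in, size_t in_len,
                    uint8_t *out, size_t out_len, void *conn)
{
    const struct bridge_entry *e;

    if (group >= BRIDGE_GROUPS || cmd >= BRIDGE_CMDS)
        return -1;
    e = &table[group][cmd];
    if (!e->pfn || in_len < e->in_size || out_len < e->out_size)
        return -1;                 /* malformed or unsupported request */
    return e->pfn(cmd, in, out, conn);
}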
a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxray_bridge/server_rgxray_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxray_bridge/server_rgxray_bridge.c index 37e1feeadf50..c537779917bf 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxray_bridge/server_rgxray_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxray_bridge/server_rgxray_bridge.c @@ -95,9 +95,7 @@ PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -122,7 +120,6 @@ PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRayContextIN), sizeof(unsigned long)); @@ -138,7 +135,6 @@ PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -253,11 +249,7 @@ PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -330,9 +322,7 @@ PVRSRVBridgeRGXKickRDM(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -365,7 +355,6 @@ PVRSRVBridgeRGXKickRDM(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickRDMIN), sizeof(unsigned long)); @@ -381,7 +370,6 @@ PVRSRVBridgeRGXKickRDM(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -595,11 +583,7 @@ PVRSRVBridgeRGXKickRDM(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -619,13 +603,18 @@ PVRSRV_ERROR InitRGXRAYBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT, - PVRSRVBridgeRGXCreateRayContext, NULL); + PVRSRVBridgeRGXCreateRayContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT, - PVRSRVBridgeRGXDestroyRayContext, NULL); + PVRSRVBridgeRGXDestroyRayContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM, - PVRSRVBridgeRGXKickRDM, NULL); + PVRSRVBridgeRGXKickRDM, NULL, sizeof(PVRSRV_BRIDGE_IN_RGXKICKRDM), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKRDM)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c index 4cdcb127299b..548401470d09 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxregconfig_bridge/server_rgxregconfig_bridge.c @@ -184,23 +184,30 @@ PVRSRV_ERROR InitRGXREGCONFIGBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE, - PVRSRVBridgeRGXSetRegConfigType, NULL); + PVRSRVBridgeRGXSetRegConfigType, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, - PVRSRVBridgeRGXAddRegconfig, NULL); + PVRSRVBridgeRGXAddRegconfig, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXADDREGCONFIG), + sizeof(PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG, - PVRSRVBridgeRGXClearRegConfig, NULL); + PVRSRVBridgeRGXClearRegConfig, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, - PVRSRVBridgeRGXEnableRegConfig, NULL); + PVRSRVBridgeRGXEnableRegConfig, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG, - PVRSRVBridgeRGXDisableRegConfig, NULL); + PVRSRVBridgeRGXDisableRegConfig, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h index 3b81dd1e75c0..9157c6895073 100644 --- 
a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/common_rgxta3d_bridge.h @@ -57,59 +57,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv_sync_km.h" #define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 -#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 -#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 -#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 -#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 -#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 -#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 -#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 -#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13) - -/******************************************* - RGXCreateHWRTDataSet - *******************************************/ - -/* Bridge in structure for RGXCreateHWRTDataSet */ -typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG -{ - IMG_DEV_VIRTADDR sVHeapTableDevVAddr; - IMG_UINT64 ui64PPPMultiSampleCtl; - IMG_DEV_VIRTADDR *psPMDataAddr; - IMG_DEV_VIRTADDR *psPMSecureDataAddr; - IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; - IMG_HANDLE *phKmHwRTDataSet; - IMG_HANDLE *phapsFreeLists; - IMG_UINT32 ui32ISPMergeLowerX; - IMG_UINT32 ui32ISPMergeLowerY; - IMG_UINT32 ui32ISPMergeScaleX; - IMG_UINT32 ui32ISPMergeScaleY; - IMG_UINT32 ui32ISPMergeUpperX; - IMG_UINT32 ui32ISPMergeUpperY; - IMG_UINT32 ui32PPPScreen; - IMG_UINT32 ui32RgnStride; - IMG_UINT32 ui32TEAA; - IMG_UINT32 ui32TEMTILE1; - IMG_UINT32 ui32TEMTILE2; - IMG_UINT32 ui32TEScreen; - IMG_UINT32 ui32TPCSize; - IMG_UINT32 ui32TPCStride; - IMG_UINT16 ui16MaxRTs; -} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; - -/* Bridge out structure for RGXCreateHWRTDataSet */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG -{ - IMG_HANDLE *phKmHwRTDataSet; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 +#define 
PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15 +#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15) /******************************************* RGXDestroyHWRTDataSet @@ -195,35 +159,6 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; -/******************************************* - RGXCreateFreeList - *******************************************/ - -/* Bridge in structure for RGXCreateFreeList */ -typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG -{ - IMG_DEV_VIRTADDR spsFreeListBaseDevVAddr; - IMG_DEV_VIRTADDR spsFreeListStateDevVAddr; - IMG_DEVMEM_OFFSET_T uiPMROffset; - IMG_DEVMEM_OFFSET_T uiPMRStateOffset; - IMG_HANDLE hMemCtxPrivData; - IMG_HANDLE hsFreeListPMR; - IMG_HANDLE hsFreeListStatePMR; - IMG_HANDLE hsGlobalFreeList; - IMG_UINT32 ui32GrowFLPages; - IMG_UINT32 ui32GrowParamThreshold; - IMG_UINT32 ui32InitFLPages; - IMG_UINT32 ui32MaxFLPages; - IMG_BOOL bbFreeListCheck; -} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; - -/* Bridge out structure for RGXCreateFreeList */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG -{ - IMG_HANDLE hCleanupCookie; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; - /******************************************* RGXDestroyFreeList *******************************************/ @@ -240,33 +175,6 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; -/******************************************* - RGXCreateRenderContext - *******************************************/ - -/* Bridge in structure for RGXCreateRenderContext */ -typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG -{ - IMG_UINT64 ui64RobustnessAddress; - IMG_HANDLE hPrivData; - IMG_BYTE *pui8FrameworkCmd; - IMG_BYTE *pui8StaticRenderContextState; - IMG_INT32 i32Priority; - IMG_UINT32 ui32ContextFlags; - IMG_UINT32 ui32FrameworkCmdSize; - IMG_UINT32 ui32Max3DDeadlineMS; - IMG_UINT32 ui32MaxTADeadlineMS; - IMG_UINT32 ui32PackedCCBSizeU8888; - IMG_UINT32 ui32StaticRenderContextStateSize; -} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; - -/* Bridge out structure for RGXCreateRenderContext */ -typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG -{ - IMG_HANDLE hRenderContext; - PVRSRV_ERROR eError; -} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; - /******************************************* RGXDestroyRenderContext *******************************************/ @@ -283,6 +191,25 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; +/******************************************* + RGXSendZSStoreDisable + *******************************************/ + +/* Bridge in structure for RGXSendZSStoreDisable */ +typedef struct 
PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE_TAG +{ + IMG_HANDLE hRenderContext; + IMG_INT32 i32ExtJobRefToDisableZSStore; + IMG_BOOL bDisableDepthStore; + IMG_BOOL bDisableStencilStore; +} __packed PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE; + +/* Bridge out structure for RGXSendZSStoreDisable */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE; + /******************************************* RGXSetRenderContextPriority *******************************************/ @@ -397,4 +324,102 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; +/******************************************* + RGXCreateHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG +{ + IMG_DEV_VIRTADDR sVHeapTableDevVAddr; + IMG_DEV_VIRTADDR *psPMDataAddr; + IMG_DEV_VIRTADDR *psPMSecureDataAddr; + IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; + IMG_HANDLE *phKmHwRTDataSet; + IMG_HANDLE *phapsFreeLists; + IMG_UINT16 ui16MaxRTs; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; + +/* Bridge out structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG +{ + IMG_HANDLE *phKmHwRTDataSet; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; + +/******************************************* + RGXCreateFreeList + *******************************************/ + +/* Bridge in structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hFreeListAndStateReservation; + IMG_HANDLE hMemCtxPrivData; + IMG_HANDLE hsGlobalFreeList; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32GrowParamThreshold; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32MaxFLPages; + IMG_BOOL bbFreeListCheck; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; + +/* Bridge out structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; + +/******************************************* + RGXCreateRenderContext + *******************************************/ + +/* Bridge in structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG +{ + IMG_UINT64 ui64RobustnessAddress; + IMG_HANDLE hPrivData; + IMG_BYTE *pui8FrameworkCmd; + IMG_BYTE *pui8StaticRenderContextState; + IMG_INT32 i32Priority; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32FrameworkCmdSize; + IMG_UINT32 ui32Max3DDeadlineMS; + IMG_UINT32 ui32MaxTADeadlineMS; + IMG_UINT32 ui32PackedCCBSizeU8888; + IMG_UINT32 ui32StaticRenderContextStateSize; +} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; + +/* Bridge out structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG +{ + IMG_HANDLE hRenderContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; + +/******************************************* + RGXCreateHWRTDataSet2 + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2_TAG +{ + IMG_DEV_VIRTADDR sVHeapTableDevVAddr; + IMG_HANDLE hMListsReservation; + IMG_HANDLE hPMSecureStatesReservation; + IMG_HANDLE hPMStatesReservation; + IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; + IMG_HANDLE *phKmHwRTDataSet; + 
IMG_HANDLE *phapsFreeLists; + IMG_UINT16 ui16MaxRTs; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2; + +/* Bridge out structure for RGXCreateHWRTDataSet2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2_TAG +{ + IMG_HANDLE *phKmHwRTDataSet; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2; + #endif /* COMMON_RGXTA3D_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c index 65eb868eb9ec..dbb87e97fb26 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxta3d_bridge/server_rgxta3d_bridge.c @@ -65,962 +65,725 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * Server-side bridge entry points */ -static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); - return eError; -} - -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, - "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); - static IMG_INT -PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, - IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = - (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) - IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); - IMG_DEV_VIRTADDR *sPMDataAddrInt = NULL; - IMG_DEV_VIRTADDR *sPMSecureDataAddrInt = NULL; - RGX_FREELIST **psapsFreeListsInt = NULL; - IMG_HANDLE *hapsFreeListsInt2 = NULL; - IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; - RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; - IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif + psRGXDestroyHWRTDataSetOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyHWRTDataSetIN-> + hKmHwRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) + && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyHWRTDataSet_exit; + } - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + - ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); - psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet; +RGXDestroyHWRTDataSet_exit: - if (ui64BufferSize > IMG_UINT32_MAX) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXCreateHWRTDataSet_exit; - } + return 0; +} - ui32BufferSize = (IMG_UINT32) ui64BufferSize; +static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); + return eError; +} - if (ui32BufferSize != 0) - { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; +static IMG_INT +PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateZSBufferIN_UI8, + IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; + IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; + PMR *psPMRInt = NULL; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); - if (!pArrayArgsBuffer) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXCreateHWRTDataSet_exit; - } - } + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; } + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) { - sPMDataAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + psRGXCreateZSBufferOUT->eError = + RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), + psReservationInt, + psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, sPMDataAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psPMDataAddr, - RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; - } + goto RGXCreateZSBuffer_exit; } + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateZSBufferOUT-> + hsZSBufferKM, + (void *)pssZSBufferKMInt, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateZSBufferpssZSBufferKMIntRelease); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) { - sPMSecureDataAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; } - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) - { - if (OSCopyFromUser - (NULL, sPMSecureDataAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psPMSecureDataAddr, - RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - goto RGXCreateHWRTDataSet_exit; - } - } +RGXCreateZSBuffer_exit: - { - psapsFreeListsInt = - (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psapsFreeListsInt, 0, - RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); - ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); - hapsFreeListsInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); - } + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) + /* Unreference the previously looked up handle */ + if (psReservationInt) { - if (OSCopyFromUser - (NULL, hapsFreeListsInt2, - (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, - RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; - } + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } + /* Unreference the previously looked up handle */ + if (psPMRInt) { - sTailPtrsDevVAddrInt = - (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) { - if (OSCopyFromUser - (NULL, sTailPtrsDevVAddrInt, - (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr, - RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + if (pssZSBufferKMInt) { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; + RGXDestroyZSBufferKM(pssZSBufferKMInt); } } - if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, + IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, + 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyZSBufferOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyZSBufferIN-> + hsZSBufferMemDesc, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + if (unlikely + ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) + && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) { - psKmHwRTDataSetInt = - (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psKmHwRTDataSetInt, 0, - RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); - hKmHwRTDataSetInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyZSBuffer_exit; } + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, + 0); + + IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + RGX_POPULATION *pssPopulationInt = NULL; + /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); + /* Look up the address from the handle */ + psRGXPopulateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssZSBufferKMInt, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) { - IMG_UINT32 i; - - for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) - { - /* Look up the address from the handle */ - psRGXCreateHWRTDataSetOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psapsFreeListsInt[i], - hapsFreeListsInt2[i], - PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); - if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateHWRTDataSet_exit; - } - } + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psRGXCreateHWRTDataSetOUT->eError = - RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), - psRGXCreateHWRTDataSetIN->sVHeapTableDevVAddr, - sPMDataAddrInt, - sPMSecureDataAddrInt, - psapsFreeListsInt, - psRGXCreateHWRTDataSetIN->ui32PPPScreen, - psRGXCreateHWRTDataSetIN->ui64PPPMultiSampleCtl, - psRGXCreateHWRTDataSetIN->ui32TPCStride, - sTailPtrsDevVAddrInt, - psRGXCreateHWRTDataSetIN->ui32TPCSize, - psRGXCreateHWRTDataSetIN->ui32TEScreen, - psRGXCreateHWRTDataSetIN->ui32TEAA, - psRGXCreateHWRTDataSetIN->ui32TEMTILE1, - psRGXCreateHWRTDataSetIN->ui32TEMTILE2, - psRGXCreateHWRTDataSetIN->ui32RgnStride, - psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, - psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, - psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, - psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, - psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, - psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, - psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt); + psRGXPopulateZSBufferOUT->eError = + RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) { - goto RGXCreateHWRTDataSet_exit; + goto RGXPopulateZSBuffer_exit; } /* Lock over handle creation. 
*/ LockHandle(psConnection->psHandleBase); - if (hKmHwRTDataSetInt2) - { - IMG_UINT32 i; - - for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) - { - - psRGXCreateHWRTDataSetOUT->eError = - PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &hKmHwRTDataSetInt2[i], - (void *)psKmHwRTDataSetInt[i], - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); - if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateHWRTDataSet_exit; - } - } + psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXPopulateZSBufferOUT-> + hsPopulation, + (void *)pssPopulationInt, + PVRSRV_HANDLE_TYPE_RGX_POPULATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXPopulateZSBufferpssPopulationIntRelease); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; } + /* Release now we have created handles. */ UnlockHandle(psConnection->psHandleBase); - /* If dest ptr is non-null and we have data to copy */ - if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) - { - if (unlikely - (OSCopyToUser - (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet, - hKmHwRTDataSetInt2, - (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) - { - psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXCreateHWRTDataSet_exit; - } - } - -RGXCreateHWRTDataSet_exit: +RGXPopulateZSBuffer_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); - if (hapsFreeListsInt2) + /* Unreference the previously looked up handle */ + if (pssZSBufferKMInt) { - IMG_UINT32 i; - - for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) - { - - /* Unreference the previously looked up handle */ - if (psapsFreeListsInt && psapsFreeListsInt[i]) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hapsFreeListsInt2[i], - PVRSRV_HANDLE_TYPE_RGX_FREELIST); - } - } + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); } /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) + if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) { + if (pssPopulationInt) { - IMG_UINT32 i; - - if (hKmHwRTDataSetInt2) - { - for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) - { - if (hKmHwRTDataSetInt2[i]) - { - RGXDestroyHWRTDataSet(hKmHwRTDataSetInt2[i]); - } - } - } + RGXUnpopulateZSBufferKM(pssPopulationInt); } } - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - return 0; } static IMG_INT -PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, - IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, +PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) - IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) - IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); /* Lock over handle destruction. */ LockHandle(psConnection->psHandleBase); - psRGXDestroyHWRTDataSetOUT->eError = + psRGXUnpopulateZSBufferOUT->eError = PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyHWRTDataSetIN-> - hKmHwRTDataSet, - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); - if (unlikely - ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) - && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, + PVRSRV_HANDLE_TYPE_RGX_POPULATION); + if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && + (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) { PVR_DPF((PVR_DBG_ERROR, "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); + __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyHWRTDataSet_exit; + goto RGXUnpopulateZSBuffer_exit; } /* Release now we have destroyed handles. 
*/ UnlockHandle(psConnection->psHandleBase); -RGXDestroyHWRTDataSet_exit: +RGXUnpopulateZSBuffer_exit: return 0; } -static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); - return eError; -} - static IMG_INT -PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateZSBufferIN_UI8, - IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); - - IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; - DEVMEMINT_RESERVATION *psReservationInt = NULL; - IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; - PMR *psPMRInt = NULL; - RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; - - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psRGXCreateZSBufferOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psReservationInt, - hReservation, - PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateZSBuffer_exit; - } - - /* Look up the address from the handle */ - psRGXCreateZSBufferOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPMRInt, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateZSBuffer_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - - psRGXCreateZSBufferOUT->eError = - RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), - psReservationInt, - psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) - { - goto RGXCreateZSBuffer_exit; - } +PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyFreeListIN_UI8, + IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, + 0); - /* Lock over handle creation. */ + /* Lock over handle destruction. 
*/ LockHandle(psConnection->psHandleBase); - psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateZSBufferOUT-> - hsZSBufferKM, - (void *)pssZSBufferKMInt, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateZSBufferpssZSBufferKMIntRelease); - if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + psRGXDestroyFreeListOUT->eError = + PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && + (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && + (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXCreateZSBuffer_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -RGXCreateZSBuffer_exit: - - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (psReservationInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + goto RGXDestroyFreeList_exit; } - /* Unreference the previously looked up handle */ - if (psPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ + /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); - if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) - { - if (pssZSBufferKMInt) - { - RGXDestroyZSBufferKM(pssZSBufferKMInt); - } - } +RGXDestroyFreeList_exit: return 0; } static IMG_INT -PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, - IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, + IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, - 0); + PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); /* Lock over handle destruction. 
*/ LockHandle(psConnection->psHandleBase); - psRGXDestroyZSBufferOUT->eError = + psRGXDestroyRenderContextOUT->eError = PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyZSBufferIN-> - hsZSBufferMemDesc, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + (IMG_HANDLE) psRGXDestroyRenderContextIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); if (unlikely - ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) - && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) { PVR_DPF((PVR_DBG_ERROR, "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); + __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyZSBuffer_exit; + goto RGXDestroyRenderContext_exit; } /* Release now we have destroyed handles. */ UnlockHandle(psConnection->psHandleBase); -RGXDestroyZSBuffer_exit: +RGXDestroyRenderContext_exit: return 0; } -static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); - return eError; -} - static IMG_INT -PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, - IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXSendZSStoreDisable(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSendZSStoreDisableIN_UI8, + IMG_UINT8 * psRGXSendZSStoreDisableOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, - 0); - PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, - 0); - - IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; - RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; - RGX_POPULATION *pssPopulationInt = NULL; + PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE *psRGXSendZSStoreDisableIN = + (PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE *) + IMG_OFFSET_ADDR(psRGXSendZSStoreDisableIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE *psRGXSendZSStoreDisableOUT = + (PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE *) + IMG_OFFSET_ADDR(psRGXSendZSStoreDisableOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSendZSStoreDisableIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); /* Look up the address from the handle */ - psRGXPopulateZSBufferOUT->eError = + psRGXSendZSStoreDisableOUT->eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssZSBufferKMInt, - hsZSBufferKM, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); - if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSendZSStoreDisableOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXPopulateZSBuffer_exit; + goto RGXSendZSStoreDisable_exit; } /* Release now we have looked up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - psRGXPopulateZSBufferOUT->eError = - RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + psRGXSendZSStoreDisableOUT->eError = + PVRSRVRGXSendZSStoreDisableKM(psConnection, OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSendZSStoreDisableIN->bDisableDepthStore, + psRGXSendZSStoreDisableIN->bDisableStencilStore, + psRGXSendZSStoreDisableIN->i32ExtJobRefToDisableZSStore); + +RGXSendZSStoreDisable_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) { - goto RGXPopulateZSBuffer_exit; + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Lock over handle creation. */ + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, + IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); - psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXPopulateZSBufferOUT-> - hsPopulation, - (void *)pssPopulationInt, - PVRSRV_HANDLE_TYPE_RGX_POPULATION, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXPopulateZSBufferpssPopulationIntRelease); - if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + /* Look up the address from the handle */ + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXPopulateZSBuffer_exit; + goto RGXSetRenderContextPriority_exit; } - - /* Release now we have created handles. */ + /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); -RGXPopulateZSBuffer_exit: + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSetRenderContextPriorityIN->i32Priority); + +RGXSetRenderContextPriority_exit: /* Lock over handle lookup cleanup. */ LockHandle(psConnection->psHandleBase); /* Unreference the previously looked up handle */ - if (pssZSBufferKMInt) + if (psRenderContextInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } /* Release now we have cleaned up look up handles. 
*/ UnlockHandle(psConnection->psHandleBase); - if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) - { - if (pssPopulationInt) - { - RGXUnpopulateZSBufferKM(pssPopulationInt); - } - } - return 0; } static IMG_INT -PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, - IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXRenderContextStalledIN_UI8, + IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = - (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) - IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = - (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) - IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = + (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = + (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); - /* Lock over handle destruction. */ + IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ LockHandle(psConnection->psHandleBase); - psRGXUnpopulateZSBufferOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, - PVRSRV_HANDLE_TYPE_RGX_POPULATION); - if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && - (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + /* Look up the address from the handle */ + psRGXRenderContextStalledOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); UnlockHandle(psConnection->psHandleBase); - goto RGXUnpopulateZSBuffer_exit; + goto RGXRenderContextStalled_exit; } - - /* Release now we have destroyed handles. */ + /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); -RGXUnpopulateZSBuffer_exit: + psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); + +RGXRenderContextStalled_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); return 0; } -static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); - return eError; -} +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); static IMG_INT -PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateFreeListIN_UI8, - IMG_UINT8 * psRGXCreateFreeListOUT_UI8, - CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTA3D2IN_UI8, + IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = - (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); + PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = + (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); - IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; - IMG_HANDLE hMemCtxPrivDataInt = NULL; - IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; - RGX_FREELIST *pssGlobalFreeListInt = NULL; - IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; - PMR *pssFreeListPMRInt = NULL; - IMG_HANDLE hsFreeListStatePMR = psRGXCreateFreeListIN->hsFreeListStatePMR; - PMR *pssFreeListStatePMRInt = NULL; - RGX_FREELIST *psCleanupCookieInt = NULL; + IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 
*ui32Client3DUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; + IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_CHAR *uiUpdateFenceName3DInt = NULL; + IMG_BYTE *ui8TACmdInt = NULL; + IMG_BYTE *ui83DPRCmdInt = NULL; + IMG_BYTE *ui83DCmdInt = NULL; + IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; + IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; + RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; + IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; + RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&hMemCtxPrivDataInt, - hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; } - if (psRGXCreateFreeListIN->hsGlobalFreeList) + if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) { - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssGlobalFreeListInt, - hsGlobalFreeList, 
- PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; - } + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; } - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssFreeListPMRInt, - hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; } - /* Look up the address from the handle */ - psRGXCreateFreeListOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&pssFreeListStatePMRInt, - hsFreeListStatePMR, - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - psRGXCreateFreeListOUT->eError = - RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), - hMemCtxPrivDataInt, - psRGXCreateFreeListIN->ui32MaxFLPages, - psRGXCreateFreeListIN->ui32InitFLPages, - psRGXCreateFreeListIN->ui32GrowFLPages, - psRGXCreateFreeListIN->ui32GrowParamThreshold, - pssGlobalFreeListInt, - psRGXCreateFreeListIN->bbFreeListCheck, - psRGXCreateFreeListIN->spsFreeListBaseDevVAddr, - psRGXCreateFreeListIN->spsFreeListStateDevVAddr, - pssFreeListPMRInt, - psRGXCreateFreeListIN->uiPMROffset, - pssFreeListStatePMRInt, - psRGXCreateFreeListIN->uiPMRStateOffset, &psCleanupCookieInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) { - goto RGXCreateFreeList_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; } - /* Lock over handle creation. */ - LockHandle(psConnection->psHandleBase); + if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } - psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateFreeListOUT-> - hCleanupCookie, - (void *)psCleanupCookieInt, - PVRSRV_HANDLE_TYPE_RGX_FREELIST, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateFreeListpsCleanupCookieIntRelease); - if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateFreeList_exit; - } - - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); - -RGXCreateFreeList_exit: - - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); - - /* Unreference the previously looked up handle */ - if (hMemCtxPrivDataInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); - } - - if (psRGXCreateFreeListIN->hsGlobalFreeList) - { - - /* Unreference the previously looked up handle */ - if (pssGlobalFreeListInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsGlobalFreeList, - PVRSRV_HANDLE_TYPE_RGX_FREELIST); - } - } - - /* Unreference the previously looked up handle */ - if (pssFreeListPMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - - /* Unreference the previously looked up handle */ - if (pssFreeListStatePMRInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hsFreeListStatePMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); - } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - - if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) - { - if (psCleanupCookieInt) - { - RGXDestroyFreeList(psCleanupCookieInt); - } - } - - return 0; -} - -static IMG_INT -PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyFreeListIN_UI8, - IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, - 0); - - /* Lock over handle destruction. */ - LockHandle(psConnection->psHandleBase); - - psRGXDestroyFreeListOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, - PVRSRV_HANDLE_TYPE_RGX_FREELIST); - if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && - (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && - (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyFreeList_exit; - } - - /* Release now we have destroyed handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - -RGXDestroyFreeList_exit: - - return 0; -} - -static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); - return eError; -} - -static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); - -static IMG_INT -PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXCreateRenderContextIN_UI8, - IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = - (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = - (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); - - IMG_BYTE *ui8FrameworkCmdInt = NULL; - IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; - IMG_HANDLE hPrivDataInt = NULL; - IMG_BYTE *ui8StaticRenderContextStateInt = NULL; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * - sizeof(IMG_BYTE)) + 0; - - if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) - { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXCreateRenderContext_exit; - } - - if (unlikely - (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > - RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) - { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXCreateRenderContext_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; } if (ui64BufferSize > IMG_UINT32_MAX) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXCreateRenderContext_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXKickTA3D2_exit; } ui32BufferSize = (IMG_UINT32) ui64BufferSize; if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); + PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; @@ -1028,1225 +791,1910 @@ PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; if (bHaveEnoughSpace) { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); if (!pArrayArgsBuffer) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXCreateRenderContext_exit; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTA3D2_exit; } } } - if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) { - ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + psClientTAFenceSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); ui32NextOffset += - psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAFenceSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); } /* Copy the data over */ - if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) { if (OSCopyFromUser - (NULL, ui8FrameworkCmdInt, - (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, - psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != - PVRSRV_OK) + (NULL, hClientTAFenceSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXCreateRenderContext_exit; + goto RGXKickTA3D2_exit; } } - if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) { - ui8StaticRenderContextStateInt = - (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += - psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); + ui32ClientTAFenceSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); } /* Copy the data over */ - if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) { if (OSCopyFromUser - (NULL, ui8StaticRenderContextStateInt, - (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, - psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * - sizeof(IMG_BYTE)) != PVRSRV_OK) + (NULL, ui32ClientTAFenceSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) { - psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXKickTA3D2OUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; - goto RGXCreateRenderContext_exit; + goto RGXKickTA3D2_exit; } } - - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); - - /* Look up the address from the handle */ - psRGXCreateRenderContextOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&hPrivDataInt, - hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); - if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) { - UnlockHandle(psConnection->psHandleBase); - goto RGXCreateRenderContext_exit; + ui32ClientTAFenceValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); - psRGXCreateRenderContextOUT->eError = - PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), - psRGXCreateRenderContextIN->i32Priority, - psRGXCreateRenderContextIN->ui32FrameworkCmdSize, - ui8FrameworkCmdInt, - hPrivDataInt, - psRGXCreateRenderContextIN-> - ui32StaticRenderContextStateSize, - ui8StaticRenderContextStateInt, - psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, - psRGXCreateRenderContextIN->ui32ContextFlags, - psRGXCreateRenderContextIN->ui64RobustnessAddress, - psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, - psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, - &psRenderContextInt); - /* Exit early if bridged call fails */ - if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) { - goto RGXCreateRenderContext_exit; - } - - /* Lock over handle creation. 
*/ - LockHandle(psConnection->psHandleBase); + if (OSCopyFromUser + (NULL, ui32ClientTAFenceValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXCreateRenderContextOUT-> - hRenderContext, - (void *)psRenderContextInt, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXCreateRenderContextpsRenderContextIntRelease); - if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + psClientTAUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + psClient3DUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClient3DUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClient3DUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + { + uiUpdateFenceName3DInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceName3DInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + { + ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, + 
psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) + { + ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DCmdSize != 0) + { + ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psSyncPMRsInt, 0, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psClientTAFenceSyncPrimBlockInt[i], + hClientTAFenceSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psClientTAUpdateSyncPrimBlockInt[i], + hClientTAUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psClient3DUpdateSyncPrimBlockInt[i], + hClient3DUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPRFenceUFOSyncPrimBlockInt, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKMHWRTDataSetInt, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psZSBufferInt, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMSAAScratchBufferInt, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + 
UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTA3D2OUT->eError = + PVRSRVRGXKickTA3DKM(psRenderContextInt, + psRGXKickTA3D2IN->ui32ClientTAFenceCount, + psClientTAFenceSyncPrimBlockInt, + ui32ClientTAFenceSyncOffsetInt, + ui32ClientTAFenceValueInt, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount, + psClientTAUpdateSyncPrimBlockInt, + ui32ClientTAUpdateSyncOffsetInt, + ui32ClientTAUpdateValueInt, + psRGXKickTA3D2IN->ui32Client3DUpdateCount, + psClient3DUpdateSyncPrimBlockInt, + ui32Client3DUpdateSyncOffsetInt, + ui32Client3DUpdateValueInt, + psPRFenceUFOSyncPrimBlockInt, + psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset, + psRGXKickTA3D2IN->ui32PRFenceValue, + psRGXKickTA3D2IN->hCheckFence, + psRGXKickTA3D2IN->hUpdateTimeline, + &psRGXKickTA3D2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTA3D2IN->hCheckFence3D, + psRGXKickTA3D2IN->hUpdateTimeline3D, + &psRGXKickTA3D2OUT->hUpdateFence3D, + uiUpdateFenceName3DInt, + psRGXKickTA3D2IN->ui32TACmdSize, + ui8TACmdInt, + psRGXKickTA3D2IN->ui323DPRCmdSize, + ui83DPRCmdInt, + psRGXKickTA3D2IN->ui323DCmdSize, + ui83DCmdInt, + psRGXKickTA3D2IN->ui32ExtJobRef, + psRGXKickTA3D2IN->bbKickTA, + psRGXKickTA3D2IN->bbKickPR, + psRGXKickTA3D2IN->bbKick3D, + psRGXKickTA3D2IN->bbAbort, + psRGXKickTA3D2IN->ui32PDumpFlags, + psKMHWRTDataSetInt, + psZSBufferInt, + psMSAAScratchBufferInt, + psRGXKickTA3D2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXKickTA3D2IN->ui32RenderTargetSize, + psRGXKickTA3D2IN->ui32NumberOfDrawCalls, + psRGXKickTA3D2IN->ui32NumberOfIndices, + psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); + +RGXKickTA3D2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + + if (hClientTAFenceSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientTAFenceSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClientTAUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientTAUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClient3DUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClient3DUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + /* Unreference the previously looked up handle */ + if (psPRFenceUFOSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + + /* Unreference the previously looked up handle */ + if (psKMHWRTDataSetInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + + /* Unreference the previously looked up handle */ + if (psZSBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + + /* Unreference the previously looked up handle */ + if (psMSAAScratchBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psSyncPMRsInt && psSyncPMRsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXKickTA3D2OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) { UnlockHandle(psConnection->psHandleBase); - goto RGXCreateRenderContext_exit; + goto RGXSetRenderContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, + psRGXSetRenderContextPropertyIN->ui32Property, + psRGXSetRenderContextPropertyIN->ui64Input, + &psRGXSetRenderContextPropertyOUT->ui64Output); + +RGXSetRenderContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); + + IMG_DEV_VIRTADDR *sPMDataAddrInt = NULL; + IMG_DEV_VIRTADDR *sPMSecureDataAddrInt = NULL; + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; + RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; + IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; + + psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet; + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXCreateHWRTDataSet_exit; } - /* Release now we have created handles. */ - UnlockHandle(psConnection->psHandleBase); + ui32BufferSize = (IMG_UINT32) ui64BufferSize; -RGXCreateRenderContext_exit: + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet_exit; + } + } + } - /* Unreference the previously looked up handle */ - if (hPrivDataInt) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + sPMDataAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); - if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { - if (psRenderContextInt) + if (OSCopyFromUser + (NULL, sPMDataAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psPMDataAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; } } - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ - -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); - - return 0; -} + { + sPMSecureDataAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); + } -static IMG_INT -PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, - IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = - (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = - (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) - IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + { + if (OSCopyFromUser + (NULL, sPMSecureDataAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psPMSecureDataAddr, + RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - /* Lock over handle destruction. 
*/ - LockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } - psRGXDestroyRenderContextOUT->eError = - PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, - (IMG_HANDLE) psRGXDestroyRenderContextIN-> - hCleanupCookie, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); - if (unlikely - ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) - && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) - && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) { - PVR_DPF((PVR_DBG_ERROR, - "%s: %s", - __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); - UnlockHandle(psConnection->psHandleBase); - goto RGXDestroyRenderContext_exit; + psapsFreeListsInt = + (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psapsFreeListsInt, 0, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); } - /* Release now we have destroyed handles. */ - UnlockHandle(psConnection->psHandleBase); + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -RGXDestroyRenderContext_exit: + goto RGXCreateHWRTDataSet_exit; + } + } - return 0; -} + { + sTailPtrsDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); + } -static IMG_INT -PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, - IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = - (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = - (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); + /* Copy the data over */ + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) + { + if (OSCopyFromUser + (NULL, sTailPtrsDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + goto RGXCreateHWRTDataSet_exit; + } + } + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + { + psKmHwRTDataSetInt = + (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psKmHwRTDataSetInt, 0, + RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); + hKmHwRTDataSetInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); + } /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXSetRenderContextPriorityOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXSetRenderContextPriority_exit; + IMG_UINT32 i; + + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) + { + /* Look up the address from the handle */ + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } } /* Release now we have looked up handles. */ UnlockHandle(psConnection->psHandleBase); - psRGXSetRenderContextPriorityOUT->eError = - PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), - psRenderContextInt, - psRGXSetRenderContextPriorityIN->i32Priority); + psRGXCreateHWRTDataSetOUT->eError = + RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), + psRGXCreateHWRTDataSetIN->sVHeapTableDevVAddr, + sPMDataAddrInt, + sPMSecureDataAddrInt, + psapsFreeListsInt, + sTailPtrsDevVAddrInt, + psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + goto RGXCreateHWRTDataSet_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + if (hKmHwRTDataSetInt2) + { + IMG_UINT32 i; + + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) + { + + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &hKmHwRTDataSetInt2[i], + (void *)psKmHwRTDataSetInt[i], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + IMG_UINT32 j; + /* Ensure the remaining handles are set to NULL. hKmHwRTDataSetInt2[i] was + * zeroed when calling PVRSRVAllocHandleUnlocked, so we start at the next + * element. If it was the last iteration, the loop doesn't run. + */ + for (j = i + 1; j < RGXMKIF_NUM_RTDATAS; j++) + { + hKmHwRTDataSetInt2[j] = NULL; + } + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + + } + } + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* If dest ptr is non-null and we have data to copy */ + if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet, + hKmHwRTDataSetInt2, + (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } -RGXSetRenderContextPriority_exit: +RGXCreateHWRTDataSet_exit: /* Lock over handle lookup cleanup. 
*/ LockHandle(psConnection->psHandleBase); - /* Unreference the previously looked up handle */ - if (psRenderContextInt) + if (hapsFreeListsInt2) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + IMG_UINT32 i; + + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) + { + + /* Unreference the previously looked up handle */ + if (psapsFreeListsInt && psapsFreeListsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); - return 0; -} + if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) + { + if (hKmHwRTDataSetInt2) + { + PVRSRV_ERROR eError; -static IMG_INT -PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXRenderContextStalledIN_UI8, - IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = - (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) - IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = - (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) - IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); - IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + { + IMG_UINT32 idx; + for (idx = 0; idx < RGXMKIF_NUM_RTDATAS; idx++) + { + if (hKmHwRTDataSetInt2[idx]) + { - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); + eError = + PVRSRVDestroyHandleUnlocked(psConnection-> + psHandleBase, + hKmHwRTDataSetInt2 + [idx], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); - /* Look up the address from the handle */ - psRGXRenderContextStalledOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXRenderContextStalled_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); + } + else if (psKmHwRTDataSetInt[idx]) + { + /* Free/Destroy/Release the resource */ + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[idx]); + } + } + } - psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); -RGXRenderContextStalled_exit: + } - /* Lock over handle lookup cleanup. 
*/ - LockHandle(psConnection->psHandleBase); + else if (psKmHwRTDataSetInt) + { + IMG_UINT32 i; + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) + { + if (psKmHwRTDataSetInt[i]) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[i]); + } + } + } - /* Unreference the previously looked up handle */ - if (psRenderContextInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); } - /* Release now we have cleaned up look up handles. */ - UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); return 0; } -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, - "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, - "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, - "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, - "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); + return eError; +} static IMG_INT -PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXKickTA3D2IN_UI8, - IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) +PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateFreeListIN_UI8, + IMG_UINT8 * psRGXCreateFreeListOUT_UI8, + CONNECTION_DATA * psConnection) { - PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = - (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = - (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); - - IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; - SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; - IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; - IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; - IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; - SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; - IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; - IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; - IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; - SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; - IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; - 
IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; - IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; - IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; - SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; - IMG_CHAR *uiUpdateFenceNameInt = NULL; - IMG_CHAR *uiUpdateFenceName3DInt = NULL; - IMG_BYTE *ui8TACmdInt = NULL; - IMG_BYTE *ui83DPRCmdInt = NULL; - IMG_BYTE *ui83DCmdInt = NULL; - IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet; - RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; - IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; - RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; - IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; - RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; - IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; - PMR **psSyncPMRsInt = NULL; - IMG_HANDLE *hSyncPMRsInt2 = NULL; - - IMG_UINT32 ui32NextOffset = 0; - IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) - IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif - - IMG_UINT32 ui32BufferSize = 0; - IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + - ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + - ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; - - if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; - } - - if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; - } + PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = + (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); - if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; - } + IMG_HANDLE hMemCtxPrivData = 
psRGXCreateFreeListIN->hMemCtxPrivData; + IMG_HANDLE hMemCtxPrivDataInt = NULL; + IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; + RGX_FREELIST *pssGlobalFreeListInt = NULL; + IMG_HANDLE hFreeListAndStateReservation = + psRGXCreateFreeListIN->hFreeListAndStateReservation; + DEVMEMINT_RESERVATION *psFreeListAndStateReservationInt = NULL; + RGX_FREELIST *psCleanupCookieInt = NULL; - if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; - } + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); - if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hMemCtxPrivDataInt, + hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; } - if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + if (psRGXCreateFreeListIN->hsGlobalFreeList) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssGlobalFreeListInt, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } } - if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psFreeListAndStateReservationInt, + hFreeListAndStateReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; - goto RGXKickTA3D2_exit; + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - if (ui64BufferSize > IMG_UINT32_MAX) + psRGXCreateFreeListOUT->eError = + RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), + hMemCtxPrivDataInt, + psRGXCreateFreeListIN->ui32MaxFLPages, + psRGXCreateFreeListIN->ui32InitFLPages, + psRGXCreateFreeListIN->ui32GrowFLPages, + psRGXCreateFreeListIN->ui32GrowParamThreshold, + pssGlobalFreeListInt, + psRGXCreateFreeListIN->bbFreeListCheck, + psFreeListAndStateReservationInt, &psCleanupCookieInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; - goto RGXKickTA3D2_exit; + goto RGXCreateFreeList_exit; } - ui32BufferSize = (IMG_UINT32) ui64BufferSize; + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); - if (ui32BufferSize != 0) + psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateFreeListOUT-> + hCleanupCookie, + (void *)psCleanupCookieInt, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateFreeListpsCleanupCookieIntRelease); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) { -#if !defined(INTEGRITY_OS) - /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ - IMG_UINT32 ui32InBufferOffset = - PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); - IMG_UINT32 ui32InBufferExcessSize = - ui32InBufferOffset >= - PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } - bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; - if (bHaveEnoughSpace) - { - IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; - } - else -#endif - { - pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); +RGXCreateFreeList_exit: - if (!pArrayArgsBuffer) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto RGXKickTA3D2_exit; - } - } - } + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + /* Unreference the previously looked up handle */ + if (hMemCtxPrivDataInt) { - psClientTAFenceSyncPrimBlockInt = - (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)); - ui32NextOffset += - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); - hClientTAFenceSyncPrimBlockInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) + if (psRGXCreateFreeListIN->hsGlobalFreeList) { - if (OSCopyFromUser - (NULL, hClientTAFenceSyncPrimBlockInt2, - (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + /* Unreference the previously looked up handle */ + if (pssGlobalFreeListInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); } } - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + + /* Unreference the previously looked up handle */ + if (psFreeListAndStateReservationInt) { - ui32ClientTAFenceSyncOffsetInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hFreeListAndStateReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) { - if (OSCopyFromUser - (NULL, ui32ClientTAFenceSyncOffsetInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + if (psCleanupCookieInt) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; + RGXDestroyFreeList(psCleanupCookieInt); } } - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) - { - ui32ClientTAFenceValueInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); - } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAFenceValueInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, - psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; +} - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) - { - psClientTAUpdateSyncPrimBlockInt = - (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)); - ui32NextOffset += - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); - hClientTAUpdateSyncPrimBlockInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); - } +static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); + return eError; +} - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) - { - if (OSCopyFromUser - (NULL, hClientTAUpdateSyncPrimBlockInt2, - (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; +static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) - { - ui32ClientTAUpdateSyncOffsetInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); - } +static IMG_INT +PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateRenderContextIN_UI8, + IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); - /* Copy 
the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAUpdateSyncOffsetInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + IMG_BYTE *ui8FrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *ui8StaticRenderContextStateInt = NULL; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) - { - ui32ClientTAUpdateValueInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); - } + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32ClientTAUpdateValueInt, - (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + + ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) + 0; - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) { - psClient3DUpdateSyncPrimBlockInt = - (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * - sizeof(SYNC_PRIMITIVE_BLOCK *)); - ui32NextOffset += - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); - hClient3DUpdateSyncPrimBlockInt2 = - (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) + if (unlikely + (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > + RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) { - if (OSCopyFromUser - (NULL, hClient3DUpdateSyncPrimBlockInt2, - (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; } - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + + if (ui64BufferSize > IMG_UINT32_MAX) { - ui32Client3DUpdateSyncOffsetInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXCreateRenderContext_exit; } - /* Copy the data over */ - if 
(psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) { - if (OSCopyFromUser - (NULL, ui32Client3DUpdateSyncOffsetInt, - (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; - goto RGXKickTA3D2_exit; + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } - } - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) - { - ui32Client3DUpdateValueInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); - } - - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32Client3DUpdateValueInt, - (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, - psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + else { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); - goto RGXKickTA3D2_exit; + if (!pArrayArgsBuffer) + { + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateRenderContext_exit; + } } } + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) { - uiUpdateFenceNameInt = - (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); } /* Copy the data over */ - if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) { if (OSCopyFromUser - (NULL, uiUpdateFenceNameInt, - (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, - PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + (NULL, ui8FrameworkCmdInt, + (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + goto RGXCreateRenderContext_exit; } - ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - - 1] = '\0'; } - + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) { - uiUpdateFenceName3DInt = - (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + ui8StaticRenderContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); } /* Copy the data over */ - if 
(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) { if (OSCopyFromUser - (NULL, uiUpdateFenceName3DInt, - (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, - PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + (NULL, ui8StaticRenderContextStateInt, + (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, + psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) != PVRSRV_OK) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - goto RGXKickTA3D2_exit; + goto RGXCreateRenderContext_exit; } - ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - - 1] = '\0'; } - if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateRenderContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) + psRGXCreateRenderContextOUT->eError = + PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), + psRGXCreateRenderContextIN->i32Priority, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize, + ui8FrameworkCmdInt, + hPrivDataInt, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize, + ui8StaticRenderContextStateInt, + psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, + psRGXCreateRenderContextIN->ui32ContextFlags, + psRGXCreateRenderContextIN->ui64RobustnessAddress, + psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, + psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, + &psRenderContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - if (OSCopyFromUser - (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, - psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; - } + goto RGXCreateRenderContext_exit; } - if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) + + /* Lock over handle creation. 
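The handlers in this file marshal their variable-length arguments in a common way: the total size is accumulated in a 64-bit value and rejected if it exceeds IMG_UINT32_MAX, spare space at the end of the input message is reused when it is large enough, and each array is then copied in at a running offset that is finally checked against the computed size. A minimal standalone sketch of that pattern follows; the names are hypothetical, and plain malloc/memcpy stand in for OSAllocMemNoStats/OSCopyFromUser.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_INLINE_SPARE 256u   /* hypothetical spare room in the request */

/* Pack two variable-length arrays into one scratch buffer at a running
 * offset, sizing with a 64-bit accumulator so a huge caller-supplied
 * count cannot wrap the 32-bit buffer size. */
static int marshal_two_arrays(const uint8_t *bytes, uint32_t byte_count,
			      const uint32_t *words, uint32_t word_count,
			      uint8_t inline_spare[MAX_INLINE_SPARE],
			      uint8_t **out_buf, int *out_on_heap)
{
	uint64_t need64 = (uint64_t)byte_count * sizeof(uint8_t) +
			  (uint64_t)word_count * sizeof(uint32_t);
	uint32_t need, offset = 0;
	uint8_t *buf;

	if (need64 > UINT32_MAX)
		return -1;			/* a 32-bit size would wrap */
	need = (uint32_t)need64;

	if (need <= MAX_INLINE_SPARE) {		/* reuse spare room in the request */
		buf = inline_spare;
		*out_on_heap = 0;
	} else {				/* otherwise fall back to the heap */
		buf = malloc(need);
		if (buf == NULL)
			return -1;
		*out_on_heap = 1;
	}

	if (byte_count) {			/* each array lands at a running offset */
		memcpy(buf + offset, bytes, byte_count);
		offset += byte_count;
	}
	if (word_count) {
		memcpy(buf + offset, words, (size_t)word_count * sizeof(uint32_t));
		offset += word_count * (uint32_t)sizeof(uint32_t);
	}

	*out_buf = buf;
	return (offset == need) ? 0 : -1;	/* mirrors the final offset assert */
}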
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateRenderContextOUT-> + hRenderContext, + (void *)psRenderContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateRenderContextpsRenderContextIntRelease); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) { - ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) - { - if (OSCopyFromUser - (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, - psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui323DCmdSize != 0) +RGXCreateRenderContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) { - ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) { - if (OSCopyFromUser - (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, - psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + if (psRenderContextInt) { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - - goto RGXKickTA3D2_exit; + PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); } } - if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) - { - ui32SyncPMRFlagsInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); - } - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) - { - if (OSCopyFromUser - (NULL, ui32SyncPMRFlagsInt, - (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, - psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RGXCreateHWRTDataSet2psKmHwRTDataSetIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_GEOMDATAS must not be 
larger than IMG_UINT32_MAX"); +static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, + "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); - goto RGXKickTA3D2_exit; - } - } - if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) - { - psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - OSCachedMemSet(psSyncPMRsInt, 0, - psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)); - ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); - hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); - } +static IMG_INT +PVRSRVBridgeRGXCreateHWRTDataSet2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSet2IN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSet2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2 *psRGXCreateHWRTDataSet2IN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2 *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSet2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2 *psRGXCreateHWRTDataSet2OUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2 *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSet2OUT_UI8, 0); + + IMG_HANDLE hMListsReservation = psRGXCreateHWRTDataSet2IN->hMListsReservation; + DEVMEMINT_RESERVATION *psMListsReservationInt = NULL; + IMG_HANDLE hPMStatesReservation = psRGXCreateHWRTDataSet2IN->hPMStatesReservation; + DEVMEMINT_RESERVATION *psPMStatesReservationInt = NULL; + IMG_HANDLE hPMSecureStatesReservation = + psRGXCreateHWRTDataSet2IN->hPMSecureStatesReservation; + DEVMEMINT_RESERVATION *psPMSecureStatesReservationInt = NULL; + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; + RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; + IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; - /* Copy the data over */ - if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) - { - if (OSCopyFromUser - (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, - psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) - { - psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; - goto RGXKickTA3D2_exit; - } - } + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + + ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; - /* Lock over handle lookup. 
*/ - LockHandle(psConnection->psHandleBase); + psRGXCreateHWRTDataSet2OUT->phKmHwRTDataSet = psRGXCreateHWRTDataSet2IN->phKmHwRTDataSet; - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + if (ui64BufferSize > IMG_UINT32_MAX) { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXCreateHWRTDataSet2_exit; } - { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) - { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psClientTAFenceSyncPrimBlockInt[i], - hClientTAFenceSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, - IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } - } + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + if (ui32BufferSize != 0) { - IMG_UINT32 i; + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSet2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **) - &psClientTAUpdateSyncPrimBlockInt[i], - hClientTAUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, - IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } - } - - { - IMG_UINT32 i; + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSet2IN; - for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **) - &psClient3DUpdateSyncPrimBlockInt[i], - hClient3DUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, - IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet2_exit; } } } - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psPRFenceUFOSyncPrimBlockInt, - hPRFenceUFOSyncPrimBlock, - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; + psapsFreeListsInt = + (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psapsFreeListsInt, 0, + RGXMKIF_NUM_RTDATA_FREELISTS * 
sizeof(RGX_FREELIST *)); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); } - if (psRGXKickTA3D2IN->hKMHWRTDataSet) + /* Copy the data over */ + if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psKMHWRTDataSetInt, - hKMHWRTDataSet, - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + if (OSCopyFromUser + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSet2IN->phapsFreeLists, + RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } - } + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - if (psRGXKickTA3D2IN->hZSBuffer) - { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psZSBufferInt, - hZSBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; + goto RGXCreateHWRTDataSet2_exit; } } - if (psRGXKickTA3D2IN->hMSAAScratchBuffer) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psMSAAScratchBufferInt, - hMSAAScratchBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } + sTailPtrsDevVAddrInt = + (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); } + /* Copy the data over */ + if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + if (OSCopyFromUser + (NULL, sTailPtrsDevVAddrInt, + (const void __user *)psRGXCreateHWRTDataSet2IN->psTailPtrsDevVAddr, + RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) { - /* Look up the address from the handle */ - psRGXKickTA3D2OUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psSyncPMRsInt[i], - hSyncPMRsInt2[i], - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); - if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXKickTA3D2_exit; - } + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet2_exit; } } - /* Release now we have looked up handles. 
*/ - UnlockHandle(psConnection->psHandleBase); - - psRGXKickTA3D2OUT->eError = - PVRSRVRGXKickTA3DKM(psRenderContextInt, - psRGXKickTA3D2IN->ui32ClientTAFenceCount, - psClientTAFenceSyncPrimBlockInt, - ui32ClientTAFenceSyncOffsetInt, - ui32ClientTAFenceValueInt, - psRGXKickTA3D2IN->ui32ClientTAUpdateCount, - psClientTAUpdateSyncPrimBlockInt, - ui32ClientTAUpdateSyncOffsetInt, - ui32ClientTAUpdateValueInt, - psRGXKickTA3D2IN->ui32Client3DUpdateCount, - psClient3DUpdateSyncPrimBlockInt, - ui32Client3DUpdateSyncOffsetInt, - ui32Client3DUpdateValueInt, - psPRFenceUFOSyncPrimBlockInt, - psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset, - psRGXKickTA3D2IN->ui32PRFenceValue, - psRGXKickTA3D2IN->hCheckFence, - psRGXKickTA3D2IN->hUpdateTimeline, - &psRGXKickTA3D2OUT->hUpdateFence, - uiUpdateFenceNameInt, - psRGXKickTA3D2IN->hCheckFence3D, - psRGXKickTA3D2IN->hUpdateTimeline3D, - &psRGXKickTA3D2OUT->hUpdateFence3D, - uiUpdateFenceName3DInt, - psRGXKickTA3D2IN->ui32TACmdSize, - ui8TACmdInt, - psRGXKickTA3D2IN->ui323DPRCmdSize, - ui83DPRCmdInt, - psRGXKickTA3D2IN->ui323DCmdSize, - ui83DCmdInt, - psRGXKickTA3D2IN->ui32ExtJobRef, - psRGXKickTA3D2IN->bbKickTA, - psRGXKickTA3D2IN->bbKickPR, - psRGXKickTA3D2IN->bbKick3D, - psRGXKickTA3D2IN->bbAbort, - psRGXKickTA3D2IN->ui32PDumpFlags, - psKMHWRTDataSetInt, - psZSBufferInt, - psMSAAScratchBufferInt, - psRGXKickTA3D2IN->ui32SyncPMRCount, - ui32SyncPMRFlagsInt, - psSyncPMRsInt, - psRGXKickTA3D2IN->ui32RenderTargetSize, - psRGXKickTA3D2IN->ui32NumberOfDrawCalls, - psRGXKickTA3D2IN->ui32NumberOfIndices, - psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); - -RGXKickTA3D2_exit: + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + { + psKmHwRTDataSetInt = + (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psKmHwRTDataSetInt, 0, + RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); + hKmHwRTDataSetInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); + } - /* Lock over handle lookup cleanup. */ + /* Lock over handle lookup. 
*/ LockHandle(psConnection->psHandleBase); - /* Unreference the previously looked up handle */ - if (psRenderContextInt) + /* Look up the address from the handle */ + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMListsReservationInt, + hMListsReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; + } + + /* Look up the address from the handle */ + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMStatesReservationInt, + hPMStatesReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; + } + + /* Look up the address from the handle */ + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMSecureStatesReservationInt, + hPMSecureStatesReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; } - if (hClientTAFenceSyncPrimBlockInt2) { IMG_UINT32 i; - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) { - - /* Unreference the previously looked up handle */ - if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i]) + /* Look up the address from the handle */ + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hClientTAFenceSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; } } } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSet2OUT->eError = + RGXCreateHWRTDataSet2(psConnection, OSGetDevNode(psConnection), + psRGXCreateHWRTDataSet2IN->sVHeapTableDevVAddr, + psMListsReservationInt, + psPMStatesReservationInt, + psPMSecureStatesReservationInt, + psapsFreeListsInt, + sTailPtrsDevVAddrInt, + psRGXCreateHWRTDataSet2IN->ui16MaxRTs, psKmHwRTDataSetInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) + { + goto RGXCreateHWRTDataSet2_exit; + } - if (hClientTAUpdateSyncPrimBlockInt2) + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + if (hKmHwRTDataSetInt2) { IMG_UINT32 i; - for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) { - /* Unreference the previously looked up handle */ - if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i]) + psRGXCreateHWRTDataSet2OUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &hKmHwRTDataSetInt2[i], + (void *)psKmHwRTDataSetInt[i], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSet2psKmHwRTDataSetIntRelease); + if (unlikely(psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK)) { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hClientTAUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + IMG_UINT32 j; + /* Ensure the remaining handles are set to NULL. hKmHwRTDataSetInt2[i] was + * zeroed when calling PVRSRVAllocHandleUnlocked, so we start at the next + * element. If it was the last iteration, the loop doesn't run. + */ + for (j = i + 1; j < RGXMKIF_NUM_RTDATAS; j++) + { + hKmHwRTDataSetInt2[j] = NULL; + } + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet2_exit; } + } } + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); - if (hClient3DUpdateSyncPrimBlockInt2) + /* If dest ptr is non-null and we have data to copy */ + if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) { - IMG_UINT32 i; - - for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psRGXCreateHWRTDataSet2OUT->phKmHwRTDataSet, + hKmHwRTDataSetInt2, + (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) { + psRGXCreateHWRTDataSet2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; - /* Unreference the previously looked up handle */ - if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i]) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hClient3DUpdateSyncPrimBlockInt2[i], - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); - } + goto RGXCreateHWRTDataSet2_exit; } } +RGXCreateHWRTDataSet2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + /* Unreference the previously looked up handle */ - if (psPRFenceUFOSyncPrimBlockInt) + if (psMListsReservationInt) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hPRFenceUFOSyncPrimBlock, - PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); - } - - if (psRGXKickTA3D2IN->hKMHWRTDataSet) - { - - /* Unreference the previously looked up handle */ - if (psKMHWRTDataSetInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hKMHWRTDataSet, - PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); - } + hMListsReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } - if (psRGXKickTA3D2IN->hZSBuffer) + /* Unreference the previously looked up handle */ + if (psPMStatesReservationInt) { - - /* Unreference the previously looked up handle */ - if (psZSBufferInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hZSBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); - } + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMStatesReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } - if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + /* Unreference the previously looked up handle */ + if (psPMSecureStatesReservationInt) { - - /* Unreference the previously looked up handle */ - if (psMSAAScratchBufferInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hMSAAScratchBuffer, - PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); - } + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMSecureStatesReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); } - if (hSyncPMRsInt2) + if (hapsFreeListsInt2) { IMG_UINT32 i; - for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) { /* Unreference the previously looked up handle */ - if (psSyncPMRsInt && psSyncPMRsInt[i]) + if (psapsFreeListsInt && psapsFreeListsInt[i]) { PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hSyncPMRsInt2[i], - PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); } } } /* Release now we have cleaned up look up handles. */ UnlockHandle(psConnection->psHandleBase); - /* Allocated space should be equal to the last updated offset */ -#ifdef PVRSRV_NEED_PVR_ASSERT - if (psRGXKickTA3D2OUT->eError == PVRSRV_OK) - PVR_ASSERT(ui32BufferSize == ui32NextOffset); -#endif /* PVRSRV_NEED_PVR_ASSERT */ + if (psRGXCreateHWRTDataSet2OUT->eError != PVRSRV_OK) + { + if (hKmHwRTDataSetInt2) + { + PVRSRV_ERROR eError; -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else - if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif - OSFreeMemNoStats(pArrayArgsBuffer); + /* Lock over handle creation cleanup. 
*/ + LockHandle(psConnection->psHandleBase); - return 0; -} + { + IMG_UINT32 idx; + for (idx = 0; idx < RGXMKIF_NUM_RTDATAS; idx++) + { + if (hKmHwRTDataSetInt2[idx]) + { -static IMG_INT -PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, - IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8, - IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8, - CONNECTION_DATA * psConnection) -{ - PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN = - (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); - PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT = - (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) - IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + eError = + PVRSRVDestroyHandleUnlocked(psConnection-> + psHandleBase, + hKmHwRTDataSetInt2 + [idx], + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely + ((eError != PVRSRV_OK) + && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) + || (eError == PVRSRV_ERROR_RETRY)); - IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext; - RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + } + else if (psKmHwRTDataSetInt[idx]) + { + /* Free/Destroy/Release the resource */ + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[idx]); + } + } + } - /* Lock over handle lookup. */ - LockHandle(psConnection->psHandleBase); + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); - /* Look up the address from the handle */ - psRGXSetRenderContextPropertyOUT->eError = - PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, - (void **)&psRenderContextInt, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); - if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXSetRenderContextProperty_exit; - } - /* Release now we have looked up handles. */ - UnlockHandle(psConnection->psHandleBase); + } - psRGXSetRenderContextPropertyOUT->eError = - PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, - psRGXSetRenderContextPropertyIN->ui32Property, - psRGXSetRenderContextPropertyIN->ui64Input, - &psRGXSetRenderContextPropertyOUT->ui64Output); + else if (psKmHwRTDataSetInt) + { + IMG_UINT32 i; + for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) + { + if (psKmHwRTDataSetInt[i]) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[i]); + } + } + } -RGXSetRenderContextProperty_exit: + } - /* Lock over handle lookup cleanup. */ - LockHandle(psConnection->psHandleBase); + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXCreateHWRTDataSet2OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ - /* Unreference the previously looked up handle */ - if (psRenderContextInt) - { - PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, - hRenderContext, - PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); - } - /* Release now we have cleaned up look up handles. 
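The error path above applies a per-slot cleanup rule to the array of newly created HWRT data sets: if a handle was already allocated for a slot, destroying the handle also releases the data set; if the data set exists but its handle allocation failed or was never reached, the data set has to be destroyed directly. A small standalone sketch of that rule, with hypothetical names in place of the driver's handle API:

#include <stdlib.h>

#define NUM_SLOTS 4

struct resource { int id; };

/* Hypothetical helpers: destroying a handle also releases the resource it
 * wraps; a resource that never got a handle must be released directly. */
static void handle_destroy(struct resource *res)   { free(res); }
static void resource_destroy(struct resource *res) { free(res); }

/* Error-path cleanup for a partially constructed array: each slot is either
 * fully registered (handle present), created but never registered, or still
 * NULL, and each case needs a different action. */
static void cleanup_partial(int have_handle[NUM_SLOTS],
			    struct resource *res[NUM_SLOTS])
{
	unsigned int i;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (have_handle[i]) {
			handle_destroy(res[i]);	/* releases the resource as well */
			have_handle[i] = 0;
		} else if (res[i]) {
			resource_destroy(res[i]);
		}
		res[i] = NULL;
	}
}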
*/ - UnlockHandle(psConnection->psHandleBase); + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); return 0; } @@ -2264,49 +2712,86 @@ void DeinitRGXTA3DBridge(void); PVRSRV_ERROR InitRGXTA3DBridge(void) { - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, - PVRSRVBridgeRGXCreateHWRTDataSet, NULL); - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, - PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); + PVRSRVBridgeRGXDestroyHWRTDataSet, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, - PVRSRVBridgeRGXCreateZSBuffer, NULL); + PVRSRVBridgeRGXCreateZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, - PVRSRVBridgeRGXDestroyZSBuffer, NULL); + PVRSRVBridgeRGXDestroyZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, - PVRSRVBridgeRGXPopulateZSBuffer, NULL); + PVRSRVBridgeRGXPopulateZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, - PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, - PVRSRVBridgeRGXCreateFreeList, NULL); + PVRSRVBridgeRGXUnpopulateZSBuffer, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER), + sizeof(PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, - PVRSRVBridgeRGXDestroyFreeList, NULL); - - SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, - PVRSRVBridgeRGXCreateRenderContext, NULL); + PVRSRVBridgeRGXDestroyFreeList, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, - PVRSRVBridgeRGXDestroyRenderContext, NULL); + PVRSRVBridgeRGXDestroyRenderContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE, + PVRSRVBridgeRGXSendZSStoreDisable, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE), + sizeof(PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, - PVRSRVBridgeRGXSetRenderContextPriority, NULL); + PVRSRVBridgeRGXSetRenderContextPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, - PVRSRVBridgeRGXRenderContextStalled, NULL); + PVRSRVBridgeRGXRenderContextStalled, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED), + sizeof(PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, - PVRSRVBridgeRGXKickTA3D2, NULL); + PVRSRVBridgeRGXKickTA3D2, NULL, 
sizeof(PVRSRV_BRIDGE_IN_RGXKICKTA3D2), + sizeof(PVRSRV_BRIDGE_OUT_RGXKICKTA3D2)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, - PVRSRVBridgeRGXSetRenderContextProperty, NULL); + PVRSRVBridgeRGXSetRenderContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, + PVRSRVBridgeRGXCreateHWRTDataSet, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, + PVRSRVBridgeRGXCreateFreeList, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEFREELIST), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, + PVRSRVBridgeRGXCreateRenderContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET2, + PVRSRVBridgeRGXCreateHWRTDataSet2, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET2), + sizeof(PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET2)); return PVRSRV_OK; } @@ -2317,8 +2802,6 @@ PVRSRV_ERROR InitRGXTA3DBridge(void) void DeinitRGXTA3DBridge(void) { - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); @@ -2329,16 +2812,13 @@ void DeinitRGXTA3DBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, - PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); - UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); @@ -2350,4 +2830,13 @@ void DeinitRGXTA3DBridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET2); + } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c index 99e6239cf7a9..4af3cfc35cba 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c @@ -136,15 +136,19 @@ PVRSRV_ERROR InitRGXTIMERQUERYBridge(void) 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY, - PVRSRVBridgeRGXBeginTimerQuery, NULL); + PVRSRVBridgeRGXBeginTimerQuery, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY), + sizeof(PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY, - PVRSRVBridgeRGXEndTimerQuery, NULL); + PVRSRVBridgeRGXEndTimerQuery, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer, - NULL); + NULL, sizeof(PVRSRV_BRIDGE_IN_RGXQUERYTIMER), + sizeof(PVRSRV_BRIDGE_OUT_RGXQUERYTIMER)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h index 9d8397fc90bf..f20fca015dc0 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/common_rgxtq2_bridge.h @@ -63,7 +63,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 #define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 #define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 -#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER3 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+8) /******************************************* RGXTDMCreateTransferContext @@ -78,6 +79,7 @@ typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG IMG_INT32 i32Priority; IMG_UINT32 ui32ContextFlags; IMG_UINT32 ui32FrameworkCmdSize; + IMG_UINT32 ui32MaxDeadlineMS; IMG_UINT32 ui32PackedCCBSizeU88; } __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; @@ -186,7 +188,6 @@ typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG { IMG_HANDLE hCLIPMRMem; - IMG_HANDLE hUSCPMRMem; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; @@ -225,4 +226,39 @@ typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; +/******************************************* + RGXTDMSubmitTransfer3 + *******************************************/ + +/* Bridge in structure for RGXTDMSubmitTransfer3 */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3_TAG +{ + IMG_UINT64 ui64DeadlineInus; + IMG_HANDLE hTransferContext; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_UINT32 *pui32UpdateSyncOffset; + IMG_UINT32 *pui32UpdateValue; + IMG_UINT8 *pui8FWCommand; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phSyncPMRs; + IMG_HANDLE *phUpdateUFOSyncPrimBlock; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_FENCE hExportFenceToSignal; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + IMG_UINT32 ui32ClientUpdateCount; + IMG_UINT32 ui32CommandSize; + IMG_UINT32 ui32ExternalJobReference; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32SyncPMRCount; +} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3; + +/* Bridge out structure for RGXTDMSubmitTransfer3 */ +typedef struct 
PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3; + #endif /* COMMON_RGXTQ2_BRIDGE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c index 7dcd29a5ba54..1129b824d850 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/rgxtq2_bridge/server_rgxtq2_bridge.c @@ -97,9 +97,7 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -136,7 +134,6 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long)); @@ -153,7 +150,6 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -214,7 +210,9 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, ui32PackedCCBSizeU88, psRGXTDMCreateTransferContextIN->ui32ContextFlags, psRGXTDMCreateTransferContextIN-> - ui64RobustnessAddress, &psTransferContextInt); + ui64RobustnessAddress, + psRGXTDMCreateTransferContextIN->ui32MaxDeadlineMS, + &psTransferContextInt); /* Exit early if bridged call fails */ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) { @@ -269,11 +267,7 @@ PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -512,9 +506,7 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -572,7 +564,6 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long)); @@ -588,7 +579,6 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -891,11 +881,7 @@ PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -908,13 +894,6 @@ static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) return eError; } -static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) -{ - PVRSRV_ERROR eError; - eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); - return eError; -} - static IMG_INT PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, @@ -929,7 +908,6 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); PMR *psCLIPMRMemInt = NULL; - PMR *psUSCPMRMemInt = NULL; { PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); @@ -949,7 +927,7 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, psRGXTDMGetSharedMemoryOUT->eError = PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), - &psCLIPMRMemInt, &psUSCPMRMemInt); + &psCLIPMRMemInt); /* Exit early if bridged call fails */ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) { @@ -973,20 +951,6 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, goto RGXTDMGetSharedMemory_exit; } - psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, - &psRGXTDMGetSharedMemoryOUT-> - hUSCPMRMem, - (void *)psUSCPMRMemInt, - PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, - PVRSRV_HANDLE_ALLOC_FLAG_MULTI, - (PFN_HANDLE_RELEASE) & - _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); - if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) - { - UnlockHandle(psConnection->psHandleBase); - goto RGXTDMGetSharedMemory_exit; - } - /* Release now we have created handles. 
*/ UnlockHandle(psConnection->psHandleBase); @@ -998,10 +962,6 @@ PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, { PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); } - if (psUSCPMRMemInt) - { - PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); - } } return 0; @@ -1134,6 +1094,424 @@ PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, return 0; } +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, + "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); +static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, + "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); +static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, + "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); + +static IMG_INT +PVRSRVBridgeRGXTDMSubmitTransfer3(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSubmitTransfer3IN_UI8, + IMG_UINT8 * psRGXTDMSubmitTransfer3OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3 *psRGXTDMSubmitTransfer3IN = + (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer3IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3 *psRGXTDMSubmitTransfer3OUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer3OUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer3IN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT8 *ui8FWCommandInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; + + IMG_UINT32 ui32BufferSize = 0; + IMG_UINT64 ui64BufferSize = + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *)) + + ((IMG_UINT64) psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer3_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer3IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer3_exit; + } + + if (unlikely(psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto 
RGXTDMSubmitTransfer3_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMSubmitTransfer3_exit; + } + } + + if (ui64BufferSize > IMG_UINT32_MAX) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + goto RGXTDMSubmitTransfer3_exit; + } + + ui32BufferSize = (IMG_UINT32) ui64BufferSize; + + if (ui32BufferSize != 0) + { + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer3IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer3IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else + { + pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMSubmitTransfer3_exit; + } + } + } + + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount != 0) + { + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psUpdateUFOSyncPrimBlockInt, 0, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *)); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXTDMSubmitTransfer3IN->phUpdateUFOSyncPrimBlock, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount != 0) + { + ui32UpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateSyncOffsetInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui32UpdateSyncOffset, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if 
(psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui32UpdateValue, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXTDMSubmitTransfer3IN->ui32CommandSize != 0) + { + ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) + { + if (OSCopyFromUser + (NULL, ui8FWCommandInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui8FWCommand, + psRGXTDMSubmitTransfer3IN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXTDMSubmitTransfer3IN->pui32SyncPMRFlags, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + OSCachedMemSet(psSyncPMRsInt, 0, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *)); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXTDMSubmitTransfer3IN->phSyncPMRs, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer3OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer3_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer3_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psUpdateUFOSyncPrimBlockInt[i], + hUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer3_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer3OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer3_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSubmitTransfer3OUT->eError = + PVRSRVRGXTDMSubmitTransfer3KM(psTransferContextInt, + psRGXTDMSubmitTransfer3IN->ui32PDumpFlags, + psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXTDMSubmitTransfer3IN->hCheckFenceFD, + psRGXTDMSubmitTransfer3IN->hUpdateTimeline, + &psRGXTDMSubmitTransfer3OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXTDMSubmitTransfer3IN->hExportFenceToSignal, + psRGXTDMSubmitTransfer3IN->ui32CommandSize, + ui8FWCommandInt, + psRGXTDMSubmitTransfer3IN->ui32ExternalJobReference, + psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXTDMSubmitTransfer3IN->ui32Characteristic1, + psRGXTDMSubmitTransfer3IN->ui32Characteristic2, + psRGXTDMSubmitTransfer3IN->ui64DeadlineInus); + +RGXTDMSubmitTransfer3_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer3IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (psSyncPMRsInt && psSyncPMRsInt[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ +#ifdef PVRSRV_NEED_PVR_ASSERT + if (psRGXTDMSubmitTransfer3OUT->eError == PVRSRV_OK) + PVR_ASSERT(ui32BufferSize == ui32NextOffset); +#endif /* PVRSRV_NEED_PVR_ASSERT */ + + if (!bHaveEnoughSpace && pArrayArgsBuffer) + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + /* *************************************************************************** * Server bridge dispatch related glue */ @@ -1149,32 +1527,52 @@ PVRSRV_ERROR InitRGXTQ2Bridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, - PVRSRVBridgeRGXTDMCreateTransferContext, NULL); + PVRSRVBridgeRGXTDMCreateTransferContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, - PVRSRVBridgeRGXTDMDestroyTransferContext, NULL); + PVRSRVBridgeRGXTDMDestroyTransferContext, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, - PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL); + PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, - PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL); + PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2, - PVRSRVBridgeRGXTDMSubmitTransfer2, NULL); + PVRSRVBridgeRGXTDMSubmitTransfer2, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, - PVRSRVBridgeRGXTDMGetSharedMemory, NULL); + PVRSRVBridgeRGXTDMGetSharedMemory, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, - PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL); + PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY, - PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL); + PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY)); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER3, + PVRSRVBridgeRGXTDMSubmitTransfer3, NULL, + sizeof(PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER3), + sizeof(PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER3)); return PVRSRV_OK; } @@ -1207,4 +1605,6 @@ void DeinitRGXTQ2Bridge(void) UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY); + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER3); + } diff --git 
a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_bridge.h index b3c42e6f496e..c419e21e9d07 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_bridge.h @@ -62,8 +62,7 @@ IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, const IMG_CHAR * puiTextB, IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size, - IMG_BOOL bIsImport, - IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle); + IMG_UINT64 ui64Flags, IMG_HANDLE * phRIHandle); IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, IMG_UINT32 ui32TextBSize, diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_direct_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_direct_bridge.c index 74eaf18e4aeb..de037ee78be5 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_direct_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/client_ri_direct_bridge.c @@ -70,21 +70,24 @@ IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, const IMG_CHAR * puiTextB, IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size, - IMG_BOOL bIsImport, - IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle) + IMG_UINT64 ui64Flags, IMG_HANDLE * phRIHandle) { PVRSRV_ERROR eError; PMR *psPMRHandleInt; RI_HANDLE psRIHandleInt = NULL; - PVR_UNREFERENCED_PARAMETER(hBridge); psPMRHandleInt = (PMR *) hPMRHandle; eError = - RIWriteMEMDESCEntryKM(psPMRHandleInt, + RIWriteMEMDESCEntryKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psPMRHandleInt, ui32TextBSize, - puiTextB, - ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt); + puiTextB, ui64Offset, ui64Size, ui64Flags, &psRIHandleInt); + + if (eError != PVRSRV_OK) + { + return eError; + } *phRIHandle = psRIHandleInt; return eError; diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/common_ri_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/common_ri_bridge.h index 967ed5395518..baa41adccdfe 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/common_ri_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/common_ri_bridge.h @@ -88,13 +88,12 @@ typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG /* Bridge in structure for RIWriteMEMDESCEntry */ typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG { + IMG_UINT64 ui64Flags; IMG_UINT64 ui64Offset; IMG_UINT64 ui64Size; IMG_HANDLE hPMRHandle; const IMG_CHAR *puiTextB; IMG_UINT32 ui32TextBSize; - IMG_BOOL bIsImport; - IMG_BOOL bIsSuballoc; } __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY; /* Bridge out structure for RIWriteMEMDESCEntry */ diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/server_ri_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/server_ri_bridge.c index 73806ccea9bb..f8a54d70ef0c 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/server_ri_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/ri_bridge/server_ri_bridge.c @@ -143,9 +143,7 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ 
-167,7 +165,6 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long)); @@ -183,7 +180,6 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -234,13 +230,13 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, UnlockHandle(psConnection->psHandleBase); psRIWriteMEMDESCEntryOUT->eError = - RIWriteMEMDESCEntryKM(psPMRHandleInt, + RIWriteMEMDESCEntryKM(psConnection, OSGetDevNode(psConnection), + psPMRHandleInt, psRIWriteMEMDESCEntryIN->ui32TextBSize, uiTextBInt, psRIWriteMEMDESCEntryIN->ui64Offset, psRIWriteMEMDESCEntryIN->ui64Size, - psRIWriteMEMDESCEntryIN->bIsImport, - psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt); + psRIWriteMEMDESCEntryIN->ui64Flags, &psRIHandleInt); /* Exit early if bridged call fails */ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) { @@ -295,11 +291,7 @@ PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -333,9 +325,7 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -357,7 +347,6 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long)); @@ -373,7 +362,6 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -455,11 +443,7 @@ PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -705,31 +689,47 @@ PVRSRV_ERROR InitRIBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, - PVRSRVBridgeRIWritePMREntry, NULL); + PVRSRVBridgeRIWritePMREntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, - PVRSRVBridgeRIWriteMEMDESCEntry, NULL); + PVRSRVBridgeRIWriteMEMDESCEntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, - PVRSRVBridgeRIWriteProcListEntry, NULL); + PVRSRVBridgeRIWriteProcListEntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, - PVRSRVBridgeRIUpdateMEMDESCAddr, NULL); + PVRSRVBridgeRIUpdateMEMDESCAddr, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR), + sizeof(PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, - PVRSRVBridgeRIDeleteMEMDESCEntry, NULL); + PVRSRVBridgeRIDeleteMEMDESCEntry, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY), + sizeof(PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList, - NULL); + NULL, + sizeof(PVRSRV_BRIDGE_IN_RIDUMPLIST), + sizeof(PVRSRV_BRIDGE_OUT_RIDUMPLIST)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll, - NULL); + NULL, 0, sizeof(PVRSRV_BRIDGE_OUT_RIDUMPALL)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, - PVRSRVBridgeRIDumpProcess, NULL); + PVRSRVBridgeRIDumpProcess, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIDUMPPROCESS), + sizeof(PVRSRV_BRIDGE_OUT_RIDUMPPROCESS)); SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, - PVRSRVBridgeRIWritePMREntryWithOwner, NULL); + PVRSRVBridgeRIWritePMREntryWithOwner, NULL, + sizeof(PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER), + sizeof(PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h index 9a3a3d186bb4..603ba5b1d89e 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/common_srvcore_bridge.h @@ -320,7 +320,7 @@ typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG /* Bridge in structure for FindProcessMemStats */ typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG { - IMG_UINT32 *pui32MemStatsArray; + IMG_UINT64 *pui64MemStatsArray; IMG_UINT32 
ui32ArrSize; IMG_UINT32 ui32PID; IMG_BOOL bbAllProcessStats; @@ -329,7 +329,7 @@ typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG /* Bridge out structure for FindProcessMemStats */ typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG { - IMG_UINT32 *pui32MemStatsArray; + IMG_UINT64 *pui64MemStatsArray; PVRSRV_ERROR eError; } __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c index d0dfdad448f0..d40e53b244f8 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/srvcore_bridge/server_srvcore_bridge.c @@ -457,9 +457,7 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -481,7 +479,6 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long)); @@ -497,7 +494,6 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -542,11 +538,7 @@ PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -587,9 +579,7 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -613,7 +603,6 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long)); @@ -629,7 +618,6 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -679,11 +667,7 @@ PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -758,17 +742,15 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8, 0); - IMG_UINT32 *pui32MemStatsArrayInt = NULL; + IMG_UINT64 *pui64MemStatsArrayInt = NULL; IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = - ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0; + ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) + 0; if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT) { @@ -778,7 +760,7 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, PVR_UNREFERENCED_PARAMETER(psConnection); - psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray; + psFindProcessMemStatsOUT->pui64MemStatsArray = psFindProcessMemStatsIN->pui64MemStatsArray; if (ui64BufferSize > IMG_UINT32_MAX) { @@ -790,7 +772,6 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long)); @@ -806,7 +787,6 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -820,16 +800,16 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, if (psFindProcessMemStatsIN->ui32ArrSize != 0) { - pui32MemStatsArrayInt = - (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); - ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32); + pui64MemStatsArrayInt = + (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64); } psFindProcessMemStatsOUT->eError = PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID, psFindProcessMemStatsIN->ui32ArrSize, psFindProcessMemStatsIN->bbAllProcessStats, - pui32MemStatsArrayInt); + pui64MemStatsArrayInt); /* Exit early if bridged call fails */ if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK)) { @@ -837,14 +817,14 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, } /* If dest ptr is non-null and we have data to copy */ - if ((pui32MemStatsArrayInt) && - ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)) + if ((pui64MemStatsArrayInt) && + ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) > 0)) { if (unlikely (OSCopyToUser - (NULL, (void __user *)psFindProcessMemStatsOUT->pui32MemStatsArray, - pui32MemStatsArrayInt, - (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK)) + (NULL, (void __user *)psFindProcessMemStatsOUT->pui64MemStatsArray, + pui64MemStatsArrayInt, + (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64))) != PVRSRV_OK)) { psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; @@ -860,11 +840,7 @@ PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -978,55 +954,82 @@ PVRSRV_ERROR InitSRVCOREBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, - PVRSRVBridgeConnect, NULL); + PVRSRVBridgeConnect, NULL, sizeof(PVRSRV_BRIDGE_IN_CONNECT), + sizeof(PVRSRV_BRIDGE_OUT_CONNECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, - PVRSRVBridgeDisconnect, NULL); + PVRSRVBridgeDisconnect, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_DISCONNECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, - PVRSRVBridgeAcquireGlobalEventObject, NULL); + PVRSRVBridgeAcquireGlobalEventObject, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, - PVRSRVBridgeReleaseGlobalEventObject, NULL); + PVRSRVBridgeReleaseGlobalEventObject, NULL, + sizeof(PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT), + sizeof(PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, - PVRSRVBridgeEventObjectOpen, NULL); + PVRSRVBridgeEventObjectOpen, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, 
PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, - PVRSRVBridgeEventObjectWait, NULL); + PVRSRVBridgeEventObjectWait, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, - PVRSRVBridgeEventObjectClose, NULL); + PVRSRVBridgeEventObjectClose, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, - PVRSRVBridgeDumpDebugInfo, NULL); + PVRSRVBridgeDumpDebugInfo, NULL, + sizeof(PVRSRV_BRIDGE_IN_DUMPDEBUGINFO), + sizeof(PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, - PVRSRVBridgeGetDevClockSpeed, NULL); + PVRSRVBridgeGetDevClockSpeed, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, - PVRSRVBridgeHWOpTimeout, NULL); + PVRSRVBridgeHWOpTimeout, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_HWOPTIMEOUT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, - PVRSRVBridgeAlignmentCheck, NULL); + PVRSRVBridgeAlignmentCheck, NULL, + sizeof(PVRSRV_BRIDGE_IN_ALIGNMENTCHECK), + sizeof(PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, - PVRSRVBridgeGetDeviceStatus, NULL); + PVRSRVBridgeGetDeviceStatus, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_GETDEVICESTATUS)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, - PVRSRVBridgeGetMultiCoreInfo, NULL); + PVRSRVBridgeGetMultiCoreInfo, NULL, + sizeof(PVRSRV_BRIDGE_IN_GETMULTICOREINFO), + sizeof(PVRSRV_BRIDGE_OUT_GETMULTICOREINFO)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, - PVRSRVBridgeEventObjectWaitTimeout, NULL); + PVRSRVBridgeEventObjectWaitTimeout, NULL, + sizeof(PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT), + sizeof(PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, - PVRSRVBridgeFindProcessMemStats, NULL); + PVRSRVBridgeFindProcessMemStats, NULL, + sizeof(PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS), + sizeof(PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, - PVRSRVBridgeAcquireInfoPage, NULL); + PVRSRVBridgeAcquireInfoPage, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, - PVRSRVBridgeReleaseInfoPage, NULL); + PVRSRVBridgeReleaseInfoPage, NULL, + sizeof(PVRSRV_BRIDGE_IN_RELEASEINFOPAGE), + sizeof(PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/sync_bridge/server_sync_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/sync_bridge/server_sync_bridge.c index 4788fc1fb723..a0d9210086c9 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/sync_bridge/server_sync_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/sync_bridge/server_sync_bridge.c @@ -167,17 +167,16 @@ PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... 
*/ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psSyncHandleInt = NULL; /* Release now we have cleaned up creation handles. */ UnlockHandle(psConnection->psHandleBase); } - if (psSyncHandleInt) + else if (psSyncHandleInt) { PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); } + } return 0; @@ -527,9 +526,7 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -551,7 +548,6 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long)); @@ -567,7 +563,6 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -615,11 +610,7 @@ PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -683,35 +674,53 @@ PVRSRV_ERROR InitSYNCBridge(void) { SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, - PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); + PVRSRVBridgeAllocSyncPrimitiveBlock, NULL, 0, + sizeof(PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, - PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); + PVRSRVBridgeFreeSyncPrimitiveBlock, NULL, + sizeof(PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK), + sizeof(PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, - PVRSRVBridgeSyncPrimSet, NULL); + PVRSRVBridgeSyncPrimSet, NULL, sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMSET), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMSET)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, - PVRSRVBridgeSyncPrimPDump, NULL); + PVRSRVBridgeSyncPrimPDump, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, - PVRSRVBridgeSyncPrimPDumpValue, NULL); + PVRSRVBridgeSyncPrimPDumpValue, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, - PVRSRVBridgeSyncPrimPDumpPol, NULL); + PVRSRVBridgeSyncPrimPDumpPol, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, - PVRSRVBridgeSyncPrimPDumpCBP, NULL); + PVRSRVBridgeSyncPrimPDumpCBP, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, - PVRSRVBridgeSyncAllocEvent, NULL); + PVRSRVBridgeSyncAllocEvent, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCALLOCEVENT), + sizeof(PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT)); 
SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, - PVRSRVBridgeSyncFreeEvent, NULL); + PVRSRVBridgeSyncFreeEvent, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFREEEVENT), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFREEEVENT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, - PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL); + PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL), + sizeof(PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/syncfallback_bridge/server_syncfallback_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/syncfallback_bridge/server_syncfallback_bridge.c index 800a9dd4c189..68e650d89602 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/syncfallback_bridge/server_syncfallback_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/syncfallback_bridge/server_syncfallback_bridge.c @@ -103,9 +103,7 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -128,7 +126,6 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbTimelineCreatePVRIN), sizeof(unsigned long)); @@ -144,7 +141,6 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -226,11 +222,7 @@ PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -391,9 +383,7 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -415,7 +405,6 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbFenceMergeIN), sizeof(unsigned long)); @@ -431,7 +420,6 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -556,11 +544,7 @@ PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -678,9 +662,7 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -717,7 +699,6 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbFenceDumpIN), sizeof(unsigned long)); @@ -733,7 +714,6 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -853,11 +833,7 @@ PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -891,9 +867,7 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -915,7 +889,6 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbTimelineCreateSWIN), sizeof(unsigned long)); @@ -931,7 +904,6 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -1013,11 +985,7 @@ PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -1053,9 +1021,7 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -1077,7 +1043,6 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncFbFenceCreateSWIN), sizeof(unsigned long)); @@ -1093,7 +1058,6 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -1204,11 +1168,7 @@ PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -1429,16 +1389,15 @@ PVRSRVBridgeSyncFbFenceExportInsecure(IMG_UINT32 ui32DispatchTableEntry, * This should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); - /* Avoid freeing/destroying/releasing the resource a second time below */ - psExportInt = NULL; /* Release now we have cleaned up creation handles. */ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); } - if (psExportInt) + else if (psExportInt) { SyncFbFenceExportDestroyInsecure(psExportInt); } + } return 0; @@ -1846,66 +1805,98 @@ PVRSRV_ERROR InitSYNCFALLBACKBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR, - PVRSRVBridgeSyncFbTimelineCreatePVR, NULL); + PVRSRVBridgeSyncFbTimelineCreatePVR, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE, - PVRSRVBridgeSyncFbTimelineRelease, NULL); + PVRSRVBridgeSyncFbTimelineRelease, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP, - PVRSRVBridgeSyncFbFenceDup, NULL); + PVRSRVBridgeSyncFbFenceDup, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE, - PVRSRVBridgeSyncFbFenceMerge, NULL); + PVRSRVBridgeSyncFbFenceMerge, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE, - PVRSRVBridgeSyncFbFenceRelease, NULL); + PVRSRVBridgeSyncFbFenceRelease, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT, - PVRSRVBridgeSyncFbFenceWait, NULL); + PVRSRVBridgeSyncFbFenceWait, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP, - PVRSRVBridgeSyncFbFenceDump, NULL); + PVRSRVBridgeSyncFbFenceDump, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW, - PVRSRVBridgeSyncFbTimelineCreateSW, NULL); + PVRSRVBridgeSyncFbTimelineCreateSW, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW, - 
PVRSRVBridgeSyncFbFenceCreateSW, NULL); + PVRSRVBridgeSyncFbFenceCreateSW, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW, - PVRSRVBridgeSyncFbTimelineAdvanceSW, NULL); + PVRSRVBridgeSyncFbTimelineAdvanceSW, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE, - PVRSRVBridgeSyncFbFenceExportInsecure, NULL); + PVRSRVBridgeSyncFbFenceExportInsecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE, - PVRSRVBridgeSyncFbFenceExportDestroyInsecure, NULL); + PVRSRVBridgeSyncFbFenceExportDestroyInsecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE, - PVRSRVBridgeSyncFbFenceImportInsecure, NULL); + PVRSRVBridgeSyncFbFenceImportInsecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE, - PVRSRVBridgeSyncFbFenceExportSecure, NULL); + PVRSRVBridgeSyncFbFenceExportSecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE, - PVRSRVBridgeSyncFbFenceExportDestroySecure, NULL); + PVRSRVBridgeSyncFbFenceExportDestroySecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE, - PVRSRVBridgeSyncFbFenceImportSecure, NULL); + PVRSRVBridgeSyncFbFenceImportSecure, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c index adc8ab487a12..06757e5ef8d3 100644 --- a/drivers/gpu/drm/img/img-volcanic/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c +++ b/drivers/gpu/drm/img/img-volcanic/generated/volcanic/synctracking_bridge/server_synctracking_bridge.c @@ -132,9 +132,7 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, IMG_UINT32 ui32NextOffset = 0; IMG_BYTE *pArrayArgsBuffer = NULL; -#if !defined(INTEGRITY_OS) IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -#endif IMG_UINT32 ui32BufferSize = 0; IMG_UINT64 ui64BufferSize = @@ -156,7 +154,6 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, if (ui32BufferSize != 0) { -#if !defined(INTEGRITY_OS) /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long)); @@ -172,7 +169,6 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; } else -#endif { pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); @@ -285,11 +281,7 @@ PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, PVR_ASSERT(ui32BufferSize == ui32NextOffset); #endif /* PVRSRV_NEED_PVR_ASSERT */ -#if defined(INTEGRITY_OS) - if (pArrayArgsBuffer) -#else if (!bHaveEnoughSpace && pArrayArgsBuffer) -#endif OSFreeMemNoStats(pArrayArgsBuffer); return 0; @@ -310,10 +302,14 @@ PVRSRV_ERROR InitSYNCTRACKINGBridge(void) SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, - PVRSRVBridgeSyncRecordRemoveByHandle, NULL); + PVRSRVBridgeSyncRecordRemoveByHandle, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE), + sizeof(PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE)); SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, - PVRSRVBridgeSyncRecordAdd, NULL); + PVRSRVBridgeSyncRecordAdd, NULL, + sizeof(PVRSRV_BRIDGE_IN_SYNCRECORDADD), + sizeof(PVRSRV_BRIDGE_OUT_SYNCRECORDADD)); return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h index ec1bbd6a423f..5f60752630c0 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (1U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h index b29afcf0afb0..1af637872cf9 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h @@ -57,6 +57,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (1U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h index a23aebf29bb2..3ad2665eb9ee 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h index 46543915e28c..4594c6429129 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_GS_RTA_SUPPORT #define RGX_FEATURE_LAYOUT_MARS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h index fd5a4d5ed20b..e271c768760d 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h index f98ba978cc57..8b76f2aa6cf0 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h index 0563b1bcb6d7..9d54fdd6a578 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_GS_RTA_SUPPORT #define RGX_FEATURE_LAYOUT_MARS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h index 05d0a793797e..ffe2926153ff 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_GS_RTA_SUPPORT #define RGX_FEATURE_LAYOUT_MARS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h index a16f199d5f4d..d59101b73ad6 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_GS_RTA_SUPPORT #define RGX_FEATURE_LAYOUT_MARS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h index 810b58bccfa9..fd7547bca34e 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h index 84f3454b7c38..6db0a9272d48 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_GS_RTA_SUPPORT #define RGX_FEATURE_LAYOUT_MARS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h index bd1d4de0d095..e79b894f1299 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h index ee4a2a5ad0ce..2b92c75a2d80 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h index 9d82a54ba0bd..d14c991870e1 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h index 35d2676b3fb2..0274380eafdd 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (3U) #define RGX_FEATURE_FBCDC_ALGORITHM (3U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h index f4c1bb5c551b..bae4231b4f9e 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h @@ -60,6 +60,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h index a7ee7251ce6e..805e6df66ab3 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h @@ -60,6 +60,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h index acc75a02a212..19155570aa2e 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_IRQ_PER_OS #define RGX_FEATURE_LAYOUT_MARS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h index 80a5d3c25155..a375f749cf46 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_GPU_VIRTUALISATION #define RGX_FEATURE_GS_RTA_SUPPORT #define RGX_FEATURE_IRQ_PER_OS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h index 0a6cdb0cda3f..48084d57d188 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -91,6 +92,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) #define RGX_FEATURE_SOC_TIMER #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) #define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h index 6de1c1b1698b..dc4a3590f0d9 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -91,6 +92,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) #define RGX_FEATURE_SOC_TIMER #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) #define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h index 9fe1913dab82..d4f87c4ffa4d 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h @@ -60,6 +60,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS #define RGX_FEATURE_ECC_RAMS (2U) +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -80,6 +81,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) #define RGX_FEATURE_RISCV_FW_PROCESSOR #define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SAFETY_IRQ #define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT #define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 #define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) @@ -92,6 +94,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) #define RGX_FEATURE_SOC_TIMER #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) #define RGX_FEATURE_TILE_REGION_PROTECTION #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.52.182.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.52.182.h new file mode 100644 index 000000000000..049939fbb452 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.52.182.h @@ -0,0 +1,105 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 36.V.52.182 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_36_V_52_182_H +#define RGXCONFIG_KM_36_V_52_182_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 36 +#define RGX_BNC_KM_N 52 +#define RGX_BNC_KM_C 182 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (50U) +#define RGX_FEATURE_FBCDC_ALGORITHM (50U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_LAYOUT_MARS (1U) +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) +#define RGX_FEATURE_TILE_SIZE_X (16U) +#define RGX_FEATURE_TILE_SIZE_Y (16U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_XE_ARCHITECTURE (1U) +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U) +#define RGX_FEATURE_XPU_MAX_SLAVES (3U) +#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U) + +#endif /* RGXCONFIG_KM_36_V_52_182_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h index bb77353940a1..3f8b4ffab362 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -90,6 +91,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) #define RGX_FEATURE_SOC_TIMER #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) #define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h index cfbf51f48629..78b53d52cf4a 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -91,6 +92,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) #define RGX_FEATURE_SOC_TIMER #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) #define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h index 244eac02f94b..d2f5dd4bab1b 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -91,6 +92,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) #define RGX_FEATURE_SOC_TIMER #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TFBC_VERSION (10U) #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) #define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.280.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.280.h index 3b232913413c..30bbaa68dcb5 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.280.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.280.h @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (50U) #define RGX_FEATURE_FBCDC_ALGORITHM (50U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) @@ -94,6 +95,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_TFBC_DELTA_CORRELATION #define RGX_FEATURE_TFBC_LOSSY_37_PERCENT #define RGX_FEATURE_TFBC_NATIVE_YUV10 +#define RGX_FEATURE_TFBC_VERSION (11U) #define RGX_FEATURE_TILE_SIZE_X (16U) #define RGX_FEATURE_TILE_SIZE_Y (16U) #define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h index b12f3c22f1a3..28114cad8c97 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h @@ -61,6 +61,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_MORTON_CAPABLE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h index 8a67d49cc1a4..99ee53e0c197 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h @@ -61,6 +61,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_MORTON_CAPABLE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h index 61c165e03eb7..c27adf56b55b 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h @@ -60,6 +60,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_MORTON_CAPABLE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h index 74aeef76d43e..2d4aa9599a2b 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h @@ -61,6 +61,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.102.389.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.102.389.h new file mode 100644 index 000000000000..a8eab1e08e4d --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.102.389.h @@ -0,0 +1,112 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 46.V.102.389 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_46_V_102_389_H +#define RGXCONFIG_KM_46_V_102_389_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 46 +#define RGX_BNC_KM_N 102 +#define RGX_BNC_KM_C 389 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (50U) +#define RGX_FEATURE_FBCDC_ALGORITHM (50U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_LAYOUT_MARS (1U) +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) +#define RGX_FEATURE_NUM_MEMBUS (1U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U) +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TFBC_DELTA_CORRELATION +#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT +#define RGX_FEATURE_TFBC_NATIVE_YUV10 +#define RGX_FEATURE_TFBC_VERSION (20U) +#define RGX_FEATURE_TILE_SIZE_X (16U) +#define RGX_FEATURE_TILE_SIZE_Y (16U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_VOLCANIC_TB +#define RGX_FEATURE_XE_ARCHITECTURE (1U) +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U) +#define RGX_FEATURE_XPU_MAX_SLAVES (3U) +#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U) + +#endif /* RGXCONFIG_KM_46_V_102_389_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.204.390.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.204.390.h new file mode 100644 index 000000000000..96544ed32b48 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_46.V.204.390.h @@ -0,0 +1,114 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 46.V.204.390 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_46_V_204_390_H +#define RGXCONFIG_KM_46_V_204_390_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 46 +#define RGX_BNC_KM_N 204 +#define RGX_BNC_KM_C 390 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_AXI_ACELITE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (50U) +#define RGX_FEATURE_FBCDC_ALGORITHM (50U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_IRQ_PER_OS +#define RGX_FEATURE_LAYOUT_MARS (1U) +#define RGX_FEATURE_MIPS +#define RGX_FEATURE_NUM_CLUSTERS (1U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (7U) +#define RGX_FEATURE_NUM_MEMBUS (1U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_RASTER_PIPES (1U) +#define RGX_FEATURE_PBE2_IN_XE +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) +#define RGX_FEATURE_ROGUEXE +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) +#define RGX_FEATURE_SLC_BANKS (1U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) +#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U) +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TFBC_DELTA_CORRELATION +#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT +#define RGX_FEATURE_TFBC_NATIVE_YUV10 +#define RGX_FEATURE_TFBC_VERSION (20U) +#define RGX_FEATURE_TILE_SIZE_X (16U) +#define RGX_FEATURE_TILE_SIZE_Y (16U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_VOLCANIC_TB +#define RGX_FEATURE_XE_ARCHITECTURE (1U) +#define RGX_FEATURE_XE_MEMORY_HIERARCHY +#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U) +#define RGX_FEATURE_XPU_MAX_SLAVES (3U) +#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U) + +#endif /* RGXCONFIG_KM_46_V_204_390_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h index c2698feb1272..1c9b7db878d3 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_COMPUTE #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h index 2bd20b2ba770..cbf5589ce79c 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h @@ -61,6 +61,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_COMPUTE_MORTON_CAPABLE #define RGX_FEATURE_COMPUTE_OVERLAP #define RGX_FEATURE_DYNAMIC_DUST_POWER +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC_ALGORITHM (2U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (2U) #define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h index 3d56965b06c6..9762fa901bda 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_33_15_11_3_H #define RGXCORE_KM_33_15_11_3_H -/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */ +/* Automatically generated file (14/08/2023 09:10:43): Do not edit manually */ /* CS: @5820045 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_63553 +#define FIX_HW_BRN_71242 #define FIX_HW_BRN_71317 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.29.52.182.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.29.52.182.h new file mode 100644 index 000000000000..7a5a87c84f7b --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.29.52.182.h @@ -0,0 +1,75 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 36.29.52.182 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_36_29_52_182_H +#define RGXCORE_KM_36_29_52_182_H + +/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */ +/* CS: @5908879 */ + +/****************************************************************************** + * BVNC = 36.29.52.182 + *****************************************************************************/ +#define RGX_BVNC_KM_B 36 +#define RGX_BVNC_KM_V 29 +#define RGX_BVNC_KM_N 52 +#define RGX_BVNC_KM_C 182 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_71317 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_47025 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_36_29_52_182_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h index 10f13f673df9..0eccc97305ab 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_36_50_54_182_H #define RGXCORE_KM_36_50_54_182_H -/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */ +/* Automatically generated file (14/08/2023 09:10:43): Do not edit manually */ /* CS: @5849603 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*****************************************************************************/ #define FIX_HW_BRN_63553 +#define FIX_HW_BRN_71242 #define FIX_HW_BRN_71317 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h index 54c974689f50..d14b662b9730 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_36_52_104_182_H #define RGXCORE_KM_36_52_104_182_H -/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */ +/* Automatically generated file (14/08/2023 09:10:43): Do not edit manually */ /* CS: @5849605 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_63553 +#define FIX_HW_BRN_71242 #define FIX_HW_BRN_71317 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.72.102.389.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.72.102.389.h new file mode 100644 index 000000000000..66eb259c0ebf --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.72.102.389.h @@ -0,0 +1,76 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 46.72.102.389 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_46_72_102_389_H +#define RGXCORE_KM_46_72_102_389_H + +/* Automatically generated file (07/02/2023 09:11:39): Do not edit manually */ +/* CS: @6385487 */ + +/****************************************************************************** + * BVNC = 46.72.102.389 + *****************************************************************************/ +#define RGX_BVNC_KM_B 46 +#define RGX_BVNC_KM_V 72 +#define RGX_BVNC_KM_N 102 +#define RGX_BVNC_KM_C 389 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_66927 +#define FIX_HW_BRN_71317 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_47025 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_46_72_102_389_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.94.204.390.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.94.204.390.h new file mode 100644 index 000000000000..548989eb518d --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/cores/rgxcore_km_46.94.204.390.h @@ -0,0 +1,76 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 46.94.204.390 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_46_94_204_390_H +#define RGXCORE_KM_46_94_204_390_H + +/* Automatically generated file (02/03/2023 09:11:42): Do not edit manually */ +/* CS: @6389604 */ + +/****************************************************************************** + * BVNC = 46.94.204.390 + *****************************************************************************/ +#define RGX_BVNC_KM_B 46 +#define RGX_BVNC_KM_V 94 +#define RGX_BVNC_KM_N 204 +#define RGX_BVNC_KM_C 390 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_63553 +#define FIX_HW_BRN_66927 +#define FIX_HW_BRN_71317 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_42290 +#define HW_ERN_42606 +#define HW_ERN_47025 +#define HW_ERN_57596 + + + +#endif /* RGXCORE_KM_46_94_204_390_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_defs_km.h index 792d9b26f55d..73c44b77f498 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_defs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_defs_km.h @@ -63,173 +63,134 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* Mask and bit-position macros for features without values *****************************************************************************/ -#define RGX_FEATURE_AXI_ACELITE_POS (0U) -#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) +#define RGX_FEATURE_AXI_ACELITE_POS (0U) +#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) -#define RGX_FEATURE_CLUSTER_GROUPING_POS (1U) -#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) +#define RGX_FEATURE_CLUSTER_GROUPING_POS (1U) +#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) -#define RGX_FEATURE_COMPUTE_POS (2U) -#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) +#define RGX_FEATURE_COMPUTE_POS (2U) +#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) -#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (3U) -#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (3U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) -#define RGX_FEATURE_COMPUTE_ONLY_POS (4U) -#define RGX_FEATURE_COMPUTE_ONLY_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) +#define RGX_FEATURE_COMPUTE_OVERLAP_POS (4U) +#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) -#define RGX_FEATURE_COMPUTE_OVERLAP_POS (5U) -#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) +#define RGX_FEATURE_COREID_PER_OS_POS (5U) +#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) -#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (6U) -#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) +#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (6U) +#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) -#define RGX_FEATURE_COREID_PER_OS_POS (7U) -#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) +#define RGX_FEATURE_FASTRENDER_DM_POS (7U) +#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) -#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (8U) -#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (8U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) -#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (9U) -#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) +#define RGX_FEATURE_GPU_VIRTUALISATION_POS (9U) +#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) -#define RGX_FEATURE_FASTRENDER_DM_POS (10U) -#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) +#define RGX_FEATURE_GS_RTA_SUPPORT_POS (10U) +#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) -#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (11U) -#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) +#define RGX_FEATURE_IRQ_PER_OS_POS (11U) +#define RGX_FEATURE_IRQ_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) -#define RGX_FEATURE_GPU_VIRTUALISATION_POS (12U) -#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) +#define RGX_FEATURE_MIPS_POS (12U) +#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) -#define RGX_FEATURE_GS_RTA_SUPPORT_POS (13U) -#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK 
(IMG_UINT64_C(0x0000000000002000)) +#define RGX_FEATURE_PBE2_IN_XE_POS (13U) +#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) -#define RGX_FEATURE_IRQ_PER_OS_POS (14U) -#define RGX_FEATURE_IRQ_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) +#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (14U) +#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) -#define RGX_FEATURE_META_DMA_POS (15U) -#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) +#define RGX_FEATURE_PBVNC_COREID_REG_POS (15U) +#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) -#define RGX_FEATURE_MIPS_POS (16U) -#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) +#define RGX_FEATURE_PERFBUS_POS (16U) +#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) -#define RGX_FEATURE_PBE2_IN_XE_POS (17U) -#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) +#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (17U) +#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) -#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (18U) -#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) +#define RGX_FEATURE_PM_MMU_VFP_POS (18U) +#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) -#define RGX_FEATURE_PBVNC_COREID_REG_POS (19U) -#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (19U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) -#define RGX_FEATURE_PDS_PER_DUST_POS (20U) -#define RGX_FEATURE_PDS_PER_DUST_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) +#define RGX_FEATURE_ROGUEXE_POS (20U) +#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) -#define RGX_FEATURE_PDS_TEMPSIZE8_POS (21U) -#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) +#define RGX_FEATURE_SAFETY_IRQ_POS (21U) +#define RGX_FEATURE_SAFETY_IRQ_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) -#define RGX_FEATURE_PERFBUS_POS (22U) -#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS (22U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) -#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (23U) -#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS (23U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) -#define RGX_FEATURE_PM_MMU_VFP_POS (24U) -#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS (24U) +#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) -#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (25U) -#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) +#define RGX_FEATURE_SINGLE_BIF_POS (25U) +#define RGX_FEATURE_SINGLE_BIF_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) -#define RGX_FEATURE_ROGUEXE_POS (26U) -#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) +#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_POS (26U) +#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) -#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (27U) -#define 
RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) +#define RGX_FEATURE_SOC_TIMER_POS (27U) +#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) -#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (28U) -#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (28U) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) -#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (29U) -#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (29U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) -#define RGX_FEATURE_SIGNAL_SNOOPING_POS (30U) -#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) +#define RGX_FEATURE_TFBC_DELTA_CORRELATION_POS (30U) +#define RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) -#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS (31U) -#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) +#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_POS (31U) +#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) -#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS (32U) -#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) +#define RGX_FEATURE_TFBC_NATIVE_YUV10_POS (32U) +#define RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) -#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS (33U) -#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) +#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (33U) +#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) -#define RGX_FEATURE_SINGLE_BIF_POS (34U) -#define RGX_FEATURE_SINGLE_BIF_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) +#define RGX_FEATURE_TLA_POS (34U) +#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) -#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS (35U) -#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (35U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) -#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_POS (36U) -#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (36U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) -#define RGX_FEATURE_SLC_VIVT_POS (37U) -#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (37U) +#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) -#define RGX_FEATURE_SOC_TIMER_POS (38U) -#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) +#define RGX_FEATURE_VOLCANIC_TB_POS (38U) +#define RGX_FEATURE_VOLCANIC_TB_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) -#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (39U) -#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) +#define RGX_FEATURE_WATCHDOG_TIMER_POS (39U) +#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) -#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (40U) 
-#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) +#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (40U) +#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) -#define RGX_FEATURE_TESSELLATION_POS (41U) -#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) +#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (41U) +#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) -#define RGX_FEATURE_TFBC_DELTA_CORRELATION_POS (42U) -#define RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) - -#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_POS (43U) -#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) - -#define RGX_FEATURE_TFBC_NATIVE_YUV10_POS (44U) -#define RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) - -#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (45U) -#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0000200000000000)) - -#define RGX_FEATURE_TLA_POS (46U) -#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000400000000000)) - -#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (47U) -#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000800000000000)) - -#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (48U) -#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0001000000000000)) - -#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (49U) -#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0002000000000000)) - -#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (50U) -#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0004000000000000)) - -#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (51U) -#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0008000000000000)) - -#define RGX_FEATURE_WATCHDOG_TIMER_POS (52U) -#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0010000000000000)) - -#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (53U) -#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0020000000000000)) - -#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (54U) -#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0040000000000000)) - -#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (55U) -#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0080000000000000)) +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (42U) +#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) /****************************************************************************** @@ -237,36 +198,36 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* for handling the corresponding values *****************************************************************************/ -#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2) -#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4) -#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (6) -#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (4) -#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (3) -#define RGX_FEATURE_META_MAX_VALUE_IDX (4) -#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (1) -#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (3) -#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (1) -#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (5) -#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9) -#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (3) -#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (3) -#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4) -#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (1) -#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (1) -#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3) -#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4) -#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (6) -#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (3) -#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (3) -#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX (2) -#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX (2) -#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (2) -#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (2) +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_FAULT_DECODE_VERSION_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (6U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_META_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (5U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9U) +#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (6U) +#define RGX_FEATURE_TB_GPU_COUNT_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (2U) 
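The renumbered *_POS/*_BIT_MASK pairs and the *_MAX_VALUE_IDX bounds above are consumed the same way as before. A minimal sketch, assuming a packed 64-bit feature word (ui64Features) and a hypothetical helper name that are not part of this patch: a feature without a value is a single bit, so its mask is simply 1 << POS, and the example mirrors the new DYNAMIC_DUST_POWER numbering (POS 6, mask 0x40).

/*
 * Illustrative sketch only: ui64Features and the example_* names are
 * hypothetical stand-ins, not symbols introduced by this patch.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_DYNAMIC_DUST_POWER_POS      (6U)
#define EXAMPLE_DYNAMIC_DUST_POWER_BIT_MASK (UINT64_C(0x0000000000000040))

static bool example_feature_supported(uint64_t ui64Features)
{
	/* A feature-without-value bit is tested by a plain AND with its mask. */
	return (ui64Features & EXAMPLE_DYNAMIC_DUST_POWER_BIT_MASK) != 0u;
}

Keeping both the POS and the BIT_MASK form of each feature lets callers either shift or mask, whichever reads better at the call site.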
/****************************************************************************** * Features with values indexes @@ -275,6 +236,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX, RGX_FEATURE_ECC_RAMS_IDX, + RGX_FEATURE_FAULT_DECODE_VERSION_IDX, RGX_FEATURE_FBCDC_IDX, RGX_FEATURE_FBCDC_ALGORITHM_IDX, RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, @@ -282,20 +244,19 @@ typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_IDX, RGX_FEATURE_LAYOUT_MARS_IDX, RGX_FEATURE_META_IDX, - RGX_FEATURE_META_COREMEM_BANKS_IDX, RGX_FEATURE_META_COREMEM_SIZE_IDX, - RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX, RGX_FEATURE_NUM_CLUSTERS_IDX, RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX, + RGX_FEATURE_NUM_MEMBUS_IDX, RGX_FEATURE_NUM_OSIDS_IDX, RGX_FEATURE_NUM_RASTER_PIPES_IDX, RGX_FEATURE_PHYS_BUS_WIDTH_IDX, - RGX_FEATURE_SCALABLE_TE_ARCH_IDX, - RGX_FEATURE_SCALABLE_VCE_IDX, RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX, RGX_FEATURE_SLC_BANKS_IDX, RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, + RGX_FEATURE_TB_GPU_COUNT_IDX, + RGX_FEATURE_TFBC_VERSION_IDX, RGX_FEATURE_TILE_SIZE_X_IDX, RGX_FEATURE_TILE_SIZE_Y_IDX, RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, @@ -311,65 +272,65 @@ typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { * Mask and bit-position macros for ERNs and BRNs *****************************************************************************/ -#define FIX_HW_BRN_38344_POS (0U) -#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) +#define FIX_HW_BRN_38344_POS (0U) +#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) -#define HW_ERN_42290_POS (1U) -#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) +#define HW_ERN_42290_POS (1U) +#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) -#define FIX_HW_BRN_42321_POS (2U) -#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) +#define FIX_HW_BRN_42321_POS (2U) +#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) -#define HW_ERN_42606_POS (3U) -#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) +#define HW_ERN_42606_POS (3U) +#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) -#define HW_ERN_46066_POS (4U) -#define HW_ERN_46066_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) +#define HW_ERN_47025_POS (4U) +#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) -#define HW_ERN_47025_POS (5U) -#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) +#define FIX_HW_BRN_50767_POS (5U) +#define FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) -#define HW_ERN_50539_POS (6U) -#define HW_ERN_50539_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) +#define HW_ERN_57596_POS (6U) +#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) -#define FIX_HW_BRN_50767_POS (7U) -#define FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) +#define FIX_HW_BRN_60084_POS (7U) +#define FIX_HW_BRN_60084_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) -#define HW_ERN_57596_POS (8U) -#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) +#define HW_ERN_61389_POS (8U) +#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) -#define FIX_HW_BRN_60084_POS (9U) -#define FIX_HW_BRN_60084_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) +#define FIX_HW_BRN_61450_POS (9U) +#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) -#define HW_ERN_61389_POS (10U) 
-#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) +#define FIX_HW_BRN_63142_POS (10U) +#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) -#define FIX_HW_BRN_61450_POS (11U) -#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) +#define FIX_HW_BRN_63553_POS (11U) +#define FIX_HW_BRN_63553_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) -#define FIX_HW_BRN_63142_POS (12U) -#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) +#define FIX_HW_BRN_64502_POS (12U) +#define FIX_HW_BRN_64502_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) -#define FIX_HW_BRN_63553_POS (13U) -#define FIX_HW_BRN_63553_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) +#define FIX_HW_BRN_65101_POS (13U) +#define FIX_HW_BRN_65101_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) -#define FIX_HW_BRN_64502_POS (14U) -#define FIX_HW_BRN_64502_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) +#define FIX_HW_BRN_65273_POS (14U) +#define FIX_HW_BRN_65273_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) -#define FIX_HW_BRN_65101_POS (15U) -#define FIX_HW_BRN_65101_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) +#define HW_ERN_66622_POS (15U) +#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) -#define FIX_HW_BRN_65273_POS (16U) -#define FIX_HW_BRN_65273_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) +#define FIX_HW_BRN_66927_POS (16U) +#define FIX_HW_BRN_66927_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) -#define HW_ERN_66622_POS (17U) -#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) +#define FIX_HW_BRN_68186_POS (17U) +#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) -#define FIX_HW_BRN_68186_POS (18U) -#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) +#define FIX_HW_BRN_71242_POS (18U) +#define FIX_HW_BRN_71242_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) -#define FIX_HW_BRN_71317_POS (19U) -#define FIX_HW_BRN_71317_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) +#define FIX_HW_BRN_71317_POS (19U) +#define FIX_HW_BRN_71317_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) /* Macro used for padding the unavailable values for features with values */ #define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_table_km.h index 15d9171cd34f..d10a0a1ab22d 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_table_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_bvnc_table_km.h @@ -68,7 +68,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
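Before the regenerated tables in rgx_bvnc_table_km.h below, a note on how their rows are keyed. Judging from the comments on the gaFeatures and gaErnsBrns entries further down (for example 0x00240000003400b6 next to /* 36.0.52.182 */), the first 64-bit word of each row packs the BVNC as four 16-bit fields, B in the top half-word down to C in the bottom. A minimal sketch of that packing, with a hypothetical helper name that is not part of this patch:

/*
 * Illustrative only: packs a BVNC the way the first column of the tables
 * below appears to be encoded (16 bits per field).
 * example_pack_bvnc(36, 0, 52, 182) == UINT64_C(0x00240000003400b6),
 * matching the 36.0.52.182 row added by this patch.
 */
#include <stdint.h>

static uint64_t example_pack_bvnc(uint16_t b, uint16_t v, uint16_t n, uint16_t c)
{
	return ((uint64_t)b << 48) | ((uint64_t)v << 32) |
	       ((uint64_t)n << 16) |  (uint64_t)c;
}

For gaErnsBrns the V field carries the core revision rather than zero (for example 36.50.54.182 packs to 0x00240032003600b6), which is consistent with the version comments alongside those entries.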
static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; -static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 2, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_FAULT_DECODE_VERSION_values[RGX_FEATURE_FAULT_DECODE_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 50, }; @@ -84,26 +86,20 @@ static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_ static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, }; -static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; - static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, }; -static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; - static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, }; static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, }; +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; + static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, }; static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, }; -static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; - -static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; - static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; @@ -112,6 +108,10 @@ static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FE static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, 16, 64, 128, }; +static const IMG_UINT16 aui16_RGX_FEATURE_TB_GPU_COUNT_values[RGX_FEATURE_TB_GPU_COUNT_MAX_VALUE_IDX] = 
{(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_TFBC_VERSION_values[RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 10, 11, 20, }; + static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, }; static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, }; @@ -133,9 +133,10 @@ static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEAT * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h *****************************************************************************/ -static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { +static const void * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values, aui16_RGX_FEATURE_ECC_RAMS_values, + aui16_RGX_FEATURE_FAULT_DECODE_VERSION_values, aui16_RGX_FEATURE_FBCDC_values, aui16_RGX_FEATURE_FBCDC_ALGORITHM_values, aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, @@ -143,20 +144,19 @@ static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values, aui16_RGX_FEATURE_LAYOUT_MARS_values, aui16_RGX_FEATURE_META_values, - aui16_RGX_FEATURE_META_COREMEM_BANKS_values, aui16_RGX_FEATURE_META_COREMEM_SIZE_values, - aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values, aui16_RGX_FEATURE_NUM_CLUSTERS_values, aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values, + aui16_RGX_FEATURE_NUM_MEMBUS_values, aui16_RGX_FEATURE_NUM_OSIDS_values, aui16_RGX_FEATURE_NUM_RASTER_PIPES_values, aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, - aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values, - aui16_RGX_FEATURE_SCALABLE_VCE_values, aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values, aui16_RGX_FEATURE_SLC_BANKS_values, aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, + aui16_RGX_FEATURE_TB_GPU_COUNT_values, + aui16_RGX_FEATURE_TFBC_VERSION_values, aui16_RGX_FEATURE_TILE_SIZE_X_values, aui16_RGX_FEATURE_TILE_SIZE_Y_values, aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, @@ -176,6 +176,7 @@ static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX, + RGX_FEATURE_FAULT_DECODE_VERSION_MAX_VALUE_IDX, RGX_FEATURE_FBCDC_MAX_VALUE_IDX, RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX, RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, @@ -183,20 +184,19 @@ static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX, RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX, RGX_FEATURE_META_MAX_VALUE_IDX, - RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX, RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX, - RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX, RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX, RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX, + RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX, RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX, RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX, RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, - RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX, - RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX, RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX, RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, 
RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, + RGX_FEATURE_TB_GPU_COUNT_MAX_VALUE_IDX, + RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX, RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX, RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX, RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, @@ -206,42 +206,45 @@ static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX, }; +#define RGX_FEATURE_VALUE_TYPE_UINT16 (0x0000U) +#define RGX_FEATURE_VALUE_TYPE_UINT32 (0x8000U) +#define RGX_FEATURE_TYPE_BIT_SHIFT 14 /****************************************************************************** * Bit-positions for features with values *****************************************************************************/ static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { - (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ - (2U), /* RGX_FEATURE_ECC_RAMS_POS */ - (4U), /* RGX_FEATURE_FBCDC_POS */ - (7U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ - (10U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ - (13U), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */ - (15U), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */ - (17U), /* RGX_FEATURE_LAYOUT_MARS_POS */ - (19U), /* RGX_FEATURE_META_POS */ - (22U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */ - (23U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */ - (25U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ - (26U), /* RGX_FEATURE_NUM_CLUSTERS_POS */ - (29U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ - (33U), /* RGX_FEATURE_NUM_OSIDS_POS */ - (35U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */ - (37U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ - (40U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ - (41U), /* RGX_FEATURE_SCALABLE_VCE_POS */ - (42U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */ - (44U), /* RGX_FEATURE_SLC_BANKS_POS */ - (47U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ - (49U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ - (52U), /* RGX_FEATURE_TILE_SIZE_X_POS */ - (54U), /* RGX_FEATURE_TILE_SIZE_Y_POS */ - (56U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ - (58U), /* RGX_FEATURE_XE_ARCHITECTURE_POS */ - (60U), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */ - (62U), /* RGX_FEATURE_XPU_MAX_SLAVES_POS */ - (64U), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */ + (0U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ + (2U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_ECC_RAMS_POS */ + (4U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FAULT_DECODE_VERSION_POS */ + (6U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_POS */ + (9U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ + (12U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ + (15U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */ + (17U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */ + (19U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_LAYOUT_MARS_POS */ + (21U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_POS */ + (24U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_SIZE_POS */ + (26U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_CLUSTERS_POS */ + (29U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ + (33U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_MEMBUS_POS */ + (35U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_OSIDS_POS */ + (37U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_RASTER_PIPES_POS */ + (39U) | 
RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ + (42U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */ + (44U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_BANKS_POS */ + (47U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ + (49U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ + (52U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TB_GPU_COUNT_POS */ + (54U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TFBC_VERSION_POS */ + (57U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_X_POS */ + (59U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_Y_POS */ + (61U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ + (64U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XE_ARCHITECTURE_POS */ + (66U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */ + (68U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_MAX_SLAVES_POS */ + (70U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */ }; @@ -252,34 +255,34 @@ static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */ (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000070)), /* RGX_FEATURE_FBCDC_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000380)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ - (IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ - (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000380000)), /* RGX_FEATURE_META_BIT_MASK */ - (IMG_UINT64_C(0x0000000000400000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ - (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ - (IMG_UINT64_C(0x0000000002000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000030)), /* RGX_FEATURE_FAULT_DECODE_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x00000000000001C0)), /* RGX_FEATURE_FBCDC_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000E00)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ + (IMG_UINT64_C(0x0000000000007000)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ + (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000180000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000E00000)), /* RGX_FEATURE_META_BIT_MASK */ + (IMG_UINT64_C(0x0000000003000000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ (IMG_UINT64_C(0x000000001C000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ (IMG_UINT64_C(0x00000001E0000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ - (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ - (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */ - (IMG_UINT64_C(0x000000E000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ - (IMG_UINT64_C(0x0000010000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ - (IMG_UINT64_C(0x0000020000000000)), /* 
RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ + (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */ + (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ + (IMG_UINT64_C(0x0000006000000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */ + (IMG_UINT64_C(0x0000038000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ (IMG_UINT64_C(0x00000C0000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */ (IMG_UINT64_C(0x0000700000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ (IMG_UINT64_C(0x000E000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ - (IMG_UINT64_C(0x0030000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ - (IMG_UINT64_C(0x00C0000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ - (IMG_UINT64_C(0x0300000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ - (IMG_UINT64_C(0x0C00000000000000)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */ - (IMG_UINT64_C(0x3000000000000000)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */ - (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */ + (IMG_UINT64_C(0x0030000000000000)), /* RGX_FEATURE_TB_GPU_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x01C0000000000000)), /* RGX_FEATURE_TFBC_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0600000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ + (IMG_UINT64_C(0x1800000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ + (IMG_UINT64_C(0x6000000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */ + (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000030)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */ + (IMG_UINT64_C(0x00000000000000C0)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */ }; @@ -290,39 +293,41 @@ static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { static const IMG_UINT64 gaFeatures[][4]= { - { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa8068689aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.2.30 */ - { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000400000402024), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.5 */ - { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.19 */ - { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068e912a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.51 */ - { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aa806ce912a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.58 */ - { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0082c04000c0222e), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.4.55 */ - { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aab074f112a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.6.62 */ - { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000004004402205), IMG_UINT64_C(0x05a69068248aa501), IMG_UINT64_C(0x0000000000000000) }, /* 5.0.1.46 */ - { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) }, /* 
6.0.4.35 */ - { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000004004403205), IMG_UINT64_C(0x05a8906c448aa501), IMG_UINT64_C(0x0000000000000000) }, /* 15.0.1.64 */ - { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0554942c44020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.21.16 */ - { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c64020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.25 */ - { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c84020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.30 */ - { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944c84020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.38 */ - { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c8402a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.330 */ - { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc4020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.104.18 */ - { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc402a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.104.218 */ - { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558a4550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.208.318 */ - { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984c8402a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.54.204 */ - { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984ca402a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.104.504 */ - { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.208.504 */ - { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.208.505 */ - { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x0556984c4402a621), IMG_UINT64_C(0x0000000000000000) }, /* 29.0.52.202 */ - { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x055aa854e802a621), IMG_UINT64_C(0x0000000000000000) }, /* 29.0.108.208 */ - { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x00400092844b5085), IMG_UINT64_C(0x0552984a24020001), IMG_UINT64_C(0x0000000000000000) }, /* 33.0.11.3 */ - { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x004180c2854b70a5), IMG_UINT64_C(0x0556984c44020001), IMG_UINT64_C(0x0000000000000000) }, /* 33.0.22.1 */ - { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x004180d2844b38a5), IMG_UINT64_C(0x0556984c8402aeb1), IMG_UINT64_C(0x0000000000000000) }, /* 36.0.54.103 */ - { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.182 */ - { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.183 */ - { IMG_UINT64_C(0x0024000000360118), IMG_UINT64_C(0x00419cd2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.280 */ - { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.182 */ - { 
IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.183 */ - { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x0071a0d2864a78a5), IMG_UINT64_C(0x5556984ca404aeb5), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.796 */ + { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000000400010414), IMG_UINT64_C(0x340a81a06d6a9211), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.5 */ + { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000000400010415), IMG_UINT64_C(0x340a81a06d6a9211), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.19 */ + { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0000042c0803045f), IMG_UINT64_C(0x340a81a0ea4aa411), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.51 */ + { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0000042c0803065f), IMG_UINT64_C(0x340a81b0ea4aa411), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.58 */ + { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0000042c0803045e), IMG_UINT64_C(0x340a81a0ee4aa411), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.4.55 */ + { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0000042c0803065f), IMG_UINT64_C(0x340ab1d0f24aa411), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.6.62 */ + { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000000008110445), IMG_UINT64_C(0x340691a0252a9411), IMG_UINT64_C(0x0000000000000001) }, /* 5.0.1.46 */ + { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0000042c0803045f), IMG_UINT64_C(0x340a81a0ee4aa411), IMG_UINT64_C(0x0000000000000000) }, /* 6.0.4.35 */ + { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000000008110645), IMG_UINT64_C(0x340891b0452a9411), IMG_UINT64_C(0x0000000000000001) }, /* 15.0.1.64 */ + { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a0494b044080011), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.21.16 */ + { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a0894b064080011), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.54.25 */ + { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a0894b084080011), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.54.30 */ + { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a08953084080011), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.54.38 */ + { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a0894b0840a9651), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.54.330 */ + { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a089530c4080011), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.104.18 */ + { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a089530c40a9651), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.104.218 */ + { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000001ad1b615), IMG_UINT64_C(0x2a08a551080a9651), IMG_UINT64_C(0x0000000000000001) }, /* 22.0.208.318 */ + { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x000002003951fe95), IMG_UINT64_C(0x2a089930840a9651), IMG_UINT64_C(0x0000000000000001) }, /* 24.0.54.204 */ + { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x000002003951fe95), IMG_UINT64_C(0x2a089930a40a9651), IMG_UINT64_C(0x0000000000000001) }, /* 24.0.104.504 */ + { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x000002003951fe95), IMG_UINT64_C(0x2a0aa951080a9651), IMG_UINT64_C(0x0000000000000001) }, /* 24.0.208.504 */ + { IMG_UINT64_C(0x0018000000d001f9), 
IMG_UINT64_C(0x000002003951fe95), IMG_UINT64_C(0x2a0aa951080a9651), IMG_UINT64_C(0x0000000000000001) }, /* 24.0.208.505 */ + { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x000002183951feb5), IMG_UINT64_C(0x2a069930440a9891), IMG_UINT64_C(0x0000000000000001) }, /* 29.0.52.202 */ + { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x000002183951feb5), IMG_UINT64_C(0x2a0aa950e80a9891), IMG_UINT64_C(0x0000000000000001) }, /* 29.0.108.208 */ + { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x000002001551ba25), IMG_UINT64_C(0x2a02992824080011), IMG_UINT64_C(0x0000000000000001) }, /* 33.0.11.3 */ + { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x000002181955be35), IMG_UINT64_C(0x2a06993044080011), IMG_UINT64_C(0x0000000000000001) }, /* 33.0.22.1 */ + { IMG_UINT64_C(0x00240000003400b6), IMG_UINT64_C(0x000002001d51bf35), IMG_UINT64_C(0x2a4699304412bad1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.52.182 */ + { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x000002181d51b735), IMG_UINT64_C(0x2a469930840abad1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.103 */ + { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x000002181d51bf35), IMG_UINT64_C(0x2a4699308412bad1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.54.182 */ + { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x000002181d51bf35), IMG_UINT64_C(0x2a4699308412bad1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.54.183 */ + { IMG_UINT64_C(0x0024000000360118), IMG_UINT64_C(0x00000219dd51bf35), IMG_UINT64_C(0x2a8699308412bad1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.54.280 */ + { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x000002181d51bf35), IMG_UINT64_C(0x2a469930a412bad1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.104.182 */ + { IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x000002181d51bf35), IMG_UINT64_C(0x2a469930a412bad1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.104.183 */ + { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x0000039a1d79af35), IMG_UINT64_C(0x2a469930a412bad9), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.104.796 */ + { IMG_UINT64_C(0x002e000000660185), IMG_UINT64_C(0x00000241dd51bf35), IMG_UINT64_C(0x2ad899324412bad5), IMG_UINT64_C(0x0000000000000055) }, /* 46.0.102.389 */ + { IMG_UINT64_C(0x002e000000cc0186), IMG_UINT64_C(0x00000259dd51bf35), IMG_UINT64_C(0x2ad89932c412bad5), IMG_UINT64_C(0x0000000000000055) }, /* 46.0.204.390 */ }; /****************************************************************************** @@ -333,47 +338,48 @@ static const IMG_UINT64 gaFeatures[][4]= static const IMG_UINT64 gaErnsBrns[][2]= { { IMG_UINT64_C(0x0001002700040013), IMG_UINT64_C(0x0000000000000005) }, /* 1.39.4.19 */ - { IMG_UINT64_C(0x0001004b0002001e), IMG_UINT64_C(0x0000000000000004) }, /* 1.75.2.30 */ { IMG_UINT64_C(0x0001005200040005), IMG_UINT64_C(0x0000000000000000) }, /* 1.82.4.5 */ - { IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x000000000000108a) }, /* 4.31.4.55 */ - { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000000108a) }, /* 4.40.2.51 */ - { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.43.6.62 */ - { IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x000000000000500a) }, /* 4.45.2.58 */ - { IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.46.6.62 */ + { IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x000000000000042a) }, /* 4.31.4.55 */ + { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000000042a) }, /* 4.40.2.51 */ + { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x000000000000142a) }, /* 4.43.6.62 */ + { 
IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x000000000000140a) }, /* 4.45.2.58 */ + { IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x000000000000142a) }, /* 4.46.6.62 */ { IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000001) }, /* 5.9.1.46 */ - { IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x000000000000100a) }, /* 6.34.4.35 */ - { IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000004008) }, /* 15.5.1.64 */ - { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000096b08) }, /* 22.30.54.25 */ - { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000096b08) }, /* 22.40.54.30 */ - { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x000000000009ea0a) }, /* 22.46.54.330 */ - { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000096b08) }, /* 22.49.21.16 */ - { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000096708) }, /* 22.67.54.30 */ - { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000096508) }, /* 22.68.54.30 */ - { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x000000000008e408) }, /* 22.86.104.218 */ - { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000086508) }, /* 22.87.104.18 */ - { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000086508) }, /* 22.102.54.38 */ - { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000008e40a) }, /* 22.104.208.318 */ - { IMG_UINT64_C(0x0016006900d0013e), IMG_UINT64_C(0x000000000008e40a) }, /* 22.105.208.318 */ - { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x00000000000a210a) }, /* 24.50.208.504 */ - { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x00000000000a210a) }, /* 24.56.208.505 */ - { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x00000000000a210a) }, /* 24.66.54.204 */ - { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x00000000000a210a) }, /* 24.67.104.504 */ - { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x00000000000e212a) }, /* 29.14.108.208 */ - { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x00000000000e212a) }, /* 29.19.52.202 */ - { IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000008212a) }, /* 33.8.22.1 */ - { IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x000000000008212a) }, /* 33.15.11.3 */ - { IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x000000000008212a) }, /* 36.50.54.182 */ - { IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x000000000008212a) }, /* 36.52.104.182 */ - { IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000008012a) }, /* 36.53.104.796 */ - { IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000008212a) }, /* 36.54.54.183 */ - { IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000008212a) }, /* 36.55.54.103 */ - { IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000008212a) }, /* 36.56.104.183 */ - { IMG_UINT64_C(0x0024003c00360118), IMG_UINT64_C(0x000000000008212a) }, /* 36.60.54.280 */ + { IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x000000000000040a) }, /* 6.34.4.35 */ + { IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000001008) }, /* 15.5.1.64 */ + { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000085ac8) }, /* 22.30.54.25 */ + { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000085ac8) }, /* 22.40.54.30 */ + { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x0000000000087a8a) }, /* 22.46.54.330 */ + { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000085ac8) }, /* 22.49.21.16 */ + { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x00000000000859c8) }, /* 22.67.54.30 */ + { IMG_UINT64_C(0x001600440036001e), 
IMG_UINT64_C(0x0000000000085948) }, /* 22.68.54.30 */ + { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x0000000000083908) }, /* 22.86.104.218 */ + { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000081948) }, /* 22.87.104.18 */ + { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000081948) }, /* 22.102.54.38 */ + { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000008390a) }, /* 22.104.208.318 */ + { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000000008884a) }, /* 24.50.208.504 */ + { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x000000000008884a) }, /* 24.56.208.505 */ + { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x000000000008884a) }, /* 24.66.54.204 */ + { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x000000000008884a) }, /* 24.67.104.504 */ + { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x00000000000a885a) }, /* 29.14.108.208 */ + { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x00000000000a885a) }, /* 29.19.52.202 */ + { IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000008085a) }, /* 33.8.22.1 */ + { IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x00000000000c085a) }, /* 33.15.11.3 */ + { IMG_UINT64_C(0x0024001d003400b6), IMG_UINT64_C(0x000000000008085a) }, /* 36.29.52.182 */ + { IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x00000000000c085a) }, /* 36.50.54.182 */ + { IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x00000000000c085a) }, /* 36.52.104.182 */ + { IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000008005a) }, /* 36.53.104.796 */ + { IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000008085a) }, /* 36.54.54.183 */ + { IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000008085a) }, /* 36.55.54.103 */ + { IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000008085a) }, /* 36.56.104.183 */ + { IMG_UINT64_C(0x0024003c00360118), IMG_UINT64_C(0x000000000008085a) }, /* 36.60.54.280 */ + { IMG_UINT64_C(0x002e004800660185), IMG_UINT64_C(0x000000000009085a) }, /* 46.72.102.389 */ + { IMG_UINT64_C(0x002e005e00cc0186), IMG_UINT64_C(0x000000000009085a) }, /* 46.94.204.390 */ }; #if defined(DEBUG) -#define FEATURE_NO_VALUES_NAMES_MAX_IDX (56) +#define FEATURE_NO_VALUES_NAMES_MAX_IDX (43U) static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] = { @@ -381,44 +387,32 @@ static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_ "CLUSTER_GROUPING", "COMPUTE", "COMPUTE_MORTON_CAPABLE", - "COMPUTE_ONLY", "COMPUTE_OVERLAP", - "COMPUTE_OVERLAP_WITH_BARRIERS", "COREID_PER_OS", - "DUST_POWER_ISLAND_S7", "DYNAMIC_DUST_POWER", "FASTRENDER_DM", "GPU_MULTICORE_SUPPORT", "GPU_VIRTUALISATION", "GS_RTA_SUPPORT", "IRQ_PER_OS", - "META_DMA", "MIPS", "PBE2_IN_XE", "PBE_CHECKSUM_2D", "PBVNC_COREID_REG", - "PDS_PER_DUST", - "PDS_TEMPSIZE8", "PERFBUS", "PERF_COUNTER_BATCH", "PM_MMU_VFP", "RISCV_FW_PROCESSOR", "ROGUEXE", - "S7_CACHE_HIERARCHY", - "S7_TOP_INFRASTRUCTURE", - "SCALABLE_VDM_GPP", - "SIGNAL_SNOOPING", + "SAFETY_IRQ", "SIMPLE_INTERNAL_PARAMETER_FORMAT", "SIMPLE_INTERNAL_PARAMETER_FORMAT_V1", "SIMPLE_INTERNAL_PARAMETER_FORMAT_V2", "SINGLE_BIF", - "SLC_HYBRID_CACHELINE_64_128", "SLC_SIZE_CONFIGURABLE", - "SLC_VIVT", "SOC_TIMER", "SYS_BUS_SECURE_RESET", "TDM_PDS_CHECKSUM", - "TESSELLATION", "TFBC_DELTA_CORRELATION", "TFBC_LOSSY_37_PERCENT", "TFBC_NATIVE_YUV10", @@ -427,15 +421,14 @@ static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_ "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS", "TPU_DM_GLOBAL_REGISTERS", 
"TPU_FILTERING_MODE_CONTROL", - "VDM_DRAWINDIRECT", - "VDM_OBJECT_LEVEL_LLS", + "VOLCANIC_TB", "WATCHDOG_TIMER", "WORKGROUP_PROTECTION", "XE_MEMORY_HIERARCHY", "XT_TOP_INFRASTRUCTURE", }; -#define ERNSBRNS_IDS_MAX_IDX (20) +#define ERNSBRNS_IDS_MAX_IDX (20U) static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = { @@ -443,9 +436,7 @@ static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = 42290, 42321, 42606, - 46066, 47025, - 50539, 50767, 57596, 60084, @@ -457,7 +448,9 @@ static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = 65101, 65273, 66622, + 66927, 68186, + 71242, 71317, }; diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_cr_defs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_cr_defs_km.h index 9d230f8bf81c..1326877b50be 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_cr_defs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgx_cr_defs_km.h @@ -63,8129 +63,8407 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Register RGX_CR_RASTERISATION_INDIRECT */ -#define RGX_CR_RASTERISATION_INDIRECT (0x8238U) -#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_RASTERISATION_INDIRECT (0x8238U) +#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_USC_INDIRECT */ -#define RGX_CR_USC_INDIRECT (0x8000U) -#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_USC_INDIRECT (0x8000U) +#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_PBE_INDIRECT */ -#define RGX_CR_PBE_INDIRECT (0x83E0U) -#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_PBE_INDIRECT (0x83E0U) +#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_PBE_PERF_INDIRECT */ -#define RGX_CR_PBE_PERF_INDIRECT (0x83D8U) -#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_PBE_PERF_INDIRECT (0x83D8U) +#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_TPU_PERF_INDIRECT */ -#define RGX_CR_TPU_PERF_INDIRECT (0x83F0U) -#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_TPU_PERF_INDIRECT (0x83F0U) +#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define 
RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) /* Register RGX_CR_RASTERISATION_PERF_INDIRECT */ -#define RGX_CR_RASTERISATION_PERF_INDIRECT (0x8318U) -#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_RASTERISATION_PERF_INDIRECT (0x8318U) +#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT */ -#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT (0x8028U) -#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT (0x8028U) +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) /* Register RGX_CR_USC_PERF_INDIRECT */ -#define RGX_CR_USC_PERF_INDIRECT (0x8030U) -#define RGX_CR_USC_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_USC_PERF_INDIRECT (0x8030U) +#define RGX_CR_USC_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_BLACKPEARL_INDIRECT */ -#define RGX_CR_BLACKPEARL_INDIRECT (0x8388U) -#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_BLACKPEARL_INDIRECT (0x8388U) +#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_BLACKPEARL_PERF_INDIRECT */ -#define RGX_CR_BLACKPEARL_PERF_INDIRECT (0x83F8U) -#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT (0x83F8U) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_TEXAS3_PERF_INDIRECT */ -#define RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U) -#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U) +#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) /* Register RGX_CR_TEXAS_PERF_INDIRECT */ -#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U) -#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL 
(IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U) +#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_BX_TU_PERF_INDIRECT */ -#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U) -#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U) +#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_CLK_CTRL */ -#define RGX_CR_CLK_CTRL (0x0000U) -#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) -#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F)) -#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) -#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U) -#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) -#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U) -#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U) -#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) -#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U) -#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U) -#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_CLK_CTRL_USCS_SHIFT (52U) -#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U) -#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT 
(48U) -#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U) -#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U) -#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT (42U) -#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U) -#define RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U) -#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) -#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U) -#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) -#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U) -#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -#define RGX_CR_CLK_CTRL_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_CLK_CTRL_USC_SHIFT (20U) -#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U) -#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U) -#define RGX_CR_CLK_CTRL_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_SLC_ON (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_CLK_CTRL_SLC_AUTO 
(IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U) -#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) -#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_UVS_ON (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U) -#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U) -#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_CLK_CTRL_PM_SHIFT (8U) -#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U) -#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) -#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_CLK_CTRL_TE_SHIFT (4U) -#define RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U) -#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) -#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U) -#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL (0x0000U) +#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) +#define RGX_CR_CLK_CTRL__S7_INFRA__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F)) +#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS__S7_INFRA__ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS__S7_INFRA__AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS__PBE2_XE__ON 
(IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_TEXAS__PBE2_XE__AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U) +#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL_IPP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_IPP__S7_INFRA__ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL_IPP__S7_INFRA__AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL_IPP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_IPP__PBE2_XE__ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL_IPP__PBE2_XE__AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U) +#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL_FBC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBC__S7_INFRA__ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL_FBC__S7_INFRA__AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL_FBC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBC__PBE2_XE__ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL_FBC__PBE2_XE__AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U) +#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL_FBDC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBDC__S7_INFRA__ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL_FBDC__S7_INFRA__AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL_FBDC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FBDC__PBE2_XE__ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL_FBDC__PBE2_XE__AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE__S7_INFRA__ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE__S7_INFRA__AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE__PBE2_XE__ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL_FB_TLCACHE__PBE2_XE__AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL_USCS_SHIFT (52U) +#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_CLK_CTRL_USCS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USCS__S7_INFRA__ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL_USCS__S7_INFRA__AUTO (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_CLK_CTRL_USCS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USCS__PBE2_XE__ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL_USCS__PBE2_XE__AUTO (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U) +#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL_PBE__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PBE__S7_INFRA__ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL_PBE__S7_INFRA__AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL_PBE__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PBE__PBE2_XE__ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL_PBE__PBE2_XE__AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT (48U) +#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1__S7_INFRA__ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1__S7_INFRA__AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1__PBE2_XE__ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L1__PBE2_XE__AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U) +#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_CLK_CTRL_CDM__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_CDM__S7_INFRA__ON (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_CLK_CTRL_CDM__S7_INFRA__AUTO (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_CLK_CTRL_CDM__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_CDM__PBE2_XE__ON (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_CLK_CTRL_CDM__PBE2_XE__AUTO (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U) +#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK__S7_INFRA__ON (IMG_UINT64_C(0x0000100000000000)) +#define 
RGX_CR_CLK_CTRL_SIDEKICK__S7_INFRA__AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK__PBE2_XE__ON (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_CLK_CTRL_SIDEKICK__PBE2_XE__AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT (42U) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__S7_INFRA__ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__S7_INFRA__AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__PBE2_XE__ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__PBE2_XE__AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U) +#define RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL_BIF__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF__S7_INFRA__ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL_BIF__S7_INFRA__AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL_BIF__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_BIF__PBE2_XE__ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL_BIF__PBE2_XE__AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__S7_INFRA__ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__PBE2_XE__ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U) +#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL_MCU_L0__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_MCU_L0__S7_INFRA__ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL_MCU_L0__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL_MCU_L0__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) 
+#define RGX_CR_CLK_CTRL_MCU_L0__PBE2_XE__ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL_MCU_L0__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U) +#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL_TPU__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU__S7_INFRA__ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL_TPU__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL_TPU__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TPU__PBE2_XE__ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL_TPU__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL_USC_SHIFT (20U) +#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL_USC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USC__S7_INFRA__ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL_USC__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL_USC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_USC__PBE2_XE__ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL_USC__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U) +#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL_TLA__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TLA__S7_INFRA__ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL_TLA__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL_TLA__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TLA__PBE2_XE__ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL_TLA__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U) +#define RGX_CR_CLK_CTRL_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SLC_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL_SLC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SLC__S7_INFRA__ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL_SLC__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL_SLC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_SLC__PBE2_XE__ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL_SLC__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U) +#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_UVS_ON 
(IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL_UVS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_UVS__S7_INFRA__ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL_UVS__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL_UVS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_UVS__PBE2_XE__ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL_UVS__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U) +#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL_PDS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PDS__S7_INFRA__ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL_PDS__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL_PDS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PDS__PBE2_XE__ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL_PDS__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U) +#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL_VDM__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_VDM__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL_VDM__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL_VDM__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_VDM__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL_VDM__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL_PM_SHIFT (8U) +#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL_PM__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PM__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL_PM__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL_PM__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_PM__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL_PM__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U) +#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL_GPP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_GPP__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL_GPP__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL_GPP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) 
+#define RGX_CR_CLK_CTRL_GPP__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL_GPP__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL_TE_SHIFT (4U) +#define RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL_TE__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TE__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL_TE__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL_TE__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TE__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL_TE__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U) +#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL_TSP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TSP__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL_TSP__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL_TSP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_TSP__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL_TSP__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U) +#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL_ISP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_ISP__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL_ISP__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL_ISP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL_ISP__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL_ISP__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000002)) /* Register RGX_CR_CLK_STATUS */ -#define RGX_CR_CLK_STATUS (0x0008U) -#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) -#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL (IMG_UINT64_C(0x00000001B3101773)) -#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) -#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U) -#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U) -#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U) -#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_CLK_STATUS_IPP_GATED 
(IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U) -#define RGX_CR_CLK_STATUS_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U) -#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U) -#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U) -#define RGX_CR_CLK_STATUS_USCS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U) -#define RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U) -#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U) -#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U) -#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_CLK_STATUS_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U) -#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U) -#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U) -#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U) -#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U) -#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define 
RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_CLK_STATUS_USC_SHIFT (10U) -#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U) -#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_TLA_RUNNING (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U) -#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U) -#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U) -#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U) -#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_CLK_STATUS_PM_SHIFT (4U) -#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U) -#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_CLK_STATUS_TE_SHIFT (2U) -#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U) -#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U) -#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_STATUS (0x0008U) +#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) +#define RGX_CR_CLK_STATUS__S7_INFRA__MASKFULL (IMG_UINT64_C(0x00000001B3101773)) +#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) +#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U) +#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000)) 
+#define RGX_CR_CLK_STATUS_MCU_FBTC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_FBTC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_STATUS_MCU_FBTC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_FBTC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_TEXAS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U) +#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CLK_STATUS_IPP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_STATUS_IPP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_IPP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_STATUS_IPP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_IPP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U) +#define RGX_CR_CLK_STATUS_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS_FBC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS_FBC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U) +#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_STATUS_FBDC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBDC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_STATUS_FBDC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FBDC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_FB_TLCACHE__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U) +#define RGX_CR_CLK_STATUS_USCS_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS_USCS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USCS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS_USCS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USCS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U) +#define RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS_PBE__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PBE__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS_PBE__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PBE__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U) +#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS_MCU_L1__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L1__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS_MCU_L1__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L1__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U) +#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_STATUS_CDM__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_CDM__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_STATUS_CDM__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_CDM__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U) +#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CLK_STATUS_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_STATUS_SIDEKICK__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SIDEKICK__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_STATUS_SIDEKICK__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SIDEKICK__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_STATUS_BIF_SIDEKICK__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U) +#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS_BIF__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS_BIF__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_BIF__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U) +#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_STATUS_MCU_L0__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L0__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_STATUS_MCU_L0__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_MCU_L0__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U) +#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS_TPU__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS_TPU__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TPU__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS_USC_SHIFT (10U) +#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS_USC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS_USC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_USC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U) +#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TLA_RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS_TLA__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_STATUS_TLA__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS_TLA__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TLA__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U) +#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS_SLC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SLC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS_SLC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_SLC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U) +#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS_UVS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_UVS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS_UVS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_UVS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U) +#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS_PDS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PDS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS_PDS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PDS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U) +#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS_VDM__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_VDM__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS_VDM__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_VDM__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS_PM_SHIFT (4U) +#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS_PM__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PM__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS_PM__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_PM__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U) +#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS_GPP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) 
+#define RGX_CR_CLK_STATUS_GPP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS_GPP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_GPP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS_TE_SHIFT (2U) +#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS_TE__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TE__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS_TE__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TE__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U) +#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS_TSP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TSP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS_TSP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_TSP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U) +#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_STATUS_ISP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_ISP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_STATUS_ISP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS_ISP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_CORE_ID */ -#define RGX_CR_CORE_ID__PBVNC (0x0020U) -#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U) -#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U) -#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) -#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U) -#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_CORE_ID__PBVNC (0x0020U) +#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U) +#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U) +#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) +#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U) +#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_CORE_ID */ -#define RGX_CR_CORE_ID (0x0018U) -#define RGX_CR_CORE_ID_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_CORE_ID_ID_SHIFT (16U) -#define RGX_CR_CORE_ID_ID_CLRMSK (0x0000FFFFU) -#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U) -#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0xFFFF0000U) +#define RGX_CR_CORE_ID (0x0018U) +#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CORE_ID_ID_SHIFT (16U) +#define RGX_CR_CORE_ID_ID_CLRMSK (0x0000FFFFU) +#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U) +#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0xFFFF0000U) /* Register RGX_CR_CORE_REVISION */ -#define RGX_CR_CORE_REVISION (0x0020U) -#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U) -#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0x00FFFFFFU) -#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U) -#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0xFF00FFFFU) -#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U) -#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U) -#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CORE_REVISION (0x0020U) +#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U) +#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0x00FFFFFFU) +#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U) +#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U) +#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U) +#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_DESIGNER_REV_FIELD1 */ -#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U) -#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U) -#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U) +#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U) +#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U) +#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U) /* Register RGX_CR_DESIGNER_REV_FIELD2 */ -#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U) -#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U) -#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U) +#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U) +#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U) +#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U) /* Register RGX_CR_CHANGESET_NUMBER */ -#define RGX_CR_CHANGESET_NUMBER (0x0040U) -#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U) -#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CHANGESET_NUMBER (0x0040U) +#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U) +#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SOC_TIMER_GRAY */ -#define RGX_CR_SOC_TIMER_GRAY (0x00E0U) -#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define 
RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U) -#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SOC_TIMER_GRAY (0x00E0U) +#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U) +#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SOC_TIMER_BINARY */ -#define RGX_CR_SOC_TIMER_BINARY (0x00E8U) -#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U) -#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SOC_TIMER_BINARY (0x00E8U) +#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U) +#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_CLK_XTPLUS_CTRL */ -#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U) -#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000)) -#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U) -#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U) -#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U) -#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U) -#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U) -#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U) -#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U) -#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO 
(IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U) -#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U) -#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U) -#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U) -#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U) +#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U) 
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000)) /* Register RGX_CR_CLK_XTPLUS_STATUS */ -#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U) -#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) -#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U) -#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U) -#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U) -#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U) -#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING 
(IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT (6U) -#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U) -#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U) -#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U) -#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U) -#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U) -#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U) -#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U) +#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT 
(6U) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SOFT_RESET */ -#define RGX_CR_SOFT_RESET (0x0100U) -#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFEFFFFFFFFFFC3D)) -#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC3D)) -#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT (63U) -#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_EN (IMG_UINT64_C(0x8000000000000000)) -#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT (62U) -#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_SOFT_RESET_BERNADO2_CORE_SHIFT (61U) -#define RGX_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BERNADO2_CORE_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_SOFT_RESET_JONES_CORE_SHIFT (60U) -#define RGX_CR_SOFT_RESET_JONES_CORE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_JONES_CORE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_SOFT_RESET_TILING_CORE_SHIFT (59U) -#define RGX_CR_SOFT_RESET_TILING_CORE_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_TILING_CORE_EN (IMG_UINT64_C(0x0800000000000000)) -#define RGX_CR_SOFT_RESET_TE3_SHIFT (58U) -#define RGX_CR_SOFT_RESET_TE3_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -#define 
RGX_CR_SOFT_RESET_TE3_EN (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_SOFT_RESET_VCE_SHIFT (57U) -#define RGX_CR_SOFT_RESET_VCE_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_VCE_EN (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_SOFT_RESET_VBS_SHIFT (56U) -#define RGX_CR_SOFT_RESET_VBS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_VBS_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U) -#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U) -#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U) -#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_SOFT_RESET_FB_CDC_SHIFT (51U) -#define RGX_CR_SOFT_RESET_FB_CDC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FB_CDC_EN (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_SOFT_RESET_SH_SHIFT (50U) -#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U) -#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U) -#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_MCU_FBTC_EN (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U) -#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U) -#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U) -#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U) -#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U) -#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U) -#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U) -#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U) -#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U) -#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK 
(IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0x0000008000000000)) -#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U) -#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U) -#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U) -#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U) -#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U) -#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U) -#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U) -#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_SOFT_RESET_CPU_SHIFT (32U) -#define RGX_CR_SOFT_RESET_CPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_SOFT_RESET_CPU_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U) -#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U) -#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U) -#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U) -#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U) -#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U) -#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U) -#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_SOFT_RESET_TE_SHIFT (24U) -#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U) -#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U) -#define RGX_CR_SOFT_RESET_FBDC_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U) -#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SOFT_RESET_PM_SHIFT (20U) -#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) -#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U) -#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) -#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U) -#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U) -#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U) -#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U) -#define RGX_CR_SOFT_RESET_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) -#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) -#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U) -#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_SOFT_RESET_SYSARB_SHIFT (5U) -#define RGX_CR_SOFT_RESET_SYSARB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_SOFT_RESET_SYSARB_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U) -#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) -#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) -#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) -#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SOFT_RESET (0x0100U) +#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFEFFFFFFFFFFC3D)) +#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC3D)) +#define 
RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_SHIFT (63U) +#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_SHIFT (62U) +#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_SHIFT (61U) +#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_SHIFT (60U) +#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_SHIFT (59U) +#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_SHIFT (58U) +#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_SHIFT (57U) +#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_SHIFT (56U) +#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U) +#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U) +#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U) +#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_SHIFT (51U) +#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SOFT_RESET_SH_SHIFT (50U) +#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U) +#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U) +#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_MCU_FBTC_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U) +#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U) +#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK 
(IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U) +#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U) +#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U) +#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U) +#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U) +#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U) +#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U) +#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U) +#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U) +#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U) +#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U) +#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U) +#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U) +#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SOFT_RESET_CPU_SHIFT (32U) +#define RGX_CR_SOFT_RESET_CPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SOFT_RESET_CPU_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U) +#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U) +#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN 
(IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U) +#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U) +#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U) +#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U) +#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U) +#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SOFT_RESET_TE_SHIFT (24U) +#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U) +#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U) +#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U) +#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SOFT_RESET_PM_SHIFT (20U) +#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) +#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U) +#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) +#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U) +#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U) +#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U) +#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U) +#define RGX_CR_SOFT_RESET_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) +#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) +#define RGX_CR_SOFT_RESET_ISP_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U) +#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SOFT_RESET_SYSARB_SHIFT (5U) +#define RGX_CR_SOFT_RESET_SYSARB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SOFT_RESET_SYSARB_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U) +#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) +#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) +#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) +#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SOFT_RESET2 */ -#define RGX_CR_SOFT_RESET2 (0x0108U) -#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) -#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U) -#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0xFFE00FFFU) -#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U) -#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_SOFT_RESET2_TDM_EN (0x00000800U) -#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U) -#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_SOFT_RESET2_ASTC_EN (0x00000400U) -#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U) -#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0x00000200U) -#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U) -#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_SOFT_RESET2_USCPS_EN (0x00000100U) -#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U) -#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_SOFT_RESET2_IPF_EN (0x00000080U) -#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U) -#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0x00000040U) -#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U) -#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0x00000020U) -#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U) -#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0x00000010U) -#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U) -#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0x00000008U) -#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U) -#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SOFT_RESET2_PIXEL_EN (0x00000004U) -#define RGX_CR_SOFT_RESET2_CDM_SHIFT (1U) -#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SOFT_RESET2_CDM_EN (0x00000002U) -#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U) -#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SOFT_RESET2_VERTEX_EN (0x00000001U) +#define RGX_CR_SOFT_RESET2 (0x0108U) +#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) +#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U) +#define 
RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0xFFE00FFFU) +#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U) +#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SOFT_RESET2_TDM_EN (0x00000800U) +#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U) +#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SOFT_RESET2_ASTC_EN (0x00000400U) +#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U) +#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0x00000200U) +#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U) +#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SOFT_RESET2_USCPS_EN (0x00000100U) +#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U) +#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SOFT_RESET2_IPF_EN (0x00000080U) +#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U) +#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0x00000040U) +#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U) +#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0x00000020U) +#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U) +#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0x00000010U) +#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U) +#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0x00000008U) +#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U) +#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOFT_RESET2_PIXEL_EN (0x00000004U) +#define RGX_CR_SOFT_RESET2_CDM_SHIFT (1U) +#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SOFT_RESET2_CDM_EN (0x00000002U) +#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U) +#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SOFT_RESET2_VERTEX_EN (0x00000001U) /* Register RGX_CR_EVENT_ENABLE */ -#define RGX_CR_EVENT_ENABLE (0x0128U) -#define RGX_CR_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) -#define RGX_CR_EVENT_ENABLE__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_SHIFT (28U) -#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_EN (0x10000000U) -#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_SHIFT (27U) -#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_EN (0x08000000U) -#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_SHIFT (26U) -#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_EN (0x04000000U) -#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_SHIFT (25U) -#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) -#define 
RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_EN (0x02000000U) -#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_SHIFT (24U) -#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_EN (0x01000000U) -#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_SHIFT (23U) -#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) -#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_EN (0x00800000U) -#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_SHIFT (22U) -#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_EN (0x00400000U) -#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_SHIFT (21U) -#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_EN (0x00200000U) -#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_SHIFT (20U) -#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_EN (0x00100000U) -#define RGX_CR_EVENT_ENABLE_SAFETY_SHIFT (20U) -#define RGX_CR_EVENT_ENABLE_SAFETY_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_EVENT_ENABLE_SAFETY_EN (0x00100000U) -#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_SHIFT (19U) -#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_EN (0x00080000U) -#define RGX_CR_EVENT_ENABLE_SLAVE_REQ_SHIFT (19U) -#define RGX_CR_EVENT_ENABLE_SLAVE_REQ_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_ENABLE_SLAVE_REQ_EN (0x00080000U) -#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_SHIFT (17U) -#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_EN (0x00020000U) -#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_SHIFT (14U) -#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_EN (0x00004000U) -#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U) -#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U) -#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U) -#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U) -#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U) -#define 
RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_SHIFT (8U) -#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_EN (0x00000100U) -#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U) -#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U) -#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U) -#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U) -#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U) -#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_SHIFT (3U) -#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_EN (0x00000008U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U) -#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U) -#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_SHIFT (0U) -#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_EN (0x00000001U) +#define RGX_CR_EVENT_ENABLE (0x0128U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) +#define RGX_CR_EVENT_ENABLE__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_SHIFT (28U) +#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_EN (0x10000000U) +#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_SHIFT (27U) +#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_EN (0x08000000U) +#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_SHIFT (26U) +#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_EN (0x04000000U) +#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_SHIFT (25U) +#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_EN (0x02000000U) +#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_SHIFT (24U) +#define 
RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_EN (0x01000000U) +#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_SHIFT (23U) +#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_EN (0x00800000U) +#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_SHIFT (22U) +#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_EN (0x00400000U) +#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_SHIFT (21U) +#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_EN (0x00200000U) +#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_SHIFT (20U) +#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_EN (0x00100000U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__SAFETY_SHIFT (20U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__SAFETY_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__SAFETY_EN (0x00100000U) +#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_SHIFT (19U) +#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_EN (0x00080000U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__SLAVE_REQ_SHIFT (19U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__SLAVE_REQ_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__SLAVE_REQ_EN (0x00080000U) +#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_ENABLE__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_ENABLE__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_ENABLE__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_ENABLE__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_SHIFT (17U) +#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_EN (0x00020000U) +#define RGX_CR_EVENT_ENABLE__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_ENABLE__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_ENABLE__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_SHIFT (14U) +#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_EN (0x00004000U) +#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define 
RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_SHIFT (8U) +#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_EN (0x00000100U) +#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_SHIFT (0U) +#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_EN (0x00000001U) /* Register RGX_CR_EVENT_STATUS */ -#define RGX_CR_EVENT_STATUS (0x0130U) -#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) -#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U) -#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0x10000000U) -#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U) -#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0x08000000U) -#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U) -#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0x04000000U) -#define 
RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U) -#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0x02000000U) -#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U) -#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0x01000000U) -#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U) -#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) -#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0x00800000U) -#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U) -#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0x00400000U) -#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U) -#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0x00200000U) -#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U) -#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0x00100000U) -#define RGX_CR_EVENT_STATUS_SAFETY_SHIFT (20U) -#define RGX_CR_EVENT_STATUS_SAFETY_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_EVENT_STATUS_SAFETY_EN (0x00100000U) -#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U) -#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0x00080000U) -#define RGX_CR_EVENT_STATUS_SLAVE_REQ_SHIFT (19U) -#define RGX_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_STATUS_SLAVE_REQ_EN (0x00080000U) -#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U) -#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0x00020000U) -#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U) -#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0x00004000U) -#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) -#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) -#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) -#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) -#define 
RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) -#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U) -#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0x00000100U) -#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) -#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) -#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) -#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) -#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) -#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U) -#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0x00000008U) -#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) -#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) -#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U) -#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0x00000001U) +#define RGX_CR_EVENT_STATUS (0x0130U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) +#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U) +#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0x10000000U) +#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U) +#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0x08000000U) +#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U) +#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0x04000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U) +#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) +#define 
RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0x02000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U) +#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0x01000000U) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0x00800000U) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0x00400000U) +#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U) +#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0x00200000U) +#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U) +#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0x00100000U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_SHIFT (20U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_EN (0x00100000U) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__SLAVE_REQ_SHIFT (19U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__SLAVE_REQ_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS__ROGUEXE__SLAVE_REQ_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U) +#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0x00004000U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) +#define 
RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U) +#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0x00000100U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U) +#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0x00000001U) /* Register RGX_CR_EVENT_CLEAR */ -#define RGX_CR_EVENT_CLEAR (0x0138U) -#define RGX_CR_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) -#define RGX_CR_EVENT_CLEAR__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT (28U) -#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN (0x10000000U) -#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT (27U) -#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN (0x08000000U) -#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT (26U) -#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 
(0xFBFFFFFFU) -#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN (0x04000000U) -#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT (25U) -#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN (0x02000000U) -#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT (24U) -#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN (0x01000000U) -#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT (23U) -#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) -#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN (0x00800000U) -#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT (22U) -#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN (0x00400000U) -#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT (21U) -#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN (0x00200000U) -#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT (20U) -#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN (0x00100000U) -#define RGX_CR_EVENT_CLEAR_SAFETY_SHIFT (20U) -#define RGX_CR_EVENT_CLEAR_SAFETY_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_EVENT_CLEAR_SAFETY_EN (0x00100000U) -#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT (19U) -#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN (0x00080000U) -#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT (19U) -#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_EN (0x00080000U) -#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT (17U) -#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_EN (0x00020000U) -#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT (14U) -#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_EN (0x00004000U) -#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U) -#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U) -#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U) -#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U) 
-#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U) -#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT (8U) -#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN (0x00000100U) -#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U) -#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U) -#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U) -#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U) -#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U) -#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT (3U) -#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN (0x00000008U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U) -#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U) -#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT (0U) -#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_EN (0x00000001U) +#define RGX_CR_EVENT_CLEAR (0x0138U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) +#define RGX_CR_EVENT_CLEAR__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT (28U) +#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN (0x10000000U) +#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT (27U) +#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN (0x08000000U) +#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT (26U) +#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN (0x04000000U) +#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT (25U) +#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN (0x02000000U) +#define 
RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT (24U) +#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN (0x01000000U) +#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT (23U) +#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN (0x00800000U) +#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT (22U) +#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN (0x00400000U) +#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT (21U) +#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN (0x00200000U) +#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT (20U) +#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN (0x00100000U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_SHIFT (20U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_EN (0x00100000U) +#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT (19U) +#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN (0x00080000U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__SLAVE_REQ_SHIFT (19U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__SLAVE_REQ_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__SLAVE_REQ_EN (0x00080000U) +#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT (17U) +#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_EN (0x00020000U) +#define RGX_CR_EVENT_CLEAR__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_CLEAR__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_CLEAR__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT (14U) +#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_EN (0x00004000U) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define 
RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT (8U) +#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN (0x00000100U) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT (0U) +#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_EN (0x00000001U) /* Register RGX_CR_TIMER */ -#define RGX_CR_TIMER (0x0160U) -#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) -#define RGX_CR_TIMER_BIT31_SHIFT (63U) -#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) -#define RGX_CR_TIMER_VALUE_SHIFT (0U) -#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) +#define RGX_CR_TIMER (0x0160U) +#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_SHIFT (63U) +#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_TIMER_VALUE_SHIFT (0U) +#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) /* Register RGX_CR_TLA_STATUS */ -#define RGX_CR_TLA_STATUS (0x0178U) -#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U) -#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0x0000007FFFFFFFFF)) -#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U) -#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0xFFFFFF800000007F)) -#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U) -#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF81)) -#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U) -#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0x0000000000000001)) +#define 
RGX_CR_TLA_STATUS (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0x0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0xFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0x0000000000000001))
 /* Register RGX_CR_PM_PARTIAL_RENDER_ENABLE */
-#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U)
-#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
-#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U)
-#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU)
-#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U)
 /* Register RGX_CR_SIDEKICK_IDLE */
-#define RGX_CR_SIDEKICK_IDLE (0x03C8U)
-#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F))
-#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U)
-#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0xFFFFFFBFU)
-#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0x00000040U)
-#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U)
-#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0xFFFFFFDFU)
-#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0x00000020U)
-#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U)
-#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0xFFFFFFEFU)
-#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0x00000010U)
-#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U)
-#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0xFFFFFFF7U)
-#define RGX_CR_SIDEKICK_IDLE_TLA_EN (0x00000008U)
-#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U)
-#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0xFFFFFFFBU)
-#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0x00000004U)
-#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U)
-#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0xFFFFFFFDU)
-#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0x00000002U)
-#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U)
-#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU)
-#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0x00000001U)
+#define RGX_CR_SIDEKICK_IDLE (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0x00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0x00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0x00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN (0x00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0x00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0x00000002U) +#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U) +#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0x00000001U) /* Register RGX_CR_MARS_IDLE */ -#define RGX_CR_MARS_IDLE (0x08F8U) -#define RGX_CR_MARS_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -#define RGX_CR_MARS_IDLE_MH_SYSARB0_SHIFT (2U) -#define RGX_CR_MARS_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_MARS_IDLE_MH_SYSARB0_EN (0x00000004U) -#define RGX_CR_MARS_IDLE_CPU_SHIFT (1U) -#define RGX_CR_MARS_IDLE_CPU_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MARS_IDLE_CPU_EN (0x00000002U) -#define RGX_CR_MARS_IDLE_SOCIF_SHIFT (0U) -#define RGX_CR_MARS_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MARS_IDLE_SOCIF_EN (0x00000001U) +#define RGX_CR_MARS_IDLE (0x08F8U) +#define RGX_CR_MARS_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_MARS_IDLE_MH_SYSARB0_SHIFT (2U) +#define RGX_CR_MARS_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MARS_IDLE_MH_SYSARB0_EN (0x00000004U) +#define RGX_CR_MARS_IDLE_CPU_SHIFT (1U) +#define RGX_CR_MARS_IDLE_CPU_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MARS_IDLE_CPU_EN (0x00000002U) +#define RGX_CR_MARS_IDLE_SOCIF_SHIFT (0U) +#define RGX_CR_MARS_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MARS_IDLE_SOCIF_EN (0x00000001U) /* Register RGX_CR_VDM_CONTEXT_STORE_STATUS */ -#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000F3)) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0xFFFFFF0FU) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000F3)) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) /* Register RGX_CR_VDM_CONTEXT_STORE_TASK0 */ -#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_VDM_CONTEXT_STORE_TASK1 */ -#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0x00000000U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0x00000000U) /* Register RGX_CR_VDM_CONTEXT_STORE_TASK2 */ -#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_VDM_CONTEXT_RESUME_TASK0 */ -#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_VDM_CONTEXT_RESUME_TASK1 */ -#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U) /* Register RGX_CR_VDM_CONTEXT_RESUME_TASK2 */ -#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U) -#define 
RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_CDM_CONTEXT_STORE_STATUS */ -#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) /* Register RGX_CR_CDM_CONTEXT_PDS0 */ -#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) -#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) +#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) /* Register RGX_CR_CDM_CONTEXT_PDS1 */ -#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) -#define 
RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) -#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U) -#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0x20000000U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (0x10000000U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) -#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U) -#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0x08000000U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U) -#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) -#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U) -#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0x00100000U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) -#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U) -#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) -#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U) -#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) -#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) -#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U) -#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) -#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U) -#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0x00000001U) +#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN 
(0x10000000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0x08000000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0x00100000U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) +#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0x00000001U) /* Register RGX_CR_CDM_TERMINATE_PDS */ -#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) -#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) +#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) /* Register RGX_CR_CDM_TERMINATE_PDS1 */ -#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) 
-#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) -#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U) -#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0x20000000U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0x10000000U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) -#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U) -#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0x08000000U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U) -#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) -#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U) -#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0x00100000U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) -#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U) -#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) -#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U) -#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) -#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) -#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U) -#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) -#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U) -#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0x00000001U) +#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 
(0x20000000U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0x10000000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) +#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U) +#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0x08000000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0x00100000U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U) +#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) +#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U) +#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0x00000001U) /* Register RGX_CR_CDM_CONTEXT_LOAD_PDS0 */ -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE 
(16U) /* Register RGX_CR_CDM_CONTEXT_LOAD_PDS1 */ -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (0x20000000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0x10000000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0x08000000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0x00100000U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0x00000001U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) 
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0x08000000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0x00100000U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0x00000001U) /* Register RGX_CR_MIPS_WRAPPER_CONFIG */ -#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000001030F01FFFF)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT (40U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT (33U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define 
RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000001030F01FFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT (40U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT (33U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 */ -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) 
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 */ -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) /* Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 */ -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 */ -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U) -#define 
RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) /* Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 */ -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 */ -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define 
RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) /* Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 */ -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 */ -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) /* Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 */ -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 */ -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) /* Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS */ -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS (0x0868U) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define 
RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS (0x0868U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR */ -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR (0x0870U) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN (0x00000001U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR (0x0870U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN (0x00000001U) /* Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG */ -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG (0x0878U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT (32U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT (1U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U) -#define 
RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG (0x0878U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT (32U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT (1U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ */ -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ (0x0880U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT (1U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK (0xFFFFFFC1U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN (0x00000001U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ (0x0880U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT (1U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK (0xFFFFFFC1U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN (0x00000001U) /* Register 
RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA */ -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA (0x0888U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFF81)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT (36U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT (32U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT (11U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA (0x0888U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFF81)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT (36U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT (32U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT (11U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE */ -#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U) -#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U) -#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0x00000001U) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0x00000001U) /* Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS */ -#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U) -#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U) -#define 
RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0x00000001U) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0x00000001U) /* Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR */ -#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U) -#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U) -#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0x00000001U) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0x00000001U) /* Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE */ -#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U) -#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U) -#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0x00000001U) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0x00000001U) /* Register RGX_CR_MIPS_WRAPPER_NMI_EVENT */ -#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U) -#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U) -#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0x00000001U) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0x00000001U) /* Register RGX_CR_MIPS_DEBUG_CONFIG */ -#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U) -#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U) -#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0x00000001U) +#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U) +#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U) +#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0x00000001U) /* Register RGX_CR_MIPS_EXCEPTION_STATUS */ -#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN 
(0x00000020U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0x00000010U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0x00000008U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0x00000004U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0x00000002U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0x00000001U) +#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN (0x00000020U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0x00000010U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0x00000008U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0x00000004U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0x00000002U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0x00000001U) /* Register RGX_CR_MIPS_WRAPPER_STATUS */ -#define RGX_CR_MIPS_WRAPPER_STATUS (0x08E8U) -#define RGX_CR_MIPS_WRAPPER_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT (0U) -#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MIPS_WRAPPER_STATUS (0x08E8U) +#define RGX_CR_MIPS_WRAPPER_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT (0U) +#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_XPU_BROADCAST */ -#define RGX_CR_XPU_BROADCAST (0x0890U) -#define RGX_CR_XPU_BROADCAST_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_XPU_BROADCAST_MASK_SHIFT (0U) -#define RGX_CR_XPU_BROADCAST_MASK_CLRMSK (0xFFFFFE00U) +#define RGX_CR_XPU_BROADCAST (0x0890U) +#define RGX_CR_XPU_BROADCAST_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_XPU_BROADCAST_MASK_SHIFT (0U) +#define RGX_CR_XPU_BROADCAST_MASK_CLRMSK (0xFFFFFE00U) /* Register RGX_CR_META_SP_MSLVDATAX */ -#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) -#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) -#define 
RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) +#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAT */ -#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) -#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) +#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVCTRL0 */ -#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) -#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) -#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) -#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) -#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) +#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVCTRL1 */ -#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) -#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) -#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) -#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) -#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) -#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) -#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) -#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) -#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) -#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) -#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) -#define 
RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) -#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) -#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) -#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) +#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVHANDSHKE */ -#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) -#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) -#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) +#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVT0KICK */ -#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) -#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define 
RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) +#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICKI */ -#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) -#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) +#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICK */ -#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) -#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) +#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICKI */ -#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) -#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) +#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICK */ -#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) -#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) +#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICKI */ -#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) -#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) +#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICK */ -#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) -#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) +#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT 
(0U) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICKI */ -#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) -#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) +#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVRST */ -#define RGX_CR_META_SP_MSLVRST (0x0AC0U) -#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) -#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVRST (0x0AC0U) +#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVIRQSTATUS */ -#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQENABLE */ -#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) -#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) +#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQLEVEL */ -#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define 
RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) /* Register RGX_CR_MTS_SCHEDULE */ -#define RGX_CR_MTS_SCHEDULE (0x0B00U) -#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE (0x0B00U) +#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) +#define 
RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE1 */ -#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) -#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) +#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) +#define 
RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE2 */ -#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) -#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) +#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE3 */ -#define RGX_CR_MTS_SCHEDULE3 
(0x30B00U) -#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) +#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE4 */ -#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) -#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) -#define 
RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) +#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE5 */ -#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) -#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) 
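/*
 * [Editorial note, not part of the patch] The RGX_CR_MTS_SCHEDULE* blocks in
 * this range (including the banked copies SCHEDULE1..SCHEDULE7 at +0x10000
 * strides: 0x0B00, 0x10B00, ... 0x70B00) follow the usual convention of this
 * generated header: each field has a _SHIFT, a _CLRMSK that keeps every bit
 * except the field, and named value macros. A minimal sketch of composing a
 * kick value with those macros follows; the helper name and the bare
 * uint32_t arithmetic are illustrative assumptions, and the RGX_CR_* macros
 * are assumed to be in scope from this header.
 */
#include <stdint.h>

static inline uint32_t rgx_mts_schedule_kick_value(uint32_t dm_value)
{
    uint32_t v = 0;

    /* Clear each field with its _CLRMSK, then OR in the selected value. */
    v = (v & RGX_CR_MTS_SCHEDULE_DM_CLRMSK) |
        (dm_value & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
    v = (v & RGX_CR_MTS_SCHEDULE_TASK_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
    v = (v & RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK) | RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX;
    v = (v & RGX_CR_MTS_SCHEDULE_HOST_CLRMSK) | RGX_CR_MTS_SCHEDULE_HOST_HOST;

    /* e.g. dm_value == RGX_CR_MTS_SCHEDULE_DM_DM0 yields 0x00000130. */
    return v;
}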
-#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) +#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE6 */ -#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) -#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 
(0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) +#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE7 */ -#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) -#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) -#define 
RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) +#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC */ -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC */ -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define 
RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC */ -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC */ -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG */ -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT (9U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (8U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701)) +#define 
RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_SHIFT (44U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PERSISTENCE_SHIFT (9U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_SLC_COHERENT_SHIFT (8U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL__S7_INFRA__META (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL__S7_INFRA__MTS (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 
(0x00000000U) /* Register RGX_CR_MTS_DM3_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_INTCTX */ -#define RGX_CR_MTS_INTCTX (0x0B98U) -#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) -#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) -#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U) -#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0xFFC3FFFFU) -#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U) -#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0xFFFCFFFFU) -#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) -#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) -#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) -#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MTS_INTCTX (0x0B98U) +#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) +#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U) +#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0xFFC3FFFFU) +#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U) +#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0xFFFCFFFFU) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MTS_BGCTX */ -#define RGX_CR_MTS_BGCTX (0x0BA0U) -#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) -#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U) -#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0xFFFFC3FFU) -#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U) -#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK 
(0xFFFFFCFFU) -#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MTS_BGCTX (0x0BA0U) +#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U) +#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0xFFFFC3FFU) +#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK (0xFFFFFCFFU) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE */ -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_MTS_GPU_INT_STATUS */ -#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) -#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) -#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK 
(0x00000000U) +#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) +#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_SCHEDULE_ENABLE */ -#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BC8U) -#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BC8U) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_IRQ_OS0_EVENT_STATUS */ -#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD8U) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD8U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS0_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE8U) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE8U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS1_EVENT_STATUS */ -#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD8U) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD8U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS1_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE8U) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE8U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS2_EVENT_STATUS */ -#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD8U) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL 
(IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD8U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS2_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE8U) -#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE8U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS3_EVENT_STATUS */ -#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD8U) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD8U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS3_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE8U) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE8U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS4_EVENT_STATUS */ -#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD8U) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD8U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS4_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE8U) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE8U) 
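/*
 * [Editorial note, not part of the patch] The per-OS interrupt registers in
 * this range pair an ..._EVENT_STATUS with an ..._EVENT_CLEAR at the same
 * per-OSID +0x10000 stride as the SCHEDULE registers. A minimal kernel-style
 * sketch of the check-then-acknowledge sequence for OS0 follows; the
 * function name and the ioremapped `regs` base are placeholders, the
 * RGX_CR_* macros are assumed to be in scope from this header, and writing
 * the SOURCE bit to the CLEAR register is assumed (from the naming) to
 * acknowledge the latched event.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline bool rgx_os0_irq_pending_and_clear(void __iomem *regs)
{
    u32 status = readl(regs + RGX_CR_IRQ_OS0_EVENT_STATUS);

    if (!(status & RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN))
        return false;   /* no event latched for OS0 */

    /* Acknowledge by writing the SOURCE bit to the CLEAR register. */
    writel(RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN,
           regs + RGX_CR_IRQ_OS0_EVENT_CLEAR);
    return true;
}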
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS5_EVENT_STATUS */ -#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD8U) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD8U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS5_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE8U) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE8U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS6_EVENT_STATUS */ -#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD8U) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD8U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS6_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE8U) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE8U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_IRQ_OS7_EVENT_STATUS */ -#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD8U) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD8U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN (0x00000001U) /* 
Register RGX_CR_IRQ_OS7_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE8U) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN (0x00000001U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE8U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN (0x00000001U) /* Register RGX_CR_META_BOOT */ -#define RGX_CR_META_BOOT (0x0BF8U) -#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_BOOT_MODE_SHIFT (0U) -#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) +#define RGX_CR_META_BOOT (0x0BF8U) +#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_BOOT_MODE_SHIFT (0U) +#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) /* Register RGX_CR_GARTEN_SLC */ -#define RGX_CR_GARTEN_SLC (0x0BB8U) -#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) -#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) +#define RGX_CR_GARTEN_SLC (0x0BB8U) +#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) /* Register RGX_CR_PPP */ -#define RGX_CR_PPP (0x0CD0U) -#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PPP_CHECKSUM_SHIFT (0U) -#define RGX_CR_PPP_CHECKSUM_CLRMSK (0x00000000U) +#define RGX_CR_PPP (0x0CD0U) +#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CHECKSUM_SHIFT (0U) +#define RGX_CR_PPP_CHECKSUM_CLRMSK (0x00000000U) -#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U) +#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U) /* Top-left to bottom-right */ -#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U) +#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U) /* Top-right to bottom-left */ -#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U) +#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U) /* Bottom-left to top-right */ -#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U) +#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U) /* Bottom-right to top-left */ -#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U) +#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U) -#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U) +#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U) /* Normal render */ -#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM (0x00000000U) +#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM (0x00000000U) /* Fast 2D render */ -#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U) /* Fast scale render */ -#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U) +#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U) /* Register RGX_CR_ISP_RENDER */ -#define RGX_CR_ISP_RENDER (0x0F08U) -#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define 
RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT (8U) -#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN (0x00000100U) -#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (7U) -#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000080U) -#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (6U) -#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000040U) -#define RGX_CR_ISP_RENDER_DISABLE_EOMT_SHIFT (5U) -#define RGX_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_ISP_RENDER_DISABLE_EOMT_EN (0x00000020U) -#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) -#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_ISP_RENDER_RESUME_EN (0x00000010U) -#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) -#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) -#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) -#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) -#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) -#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) -#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) -#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) -#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) -#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) +#define RGX_CR_ISP_RENDER (0x0F08U) +#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT (8U) +#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN (0x00000100U) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (7U) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000080U) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (6U) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000040U) +#define RGX_CR_ISP_RENDER_DISABLE_EOMT_SHIFT (5U) +#define RGX_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_ISP_RENDER_DISABLE_EOMT_EN (0x00000020U) +#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ISP_RENDER_RESUME_EN (0x00000010U) +#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) +#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) +#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) +#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) +#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) +#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) +#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) +#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) /* Register RGX_CR_ISP_CTL */ -#define RGX_CR_ISP_CTL (0x0F38U) -#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000FFFFF3FF)) -#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT (31U) -#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_EN (0x80000000U) -#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (30U) -#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x40000000U) -#define 
RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT (29U) -#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN (0x20000000U) -#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT (28U) -#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN (0x10000000U) -#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT (27U) -#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_ISP_CTL_PAIR_TILES_EN (0x08000000U) -#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U) -#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0x04000000U) -#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U) -#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0x02000000U) -#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U) -#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0xFE7FFFFFU) -#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (0x00000000U) -#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0x00800000U) -#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0x01000000U) -#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U) -#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0xFF9FFFFFU) -#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U) -#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00100000U) -#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) -#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) -#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U) -#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0x00040000U) -#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U) -#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0x00020000U) -#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U) -#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0x00010000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (0x00000000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO (0x00001000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0x00002000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0x00003000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0x00004000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0x00005000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0x00006000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0x00007000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0x00008000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0x00009000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN (0x0000A000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0x0000B000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0x0000C000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0x0000D000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0x0000E000U) -#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0x0000F000U) -#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U) -#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0xFFFFFC0FU) -#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) -#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_ISP_CTL (0x0F38U) +#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000FFFFF3FF)) +#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT (31U) +#define 
RGX_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_EN (0x80000000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (30U) +#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x40000000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT (29U) +#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN (0x20000000U) +#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT (28U) +#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN (0x10000000U) +#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT (27U) +#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_ISP_CTL_PAIR_TILES_EN (0x08000000U) +#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U) +#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0x04000000U) +#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U) +#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0x02000000U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0xFE7FFFFFU) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (0x00000000U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0x00800000U) +#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0x01000000U) +#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U) +#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0xFF9FFFFFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00100000U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) +#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U) +#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0x00040000U) +#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U) +#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0x00020000U) +#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U) +#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0x00010000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (0x00000000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO (0x00001000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0x00002000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0x00003000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0x00004000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0x00005000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0x00006000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0x00007000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0x00008000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0x00009000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN (0x0000A000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0x0000B000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0x0000C000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0x0000D000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0x0000E000U) +#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0x0000F000U) +#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U) +#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0xFFFFFC0FU) 
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) +#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_ISP_STATUS */ -#define RGX_CR_ISP_STATUS (0x1038U) -#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U) -#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0x00000004U) -#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U) -#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ISP_STATUS_ACTIVE_EN (0x00000002U) -#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U) -#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_ISP_STATUS_EOR_EN (0x00000001U) +#define RGX_CR_ISP_STATUS (0x1038U) +#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U) +#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0x00000004U) +#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U) +#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ISP_STATUS_ACTIVE_EN (0x00000002U) +#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U) +#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ISP_STATUS_EOR_EN (0x00000001U) /* Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats */ -#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64U) +#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64U) /* Register RGX_CR_ISP_XTP_RESUME0 */ -#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U) -#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF)) -#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT (12U) -#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0xFFC00FFFU) -#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U) -#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0xFFFFFC00U) +#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U) +#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF)) +#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT (12U) +#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0xFFC00FFFU) +#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U) +#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0xFFFFFC00U) /* Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats */ -#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32U) +#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32U) /* Register RGX_CR_ISP_XTP_STORE0 */ -#define RGX_CR_ISP_XTP_STORE0 (0x3C00U) -#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF)) -#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U) -#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0x40000000U) -#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U) -#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0x20000000U) -#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U) -#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0x10000000U) -#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U) -#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0xF0FFFFFFU) -#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U) -#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0xFFC00FFFU) -#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U) -#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0xFFFFFC00U) +#define RGX_CR_ISP_XTP_STORE0 (0x3C00U) +#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF)) +#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U) +#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0x40000000U) +#define 
RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U) +#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0x20000000U) +#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U) +#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0x10000000U) +#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U) +#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U) +#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0xFFC00FFFU) +#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U) +#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0xFFFFFC00U) /* Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats */ -#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8U) +#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8U) /* Register RGX_CR_BIF_CAT_BASE0 */ -#define RGX_CR_BIF_CAT_BASE0 (0x1200U) -#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE0 (0x1200U) +#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE1 */ -#define RGX_CR_BIF_CAT_BASE1 (0x1208U) -#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE1 (0x1208U) +#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE2 */ -#define RGX_CR_BIF_CAT_BASE2 (0x1210U) -#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE2 (0x1210U) +#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE3 */ -#define RGX_CR_BIF_CAT_BASE3 (0x1218U) -#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE3 (0x1218U) +#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK 
(IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE4 */ -#define RGX_CR_BIF_CAT_BASE4 (0x1220U) -#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE4 (0x1220U) +#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE5 */ -#define RGX_CR_BIF_CAT_BASE5 (0x1228U) -#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE5 (0x1228U) +#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE6 */ -#define RGX_CR_BIF_CAT_BASE6 (0x1230U) -#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE6 (0x1230U) +#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE7 */ -#define RGX_CR_BIF_CAT_BASE7 (0x1238U) -#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U) -#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_BIF_CAT_BASE7 (0x1238U) +#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U) +#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_CAT_BASE_INDEX */ -#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U) -#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x00070707073F0707)) -#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U) -#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U) -#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U) -#define 
RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U) -#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT (19U) -#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U) -#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U) -#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) -#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U) -#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8)) +#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U) +#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x00070707073F0707)) +#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U) +#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U) +#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U) +#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U) +#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT (19U) +#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U) +#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U) +#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) +#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U) +#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8)) /* Register RGX_CR_BIF_PM_CAT_BASE_VCE0 */ -#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_BIF_PM_CAT_BASE_TE0 */ -#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_BIF_PM_CAT_BASE_ALIST0 */ -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) 
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_BIF_PM_CAT_BASE_VCE1 */ -#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT (1U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_BIF_PM_CAT_BASE_TE1 */ -#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* 
Register RGX_CR_BIF_PM_CAT_BASE_ALIST1 */ -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_BIF_MMU_ENTRY_STATUS */ -#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3)) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U) -#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U) +#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_BIF_MMU_ENTRY */ -#define RGX_CR_BIF_MMU_ENTRY (0x1290U) -#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U) -#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0x00000002U) -#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U) -#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0x00000001U) +#define RGX_CR_BIF_MMU_ENTRY (0x1290U) +#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) 
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U) +#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0x00000002U) +#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U) +#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0x00000001U) /* Register RGX_CR_BIF_CTRL_INVAL */ -#define RGX_CR_BIF_CTRL_INVAL (0x12A0U) -#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U) -#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0x00000008U) -#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U) -#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0x00000004U) -#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U) -#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0x00000002U) -#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U) -#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0x00000001U) +#define RGX_CR_BIF_CTRL_INVAL (0x12A0U) +#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U) +#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0x00000008U) +#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U) +#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0x00000004U) +#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U) +#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0x00000002U) +#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U) +#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0x00000001U) /* Register RGX_CR_BIF_CTRL */ -#define RGX_CR_BIF_CTRL (0x12A8U) -#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000000000033F)) -#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT (9U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_EN (0x00000200U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT (8U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN (0x00000100U) -#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U) -#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0x00000080U) -#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U) -#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0x00000040U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0x00000020U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0x00000010U) -#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U) -#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0x00000008U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0x00000004U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0xFFFFFFFDU) -#define 
RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0x00000002U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0x00000001U) +#define RGX_CR_BIF_CTRL (0x12A8U) +#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000000000033F)) +#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_CPU_SHIFT (9U) +#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_CPU_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_CPU_EN (0x00000200U) +#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_BIF4_SHIFT (8U) +#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_BIF4_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_BIF4_EN (0x00000100U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0x00000080U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0x00000040U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0x00000020U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0x00000010U) +#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U) +#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0x00000008U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0x00000004U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0x00000002U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0x00000001U) /* Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS */ -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) 
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) /* Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS */ -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) +#define 
RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) /* Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS */ -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0x00000010U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0x00000001U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0x00000001U) /* Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS */ -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) -#define 
RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) /* Register RGX_CR_BIF_MMU_STATUS */ -#define RGX_CR_BIF_MMU_STATUS (0x12D0U) -#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) -#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) -#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U) -#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0x10000000U) -#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) -#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) -#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) -#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) -#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) -#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) -#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) -#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) -#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) -#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) -#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) -#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) +#define RGX_CR_BIF_MMU_STATUS (0x12D0U) +#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) +#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) +#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U) +#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0x10000000U) +#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) +#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) +#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) 
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) +#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) +#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) +#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) +#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) +#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) +#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) +#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) +#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) /* Register group: RGX_CR_BIF_TILING_CFG, with 8 repeats */ -#define RGX_CR_BIF_TILING_CFG_REPEATCOUNT (8U) +#define RGX_CR_BIF_TILING_CFG_REPEATCOUNT (8U) /* Register RGX_CR_BIF_TILING_CFG0 */ -#define RGX_CR_BIF_TILING_CFG0 (0x12D8U) -#define RGX_CR_BIF_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG0_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG0 (0x12D8U) +#define RGX_CR_BIF_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG1 */ -#define RGX_CR_BIF_TILING_CFG1 (0x12E0U) -#define RGX_CR_BIF_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG1_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) -#define 
RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG1 (0x12E0U) +#define RGX_CR_BIF_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG2 */ -#define RGX_CR_BIF_TILING_CFG2 (0x12E8U) -#define RGX_CR_BIF_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG2_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG2 (0x12E8U) +#define RGX_CR_BIF_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG3 */ -#define RGX_CR_BIF_TILING_CFG3 (0x12F0U) -#define RGX_CR_BIF_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT (61U) -#define 
RGX_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG3_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG3 (0x12F0U) +#define RGX_CR_BIF_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG4 */ -#define RGX_CR_BIF_TILING_CFG4 (0x12F8U) -#define RGX_CR_BIF_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG4_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG4 (0x12F8U) +#define RGX_CR_BIF_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) 
+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG5 */ -#define RGX_CR_BIF_TILING_CFG5 (0x1300U) -#define RGX_CR_BIF_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG5_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG5 (0x1300U) +#define RGX_CR_BIF_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG6 */ -#define RGX_CR_BIF_TILING_CFG6 (0x1308U) -#define RGX_CR_BIF_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG6_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG6 (0x1308U) +#define RGX_CR_BIF_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK 
(IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_TILING_CFG7 */ -#define RGX_CR_BIF_TILING_CFG7 (0x1310U) -#define RGX_CR_BIF_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT (61U) -#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG7_ENABLE_SHIFT (60U) -#define RGX_CR_BIF_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG7 (0x1310U) +#define RGX_CR_BIF_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT (61U) +#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_ENABLE_SHIFT (60U) +#define RGX_CR_BIF_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_BIF_READS_EXT_STATUS */ -#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U) -#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U) -#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0xF000FFFFU) -#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U) -#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U) +#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U) +#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0xF000FFFFU) +#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U) +#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK 
(0xFFFF0000U) /* Register RGX_CR_BIF_READS_INT_STATUS */ -#define RGX_CR_BIF_READS_INT_STATUS (0x1328U) -#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) -#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U) -#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0xF800FFFFU) -#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U) -#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_READS_INT_STATUS (0x1328U) +#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) +#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U) +#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0xF800FFFFU) +#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U) +#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIFPM_READS_INT_STATUS */ -#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U) -#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U) -#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U) +#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U) +#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIFPM_READS_EXT_STATUS */ -#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U) -#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U) -#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U) +#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U) +#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIFPM_STATUS_MMU */ -#define RGX_CR_BIFPM_STATUS_MMU (0x1350U) -#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U) -#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) +#define RGX_CR_BIFPM_STATUS_MMU (0x1350U) +#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U) +#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_BIF_STATUS_MMU */ -#define RGX_CR_BIF_STATUS_MMU (0x1358U) -#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U) -#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) +#define RGX_CR_BIF_STATUS_MMU (0x1358U) +#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U) +#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_BIF_FAULT_READ */ -#define RGX_CR_BIF_FAULT_READ (0x13E0U) -#define RGX_CR_BIF_FAULT_READ_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U) -#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U) -#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U) +#define RGX_CR_BIF_FAULT_READ (0x13E0U) +#define RGX_CR_BIF_FAULT_READ_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define 
RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U) /* Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS */ -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) /* Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS */ -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -#define 
RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) /* Register RGX_CR_TFBC_COMPRESSION_CONTROL */ -#define RGX_CR_TFBC_COMPRESSION_CONTROL (0x14A0U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_SHIFT (7U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN (0x00000080U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_SHIFT (4U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_CLRMSK (0xFFFFFF8FU) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_SHIFT (3U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_EN (0x00000008U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT (1U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK (0xFFFFFFF9U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT (0x00000000U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION (0x00000002U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD (0x00000004U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_RESERVED (0x00000006U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT (0U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0 (0x00000000U) -#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1 (0x00000001U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL (0x14A0U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_SHIFT (8U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN (0x00000100U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_SHIFT (7U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN (0x00000080U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_SHIFT (4U) +#define 
RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_SHIFT (3U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_EN (0x00000008U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT (1U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK (0xFFFFFFF9U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT (0x00000000U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION (0x00000002U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD (0x00000004U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_RESERVED (0x00000006U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT (0U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0 (0x00000000U) +#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1 (0x00000001U) /* Register RGX_CR_MCU_FENCE */ -#define RGX_CR_MCU_FENCE (0x1740U) -#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0)) -#define RGX_CR_MCU_FENCE_DM_SHIFT (40U) -#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000)) -#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_MCU_FENCE_DM_FASTRENDER (IMG_UINT64_C(0x0000050000000000)) -#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U) -#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U) -#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U) +#define RGX_CR_MCU_FENCE (0x1740U) +#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0)) +#define RGX_CR_MCU_FENCE_DM_SHIFT (40U) +#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000)) +#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_MCU_FENCE_DM_FASTRENDER (IMG_UINT64_C(0x0000050000000000)) +#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U) +#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U) +#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U) /* Register group: RGX_CR_SCRATCH, with 16 repeats */ -#define RGX_CR_SCRATCH_REPEATCOUNT (16U) +#define RGX_CR_SCRATCH_REPEATCOUNT (16U) /* Register RGX_CR_SCRATCH0 */ -#define RGX_CR_SCRATCH0 (0x1A00U) -#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH0 (0x1A00U) +#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH1 */ -#define RGX_CR_SCRATCH1 (0x1A08U) -#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH1_DATA_CLRMSK 
(0x00000000U) +#define RGX_CR_SCRATCH1 (0x1A08U) +#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH2 */ -#define RGX_CR_SCRATCH2 (0x1A10U) -#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH2 (0x1A10U) +#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH3 */ -#define RGX_CR_SCRATCH3 (0x1A18U) -#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH3 (0x1A18U) +#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH4 */ -#define RGX_CR_SCRATCH4 (0x1A20U) -#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH4_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH4 (0x1A20U) +#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH4_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH5 */ -#define RGX_CR_SCRATCH5 (0x1A28U) -#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH5_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH5 (0x1A28U) +#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH5_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH6 */ -#define RGX_CR_SCRATCH6 (0x1A30U) -#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH6_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH6 (0x1A30U) +#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH6_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH7 */ -#define RGX_CR_SCRATCH7 (0x1A38U) -#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH7_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH7 (0x1A38U) +#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH7_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH8 */ -#define RGX_CR_SCRATCH8 (0x1A40U) -#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH8_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH8 (0x1A40U) +#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH8_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH9 */ -#define RGX_CR_SCRATCH9 (0x1A48U) -#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH9_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH9 (0x1A48U) +#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) +#define 
RGX_CR_SCRATCH9_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH10 */ -#define RGX_CR_SCRATCH10 (0x1A50U) -#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH10_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH10 (0x1A50U) +#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH10_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH11 */ -#define RGX_CR_SCRATCH11 (0x1A58U) -#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH11_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH11 (0x1A58U) +#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH11_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH12 */ -#define RGX_CR_SCRATCH12 (0x1A60U) -#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH12_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH12 (0x1A60U) +#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH12_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH13 */ -#define RGX_CR_SCRATCH13 (0x1A68U) -#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH13_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH13 (0x1A68U) +#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH13_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH14 */ -#define RGX_CR_SCRATCH14 (0x1A70U) -#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH14_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH14 (0x1A70U) +#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH14_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_SCRATCH15 */ -#define RGX_CR_SCRATCH15 (0x1A78U) -#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH15_DATA_CLRMSK (0x00000000U) +#define RGX_CR_SCRATCH15 (0x1A78U) +#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH15_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS0_SCRATCH, with 2 repeats */ -#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS0_SCRATCH0 */ -#define RGX_CR_OS0_SCRATCH0 (0x1A80U) -#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS0_SCRATCH0 (0x1A80U) +#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS0_SCRATCH1 */ -#define RGX_CR_OS0_SCRATCH1 (0x1A88U) -#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS0_SCRATCH1 (0x1A88U) +#define 
RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS0_SCRATCH2 */ -#define RGX_CR_OS0_SCRATCH2 (0x1A90U) -#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS0_SCRATCH2 (0x1A90U) +#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS0_SCRATCH3 */ -#define RGX_CR_OS0_SCRATCH3 (0x1A98U) -#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS0_SCRATCH3 (0x1A98U) +#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS1_SCRATCH, with 2 repeats */ -#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS1_SCRATCH0 */ -#define RGX_CR_OS1_SCRATCH0 (0x11A80U) -#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS1_SCRATCH0 (0x11A80U) +#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS1_SCRATCH1 */ -#define RGX_CR_OS1_SCRATCH1 (0x11A88U) -#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS1_SCRATCH1 (0x11A88U) +#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS1_SCRATCH2 */ -#define RGX_CR_OS1_SCRATCH2 (0x11A90U) -#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS1_SCRATCH2 (0x11A90U) +#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS1_SCRATCH3 */ -#define RGX_CR_OS1_SCRATCH3 (0x11A98U) -#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS1_SCRATCH3 (0x11A98U) +#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS2_SCRATCH, with 2 repeats */ -#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS2_SCRATCH0 */ -#define RGX_CR_OS2_SCRATCH0 (0x21A80U) -#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS2_SCRATCH0 (0x21A80U) +#define RGX_CR_OS2_SCRATCH0_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS2_SCRATCH1 */ -#define RGX_CR_OS2_SCRATCH1 (0x21A88U) -#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS2_SCRATCH1 (0x21A88U) +#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS2_SCRATCH2 */ -#define RGX_CR_OS2_SCRATCH2 (0x21A90U) -#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS2_SCRATCH2 (0x21A90U) +#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS2_SCRATCH3 */ -#define RGX_CR_OS2_SCRATCH3 (0x21A98U) -#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS2_SCRATCH3 (0x21A98U) +#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS3_SCRATCH, with 2 repeats */ -#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS3_SCRATCH0 */ -#define RGX_CR_OS3_SCRATCH0 (0x31A80U) -#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS3_SCRATCH0 (0x31A80U) +#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS3_SCRATCH1 */ -#define RGX_CR_OS3_SCRATCH1 (0x31A88U) -#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS3_SCRATCH1 (0x31A88U) +#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS3_SCRATCH2 */ -#define RGX_CR_OS3_SCRATCH2 (0x31A90U) -#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS3_SCRATCH2 (0x31A90U) +#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS3_SCRATCH3 */ -#define RGX_CR_OS3_SCRATCH3 (0x31A98U) -#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS3_SCRATCH3 (0x31A98U) +#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS4_SCRATCH, with 2 
repeats */ -#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS4_SCRATCH0 */ -#define RGX_CR_OS4_SCRATCH0 (0x41A80U) -#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS4_SCRATCH0 (0x41A80U) +#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS4_SCRATCH1 */ -#define RGX_CR_OS4_SCRATCH1 (0x41A88U) -#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS4_SCRATCH1 (0x41A88U) +#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS4_SCRATCH2 */ -#define RGX_CR_OS4_SCRATCH2 (0x41A90U) -#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS4_SCRATCH2 (0x41A90U) +#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS4_SCRATCH3 */ -#define RGX_CR_OS4_SCRATCH3 (0x41A98U) -#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS4_SCRATCH3 (0x41A98U) +#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS5_SCRATCH, with 2 repeats */ -#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS5_SCRATCH0 */ -#define RGX_CR_OS5_SCRATCH0 (0x51A80U) -#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS5_SCRATCH0 (0x51A80U) +#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS5_SCRATCH1 */ -#define RGX_CR_OS5_SCRATCH1 (0x51A88U) -#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS5_SCRATCH1 (0x51A88U) +#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS5_SCRATCH2 */ -#define RGX_CR_OS5_SCRATCH2 (0x51A90U) -#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS5_SCRATCH2 (0x51A90U) +#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS5_SCRATCH3 */ -#define RGX_CR_OS5_SCRATCH3 (0x51A98U) 
-#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS5_SCRATCH3 (0x51A98U) +#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS6_SCRATCH, with 2 repeats */ -#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS6_SCRATCH0 */ -#define RGX_CR_OS6_SCRATCH0 (0x61A80U) -#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS6_SCRATCH0 (0x61A80U) +#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS6_SCRATCH1 */ -#define RGX_CR_OS6_SCRATCH1 (0x61A88U) -#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS6_SCRATCH1 (0x61A88U) +#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS6_SCRATCH2 */ -#define RGX_CR_OS6_SCRATCH2 (0x61A90U) -#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS6_SCRATCH2 (0x61A90U) +#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS6_SCRATCH3 */ -#define RGX_CR_OS6_SCRATCH3 (0x61A98U) -#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS6_SCRATCH3 (0x61A98U) +#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register group: RGX_CR_OS7_SCRATCH, with 2 repeats */ -#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (2U) +#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (2U) /* Register RGX_CR_OS7_SCRATCH0 */ -#define RGX_CR_OS7_SCRATCH0 (0x71A80U) -#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS7_SCRATCH0 (0x71A80U) +#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS7_SCRATCH1 */ -#define RGX_CR_OS7_SCRATCH1 (0x71A88U) -#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS7_SCRATCH1 (0x71A88U) +#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS7_SCRATCH2 */ -#define RGX_CR_OS7_SCRATCH2 (0x71A90U) -#define 
RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS7_SCRATCH2 (0x71A90U) +#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_OS7_SCRATCH3 */ -#define RGX_CR_OS7_SCRATCH3 (0x71A98U) -#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_OS7_SCRATCH3 (0x71A98U) +#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_SPFILTER_SIGNAL_DESCR */ -#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0xFFFF0000U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0xFFFF0000U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U) /* Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN */ -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U) /* Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x3000U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x3000U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x3008U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x3008U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x3010U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x3010U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x3018U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x3018U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U) 
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x3020U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x3020U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE (4096U) /* Register 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x3028U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x3028U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x3030U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK 
(IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x3030U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x3038U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x3038U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x3040U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x3040U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL 
(IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x3048U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x3048U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x3050U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x3050U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK 
(IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x3058U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x3058U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x3060U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL 
(IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x3060U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x3068U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN 
(IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x3068U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x3070U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT (40U) -#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x3070U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x3078U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT (62U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT (40U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x3078U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL 
(IMG_UINT64_C(0x7FFFF7FFFFFFF000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT (62U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT (40U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_BOOT */ -#define RGX_CR_FWCORE_BOOT (0x3090U) -#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_BOOT_ENABLE_SHIFT (0U) -#define RGX_CR_FWCORE_BOOT_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_BOOT_ENABLE_EN (0x00000001U) +#define RGX_CR_FWCORE_BOOT (0x3090U) +#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_BOOT_ENABLE_SHIFT (0U) +#define RGX_CR_FWCORE_BOOT_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_BOOT_ENABLE_EN (0x00000001U) /* Register RGX_CR_FWCORE_RESET_ADDR */ -#define RGX_CR_FWCORE_RESET_ADDR (0x3098U) -#define RGX_CR_FWCORE_RESET_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) -#define RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT (1U) -#define RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK (0x00000001U) -#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT (1U) -#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE (2U) +#define RGX_CR_FWCORE_RESET_ADDR (0x3098U) +#define RGX_CR_FWCORE_RESET_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) +#define RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT (1U) +#define RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK (0x00000001U) +#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT (1U) +#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE (2U) /* Register RGX_CR_FWCORE_WRAPPER_NMI_ADDR */ -#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR (0x30A0U) -#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) -#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT (1U) -#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK (0x00000001U) -#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT (1U) -#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE (2U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR (0x30A0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) +#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT (1U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT (1U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE (2U) /* Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT */ -#define 
RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x30A8U) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x30A8U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U) /* Register RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS */ -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS (0x30B0U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F771)) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT (5U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT (4U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN (0x00000010U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT (0U) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN (0x00000001U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS (0x30B0U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F771)) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN (0x00000001U) /* Register RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS */ -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS (0x30B8U) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT (52U) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT (46U) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT (40U) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT (4U) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK 
(IMG_UINT64_C(0xFFFFFF000000000F)) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS (0x30B8U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT (52U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT (46U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT (40U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT (4U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) /* Register RGX_CR_FWCORE_MEM_CTRL_INVAL */ -#define RGX_CR_FWCORE_MEM_CTRL_INVAL (0x30C0U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT (3U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN (0x00000008U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT (2U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_EN (0x00000004U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT (1U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_EN (0x00000002U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT (0U) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_EN (0x00000001U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL (0x30C0U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT (3U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN (0x00000008U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT (2U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_EN (0x00000004U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT (1U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_EN (0x00000002U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT (0U) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_EN (0x00000001U) /* Register RGX_CR_FWCORE_MEM_MMU_STATUS */ -#define RGX_CR_FWCORE_MEM_MMU_STATUS (0x30C8U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT (20U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT (4U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT (2U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) -#define 
RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN (0x00000004U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT (1U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN (0x00000002U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT (0U) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN (0x00000001U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS (0x30C8U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT (20U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT (4U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT (2U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN (0x00000004U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT (1U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN (0x00000002U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT (0U) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN (0x00000001U) /* Register RGX_CR_FWCORE_MEM_READS_EXT_STATUS */ -#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS (0x30D8U) -#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) -#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT (0U) -#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK (0xFFFFF000U) +#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS (0x30D8U) +#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) +#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT (0U) +#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK (0xFFFFF000U) /* Register RGX_CR_FWCORE_MEM_READS_INT_STATUS */ -#define RGX_CR_FWCORE_MEM_READS_INT_STATUS (0x30E0U) -#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) -#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT (0U) -#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK (0xFFFFF800U) +#define RGX_CR_FWCORE_MEM_READS_INT_STATUS (0x30E0U) +#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) +#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT (0U) +#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK (0xFFFFF800U) /* Register RGX_CR_FWCORE_WRAPPER_FENCE */ -#define RGX_CR_FWCORE_WRAPPER_FENCE (0x30E8U) -#define RGX_CR_FWCORE_WRAPPER_FENCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT (0U) -#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_EN (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_FENCE (0x30E8U) +#define RGX_CR_FWCORE_WRAPPER_FENCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT (0U) +#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_EN (0x00000001U) /* Register group: RGX_CR_FWCORE_MEM_CAT_BASE, with 8 repeats */ -#define RGX_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT (8U) +#define RGX_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT (8U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE0 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE0 (0x30F0U) 
-#define RGX_CR_FWCORE_MEM_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE0 (0x30F0U) +#define RGX_CR_FWCORE_MEM_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE1 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE1 (0x30F8U) -#define RGX_CR_FWCORE_MEM_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE1 (0x30F8U) +#define RGX_CR_FWCORE_MEM_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE2 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE2 (0x3100U) -#define RGX_CR_FWCORE_MEM_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE2 (0x3100U) +#define RGX_CR_FWCORE_MEM_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE3 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE3 (0x3108U) -#define RGX_CR_FWCORE_MEM_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE3 (0x3108U) +#define RGX_CR_FWCORE_MEM_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE4 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE4 (0x3110U) -#define RGX_CR_FWCORE_MEM_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT (12U) -#define 
RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE4 (0x3110U) +#define RGX_CR_FWCORE_MEM_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE5 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE5 (0x3118U) -#define RGX_CR_FWCORE_MEM_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE5 (0x3118U) +#define RGX_CR_FWCORE_MEM_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE6 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE6 (0x3120U) -#define RGX_CR_FWCORE_MEM_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE6 (0x3120U) +#define RGX_CR_FWCORE_MEM_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_MEM_CAT_BASE7 */ -#define RGX_CR_FWCORE_MEM_CAT_BASE7 (0x3128U) -#define RGX_CR_FWCORE_MEM_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_FWCORE_MEM_CAT_BASE7 (0x3128U) +#define RGX_CR_FWCORE_MEM_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_FWCORE_WDT_RESET */ -#define RGX_CR_FWCORE_WDT_RESET (0x3130U) -#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U) -#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U) +#define RGX_CR_FWCORE_WDT_RESET (0x3130U) +#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U) /* Register RGX_CR_FWCORE_WDT_CTRL */ -#define RGX_CR_FWCORE_WDT_CTRL (0x3138U) 
-#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01)) -#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U) -#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU) -#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U) -#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU) -#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_FWCORE_WDT_CTRL (0x3138U) +#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01)) +#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U) +#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU) +#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U) +#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_FWCORE_WDT_COUNT */ -#define RGX_CR_FWCORE_WDT_COUNT (0x3140U) -#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U) -#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_WDT_COUNT (0x3140U) +#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U) /* Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 4 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (4U) +#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (4U) /* Register RGX_CR_FWCORE_DMI_RESERVED00 */ -#define RGX_CR_FWCORE_DMI_RESERVED00 (0x3400U) -#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED00 (0x3400U) +#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED01 */ -#define RGX_CR_FWCORE_DMI_RESERVED01 (0x3408U) -#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED01 (0x3408U) +#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED02 */ -#define RGX_CR_FWCORE_DMI_RESERVED02 (0x3410U) -#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED02 (0x3410U) +#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED03 */ -#define RGX_CR_FWCORE_DMI_RESERVED03 (0x3418U) -#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED03 (0x3418U) +#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_DATA0 */ -#define RGX_CR_FWCORE_DMI_DATA0 (0x3420U) -#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_DATA0 (0x3420U) +#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_DATA1 */ -#define RGX_CR_FWCORE_DMI_DATA1 (0x3428U) -#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT 
(0U) -#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_DATA1 (0x3428U) +#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U) /* Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 10 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (10U) +#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (10U) /* Register RGX_CR_FWCORE_DMI_RESERVED10 */ -#define RGX_CR_FWCORE_DMI_RESERVED10 (0x3430U) -#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED10 (0x3430U) +#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED11 */ -#define RGX_CR_FWCORE_DMI_RESERVED11 (0x3438U) -#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED11 (0x3438U) +#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED12 */ -#define RGX_CR_FWCORE_DMI_RESERVED12 (0x3440U) -#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED12 (0x3440U) +#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED13 */ -#define RGX_CR_FWCORE_DMI_RESERVED13 (0x3448U) -#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED13 (0x3448U) +#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED14 */ -#define RGX_CR_FWCORE_DMI_RESERVED14 (0x3450U) -#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED14 (0x3450U) +#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_DMCONTROL */ -#define RGX_CR_FWCORE_DMI_DMCONTROL (0x3480U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003)) -#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U) +#define RGX_CR_FWCORE_DMI_DMCONTROL (0x3480U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003)) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU) 
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U) /* Register RGX_CR_FWCORE_DMI_DMSTATUS */ -#define RGX_CR_FWCORE_DMI_DMSTATUS (0x3488U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) -#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) -#define 
RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_FWCORE_DMI_DMSTATUS (0x3488U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) 
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) /* Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 4 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (4U) +#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (4U) /* Register RGX_CR_FWCORE_DMI_RESERVED20 */ -#define RGX_CR_FWCORE_DMI_RESERVED20 (0x3490U) -#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED20 (0x3490U) +#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED21 */ -#define RGX_CR_FWCORE_DMI_RESERVED21 (0x3498U) -#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED21 (0x3498U) +#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED22 */ -#define RGX_CR_FWCORE_DMI_RESERVED22 (0x34A0U) -#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED22 (0x34A0U) +#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED23 */ -#define RGX_CR_FWCORE_DMI_RESERVED23 (0x34A8U) -#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED23 (0x34A8U) +#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_ABSTRACTCS */ -#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x34B0U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x34B0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) /* Register 
RGX_CR_FWCORE_DMI_COMMAND */ -#define RGX_CR_FWCORE_DMI_COMMAND (0x34B8U) -#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) -#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) -#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) +#define RGX_CR_FWCORE_DMI_COMMAND (0x34B8U) +#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) +#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) /* Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 32 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (32U) +#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (32U) /* Register RGX_CR_FWCORE_DMI_RESERVED30 */ -#define RGX_CR_FWCORE_DMI_RESERVED30 (0x34C0U) -#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED30 (0x34C0U) +#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED31 */ -#define RGX_CR_FWCORE_DMI_RESERVED31 (0x34C8U) -#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED31 (0x34C8U) +#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_SBCS */ -#define RGX_CR_FWCORE_DMI_SBCS (0x35C0U) -#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) -#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) -#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) -#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) -#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) -#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) -#define 
RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) +#define RGX_CR_FWCORE_DMI_SBCS (0x35C0U) +#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) +#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) +#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) /* Register RGX_CR_FWCORE_DMI_SBADDRESS0 */ -#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x35C8U) -#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x35C8U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) /* Register group: RGX_CR_FWCORE_DMI_SBDATA, with 4 repeats */ -#define RGX_CR_FWCORE_DMI_SBDATA_REPEATCOUNT (4U) +#define RGX_CR_FWCORE_DMI_SBDATA_REPEATCOUNT (4U) /* Register RGX_CR_FWCORE_DMI_SBDATA0 */ -#define 
RGX_CR_FWCORE_DMI_SBDATA0 (0x35E0U) -#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_SBDATA0 (0x35E0U) +#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_SBDATA1 */ -#define RGX_CR_FWCORE_DMI_SBDATA1 (0x35E8U) -#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_SBDATA1 (0x35E8U) +#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_SBDATA2 */ -#define RGX_CR_FWCORE_DMI_SBDATA2 (0x35F0U) -#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_SBDATA2 (0x35F0U) +#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_SBDATA3 */ -#define RGX_CR_FWCORE_DMI_SBDATA3 (0x35F8U) -#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_SBDATA3 (0x35F8U) +#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_HALTSUM0 */ -#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x3600U) -#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x3600U) +#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) /* Register RGX_CR_SLC_CTRL_MISC */ -#define RGX_CR_SLC_CTRL_MISC (0x3800U) -#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF03FF010F)) -#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U) -#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_SHIFT (25U) -#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT (24U) -#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000)) -#define 
RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000)) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000)) -#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U) -#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT (3U) -#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U) -#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U) -#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U) -#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SLC_CTRL_MISC (0x3800U) +#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF03FF010F)) +#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U) +#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_SHIFT (25U) +#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT (24U) +#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000)) +#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U) +#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT (3U) +#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN (IMG_UINT64_C(0x0000000000000008)) +#define 
RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U) +#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SLC_CTRL_FLUSH_INVAL */ -#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x0000000080000FFF)) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0x80000000U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT (11U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN (0x00000800U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0x00000400U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0x00000200U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0x00000100U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0x00000080U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0x00000040U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0x00000020U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0x00000010U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0x00000008U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0x00000004U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0x00000002U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0x00000001U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x0000000080000FFF)) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U) +#define 
RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0x80000000U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT (11U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN (0x00000800U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0x00000400U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0x00000200U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0x00000100U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0x00000080U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0x00000040U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0x00000020U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0x00000010U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0x00000008U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0x00000004U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0x00000002U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0x00000001U) /* Register RGX_CR_SLC_STATUS0 */ -#define RGX_CR_SLC_STATUS0 (0x3820U) -#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U) -#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0x00000004U) -#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U) -#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0x00000002U) -#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U) -#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0x00000001U) +#define RGX_CR_SLC_STATUS0 (0x3820U) +#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U) +#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0x00000004U) +#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U) +#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0x00000002U) +#define 
RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U) +#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0x00000001U) /* Register RGX_CR_SLC_CTRL_BYPASS */ -#define RGX_CR_SLC_CTRL_BYPASS (0x3828U) -#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFF7FFF)) -#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT (59U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN (IMG_UINT64_C(0x0800000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT (58U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT (57U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT (56U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT (55U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT (54U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT (53U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT (52U) -#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT (51U) -#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT (50U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT (49U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT (48U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT (47U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN 
(IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT (46U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT (45U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT (44U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_EN (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT (43U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_EN (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT (42U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT (41U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT (40U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT (39U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN (IMG_UINT64_C(0x0000008000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT (38U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT (37U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT (36U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_EN (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT (35U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT (34U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT (33U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT (32U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT (31U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define 
RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT (30U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT (29U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT (28U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U) -#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U) -#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (IMG_UINT64_C(0x0000000000008000)) -#define 
RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U) -#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U) -#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SLC_CTRL_BYPASS (0x3828U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFF7FFF)) +#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_ZLS_SHIFT (59U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_ZLS_CLRMSK 
(IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_ZLS_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT (58U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_HEADER_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_HEADER_SHIFT (57U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_HEADER_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_DATA_SHIFT (56U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_DATA_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_DATA_SHIFT (55U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_DATA_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_DATA_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_PBE_SHIFT (54U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_PBE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_PBE_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_DM_COMPUTE_SHIFT (53U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_DM_COMPUTE_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PDSRW_NOLINEFILL_SHIFT (52U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PDSRW_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PDSRW_NOLINEFILL_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PBE_NOLINEFILL_SHIFT (51U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PBE_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PBE_NOLINEFILL_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_FBC_SHIFT (50U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_FBC_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_FBC_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_RREQ_SHIFT (49U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_RREQ_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_RREQ_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_CREQ_SHIFT (48U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_CREQ_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_CREQ_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_PREQ_SHIFT (47U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_PREQ_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_PREQ_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_DBSC_SHIFT (46U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_DBSC_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define 
RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_DBSC_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_SHIFT (45U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PBE_SHIFT (44U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PBE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PBE_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_ISP_SHIFT (43U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_ISP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_ISP_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PM_SHIFT (42U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PM_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PM_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TDM_SHIFT (41U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TDM_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_CDM_SHIFT (40U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_CDM_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_PDS_STATE_SHIFT (39U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_PDS_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_PDS_STATE_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_DB_SHIFT (38U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_DB_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_DB_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_VTX_VAR_SHIFT (37U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_VTX_VAR_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_VTX_VAR_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_VDM_SHIFT (36U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_VDM_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_STREAM_SHIFT (35U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_STREAM_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_STREAM_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_REGION_SHIFT (34U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_REGION_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_REGION_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_VCE_SHIFT (33U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_VCE_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PPP_SHIFT (32U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PPP_EN 
(IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_FASTRENDER_SHIFT (31U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_FASTRENDER_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_FASTRENDER_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PM_ALIST_SHIFT (30U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PM_ALIST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PM_ALIST_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_TE_SHIFT (29U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_TE_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_VCE_SHIFT (28U) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_VCE_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define 
RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U) +#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U) +#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SLC_STATUS1 */ -#define 
RGX_CR_SLC_STATUS1 (0x3870U) -#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF)) -#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U) -#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0x8000000000000000)) -#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U) -#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) -#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U) -#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) -#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U) -#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U) -#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_SLC_STATUS1 (0x3870U) +#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF)) +#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U) +#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U) +#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) +#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U) +#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) +#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U) +#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U) +#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_SLC_IDLE */ -#define RGX_CR_SLC_IDLE (0x3898U) -#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL (IMG_UINT64_C(0x00000000000003FF)) -#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_SLC_IDLE_MH_SYSARB1_SHIFT (9U) -#define RGX_CR_SLC_IDLE_MH_SYSARB1_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_SLC_IDLE_MH_SYSARB1_EN (0x00000200U) -#define RGX_CR_SLC_IDLE_MH_SYSARB0_SHIFT (8U) -#define RGX_CR_SLC_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_SLC_IDLE_MH_SYSARB0_EN (0x00000100U) -#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U) -#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_SLC_IDLE_IMGBV4_EN (0x00000080U) -#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U) -#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0x00000040U) -#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U) -#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0x00000020U) -#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U) -#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0x00000010U) -#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U) -#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0x00000008U) -#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U) -#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0x00000004U) -#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U) -#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0x00000002U) -#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U) -#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SLC_IDLE_CBAR_EN (0x00000001U) +#define RGX_CR_SLC_IDLE (0x3898U) +#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL (IMG_UINT64_C(0x00000000000003FF)) +#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define 
RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB1_SHIFT (9U) +#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB1_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB1_EN (0x00000200U) +#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB0_SHIFT (8U) +#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB0_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB0_EN (0x00000100U) +#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U) +#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SLC_IDLE_IMGBV4_EN (0x00000080U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0x00000040U) +#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U) +#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0x00000020U) +#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U) +#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0x00000010U) +#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U) +#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0x00000008U) +#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U) +#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0x00000004U) +#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U) +#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0x00000002U) +#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U) +#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_IDLE_CBAR_EN (0x00000001U) /* Register RGX_CR_SLC_STATUS2 */ -#define RGX_CR_SLC_STATUS2 (0x3908U) -#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF)) -#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U) -#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) -#define RGX_CR_SLC_STATUS2_READS2_SHIFT (16U) -#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) -#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT (8U) -#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U) -#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_SLC_STATUS2 (0x3908U) +#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF)) +#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U) +#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) +#define RGX_CR_SLC_STATUS2_READS2_SHIFT (16U) +#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) +#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT (8U) +#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U) +#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_SLC_CTRL_MISC2 */ -#define RGX_CR_SLC_CTRL_MISC2 (0x3930U) -#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U) -#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (0x00000000U) +#define RGX_CR_SLC_CTRL_MISC2 (0x3930U) +#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U) +#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (0x00000000U) /* Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE */ -#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U) -#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define 
RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U) -#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0x00000001U) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0x00000001U) /* Register RGX_CR_SLC_SIZE_IN_KB */ -#define RGX_CR_SLC_SIZE_IN_KB (0x3970U) -#define RGX_CR_SLC_SIZE_IN_KB_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_SLC_SIZE_IN_KB_SIZE_SHIFT (0U) -#define RGX_CR_SLC_SIZE_IN_KB_SIZE_CLRMSK (0xFFFF0000U) +#define RGX_CR_SLC_SIZE_IN_KB (0x3970U) +#define RGX_CR_SLC_SIZE_IN_KB_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_SLC_SIZE_IN_KB_SIZE_SHIFT (0U) +#define RGX_CR_SLC_SIZE_IN_KB_SIZE_CLRMSK (0xFFFF0000U) /* Register RGX_CR_USC_TIMER */ -#define RGX_CR_USC_TIMER (0x46C8U) -#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) -#define RGX_CR_USC_TIMER_CNT_CLRMSK (0x00000000U) +#define RGX_CR_USC_TIMER (0x46C8U) +#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_CLRMSK (0x00000000U) /* Register RGX_CR_USC_TIMER_CNT */ -#define RGX_CR_USC_TIMER_CNT (0x46D0U) -#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) -#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) +#define RGX_CR_USC_TIMER_CNT (0x46D0U) +#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) /* Register RGX_CR_USC_UVS0_CHECKSUM */ -#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U) -#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U) +#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_USC_UVS1_CHECKSUM */ -#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U) -#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U) +#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_USC_UVS2_CHECKSUM */ -#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U) -#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U) +#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register 
RGX_CR_USC_UVS3_CHECKSUM */ -#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U) -#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U) +#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PPP_SIGNATURE */ -#define RGX_CR_PPP_SIGNATURE (0x5020U) -#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U) -#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PPP_SIGNATURE (0x5020U) +#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U) +#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TE_SIGNATURE */ -#define RGX_CR_TE_SIGNATURE (0x5028U) -#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U) -#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TE_SIGNATURE (0x5028U) +#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U) +#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TE_CHECKSUM */ -#define RGX_CR_TE_CHECKSUM (0x5110U) -#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TE_CHECKSUM (0x5110U) +#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_USC_UVB_CHECKSUM */ -#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) -#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) +#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_VCE_CHECKSUM */ -#define RGX_CR_VCE_CHECKSUM (0x5030U) -#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_VCE_CHECKSUM (0x5030U) +#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_ISP_PDS_CHECKSUM */ -#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) -#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) +#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_ISP_TPF_CHECKSUM */ -#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) -#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) 
-#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) +#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TFPU_PLANE0_CHECKSUM */ -#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U) -#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U) +#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TFPU_PLANE1_CHECKSUM */ -#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U) -#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U) +#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PBE_CHECKSUM */ -#define RGX_CR_PBE_CHECKSUM (0x5058U) -#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PBE_CHECKSUM (0x5058U) +#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PDS_DOUTM_STM_SIGNATURE */ -#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U) -#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U) -#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U) +#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_IFPU_ISP_CHECKSUM */ -#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) -#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) +#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_USC_UVS4_CHECKSUM */ -#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U) -#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U) +#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_USC_UVS5_CHECKSUM */ -#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U) -#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U) +#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PPP_CLIP_CHECKSUM */ -#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) -#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) +#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_TA_PHASE */ -#define RGX_CR_PERF_TA_PHASE (0x6008U) -#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_TA_PHASE (0x6008U) +#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_3D_PHASE */ -#define RGX_CR_PERF_3D_PHASE (0x6010U) -#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_3D_PHASE (0x6010U) +#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_COMPUTE_PHASE */ -#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U) -#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U) +#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_TA_CYCLE */ -#define RGX_CR_PERF_TA_CYCLE (0x6020U) -#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_TA_CYCLE (0x6020U) +#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_3D_CYCLE */ -#define RGX_CR_PERF_3D_CYCLE (0x6028U) -#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_3D_CYCLE (0x6028U) +#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_COMPUTE_CYCLE */ -#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U) -#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U) -#define 
RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U) +#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_TA_OR_3D_CYCLE */ -#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U) -#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U) +#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_INITIAL_TA_CYCLE */ -#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U) -#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U) -#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U) +#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U) +#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC0_READ_STALL */ -#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) -#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) +#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC0_WRITE_STALL */ -#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) -#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC1_READ_STALL */ -#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) -#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) +#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC1_WRITE_STALL */ -#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) -#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register 
RGX_CR_PERF_SLC2_READ_STALL */ -#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) -#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) +#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC2_WRITE_STALL */ -#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) -#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC3_READ_STALL */ -#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) -#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) +#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC3_WRITE_STALL */ -#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) -#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_3D_SPINUP */ -#define RGX_CR_PERF_3D_SPINUP (0x6220U) -#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U) -#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (0x00000000U) +#define RGX_CR_PERF_3D_SPINUP (0x6220U) +#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U) +#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (0x00000000U) /* Register RGX_CR_AXI_ACE_LITE_CONFIGURATION */ -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00003FFFFFFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN 
(IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U) -#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00003FFFFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U) +#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0)) /* Register RGX_CR_POWER_ESTIMATE_RESULT */ -#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U) -#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U) -#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U) +#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U) +#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TA_PERF */ -#define RGX_CR_TA_PERF (0x7600U) -#define RGX_CR_TA_PERF_MASKFULL 
(IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_TA_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_TA_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_TA_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_TA_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_TA_PERF (0x7600U) +#define RGX_CR_TA_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TA_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TA_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TA_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TA_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_TA_PERF_SELECT0 */ -#define RGX_CR_TA_PERF_SELECT0 (0x7608U) -#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TA_PERF_SELECT0 (0x7608U) +#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TA_PERF_SELECT1 */ -#define RGX_CR_TA_PERF_SELECT1 (0x7610U) -#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define 
RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U) -#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U) -#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TA_PERF_SELECT1 (0x7610U) +#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TA_PERF_SELECT2 */ -#define RGX_CR_TA_PERF_SELECT2 (0x7618U) -#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U) -#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U) -#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TA_PERF_SELECT2 (0x7618U) +#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TA_PERF_SELECT3 */ -#define RGX_CR_TA_PERF_SELECT3 (0x7620U) -#define RGX_CR_TA_PERF_SELECT3_MASKFULL 
(IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U) -#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U) -#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TA_PERF_SELECT3 (0x7620U) +#define RGX_CR_TA_PERF_SELECT3_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U) +#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TA_PERF_SELECTED_BITS */ -#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U) -#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U) -#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U) +#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U) +#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TA_PERF_COUNTER_0 */ -#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U) -#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U) +#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U) +#define 
RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_TA_PERF_COUNTER_1 */ -#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U) -#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U) -#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (0x00000000U) +#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U) +#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (0x00000000U) /* Register RGX_CR_TA_PERF_COUNTER_2 */ -#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U) -#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U) -#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (0x00000000U) +#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U) +#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (0x00000000U) /* Register RGX_CR_TA_PERF_COUNTER_3 */ -#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U) -#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U) -#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (0x00000000U) +#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U) +#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U) +#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (0x00000000U) /* Register RGX_CR_RASTERISATION_PERF */ -#define RGX_CR_RASTERISATION_PERF (0x7700U) -#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_RASTERISATION_PERF (0x7700U) +#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define 
RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_RASTERISATION_PERF_SELECT0 */ -#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U) -#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_RASTERISATION_PERF_COUNTER_0 */ -#define RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U) -#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_HUB_BIFPMCACHE_PERF */ -#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK 
(0xFFFFFFFDU) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 */ -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 */ -#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U) -#define 
RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_TPU_MCU_L0_PERF */ -#define RGX_CR_TPU_MCU_L0_PERF (0x7900U) -#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_TPU_MCU_L0_PERF (0x7900U) +#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_TPU_MCU_L0_PERF_SELECT0 */ -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U) +#define 
RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 */ -#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U) -#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_USC_PERF */ -#define RGX_CR_USC_PERF (0x8100U) -#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_USC_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_USC_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_USC_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_USC_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_USC_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_USC_PERF (0x8100U) +#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_USC_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_USC_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_USC_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_USC_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_USC_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_USC_PERF_SELECT0 */ -#define RGX_CR_USC_PERF_SELECT0 (0x8108U) -#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define 
RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_USC_PERF_SELECT0 (0x8108U) +#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_USC_PERF_COUNTER_0 */ -#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U) -#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U) +#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_JONES_IDLE */ -#define RGX_CR_JONES_IDLE (0x8328U) -#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF)) -#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) -#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) -#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) -#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) -#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) -#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) -#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) -#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) -#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U) -#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_JONES_IDLE_TLA_EN (0x00000400U) -#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) -#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) -#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U) -#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_JONES_IDLE_HOSTIF_EN (0x00000100U) -#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) -#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) -#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) -#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) -#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) -#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) 
-#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) -#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U) -#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_JONES_IDLE_USCS_EN (0x00000010U) -#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) -#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) -#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) -#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) -#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U) -#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_JONES_IDLE_VDM_EN (0x00000002U) -#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) -#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) +#define RGX_CR_JONES_IDLE (0x8328U) +#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF)) +#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) +#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) +#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) +#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) +#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) +#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) +#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U) +#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_JONES_IDLE_TLA_EN (0x00000400U) +#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) +#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) +#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U) +#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_IDLE_HOSTIF_EN (0x00000100U) +#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) +#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) +#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) +#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) +#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) +#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) +#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U) +#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_IDLE_USCS_EN (0x00000010U) +#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) +#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) +#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) +#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) +#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U) +#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_IDLE_VDM_EN (0x00000002U) +#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) +#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) /* Register RGX_CR_TORNADO_PERF */ -#define RGX_CR_TORNADO_PERF (0x8228U) -#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_TORNADO_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_TORNADO_PERF_CLR_2_EN (0x00000008U) -#define 
RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_TORNADO_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_TORNADO_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_TORNADO_PERF (0x8228U) +#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TORNADO_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TORNADO_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TORNADO_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TORNADO_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_TORNADO_PERF_SELECT0 */ -#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U) -#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U) +#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TORNADO_PERF_COUNTER_0 */ -#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U) -#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U) -#define 
RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U) +#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_TEXAS_PERF */ -#define RGX_CR_TEXAS_PERF (0x8290U) -#define RGX_CR_TEXAS_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U) -#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_TEXAS_PERF_CLR_5_EN (0x00000040U) -#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U) -#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_TEXAS_PERF_CLR_4_EN (0x00000020U) -#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_TEXAS_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_TEXAS_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_TEXAS_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_TEXAS_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_TEXAS_PERF (0x8290U) +#define RGX_CR_TEXAS_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U) +#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_TEXAS_PERF_CLR_5_EN (0x00000040U) +#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U) +#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_TEXAS_PERF_CLR_4_EN (0x00000020U) +#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TEXAS_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TEXAS_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_TEXAS_PERF_SELECT0 */ -#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U) -#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) -#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U) -#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) -#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define 
RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U) +#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U) +#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) +#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_TEXAS_PERF_COUNTER_0 */ -#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U) -#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U) +#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_JONES_PERF */ -#define RGX_CR_JONES_PERF (0x8330U) -#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_JONES_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_JONES_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_JONES_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_JONES_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_JONES_PERF (0x8330U) +#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_JONES_PERF_SELECT0 */ -#define RGX_CR_JONES_PERF_SELECT0 (0x8338U) -#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define 
RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_JONES_PERF_SELECT0 (0x8338U) +#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_JONES_PERF_COUNTER_0 */ -#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U) -#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U) +#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_BLACKPEARL_PERF */ -#define RGX_CR_BLACKPEARL_PERF (0x8400U) -#define RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U) -#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0x00000040U) -#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U) -#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0x00000020U) -#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_BLACKPEARL_PERF (0x8400U) +#define 
RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U) +#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0x00000040U) +#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U) +#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0x00000020U) +#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_BLACKPEARL_PERF_SELECT0 */ -#define RGX_CR_BLACKPEARL_PERF_SELECT0 (0x8408U) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0 (0x8408U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_BLACKPEARL_PERF_COUNTER_0 */ -#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U) -#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) 
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U) +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_PBE_PERF */ -#define RGX_CR_PBE_PERF (0x8478U) -#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_PBE_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_PBE_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_PBE_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_PBE_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_PBE_PERF (0x8478U) +#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_PBE_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_PBE_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_PBE_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_PBE_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_PBE_PERF_SELECT0 */ -#define RGX_CR_PBE_PERF_SELECT0 (0x8480U) -#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_PBE_PERF_SELECT0 (0x8480U) +#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) 
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_PBE_PERF_COUNTER_0 */ -#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U) -#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U) +#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_OCP_REVINFO */ -#define RGX_CR_OCP_REVINFO (0x9000U) -#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF)) -#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U) -#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF)) -#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U) -#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U) -#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define RGX_CR_OCP_REVINFO (0x9000U) +#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF)) +#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U) +#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF)) +#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U) +#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U) +#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_OCP_SYSCONFIG */ -#define RGX_CR_OCP_SYSCONFIG (0x9010U) -#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) -#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U) -#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0xFFFFF3FFU) -#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U) -#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0xFFFFFCFFU) -#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U) -#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U) -#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U) -#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U) -#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_OCP_SYSCONFIG (0x9010U) +#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) +#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U) +#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0xFFFFF3FFU) +#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U) +#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0xFFFFFCFFU) +#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U) +#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U) +#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U) 
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U) +#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_OCP_IRQSTATUS_RAW_0 */ -#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U) -#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U) -#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U) +#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U) /* Register RGX_CR_OCP_IRQSTATUS_RAW_1 */ -#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U) -#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U) -#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U) +#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U) /* Register RGX_CR_OCP_IRQSTATUS_RAW_2 */ -#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U) -#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U) -#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0x00000001U) +#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0x00000001U) /* Register RGX_CR_OCP_IRQSTATUS_0 */ -#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U) -#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U) -#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0x00000001U) +#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U) +#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0x00000001U) /* Register RGX_CR_OCP_IRQSTATUS_1 */ -#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U) -#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U) -#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U) +#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U) +#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define 
RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U) /* Register RGX_CR_OCP_IRQSTATUS_2 */ -#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U) -#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U) -#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0x00000001U) +#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U) +#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U) +#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0x00000001U) /* Register RGX_CR_OCP_IRQENABLE_SET_0 */ -#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U) -#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U) -#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U) +#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U) +#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U) /* Register RGX_CR_OCP_IRQENABLE_SET_1 */ -#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U) -#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U) -#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U) +#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U) +#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U) /* Register RGX_CR_OCP_IRQENABLE_SET_2 */ -#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U) -#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U) -#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0x00000001U) +#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U) +#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0x00000001U) /* Register RGX_CR_OCP_IRQENABLE_CLR_0 */ -#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U) -#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U) -#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U) +#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U) +#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL 
(IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U) /* Register RGX_CR_OCP_IRQENABLE_CLR_1 */ -#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U) -#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U) -#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U) +#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U) +#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U) /* Register RGX_CR_OCP_IRQENABLE_CLR_2 */ -#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U) -#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U) -#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0x00000001U) +#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U) +#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U) +#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0x00000001U) /* Register RGX_CR_OCP_IRQ_EVENT */ -#define RGX_CR_OCP_IRQ_EVENT (0x9080U) -#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U) -#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U) -#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U) -#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U) -#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000004000)) -#define 
RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U) -#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define 
RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_IRQ_EVENT (0x9080U) +#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define 
RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_OCP_DEBUG_CONFIG */ -#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U) -#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U) -#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0x00000001U) +#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U) +#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U) +#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0x00000001U) /* Register RGX_CR_OCP_DEBUG_STATUS */ -#define RGX_CR_OCP_DEBUG_STATUS (0x9090U) -#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define 
RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT (7U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U) -#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) - - -#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0x00000040U) -#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U) -#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0x00000020U) -#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U) -#define 
RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0x00000010U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0x00000008U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0x00000004U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0x00000002U) -#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U) -#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0x00000001U) - - -#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU) +#define RGX_CR_OCP_DEBUG_STATUS (0x9090U) +#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0x0000000100000000)) +#define 
RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0x0000000000000400)) +#define 
RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT (7U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U) +#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0x00000040U) +#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U) +#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0x00000020U) +#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U) +#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0x00000010U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0x00000008U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0x00000004U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0x00000002U) +#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U) +#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0x00000001U) + + +#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU) /* Register RGX_CR_BIF_TRUST */ -#define RGX_CR_BIF_TRUST (0xA000U) -#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) -#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U) -#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0x00100000U) -#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U) -#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0x00080000U) -#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U) -#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0x00040000U) -#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U) -#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0x00020000U) -#define 
RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U) -#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_BIF_TRUST_ENABLE_EN (0x00010000U) -#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U) -#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0xFFFF01FFU) -#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U) -#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0x00000100U) -#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U) -#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0x00000080U) -#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U) -#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0x00000040U) -#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U) -#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0x00000020U) -#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U) -#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0x00000010U) -#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U) -#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0x00000008U) -#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U) -#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0x00000004U) -#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U) -#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0x00000002U) -#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U) -#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0x00000001U) +#define RGX_CR_BIF_TRUST (0xA000U) +#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) +#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U) +#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0x00100000U) +#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U) +#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0x00080000U) +#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U) +#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0x00040000U) +#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U) +#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0x00020000U) +#define RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U) +#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_BIF_TRUST_ENABLE_EN (0x00010000U) +#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U) +#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0xFFFF01FFU) +#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U) +#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0x00000100U) +#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U) +#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0x00000080U) 
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U) +#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0x00000040U) +#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U) +#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0x00000020U) +#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U) +#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0x00000010U) +#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U) +#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0x00000008U) +#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U) +#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0x00000004U) +#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U) +#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0x00000002U) +#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U) +#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0x00000001U) /* Register RGX_CR_SYS_BUS_SECURE */ -#define RGX_CR_SYS_BUS_SECURE (0xA100U) -#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) -#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) +#define RGX_CR_SYS_BUS_SECURE (0xA100U) +#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) /* Register RGX_CR_FBA_FC0_CHECKSUM */ -#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U) -#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U) +#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_FBA_FC1_CHECKSUM */ -#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U) -#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U) +#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_FBA_FC2_CHECKSUM */ -#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U) -#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U) +#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U) 
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_FBA_FC3_CHECKSUM */ -#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U) -#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U) +#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_CLK_CTRL2 */ -#define RGX_CR_CLK_CTRL2 (0xD200U) -#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33)) -#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U) -#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U) -#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_CR_CLK_CTRL2_VRDM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U) -#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -#define RGX_CR_CLK_CTRL2_SH_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U) -#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -#define RGX_CR_CLK_CTRL2_FBA_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL2 (0xD200U) +#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U) +#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL2_VRDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U) +#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL2_SH_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U) +#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL2_FBA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002)) /* Register RGX_CR_CLK_STATUS2 */ -#define RGX_CR_CLK_STATUS2 (0xD208U) -#define RGX_CR_CLK_STATUS2_MASKFULL 
(IMG_UINT64_C(0x0000000000000015)) -#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U) -#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_CLK_STATUS2_VRDM_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U) -#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_CLK_STATUS2_SH_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U) -#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_CLK_STATUS2_FBA_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_STATUS2 (0xD208U) +#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000015)) +#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U) +#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS2_VRDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U) +#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_CLK_STATUS2_SH_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U) +#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS2_FBA_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_RPM_SHF_FPL */ -#define RGX_CR_RPM_SHF_FPL (0xD520U) -#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) -#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U) -#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) -#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U) -#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) -#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U) -#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U) +#define RGX_CR_RPM_SHF_FPL (0xD520U) +#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) +#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U) +#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) +#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U) +#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) +#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U) +#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U) /* Register RGX_CR_RPM_SHF_FPL_READ */ -#define RGX_CR_RPM_SHF_FPL_READ (0xD528U) -#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U) -#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0x00400000U) -#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U) -#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) +#define RGX_CR_RPM_SHF_FPL_READ (0xD528U) +#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) /* Register RGX_CR_RPM_SHF_FPL_WRITE */ -#define 
RGX_CR_RPM_SHF_FPL_WRITE (0xD530U) -#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U) -#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0x00400000U) -#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U) -#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) +#define RGX_CR_RPM_SHF_FPL_WRITE (0xD530U) +#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) /* Register RGX_CR_RPM_SHG_FPL */ -#define RGX_CR_RPM_SHG_FPL (0xD538U) -#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) -#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U) -#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) -#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U) -#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) -#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U) -#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U) +#define RGX_CR_RPM_SHG_FPL (0xD538U) +#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) +#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U) +#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) +#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U) +#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) +#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U) +#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U) /* Register RGX_CR_RPM_SHG_FPL_READ */ -#define RGX_CR_RPM_SHG_FPL_READ (0xD540U) -#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U) -#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0x00400000U) -#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U) -#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) +#define RGX_CR_RPM_SHG_FPL_READ (0xD540U) +#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) /* Register RGX_CR_RPM_SHG_FPL_WRITE */ -#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U) -#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U) -#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN (0x00400000U) -#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U) -#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) +#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U) +#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U) +#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN (0x00400000U) +#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U) +#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) /* Register RGX_CR_SH_PERF */ -#define RGX_CR_SH_PERF (0xD5F8U) -#define RGX_CR_SH_PERF_MASKFULL 
(IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U) -#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SH_PERF_CLR_3_EN (0x00000010U) -#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U) -#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SH_PERF_CLR_2_EN (0x00000008U) -#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U) -#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SH_PERF_CLR_1_EN (0x00000004U) -#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U) -#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SH_PERF_CLR_0_EN (0x00000002U) -#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_SH_PERF (0xD5F8U) +#define RGX_CR_SH_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U) +#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SH_PERF_CLR_3_EN (0x00000010U) +#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U) +#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SH_PERF_CLR_2_EN (0x00000008U) +#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U) +#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SH_PERF_CLR_1_EN (0x00000004U) +#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U) +#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SH_PERF_CLR_0_EN (0x00000002U) +#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_SH_PERF_SELECT0 */ -#define RGX_CR_SH_PERF_SELECT0 (0xD600U) -#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U) -#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_SH_PERF_SELECT0 (0xD600U) +#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U) +#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_SH_PERF_COUNTER_0 */ -#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U) -#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define 
RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U) -#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U) +#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U) +#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register RGX_CR_SHF_SHG_CHECKSUM */ -#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U) -#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U) +#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM */ -#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U) -#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_SHF_VARY_BIF_CHECKSUM */ -#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U) -#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_RPM_BIF_CHECKSUM */ -#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U) -#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U) +#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_SHG_BIF_CHECKSUM */ -#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U) -#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U) +#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_SHG_FE_BE_CHECKSUM */ -#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U) -#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U) +#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register DPX_CR_BF_PERF */ -#define DPX_CR_BF_PERF (0xC458U) -#define DPX_CR_BF_PERF_MASKFULL 
(IMG_UINT64_C(0x000000000000001F)) -#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U) -#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define DPX_CR_BF_PERF_CLR_3_EN (0x00000010U) -#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U) -#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define DPX_CR_BF_PERF_CLR_2_EN (0x00000008U) -#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U) -#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define DPX_CR_BF_PERF_CLR_1_EN (0x00000004U) -#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U) -#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define DPX_CR_BF_PERF_CLR_0_EN (0x00000002U) -#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U) -#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0x00000001U) +#define DPX_CR_BF_PERF (0xC458U) +#define DPX_CR_BF_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BF_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_BF_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BF_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BF_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register DPX_CR_BF_PERF_SELECT0 */ -#define DPX_CR_BF_PERF_SELECT0 (0xC460U) -#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U) -#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define DPX_CR_BF_PERF_SELECT0 (0xC460U) +#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register DPX_CR_BF_PERF_COUNTER_0 */ -#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U) -#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define 
DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U) -#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U) +#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register DPX_CR_BT_PERF */ -#define DPX_CR_BT_PERF (0xC3D0U) -#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U) -#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define DPX_CR_BT_PERF_CLR_3_EN (0x00000010U) -#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U) -#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define DPX_CR_BT_PERF_CLR_2_EN (0x00000008U) -#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U) -#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define DPX_CR_BT_PERF_CLR_1_EN (0x00000004U) -#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U) -#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define DPX_CR_BT_PERF_CLR_0_EN (0x00000002U) -#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U) -#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0x00000001U) +#define DPX_CR_BT_PERF (0xC3D0U) +#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BT_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_BT_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BT_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BT_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register DPX_CR_BT_PERF_SELECT0 */ -#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U) -#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U) -#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U) +#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT 
(16U) +#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register DPX_CR_BT_PERF_COUNTER_0 */ -#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U) -#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U) -#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U) +#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register DPX_CR_RQ_USC_DEBUG */ -#define DPX_CR_RQ_USC_DEBUG (0xC110U) -#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U) -#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define DPX_CR_RQ_USC_DEBUG (0xC110U) +#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U) +#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS */ -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0x00000010U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0x00000001U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0x00000010U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK 
(0xFFFFFFFEU) +#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0x00000001U) /* Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS */ -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0)) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0200000000000000)) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFE000FFFFFFFFFFF)) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0200000000000000)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFE000FFFFFFFFFFF)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) +#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) /* Register DPX_CR_BIF_MMU_STATUS */ -#define DPX_CR_BIF_MMU_STATUS (0xC5D8U) -#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) -#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) -#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) -#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) -#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) -#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) -#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) -#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) -#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) -#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) -#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) -#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) -#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) -#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) -#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) -#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) +#define DPX_CR_BIF_MMU_STATUS (0xC5D8U) +#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) +#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) +#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) +#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) +#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) +#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) +#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) +#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) +#define 
DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) +#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) +#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) +#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) +#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) /* Register DPX_CR_RT_PERF */ -#define DPX_CR_RT_PERF (0xC700U) -#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U) -#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define DPX_CR_RT_PERF_CLR_3_EN (0x00000010U) -#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U) -#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define DPX_CR_RT_PERF_CLR_2_EN (0x00000008U) -#define DPX_CR_RT_PERF_CLR_1_SHIFT (2U) -#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define DPX_CR_RT_PERF_CLR_1_EN (0x00000004U) -#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U) -#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define DPX_CR_RT_PERF_CLR_0_EN (0x00000002U) -#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U) -#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0x00000001U) +#define DPX_CR_RT_PERF (0xC700U) +#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_RT_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_RT_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_RT_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_RT_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_RT_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register DPX_CR_RT_PERF_SELECT0 */ -#define DPX_CR_RT_PERF_SELECT0 (0xC708U) -#define DPX_CR_RT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U) -#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define DPX_CR_RT_PERF_SELECT0 (0xC708U) +#define DPX_CR_RT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define 
DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register DPX_CR_RT_PERF_COUNTER_0 */ -#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U) -#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U) -#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U) +#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register DPX_CR_BX_TU_PERF */ -#define DPX_CR_BX_TU_PERF (0xC908U) -#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U) -#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -#define DPX_CR_BX_TU_PERF_CLR_3_EN (0x00000010U) -#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U) -#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -#define DPX_CR_BX_TU_PERF_CLR_2_EN (0x00000008U) -#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U) -#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -#define DPX_CR_BX_TU_PERF_CLR_1_EN (0x00000004U) -#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U) -#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -#define DPX_CR_BX_TU_PERF_CLR_0_EN (0x00000002U) -#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U) -#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0x00000001U) +#define DPX_CR_BX_TU_PERF (0xC908U) +#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U) +#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) +#define DPX_CR_BX_TU_PERF_CLR_3_EN (0x00000010U) +#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U) +#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) +#define DPX_CR_BX_TU_PERF_CLR_2_EN (0x00000008U) +#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U) +#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) +#define DPX_CR_BX_TU_PERF_CLR_1_EN (0x00000004U) +#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U) +#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) +#define DPX_CR_BX_TU_PERF_CLR_0_EN (0x00000002U) +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U) +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0x00000001U) /* Register DPX_CR_BX_TU_PERF_SELECT0 */ -#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U) -#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U) -#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U) +#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U) +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U) +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) +#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) +#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) +#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U) +#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register DPX_CR_BX_TU_PERF_COUNTER_0 */ -#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U) -#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U) -#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) +#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U) +#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U) +#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) /* Register DPX_CR_RS_PDS_RR_CHECKSUM */ -#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U) -#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U) -#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) +#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U) +#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U) +#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) /* Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT */ -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MMU_CBASE_MAPPING */ -#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) -#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) +#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_MMU_FAULT_STATUS */ -#define RGX_CR_MMU_FAULT_STATUS (0xE150U) -#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define 
RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U) -#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U) -#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U) -#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) -#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT (6U) -#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U) -#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U) -#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U) -#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MMU_FAULT_STATUS (0xE150U) +#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) +#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT (6U) +#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MMU_FAULT_STATUS_META */ -#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U) -#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U) -#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U) -#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U) -#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U) -#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U) -#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) -#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) -#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U) +#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SLC3_CTRL_MISC */ -#define RGX_CR_SLC3_CTRL_MISC (0xE200U) -#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107)) -#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U) -#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0x00000100U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0xFFFFFFF8U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (0x00000000U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U) -#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U) +#define RGX_CR_SLC3_CTRL_MISC (0xE200U) +#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107)) +#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U) +#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0x00000100U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (0x00000000U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U) +#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U) 
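For orientation, here is a minimal sketch of how the generated *_SHIFT/*_CLRMSK/*_EN defines in this hunk are conventionally consumed, assuming the usual Rogue layout in which *_CLRMSK is the register value with the field bits cleared and *_SHIFT is the field's least-significant bit position; the helper names and the raw ui64Status parameter are illustrative only and are not part of this patch, and IMG_UINT64_C()/the fixed-width types are assumed to come from the driver's img_types.h plus standard C headers:

#include <stdbool.h>
#include <stdint.h>

/* Extract the page-table LEVEL field (bits 5:4) from a raw read of
 * RGX_CR_MMU_FAULT_STATUS (0xE150): clear everything except the field,
 * then shift it down to bit 0. */
static inline uint32_t mmu_fault_level(uint64_t ui64Status)
{
	return (uint32_t)((ui64Status & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK)
	                  >> RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT);
}

/* Same pattern for the faulting ADDRESS field (bits 63:28). */
static inline uint64_t mmu_fault_address(uint64_t ui64Status)
{
	return (ui64Status & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK)
	       >> RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
}

/* Single-bit fields carry an *_EN mask that can be tested directly, or
 * OR'd into a value being composed for a write; RNW conventionally reads
 * as "read, not write". */
static inline bool mmu_fault_is_read(uint64_t ui64Status)
{
	return (ui64Status & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
}

The same access pattern applies to every *_SHIFT/*_CLRMSK pair introduced or re-aligned in this file, including the DPX_CR_* blocks above and the RGX_CR_MULTICORE_* and safety-event groups below.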
/* Register RGX_CR_SLC3_SCRAMBLE */ -#define RGX_CR_SLC3_SCRAMBLE (0xE208U) -#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U) -#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SLC3_SCRAMBLE (0xE208U) +#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SLC3_SCRAMBLE2 */ -#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U) -#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U) -#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U) +#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SLC3_SCRAMBLE3 */ -#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U) -#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U) -#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U) +#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SLC3_SCRAMBLE4 */ -#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U) -#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U) -#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U) +#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U) +#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SLC3_STATUS */ -#define RGX_CR_SLC3_STATUS (0xE220U) -#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U) -#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U) -#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U) -#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U) -#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) +#define RGX_CR_SLC3_STATUS (0xE220U) +#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U) +#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U) +#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U) +#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U) +#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) /* Register RGX_CR_SLC3_IDLE */ -#define RGX_CR_SLC3_IDLE (0xE228U) -#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) -#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U) -#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK 
(0xFFF3FFFFU) -#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U) -#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_SLC3_IDLE_MMU_EN (0x00020000U) -#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U) -#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_SLC3_IDLE_RDI_EN (0x00010000U) -#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U) -#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U) -#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) -#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U) -#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U) -#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0x00000002U) -#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U) -#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SLC3_IDLE_XBAR_EN (0x00000001U) +#define RGX_CR_SLC3_IDLE (0xE228U) +#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK (0xFFF3FFFFU) +#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U) +#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_SLC3_IDLE_MMU_EN (0x00020000U) +#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U) +#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_SLC3_IDLE_RDI_EN (0x00010000U) +#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U) +#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U) +#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U) +#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U) +#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0x00000002U) +#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U) +#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC3_IDLE_XBAR_EN (0x00000001U) /* Register RGX_CR_SLC3_FAULT_STOP_STATUS */ -#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U) -#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) -#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U) -#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFFE000U) +#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U) +#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) +#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U) +#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFFE000U) /* Register RGX_CR_VDM_CONTEXT_STORE_MODE */ -#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U) -#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U) -#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0xFFFFFFFCU) -#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (0x00000000U) -#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0x00000001U) -#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0x00000002U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (0x00000000U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0x00000001U) +#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0x00000002U) /* Register RGX_CR_CONTEXT_MAPPING0 
*/ -#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) -#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) -#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0x00FFFFFFU) -#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0xFF00FFFFU) -#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) +#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0x00FFFFFFU) +#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_CONTEXT_MAPPING1 */ -#define RGX_CR_CONTEXT_MAPPING1 (0xF080U) -#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CONTEXT_MAPPING1 (0xF080U) +#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_CONTEXT_MAPPING2 */ -#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) -#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) -#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) +#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_CONTEXT_MAPPING3 */ -#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) -#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) -#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) +#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) 
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_BIF_JONES_OUTSTANDING_READ */ -#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U) -#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U) -#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ */ -#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U) -#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U) -#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIF_DUST_OUTSTANDING_READ */ -#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U) -#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U) -#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) /* Register RGX_CR_JONES_FIX */ -#define RGX_CR_JONES_FIX (0xF0C0U) -#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_JONES_FIX_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_JONES_FIX_DISABLE_SHIFT (0U) -#define RGX_CR_JONES_FIX_DISABLE_CLRMSK (0xFFFF0000U) +#define RGX_CR_JONES_FIX (0xF0C0U) +#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_JONES_FIX_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_JONES_FIX_DISABLE_SHIFT (0U) +#define RGX_CR_JONES_FIX_DISABLE_CLRMSK (0xFFFF0000U) /* Register RGX_CR_CONTEXT_MAPPING4 */ -#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) -#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) -#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) -#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) -#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) 
+#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) +#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_MULTICORE_GPU */ -#define RGX_CR_MULTICORE_GPU (0xF300U) -#define RGX_CR_MULTICORE_GPU_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT (6U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN (0x00000040U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT (5U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN (0x00000020U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT (4U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN (0x00000010U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT (3U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN (0x00000008U) -#define RGX_CR_MULTICORE_GPU_ID_SHIFT (0U) -#define RGX_CR_MULTICORE_GPU_ID_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_MULTICORE_GPU (0xF300U) +#define RGX_CR_MULTICORE_GPU_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT (6U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN (0x00000040U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT (5U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN (0x00000020U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT (4U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN (0x00000010U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT (3U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN (0x00000008U) +#define RGX_CR_MULTICORE_GPU_ID_SHIFT (0U) +#define RGX_CR_MULTICORE_GPU_ID_CLRMSK (0xFFFFFFF8U) /* Register RGX_CR_MULTICORE_SYSTEM */ -#define RGX_CR_MULTICORE_SYSTEM (0xF308U) -#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) -#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MULTICORE_SYSTEM (0xF308U) +#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) /* Register 
RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON */ -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0xF310U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0xF310U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON */ -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON (0xF320U) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON (0xF320U) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON */ -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0xF330U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0xF330U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) 
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_ECC_RAM_ERR_INJ */ -#define RGX_CR_ECC_RAM_ERR_INJ (0xF340U) -#define RGX_CR_ECC_RAM_ERR_INJ_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT (4U) -#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN (0x00000010U) -#define RGX_CR_ECC_RAM_ERR_INJ_USC_SHIFT (3U) -#define RGX_CR_ECC_RAM_ERR_INJ_USC_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_ECC_RAM_ERR_INJ_USC_EN (0x00000008U) -#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT (2U) -#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN (0x00000004U) -#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT (1U) -#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN (0x00000002U) -#define RGX_CR_ECC_RAM_ERR_INJ_MARS_SHIFT (0U) -#define RGX_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_ECC_RAM_ERR_INJ_MARS_EN (0x00000001U) +#define RGX_CR_ECC_RAM_ERR_INJ (0xF340U) +#define RGX_CR_ECC_RAM_ERR_INJ_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT (4U) +#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN (0x00000010U) +#define RGX_CR_ECC_RAM_ERR_INJ_USC_SHIFT (3U) +#define RGX_CR_ECC_RAM_ERR_INJ_USC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_ECC_RAM_ERR_INJ_USC_EN (0x00000008U) +#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT (2U) +#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN (0x00000004U) +#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT (1U) +#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN (0x00000002U) +#define RGX_CR_ECC_RAM_ERR_INJ_MARS_SHIFT (0U) +#define RGX_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ECC_RAM_ERR_INJ_MARS_EN (0x00000001U) /* Register RGX_CR_ECC_RAM_INIT_KICK */ -#define RGX_CR_ECC_RAM_INIT_KICK (0xF348U) -#define RGX_CR_ECC_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT (4U) -#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN (0x00000010U) -#define RGX_CR_ECC_RAM_INIT_KICK_USC_SHIFT (3U) -#define RGX_CR_ECC_RAM_INIT_KICK_USC_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_ECC_RAM_INIT_KICK_USC_EN (0x00000008U) -#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT (2U) -#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN (0x00000004U) -#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT (1U) -#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_EN (0x00000002U) -#define RGX_CR_ECC_RAM_INIT_KICK_MARS_SHIFT (0U) -#define RGX_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_ECC_RAM_INIT_KICK_MARS_EN (0x00000001U) +#define RGX_CR_ECC_RAM_INIT_KICK (0xF348U) +#define RGX_CR_ECC_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT (4U) +#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK 
(0xFFFFFFEFU) +#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN (0x00000010U) +#define RGX_CR_ECC_RAM_INIT_KICK_USC_SHIFT (3U) +#define RGX_CR_ECC_RAM_INIT_KICK_USC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_ECC_RAM_INIT_KICK_USC_EN (0x00000008U) +#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT (2U) +#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN (0x00000004U) +#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT (1U) +#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_EN (0x00000002U) +#define RGX_CR_ECC_RAM_INIT_KICK_MARS_SHIFT (0U) +#define RGX_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ECC_RAM_INIT_KICK_MARS_EN (0x00000001U) /* Register RGX_CR_ECC_RAM_INIT_DONE */ -#define RGX_CR_ECC_RAM_INIT_DONE (0xF350U) -#define RGX_CR_ECC_RAM_INIT_DONE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT (4U) -#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN (0x00000010U) -#define RGX_CR_ECC_RAM_INIT_DONE_USC_SHIFT (3U) -#define RGX_CR_ECC_RAM_INIT_DONE_USC_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_ECC_RAM_INIT_DONE_USC_EN (0x00000008U) -#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT (2U) -#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN (0x00000004U) -#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT (1U) -#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_EN (0x00000002U) -#define RGX_CR_ECC_RAM_INIT_DONE_MARS_SHIFT (0U) -#define RGX_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_ECC_RAM_INIT_DONE_MARS_EN (0x00000001U) +#define RGX_CR_ECC_RAM_INIT_DONE (0xF350U) +#define RGX_CR_ECC_RAM_INIT_DONE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT (4U) +#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN (0x00000010U) +#define RGX_CR_ECC_RAM_INIT_DONE_USC_SHIFT (3U) +#define RGX_CR_ECC_RAM_INIT_DONE_USC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_ECC_RAM_INIT_DONE_USC_EN (0x00000008U) +#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT (2U) +#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN (0x00000004U) +#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT (1U) +#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_EN (0x00000002U) +#define RGX_CR_ECC_RAM_INIT_DONE_MARS_SHIFT (0U) +#define RGX_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ECC_RAM_INIT_DONE_MARS_EN (0x00000001U) /* Register RGX_CR_SAFETY_EVENT_ENABLE */ -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE (0xF390U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -#define 
RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE (0xF390U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) /* Register RGX_CR_SAFETY_EVENT_STATUS */ -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE (0xF398U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_CLRMSK 
(0xFFFFFF7FU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT (3U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN (0x00000008U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT (2U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN (0x00000004U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT (1U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN (0x00000002U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE (0xF398U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_SHIFT (7U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT (3U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN (0x00000008U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT (2U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN (0x00000004U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT (1U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN (0x00000002U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) +#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) 
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) /* Register RGX_CR_SAFETY_EVENT_CLEAR */ -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE (0xF3A0U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT (3U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN (0x00000008U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT (2U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN (0x00000004U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT (1U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN (0x00000002U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE (0xF3A0U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_SHIFT (7U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT (3U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN (0x00000008U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT (2U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN (0x00000004U) +#define 
RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT (1U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN (0x00000002U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) /* Register RGX_CR_FAULT_FW_STATUS */ -#define RGX_CR_FAULT_FW_STATUS (0xF3B0U) -#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000010001)) -#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT (16U) -#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_EN (0x00010000U) -#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT (0U) -#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_EN (0x00000001U) +#define RGX_CR_FAULT_FW_STATUS (0xF3B0U) +#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000010001)) +#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_EN (0x00000001U) /* Register RGX_CR_FAULT_FW_CLEAR */ -#define RGX_CR_FAULT_FW_CLEAR (0xF3B8U) -#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000010001)) -#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT (16U) -#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN (0x00010000U) -#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT (0U) -#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_EN (0x00000001U) +#define RGX_CR_FAULT_FW_CLEAR (0xF3B8U) +#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000010001)) +#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_EN (0x00000001U) /* Register RGX_CR_MTS_SAFETY_EVENT_ENABLE */ -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE (0xF3D8U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -#define 
RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE (0xF3D8U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) #endif /* RGX_CR_DEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxdefs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxdefs_km.h index 296ec4752dbe..818166dff902 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxdefs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxdefs_km.h @@ -48,7 +48,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#endif #define IMG_EXPLICIT_INCLUDE_HWDEFS -#if defined(__KERNEL__) || defined(SUPPORT_SERVICES_SC_UNITTESTS_SERVER) +#if defined(__KERNEL__) || defined(TEE_DDK) #include "rgx_cr_defs_km.h" #endif #undef IMG_EXPLICIT_INCLUDE_HWDEFS @@ -106,10 +106,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION)) #define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION)) -#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)(B))) << (B_POSITION) | \ - (((IMG_UINT64)(V))) << (V_POSITION) | \ - (((IMG_UINT64)(N))) << (N_POSITION) | \ - (((IMG_UINT64)(C))) << (C_POSITION) \ +#define BVNC_PACK(B,V,N,C) (((((IMG_UINT64)(B))) << (B_POSITION)) | \ + ((((IMG_UINT64)(V))) << (V_POSITION)) | \ + ((((IMG_UINT64)(N))) << (N_POSITION)) | \ + ((((IMG_UINT64)(C))) << (C_POSITION)) \ ) #define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U) @@ -118,9 +118,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU) #define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) -#define RGXFW_MAX_NUM_OS (8U) -#define RGXFW_HOST_OS (0U) -#define RGXFW_GUEST_OSID_START (1U) +#if defined(RGX_CR_CORE_ID__PBVNC) +#define GET_PBVNC_B(X) ((X & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT) +#define GET_PBVNC_V(X) ((X & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT) +#define GET_PBVNC_N(X) ((X & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT) +#define GET_PBVNC_C(X) ((X & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT) +#endif + +#if defined(RGX_FEATURE_NUM_OSIDS) +#define RGXFW_MAX_NUM_OSIDS (RGX_FEATURE_NUM_OSIDS) +#else +#define RGXFW_MAX_NUM_OSIDS (8U) +#endif + +#define RGXFW_HOST_DRIVER_ID (0U) +#define RGXFW_GUEST_DRIVER_ID_START (RGXFW_HOST_DRIVER_ID + 1U) #define RGXFW_THREAD_0 (0U) #define RGXFW_THREAD_1 (1U) @@ -139,13 +151,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_META_COREMEM_128K (128*1024) #define RGX_META_COREMEM_256K (256*1024) -#if !defined(__KERNEL__) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) #if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \ (defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0) #define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U) #define RGX_META_COREMEM (1) #define RGX_META_COREMEM_CODE (1) -#if !defined(FIX_HW_BRN_50767) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) +#if !defined(FIX_HW_BRN_50767) && (!defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1)) #define RGX_META_COREMEM_DATA (1) #endif #else @@ -159,6 +171,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_UINT32)(x)) > 0U) ? ((IMG_UINT32)(x)/8U) : (0U)) +#if defined(RGX_FEATURE_META_DMA) +#define RGX_META_DMA_BLOCK_SIZE (32U) +#else +#define RGX_META_DMA_BLOCK_SIZE (0U) +#endif #if defined(SUPPORT_AGP) #define MAX_HW_TA3DCONTEXTS 3U @@ -209,6 +226,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
RGX_CR_SOFT_RESET2_VERTEX_EN) +/* PM interactive mode enabled default for all rogue cores */ +#define PM_INTERACTIVE_MODE + +#define RGX_MLIST_ENTRY_STRIDE (4U) /* 4 bytes */ +#define RGX_NUM_PM_ADDR_SPACES (2U) /* VCE & TE share virtual space and Alist */ +#define RGX_PM_MAX_PB_VIRT_ADDR_SPACE (IMG_UINT64_C(0x400000000)) /* PM Maximum addressable limit */ #define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) #define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1UL << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) @@ -230,7 +253,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) #define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) -#if !defined(__KERNEL__) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) # define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) #endif @@ -242,19 +265,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXFW_META_SUPPORT_2ND_THREAD #endif - -/* - * FW MMU contexts - */ -#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META) -#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) /* FW code/private data */ -#define MMU_CONTEXT_MAPPING_FWIF (0x7U) /* Host/FW data */ -#else -#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) -#define MMU_CONTEXT_MAPPING_FWIF (0x0U) -#endif - - /* * Utility macros to calculate CAT_BASE register addresses */ @@ -271,7 +281,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK #define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U) - /****************************************************************************** * WA HWBRNs *****************************************************************************/ @@ -288,7 +297,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) #endif -#if !defined(__KERNEL__) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) #if defined(RGX_FEATURE_ROGUEXE) #define RGX_NUM_RASTERISATION_MODULES RGX_FEATURE_NUM_CLUSTERS @@ -296,10 +305,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_NUM_RASTERISATION_MODULES RGX_NUM_PHANTOMS #endif -#endif /* defined(__KERNEL__) */ +#endif /* !(defined(__KERNEL__) || defined(TEE_DDK)) */ /* GPU CR timer tick in GPU cycles */ -#define RGX_CRTIME_TICK_IN_CYCLES (256U) +#define RGX_CRTIME_TICK_IN_CYCLES (256U) +#define RGX_CRTIME_TICK_IN_CYCLES_SHIFT (8U) /* for nohw multicore return max cores possible to client */ #define RGX_MULTICORE_MAX_NOHW_CORES (4U) @@ -325,12 +335,25 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) +/* + * Renaming MTS sideband bitfields to emphasize that the Register Bank number + * of the MTS register used identifies a specific Driver/VM rather than the OSID tag + * emitted on bus memory transactions. 
+ */ +#define RGX_MTS_SBDATA_DRIVERID_CLRMSK RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_CLRMSK +#define RGX_MTS_SBDATA_DRIVERID_SHIFT RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_SHIFT + /* * Register Bank containing registers secured against host access */ #define RGX_HOST_SECURE_REGBANK_OFFSET (0xF0000U) #define RGX_HOST_SECURE_REGBANK_SIZE (0x10000U) +/* + Maximum number of render targets in array +*/ +#define RGX_MAX_TA_RENDER_TARGETS (2048U) + /* * Macro used to indicate which version of HWPerf is active */ @@ -346,4 +369,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define RGX_WGP_MAX_NUM_CORES (8U) +#if defined(RGX_FEATURE_VOLCANIC_TB) +#define SUPPORT_VOLCANIC_TB +#endif +#define RGX_FEATURE_SECURITY_ROGUE + +/* Typically the PCI bus returns this value on error */ +#define RGX_PCI_ERROR_VALUE_BYTE (0xFFU) +#define RGX_PCI_ERROR_VALUE_DWORD (0xFFFFFFFFU) + #endif /* RGXDEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmhdefs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmhdefs_km.h index fe8272b8584f..663001dd9ebc 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmhdefs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmhdefs_km.h @@ -57,227 +57,227 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXMHDEFS_KM_REVISION 0 -#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE (0x00000000U) -#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U) -#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE (0x00000002U) - - -#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U) -#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U) -#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U) - - -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK (0x00000008U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST (0x00000009U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK (0x0000000aU) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST (0x0000000bU) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0 (0x0000000cU) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1 (0x0000002dU) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK (0x0000000fU) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK (0x00000012U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK (0x00000013U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK (0x00000016U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK (0x00000017U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP (0x00000019U) -#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP (0x0000001aU) - - -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK (0x00000000U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST (0x00000001U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK (0x00000002U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST (0x00000003U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0 (0x00000004U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1 (0x00000025U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP (0x00000006U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK (0x00000007U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK (0x00000008U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK (0x00000009U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK (0x00000014U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK 
(0x00000015U) -#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP (0x00000018U) - - -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP (0x00000008U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC (0x00000007U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC (0x00000006U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC (0x00000005U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR (0x00000004U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS (0x00000003U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC (0x00000002U) -#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE (0x00000001U) - - -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U) - - -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU) - - -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF 
(0x00000010U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U) - - -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU) -#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU) - - -#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE (0x00000000U) -#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS (0x00000001U) -#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U) -#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA (0x00000003U) - - -#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS (0x00000000U) -#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS (0x00000001U) - - -#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL (0x00000000U) -#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE (0x00000001U) -#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX (0x00000002U) -#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK (0x00000004U) -#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT (0x00000008U) - - -#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U) -#define 
RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA (0x00000001U) -#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA (0x00000002U) -#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE (0x00000003U) - - -#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U) -#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS (0x00000003U) - - -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST (0x00000000U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST (0x00000001U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST (0x00000002U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST (0x00000003U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST (0x00000004U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST (0x00000005U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U) -#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U) - - -#define RGX_MH_TAG_ENCODING_MH_TAG_MMU (0x00000000U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU (0x00000001U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU (0x00000002U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU (0x00000003U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS (0x00000004U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0 (0x00000005U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1 (0x00000006U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2 (0x00000007U) -#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3 (0x00000008U) -#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0 (0x00000009U) -#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1 (0x0000000aU) -#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2 (0x0000000bU) -#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3 (0x0000000cU) -#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4 (0x0000000dU) -#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0 (0x0000000eU) -#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1 (0x0000000fU) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA (0x00000010U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB (0x00000011U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC (0x00000012U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD (0x00000013U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA (0x00000014U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB (0x00000015U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC (0x00000016U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD (0x00000017U) -#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW (0x00000018U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0 (0x00000019U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1 (0x0000001aU) -#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0 (0x0000001bU) -#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1 (0x0000001cU) -#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2 (0x0000001dU) -#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3 (0x0000001eU) -#define RGX_MH_TAG_ENCODING_MH_TAG_USC (0x0000001fU) -#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS (0x00000020U) -#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS (0x00000021U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TPF (0x00000022U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS (0x00000023U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF (0x00000024U) -#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ (0x00000025U) -#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS (0x00000026U) -#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5 (0x00000027U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP (0x00000028U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC (0x00000029U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC (0x0000002aU) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC (0x0000002bU) -#define 
RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION (0x0000002cU) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM (0x0000002dU) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW (0x0000002eU) -#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC (0x0000002fU) -#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC (0x00000030U) -#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC (0x00000031U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA (0x00000032U) -#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL (0x00000033U) -#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0 (0x00000034U) -#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1 (0x00000035U) -#define RGX_MH_TAG_ENCODING_MH_TAG_PBE2 (0x00000036U) -#define RGX_MH_TAG_ENCODING_MH_TAG_PBE3 (0x00000037U) +#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE (0x00000000U) +#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U) +#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE (0x00000002U) + + +#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U) +#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U) +#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U) + + +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK (0x00000008U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST (0x00000009U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK (0x0000000aU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST (0x0000000bU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0 (0x0000000cU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1 (0x0000002dU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK (0x0000000fU) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK (0x00000012U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK (0x00000013U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK (0x00000016U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK (0x00000017U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP (0x00000019U) +#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP (0x0000001aU) + + +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK (0x00000000U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST (0x00000001U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK (0x00000002U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST (0x00000003U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0 (0x00000004U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1 (0x00000025U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP (0x00000006U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK (0x00000007U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK (0x00000008U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK (0x00000009U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK (0x00000014U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK (0x00000015U) +#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP (0x00000018U) + + +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP (0x00000008U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC (0x00000007U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC (0x00000006U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC (0x00000005U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR (0x00000004U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS (0x00000003U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC (0x00000002U) +#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE (0x00000001U) + + +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define 
RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U) + + +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU) + + +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U) + + +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U) 
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU) +#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU) + + +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE (0x00000000U) +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS (0x00000001U) +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U) +#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA (0x00000003U) + + +#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS (0x00000000U) +#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS (0x00000001U) + + +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL (0x00000000U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE (0x00000001U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX (0x00000002U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK (0x00000004U) +#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT (0x00000008U) + + +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U) +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA (0x00000001U) +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA (0x00000002U) +#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE (0x00000003U) + + +#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U) +#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS (0x00000003U) + + +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST (0x00000000U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST (0x00000001U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST (0x00000002U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST (0x00000003U) +#define 
RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST (0x00000004U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST (0x00000005U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U) +#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U) + + +#define RGX_MH_TAG_ENCODING_MH_TAG_MMU (0x00000000U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU (0x00000001U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU (0x00000002U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU (0x00000003U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS (0x00000004U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0 (0x00000005U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1 (0x00000006U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2 (0x00000007U) +#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3 (0x00000008U) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0 (0x00000009U) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1 (0x0000000aU) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2 (0x0000000bU) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3 (0x0000000cU) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4 (0x0000000dU) +#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0 (0x0000000eU) +#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1 (0x0000000fU) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA (0x00000010U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB (0x00000011U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC (0x00000012U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD (0x00000013U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA (0x00000014U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB (0x00000015U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC (0x00000016U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD (0x00000017U) +#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW (0x00000018U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0 (0x00000019U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1 (0x0000001aU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0 (0x0000001bU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1 (0x0000001cU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2 (0x0000001dU) +#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3 (0x0000001eU) +#define RGX_MH_TAG_ENCODING_MH_TAG_USC (0x0000001fU) +#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS (0x00000020U) +#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS (0x00000021U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TPF (0x00000022U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS (0x00000023U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF (0x00000024U) +#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ (0x00000025U) +#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS (0x00000026U) +#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5 (0x00000027U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP (0x00000028U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC (0x00000029U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC (0x0000002aU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC (0x0000002bU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION (0x0000002cU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM (0x0000002dU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW (0x0000002eU) +#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC (0x0000002fU) +#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC (0x00000030U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC (0x00000031U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA (0x00000032U) +#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL (0x00000033U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0 (0x00000034U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1 (0x00000035U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE2 (0x00000036U) +#define RGX_MH_TAG_ENCODING_MH_TAG_PBE3 
(0x00000037U) #endif /* RGXMHDEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmmudefs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmmudefs_km.h index 65186643a20a..14a6a73c84bc 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmmudefs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/rogue/km/rgxmmudefs_km.h @@ -57,157 +57,157 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXMMUDEFS_KM_REVISION 0 -#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) -#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) -#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) -#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) -#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) -#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) -#define RGX_BIF_DM_ENCODING_META (0x00000007U) -#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) -#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) +#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) +#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) +#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) +#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) +#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) +#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) +#define RGX_BIF_DM_ENCODING_META (0x00000007U) +#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) +#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) -#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) -#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) -#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) -#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) -#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) -#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) +#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) +#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) +#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) +#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) +#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) -#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) +#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) -#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) +#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) -#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) +#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) -#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) +#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) -#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) +#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) -#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) +#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) -#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) -#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) -#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) -#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) -#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) -#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) -#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) +#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) +#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) +#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) +#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) +#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) +#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) +#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) -#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) -#define 
RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) -#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) -#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) -#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) -#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) -#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) -#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) -#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) -#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) -#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) -#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) -#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) -#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) -#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) -#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) -#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) -#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) -#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) -#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) -#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) -#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) -#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) -#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define 
RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) -#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) -#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U) -#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) -#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) -#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) -#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U) +#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) +#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) -#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) -#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK 
(IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) -#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) -#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) +#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) -#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) -#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) -#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) -#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) -#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) -#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) +#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) +#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) #endif /* RGXMMUDEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h index 2a24985009ef..3d7bc8345c05 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_27.V.254.2.h @@ -67,6 +67,7 @@ CONNECTION WITH THE 
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -97,6 +98,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (1U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -112,7 +114,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TESSELLATION #define RGX_FEATURE_TILE_SIZE_X (32U) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h index 9ca5565c9f89..7e9a1f7b3cd0 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.408.101.h @@ -69,6 +69,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -103,6 +104,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (1U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -122,7 +124,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.816.20.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.816.20.h index 5706d9d9072f..f09e158a5ab8 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.816.20.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_30.V.816.20.h @@ -69,6 +69,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -103,6 +104,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (1U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -122,7 +124,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (256U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.21.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.21.h index 34b553a21e25..b0faae645326 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.21.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.21.h @@ -70,6 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -105,6 +106,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (2U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -124,7 +126,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.23.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.23.h index b9c038958796..07fd015bc7a3 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.23.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.23.h @@ -70,6 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -105,6 +106,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (2U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -124,7 +126,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (4U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.34.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.34.h index d2ad281116be..18e0cf4ba89b 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.34.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.34.h @@ -70,6 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (1U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (1U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -104,6 +105,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (2U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -124,7 +126,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.35.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.35.h new file mode 100644 index 000000000000..d359272a63f1 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.1632.35.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 35.V.1632.35 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_35_V_1632_35_H +#define RGXCONFIG_KM_35_V_1632_35_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 35 +#define RGX_BNC_KM_N 1632 +#define RGX_BNC_KM_C 35 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (1U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (1U) +#define RGX_FEATURE_FBCDC (4U) +#define RGX_FEATURE_FBCDC_ALGORITHM (4U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (1U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (4U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (16U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (2U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (2U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (2U) +#define 
RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (2U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (4U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (4U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_REGION_PROTECTION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_WATCHDOG_TIMER +#define RGX_FEATURE_WORKGROUP_PROTECTION +#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_35_V_1632_35_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.101.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.101.h index 4c88835ec1fa..ab5fde00d4d5 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.101.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.101.h @@ -69,6 +69,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -103,6 +104,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (1U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -122,7 +124,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (1U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.23.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.23.h index 71994db2dabd..718e12c8ed69 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.23.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.23.h @@ -70,6 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (0U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -105,6 +106,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (2U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -124,7 +126,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (4U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.33.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.33.h index 783e00ce1ed8..df85d8147f21 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.33.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.33.h @@ -70,6 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (1U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (1U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -104,6 +105,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (2U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -124,7 +126,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION @@ -140,6 +148,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) #define RGX_FEATURE_WATCHDOG_TIMER #define RGX_FEATURE_WORKGROUP_PROTECTION +#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP #define RGX_FEATURE_ZLS_CHECKSUM #endif /* RGXCONFIG_KM_35_V_408_33_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.34.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.34.h index 7d7562693b47..8fb939b2a65b 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.34.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_35.V.408.34.h @@ -70,6 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_DUST_POWER_ISLAND_S7 #define RGX_FEATURE_ECC_RAMS (1U) #define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (1U) #define RGX_FEATURE_FBCDC (4U) #define RGX_FEATURE_FBCDC_ALGORITHM (4U) #define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) @@ -104,6 +105,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_PERFBUS #define RGX_FEATURE_PERF_COUNTER_BATCH #define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) #define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES #define RGX_FEATURE_POWER_ISLAND_VERSION (2U) #define RGX_FEATURE_RAY_TRACING_ARCH (0U) @@ -124,7 +126,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) #define RGX_FEATURE_SLC_VIVT #define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) #define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) #define RGX_FEATURE_TDM_PDS_CHECKSUM #define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS #define RGX_FEATURE_TESSELLATION @@ -140,6 +148,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) #define RGX_FEATURE_WATCHDOG_TIMER #define RGX_FEATURE_WORKGROUP_PROTECTION +#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP #define RGX_FEATURE_ZLS_CHECKSUM #endif /* RGXCONFIG_KM_35_V_408_34_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_38.V.2448.402.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_38.V.2448.402.h new file mode 100644 index 000000000000..4d4d7c7f9c1d --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_38.V.2448.402.h @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 38.V.2448.402 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_38_V_2448_402_H +#define RGXCONFIG_KM_38_V_2448_402_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 38 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 402 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (4U) +#define RGX_FEATURE_FBCDC_ALGORITHM (4U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (1U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (2U) +#define RGX_FEATURE_RAY_TRACING_ARCH (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. 
*/ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_38_V_2448_402_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1041.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1041.h new file mode 100644 index 000000000000..3732194dda31 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1041.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 70.V.2448.1041 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_70_V_2448_1041_H +#define RGXCONFIG_KM_70_V_2448_1041_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 70 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 1041 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (3U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (3U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define 
RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (3U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (1536U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_70_V_2448_1041_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1042.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1042.h new file mode 100644 index 000000000000..3c8dbf11a303 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1042.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 70.V.2448.1042 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_70_V_2448_1042_H +#define RGXCONFIG_KM_70_V_2448_1042_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 70 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 1042 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (3U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES 
+#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (3U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (3U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (1536U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_70_V_2448_1042_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1360.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1360.h new file mode 100644 index 000000000000..ec915beeaaf1 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.1360.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 70.V.2448.1360 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_70_V_2448_1360_H +#define RGXCONFIG_KM_70_V_2448_1360_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 70 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 1360 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (3U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (3U) +#define RGX_FEATURE_RAY_TRACING_ARCH (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. 
True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_70_V_2448_1360_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.418.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.418.h new file mode 100644 index 000000000000..045e17306351 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_70.V.2448.418.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 70.V.2448.418 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_70_V_2448_418_H +#define RGXCONFIG_KM_70_V_2448_418_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 70 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 418 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (2U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define 
RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (0U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (3U) +#define RGX_FEATURE_RAY_TRACING_ARCH (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (2U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_70_V_2448_418_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1210.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1210.h new file mode 100644 index 000000000000..bc75857064f1 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1210.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.2448.1210 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_2448_1210_H +#define RGXCONFIG_KM_71_V_2448_1210_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 1210 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (3U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define 
RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (3U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (2U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (1U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (4U) +#define RGX_FEATURE_RAY_TRACING_ARCH (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (2U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (1536U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_2448_1210_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1211.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1211.h new file mode 100644 index 000000000000..67b2fa73b0a4 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1211.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.2448.1211 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_2448_1211_H +#define RGXCONFIG_KM_71_V_2448_1211_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 1211 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (3U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (2U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (1U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (4U) +#define RGX_FEATURE_RAY_TRACING_ARCH (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (2U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. 
True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (1536U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_2448_1211_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1212.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1212.h new file mode 100644 index 000000000000..5d896d60fca8 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.2448.1212.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.2448.1212 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_2448_1212_H +#define RGXCONFIG_KM_71_V_2448_1212_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 2448 +#define RGX_BNC_KM_C 1212 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (6U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (3U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (2U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define 
RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (1U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (4U) +#define RGX_FEATURE_RAY_TRACING_ARCH (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (2U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (1536U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (1U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_2448_1212_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2080.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2080.h new file mode 100644 index 000000000000..9b6265e018d8 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2080.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.3672.2080 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_3672_2080_H +#define RGXCONFIG_KM_71_V_3672_2080_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 3672 +#define RGX_BNC_KM_C 2080 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define 
RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (9U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (36U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (1U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (4U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_3672_2080_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2081.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2081.h new file mode 100644 index 000000000000..28ca12ce156f --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2081.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.3672.2081 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_3672_2081_H +#define RGXCONFIG_KM_71_V_3672_2081_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 3672 +#define RGX_BNC_KM_C 2081 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (9U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (36U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (2U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (6U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. 
True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_3672_2081_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2082.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2082.h new file mode 100644 index 000000000000..f7c2d401daf1 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2082.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.3672.2082 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_3672_2082_H +#define RGXCONFIG_KM_71_V_3672_2082_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 3672 +#define RGX_BNC_KM_C 2082 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (9U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (36U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define 
RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (2U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (6U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_3672_2082_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2083.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2083.h new file mode 100644 index 000000000000..2fc12a05bcad --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2083.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.3672.2083 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_3672_2083_H +#define RGXCONFIG_KM_71_V_3672_2083_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 3672 +#define RGX_BNC_KM_C 2083 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define 
RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (9U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (36U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (2U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (6U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_3672_2083_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2084.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2084.h new file mode 100644 index 000000000000..7ecc97a74886 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/configs/rgxconfig_km_71.V.3672.2084.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 71.V.3672.2084 (kernel defines) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_71_V_3672_2084_H +#define RGXCONFIG_KM_71_V_3672_2084_H + +/***** Automatically generated file. 
Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 71 +#define RGX_BNC_KM_N 3672 +#define RGX_BNC_KM_C 2084 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (5U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FAULT_DECODE_VERSION (0U) +#define RGX_FEATURE_FBCDC (5U) +#define RGX_FEATURE_FBCDC_ALGORITHM (5U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_HOST_SECURITY_VERSION (4U) +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define RGX_FEATURE_MAX_TPU_PER_SPU (3U) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (9U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (36U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (3U) +#define RGX_FEATURE_NUM_MEMBUS (4U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (3U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION (2U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (6U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR +#define RGX_FEATURE_RT_RAC_PER_SPU +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (2U) +#define RGX_FEATURE_SCALABLE_VCE (3U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SLC_BANKS (8U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. 
True SLC */ + /* size must be sourced from */ + /* register. */ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SOC_TIMER +#define RGX_FEATURE_SPU0_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU1_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU2_RAC_PRESENT (1U) +#define RGX_FEATURE_SPU3_RAC_PRESENT (0U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TB_GPU_COUNT (2U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT (3U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE +#define RGX_FEATURE_USC_TIMER +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_71_V_3672_2084_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h index 4a5f799499a5..f05575d4ef65 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.408.101.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_30_3_408_101_H #define RGXCORE_KM_30_3_408_101_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5653222 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.816.20.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.816.20.h index f06a756c633c..7319e86fb7bf 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.816.20.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_30.3.816.20.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_30_3_816_20_H #define RGXCORE_KM_30_3_816_20_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5690709 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.21.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.21.h index e7008807979f..f2e489cfbab7 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.21.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.21.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_2_1632_21_H #define RGXCORE_KM_35_2_1632_21_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5744712 */ /****************************************************************************** @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define FIX_HW_BRN_71157 #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.23.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.23.h index a52d7dceebb0..9c19267ac08a 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.23.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.23.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_2_1632_23_H #define RGXCORE_KM_35_2_1632_23_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5928760 */ /****************************************************************************** @@ -59,6 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define FIX_HW_BRN_71157 #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.34.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.34.h index 509b1e97715b..a4c3741fb3e4 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.34.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.34.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_2_1632_34_H #define RGXCORE_KM_35_2_1632_34_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @6097610 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/secure_export.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.35.h similarity index 69% rename from drivers/gpu/drm/img/img-volcanic/services/server/include/secure_export.h rename to drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.35.h index 390c466ae92e..9a959fe90e65 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/secure_export.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.1632.35.h @@ -1,5 +1,5 @@ /*************************************************************************/ /*! -@File +@Title RGX Core BVNC 35.2.1632.35 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved @License Dual MIT/GPLv2 @@ -39,29 +39,35 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ -#include "img_types.h" -#include "pvrsrv_error.h" -#include "pmr.h" -#include "connection_server.h" +#ifndef RGXCORE_KM_35_2_1632_35_H +#define RGXCORE_KM_35_2_1632_35_H -typedef struct _SECURE_CLEANUP_DATA_ { - PMR *psPMR; -} SECURE_CLEANUP_DATA; +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ +/* CS: @6327132 */ -PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE * psDevNode, - PMR *psPMR, - IMG_SECURE_TYPE *phSecure, - PMR **ppsPMR, - CONNECTION_DATA **ppsSecureConnection); +/****************************************************************************** + * BVNC = 35.2.1632.35 + *****************************************************************************/ +#define RGX_BVNC_KM_B 35 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 1632 +#define RGX_BVNC_KM_C 35 -PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR); +/****************************************************************************** + * Errata + *****************************************************************************/ -PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_SECURE_TYPE hSecure, - PMR **ppsPMR, - IMG_DEVMEM_SIZE_T *puiSize, - IMG_DEVMEM_ALIGN_T *puiAlign); +#define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 -PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR); + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_35_2_1632_35_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.408.34.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.408.34.h index e47e7de60cef..ecd899c933b7 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.408.34.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.2.408.34.h @@ -42,8 +42,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef RGXCORE_KM_35_2_408_34_H #define RGXCORE_KM_35_2_408_34_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ -/* CS: @6206572 */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ +/* CS: @6252022 */ /****************************************************************************** * BVNC = 35.2.408.34 @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.1632.23.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.1632.23.h index 83c5726511e3..9726e2ca1fb4 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.1632.23.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.1632.23.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_3_1632_23_H #define RGXCORE_KM_35_3_1632_23_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5906056 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.101.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.101.h index f7e6bdc27601..074fa3d600f8 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.101.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.101.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_3_408_101_H #define RGXCORE_KM_35_3_408_101_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5653222 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.33.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.33.h new file mode 100644 index 000000000000..a17f409b5644 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.33.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 35.3.408.33 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_35_3_408_33_H +#define RGXCORE_KM_35_3_408_33_H + +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ +/* CS: @6249306 */ + +/****************************************************************************** + * BVNC = 35.3.408.33 + *****************************************************************************/ +#define RGX_BVNC_KM_B 35 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 408 +#define RGX_BVNC_KM_C 33 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_35_3_408_33_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.34.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.34.h new file mode 100644 index 000000000000..c44fe2243220 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.3.408.34.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 35.3.408.34 +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_35_3_408_34_H +#define RGXCORE_KM_35_3_408_34_H + +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ +/* CS: @6285885 */ + +/****************************************************************************** + * BVNC = 35.3.408.34 + *****************************************************************************/ +#define RGX_BVNC_KM_B 35 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 408 +#define RGX_BVNC_KM_C 34 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_35_3_408_34_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.4.1632.23.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.4.1632.23.h index 55cc336affd2..02899da3d8fa 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.4.1632.23.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.4.1632.23.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_4_1632_23_H #define RGXCORE_KM_35_4_1632_23_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @5952322 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.5.408.23.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.5.408.23.h index 2de44499c1ba..a5048e951f47 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.5.408.23.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_35.5.408.23.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXCORE_KM_35_5_408_23_H #define RGXCORE_KM_35_5_408_23_H -/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ /* CS: @6080671 */ /****************************************************************************** @@ -58,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*****************************************************************************/ #define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72144 diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.4.2448.402.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.4.2448.402.h new file mode 100644 index 000000000000..7d9e4e4ac323 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.4.2448.402.h @@ -0,0 +1,74 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 38.4.2448.402 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_38_4_2448_402_H +#define RGXCORE_KM_38_4_2448_402_H + +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ +/* CS: @5976185 */ + +/****************************************************************************** + * BVNC = 38.4.2448.402 + *****************************************************************************/ +#define RGX_BVNC_KM_B 38 +#define RGX_BVNC_KM_V 4 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 402 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72143 +#define FIX_HW_BRN_72144 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_38_4_2448_402_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.6.2448.402.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.6.2448.402.h new file mode 100644 index 000000000000..7bf85accef8a --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_38.6.2448.402.h @@ -0,0 +1,74 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 38.6.2448.402 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_38_6_2448_402_H +#define RGXCORE_KM_38_6_2448_402_H + +/* Automatically generated file (31/03/2023 09:11:38): Do not edit manually */ +/* CS: @6068975 */ + +/****************************************************************************** + * BVNC = 38.6.2448.402 + *****************************************************************************/ +#define RGX_BVNC_KM_B 38 +#define RGX_BVNC_KM_V 6 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 402 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_71960 +#define FIX_HW_BRN_72143 +#define FIX_HW_BRN_72144 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_38_6_2448_402_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1041.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1041.h new file mode 100644 index 000000000000..c44b6d4d1e5f --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1041.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 70.2.2448.1041 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_70_2_2448_1041_H +#define RGXCORE_KM_70_2_2448_1041_H + +/* Automatically generated file (06/05/2022 11:58:47): Do not edit manually */ +/* CS: @6243633 */ + +/****************************************************************************** + * BVNC = 70.2.2448.1041 + *****************************************************************************/ +#define RGX_BVNC_KM_B 70 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1041 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_70_2_2448_1041_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1042.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1042.h new file mode 100644 index 000000000000..de04ac9fde5e --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1042.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 70.2.2448.1042 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_70_2_2448_1042_H +#define RGXCORE_KM_70_2_2448_1042_H + +/* Automatically generated file (05/09/2023 09:10:40): Do not edit manually */ +/* CS: @6236243 */ + +/****************************************************************************** + * BVNC = 70.2.2448.1042 + *****************************************************************************/ +#define RGX_BVNC_KM_B 70 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1042 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_70_2_2448_1042_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1360.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1360.h new file mode 100644 index 000000000000..95c2bbad0095 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.1360.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 70.2.2448.1360 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_70_2_2448_1360_H +#define RGXCORE_KM_70_2_2448_1360_H + +/* Automatically generated file (08/06/2022 13:43:04): Do not edit manually */ +/* CS: @6208659 */ + +/****************************************************************************** + * BVNC = 70.2.2448.1360 + *****************************************************************************/ +#define RGX_BVNC_KM_B 70 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1360 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_70_2_2448_1360_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.418.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.418.h new file mode 100644 index 000000000000..80335f71e1aa --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.2.2448.418.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 70.2.2448.418 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_70_2_2448_418_H +#define RGXCORE_KM_70_2_2448_418_H + +/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */ +/* CS: @6208659 */ + +/****************************************************************************** + * BVNC = 70.2.2448.418 + *****************************************************************************/ +#define RGX_BVNC_KM_B 70 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 418 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_70_2_2448_418_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1042.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1042.h new file mode 100644 index 000000000000..490f350681f4 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1042.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 70.3.2448.1042 +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_70_3_2448_1042_H +#define RGXCORE_KM_70_3_2448_1042_H + +/* Automatically generated file (14/09/2023 09:10:35): Do not edit manually */ +/* CS: @6458228 */ + +/****************************************************************************** + * BVNC = 70.3.2448.1042 + *****************************************************************************/ +#define RGX_BVNC_KM_B 70 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1042 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_70_3_2448_1042_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1360.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1360.h new file mode 100644 index 000000000000..c07d2e70b8ac --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_70.3.2448.1360.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 70.3.2448.1360 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_70_3_2448_1360_H +#define RGXCORE_KM_70_3_2448_1360_H + +/* Automatically generated file (28/06/2022 10:24:53): Do not edit manually */ +/* CS: @6266740 */ + +/****************************************************************************** + * BVNC = 70.3.2448.1360 + *****************************************************************************/ +#define RGX_BVNC_KM_B 70 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1360 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_70_3_2448_1360_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1210.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1210.h new file mode 100644 index 000000000000..e70690a29eb8 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1210.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.2448.1210 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_2448_1210_H +#define RGXCORE_KM_71_2_2448_1210_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6317660 */ + +/****************************************************************************** + * BVNC = 71.2.2448.1210 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1210 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_2448_1210_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1211.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1211.h new file mode 100644 index 000000000000..4bd717024ef8 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1211.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.2448.1211 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_2448_1211_H +#define RGXCORE_KM_71_2_2448_1211_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6347253 */ + +/****************************************************************************** + * BVNC = 71.2.2448.1211 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1211 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_2448_1211_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1212.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1212.h new file mode 100644 index 000000000000..4bf4a7bb6fda --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.2448.1212.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.2448.1212 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_2448_1212_H +#define RGXCORE_KM_71_2_2448_1212_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6450859 */ + +/****************************************************************************** + * BVNC = 71.2.2448.1212 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 2448 +#define RGX_BVNC_KM_C 1212 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_2448_1212_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2080.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2080.h new file mode 100644 index 000000000000..1a17786ee245 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2080.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.3672.2080 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_3672_2080_H +#define RGXCORE_KM_71_2_3672_2080_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6375375 */ + +/****************************************************************************** + * BVNC = 71.2.3672.2080 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 3672 +#define RGX_BVNC_KM_C 2080 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_3672_2080_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2081.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2081.h new file mode 100644 index 000000000000..3b0113c6a5c5 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2081.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.3672.2081 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_3672_2081_H +#define RGXCORE_KM_71_2_3672_2081_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6422248 */ + +/****************************************************************************** + * BVNC = 71.2.3672.2081 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 3672 +#define RGX_BVNC_KM_C 2081 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_3672_2081_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2082.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2082.h new file mode 100644 index 000000000000..651bad6b7c97 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2082.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.3672.2082 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_3672_2082_H +#define RGXCORE_KM_71_2_3672_2082_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6449305 */ + +/****************************************************************************** + * BVNC = 71.2.3672.2082 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 3672 +#define RGX_BVNC_KM_C 2082 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_3672_2082_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2083.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2083.h new file mode 100644 index 000000000000..e98d838c03f9 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.2.3672.2083.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.2.3672.2083 +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_2_3672_2083_H +#define RGXCORE_KM_71_2_3672_2083_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6464565 */ + +/****************************************************************************** + * BVNC = 71.2.3672.2083 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 2 +#define RGX_BVNC_KM_N 3672 +#define RGX_BVNC_KM_C 2083 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_2_3672_2083_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2083.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2083.h new file mode 100644 index 000000000000..bdc923c78d0e --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2083.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! 
+@Title RGX Core BVNC 71.3.3672.2083 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_3_3672_2083_H +#define RGXCORE_KM_71_3_3672_2083_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6473124 */ + +/****************************************************************************** + * BVNC = 71.3.3672.2083 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 3672 +#define RGX_BVNC_KM_C 2083 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_3_3672_2083_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2084.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2084.h new file mode 100644 index 000000000000..f287f89ce147 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/cores/rgxcore_km_71.3.3672.2084.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 71.3.3672.2084 +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_71_3_3672_2084_H +#define RGXCORE_KM_71_3_3672_2084_H + +/* Automatically generated file (30/11/2023 09:10:38): Do not edit manually */ +/* CS: @6495448 */ + +/****************************************************************************** + * BVNC = 71.3.3672.2084 + *****************************************************************************/ +#define RGX_BVNC_KM_B 71 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 3672 +#define RGX_BVNC_KM_C 2084 + +/****************************************************************************** + * Errata + *****************************************************************************/ + +#define FIX_HW_BRN_74812 + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ +#define HW_ERN_65104 +#define HW_ERN_69700 + + + +#endif /* RGXCORE_KM_71_3_3672_2084_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_defs_km.h index b94edec009c8..769165e76a2e 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_defs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_defs_km.h @@ -63,179 +63,200 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* Mask and bit-position macros for features without values *****************************************************************************/ -#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_POS (0U) -#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_POS (0U) +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) -#define RGX_FEATURE_AXI_ACE_POS (1U) -#define RGX_FEATURE_AXI_ACE_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) +#define RGX_FEATURE_AXI_ACE_POS (1U) +#define RGX_FEATURE_AXI_ACE_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) -#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_POS (2U) -#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_POS (2U) +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) -#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_POS (3U) -#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_POS (3U) +#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) -#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_POS (4U) -#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_POS (4U) +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) -#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_POS (5U) -#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_POS (5U) +#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) -#define RGX_FEATURE_CLUSTER_GROUPING_POS (6U) -#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) +#define RGX_FEATURE_CLUSTER_GROUPING_POS (6U) +#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) -#define RGX_FEATURE_COMPUTE_POS (7U) -#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) +#define RGX_FEATURE_COMPUTE_POS (7U) +#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) -#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (8U) -#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (8U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) -#define RGX_FEATURE_COMPUTE_OVERLAP_POS (9U) -#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) +#define RGX_FEATURE_COMPUTE_ONLY_POS (9U) +#define RGX_FEATURE_COMPUTE_ONLY_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) -#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (10U) -#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) +#define RGX_FEATURE_COMPUTE_OVERLAP_POS (10U) +#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) -#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS_POS (11U) -#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (11U) +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) -#define RGX_FEATURE_COREID_PER_OS_POS (12U) -#define 
RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS_POS (12U) +#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) -#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (13U) -#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) +#define RGX_FEATURE_COREID_PER_OS_POS (13U) +#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) -#define RGX_FEATURE_FASTRENDER_DM_POS (14U) -#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) +#define RGX_FEATURE_CR_PARITY_POS (14U) +#define RGX_FEATURE_CR_PARITY_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) -#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS_POS (15U) -#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE_POS (15U) +#define RGX_FEATURE_DIVANO_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) -#define RGX_FEATURE_GEOMETRY_BIF_ARBITER_POS (16U) -#define RGX_FEATURE_GEOMETRY_BIF_ARBITER_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) +#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (16U) +#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) -#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS_POS (17U) -#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) +#define RGX_FEATURE_ERYX_TOP_INFRASTRUCTURE_POS (17U) +#define RGX_FEATURE_ERYX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) -#define RGX_FEATURE_GPU_CPU_COHERENCY_POS (18U) -#define RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) +#define RGX_FEATURE_FASTRENDER_DM_POS (18U) +#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) -#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (19U) -#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS_POS (19U) +#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) -#define RGX_FEATURE_GPU_VIRTUALISATION_POS (20U) -#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER_POS (20U) +#define RGX_FEATURE_GEOMETRY_BIF_ARBITER_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) -#define RGX_FEATURE_GS_RTA_SUPPORT_POS (21U) -#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS_POS (21U) +#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) -#define RGX_FEATURE_HYPERVISOR_MMU_POS (22U) -#define RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (22U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) -#define RGX_FEATURE_META_DMA_POS (23U) -#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) +#define RGX_FEATURE_GPU_VIRTUALISATION_POS (23U) +#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) -#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_POS (24U) -#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) +#define RGX_FEATURE_GS_RTA_SUPPORT_POS (24U) +#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) -#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (25U) 
-#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) +#define RGX_FEATURE_IDLE_CYCLE_STEALING_POS (25U) +#define RGX_FEATURE_IDLE_CYCLE_STEALING_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) -#define RGX_FEATURE_PBVNC_COREID_REG_POS (26U) -#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) +#define RGX_FEATURE_META_DMA_POS (26U) +#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) -#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE_POS (27U) -#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_POS (27U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) -#define RGX_FEATURE_PDS_TEMPSIZE8_POS (28U) -#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) +#define RGX_FEATURE_MH_PARITY_POS (28U) +#define RGX_FEATURE_MH_PARITY_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) -#define RGX_FEATURE_PERFBUS_POS (29U) -#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) +#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (29U) +#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) -#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (30U) -#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) +#define RGX_FEATURE_PBVNC_COREID_REG_POS (30U) +#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) -#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_POS (31U) -#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE_POS (31U) +#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) -#define RGX_FEATURE_PM_MMUSTACK_POS (32U) -#define RGX_FEATURE_PM_MMUSTACK_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) +#define RGX_FEATURE_PDS_TEMPSIZE8_POS (32U) +#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) -#define RGX_FEATURE_PM_MMU_VFP_POS (33U) -#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) +#define RGX_FEATURE_PERFBUS_POS (33U) +#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) -#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (34U) -#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) +#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (34U) +#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) -#define RGX_FEATURE_RT_RAC_PER_SPU_POS (35U) -#define RGX_FEATURE_RT_RAC_PER_SPU_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_POS (35U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) -#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (36U) -#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) +#define RGX_FEATURE_RISCV_DUAL_LOCKSTEP_POS (36U) +#define RGX_FEATURE_RISCV_DUAL_LOCKSTEP_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) -#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (37U) -#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (37U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) -#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (38U) -#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK 
(IMG_UINT64_C(0x0000004000000000)) +#define RGX_FEATURE_RT_RAC_PER_SPU_POS (38U) +#define RGX_FEATURE_RT_RAC_PER_SPU_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) -#define RGX_FEATURE_SIGNAL_SNOOPING_POS (39U) -#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) +#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (39U) +#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) -#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_POS (40U) -#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (40U) +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) -#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_POS (41U) -#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) +#define RGX_FEATURE_SAFETY_IRQ_POS (41U) +#define RGX_FEATURE_SAFETY_IRQ_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) -#define RGX_FEATURE_SLC_VIVT_POS (42U) -#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) +#define RGX_FEATURE_SAFETY_SELF_TEST_POS (42U) +#define RGX_FEATURE_SAFETY_SELF_TEST_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) -#define RGX_FEATURE_SOC_TIMER_POS (43U) -#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) +#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (43U) +#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) -#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (44U) -#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) +#define RGX_FEATURE_SIGNAL_SNOOPING_POS (44U) +#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) -#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (45U) -#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000200000000000)) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_POS (45U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_BIT_MASK (IMG_UINT64_C(0x0000200000000000)) -#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS_POS (46U) -#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000400000000000)) +#define RGX_FEATURE_SLC_VIVT_POS (46U) +#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000400000000000)) -#define RGX_FEATURE_TESSELLATION_POS (47U) -#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000800000000000)) +#define RGX_FEATURE_SOC_TIMER_POS (47U) +#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000800000000000)) -#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (48U) -#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0001000000000000)) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING_POS (48U) +#define RGX_FEATURE_SPU_ARCH_CLOCK_GATING_BIT_MASK (IMG_UINT64_C(0x0001000000000000)) -#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (49U) -#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0002000000000000)) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (49U) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0002000000000000)) -#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (50U) -#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0004000000000000)) +#define RGX_FEATURE_SYS_PARITY_POS (50U) +#define RGX_FEATURE_SYS_PARITY_BIT_MASK (IMG_UINT64_C(0x0004000000000000)) -#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE_POS (51U) -#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE_BIT_MASK (IMG_UINT64_C(0x0008000000000000)) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS 
(51U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0008000000000000)) -#define RGX_FEATURE_USC_TIMER_POS (52U) -#define RGX_FEATURE_USC_TIMER_BIT_MASK (IMG_UINT64_C(0x0010000000000000)) +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS_POS (52U) +#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0010000000000000)) -#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (53U) -#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0020000000000000)) +#define RGX_FEATURE_TESSELLATION_POS (53U) +#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0020000000000000)) -#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (54U) -#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0040000000000000)) +#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (54U) +#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0040000000000000)) -#define RGX_FEATURE_WATCHDOG_TIMER_POS (55U) -#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0080000000000000)) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (55U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0080000000000000)) -#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (56U) -#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0100000000000000)) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (56U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0100000000000000)) -#define RGX_FEATURE_ZLS_CHECKSUM_POS (57U) -#define RGX_FEATURE_ZLS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0200000000000000)) +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE_POS (57U) +#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE_BIT_MASK (IMG_UINT64_C(0x0200000000000000)) + +#define RGX_FEATURE_USC_TIMER_POS (58U) +#define RGX_FEATURE_USC_TIMER_BIT_MASK (IMG_UINT64_C(0x0400000000000000)) + +#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (59U) +#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0800000000000000)) + +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (60U) +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x1000000000000000)) + +#define RGX_FEATURE_WATCHDOG_TIMER_POS (61U) +#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x2000000000000000)) + +#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (62U) +#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x4000000000000000)) + +#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP_POS (63U) +#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP_BIT_MASK (IMG_UINT64_C(0x8000000000000000)) + +#define RGX_FEATURE_ZLS_CHECKSUM_POS (64U) +#define RGX_FEATURE_ZLS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) /****************************************************************************** @@ -243,41 +264,49 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* for handling the corresponding values *****************************************************************************/ -#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2) -#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX (3) -#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (3) -#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (2) -#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (2) -#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (2) -#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (3) -#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (3) -#define RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX (2) -#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX (3) -#define RGX_FEATURE_META_MAX_VALUE_IDX (2) -#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (2) -#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (3) -#define RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX (3) -#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (4) -#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (4) -#define RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX (3) -#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (3) -#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX (3) -#define RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX (3) -#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (2) -#define RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX (3) -#define RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX (2) -#define RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX (3) -#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (3) -#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (3) -#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4) -#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2) -#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (5) -#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (2) -#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (2) -#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2) +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_FAULT_DECODE_VERSION_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX (5U) +#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_META_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (9U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9U) +#define RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (5U) +#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX (5U) +#define RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX (4U) +#define 
RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX (7U) +#define RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (5U) +#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (6U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (7U) +#define RGX_FEATURE_SPU0_RAC_PRESENT_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_SPU1_RAC_PRESENT_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_SPU2_RAC_PRESENT_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_SPU3_RAC_PRESENT_MAX_VALUE_IDX (3U) +#define RGX_FEATURE_TB_GPU_COUNT_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (4U) +#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (2U) +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2U) /****************************************************************************** * Features with values indexes @@ -287,6 +316,7 @@ typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX, RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_IDX, RGX_FEATURE_ECC_RAMS_IDX, + RGX_FEATURE_FAULT_DECODE_VERSION_IDX, RGX_FEATURE_FBCDC_IDX, RGX_FEATURE_FBCDC_ALGORITHM_IDX, RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, @@ -308,6 +338,7 @@ typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { RGX_FEATURE_NUM_SPU_IDX, RGX_FEATURE_PBE_PER_SPU_IDX, RGX_FEATURE_PHYS_BUS_WIDTH_IDX, + RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_IDX, RGX_FEATURE_POWER_ISLAND_VERSION_IDX, RGX_FEATURE_RAY_TRACING_ARCH_IDX, RGX_FEATURE_RENDER_TARGET_XY_MAX_IDX, @@ -316,6 +347,12 @@ typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { RGX_FEATURE_SLC_BANKS_IDX, RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, + RGX_FEATURE_SPU0_RAC_PRESENT_IDX, + RGX_FEATURE_SPU1_RAC_PRESENT_IDX, + RGX_FEATURE_SPU2_RAC_PRESENT_IDX, + RGX_FEATURE_SPU3_RAC_PRESENT_IDX, + RGX_FEATURE_TB_GPU_COUNT_IDX, + RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_IDX, RGX_FEATURE_TILE_SIZE_X_IDX, RGX_FEATURE_TILE_SIZE_Y_IDX, RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, @@ -327,26 +364,29 @@ typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { * Mask and bit-position macros for ERNs and BRNs *****************************************************************************/ -#define HW_ERN_65104_POS (0U) -#define HW_ERN_65104_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) +#define HW_ERN_65104_POS (0U) +#define HW_ERN_65104_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define HW_ERN_69700_POS (1U) +#define HW_ERN_69700_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) -#define FIX_HW_BRN_66927_POS (1U) -#define FIX_HW_BRN_66927_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) +#define FIX_HW_BRN_71157_POS (2U) +#define FIX_HW_BRN_71157_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) -#define HW_ERN_69700_POS (2U) -#define HW_ERN_69700_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) +#define FIX_HW_BRN_71422_POS (3U) +#define FIX_HW_BRN_71422_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) -#define FIX_HW_BRN_71157_POS (3U) -#define FIX_HW_BRN_71157_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) +#define FIX_HW_BRN_71960_POS (4U) +#define FIX_HW_BRN_71960_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) -#define FIX_HW_BRN_71422_POS (4U) -#define FIX_HW_BRN_71422_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) +#define FIX_HW_BRN_72143_POS (5U) +#define FIX_HW_BRN_72143_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) -#define FIX_HW_BRN_71960_POS 
(5U) -#define FIX_HW_BRN_71960_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) +#define FIX_HW_BRN_72144_POS (6U) +#define FIX_HW_BRN_72144_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) -#define FIX_HW_BRN_72143_POS (6U) -#define FIX_HW_BRN_72143_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) +#define FIX_HW_BRN_74812_POS (7U) +#define FIX_HW_BRN_74812_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) /* Macro used for padding the unavailable values for features with values */ #define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_table_km.h index 802bf800788f..38ca0805c203 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_table_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_bvnc_table_km.h @@ -66,15 +66,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * for handling the corresponding values *****************************************************************************/ -static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, }; +static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 5, }; static const IMG_UINT16 aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values[RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; -static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, }; +static const IMG_UINT16 aui16_RGX_FEATURE_FAULT_DECODE_VERSION_values[RGX_FEATURE_FAULT_DECODE_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, }; -static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, }; +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 5, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 5, }; static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 6, }; @@ -82,11 +84,11 @@ static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values[RGX static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, }; -static const IMG_UINT16 aui16_RGX_FEATURE_HOST_SECURITY_VERSION_values[RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; +static const IMG_UINT16 aui16_RGX_FEATURE_HOST_SECURITY_VERSION_values[RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; -static const IMG_UINT16 
aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values[RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values[RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, }; static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, MTP219, }; @@ -98,37 +100,51 @@ static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEAT static const IMG_UINT16 aui16_RGX_FEATURE_MMU_VERSION_values[RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; -static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 8, 9, 12, }; -static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 8, 16, }; +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 8, 12, 16, 24, 32, 36, 48, }; -static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values[RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values[RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, }; -static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; -static const IMG_UINT16 aui16_RGX_FEATURE_NUM_SPU_values[RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_SPU_values[RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; static const IMG_UINT16 aui16_RGX_FEATURE_PBE_PER_SPU_values[RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; -static const IMG_UINT16 aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values[RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_values[RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values[RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 5, 6, }; -static const IMG_UINT16 aui16_RGX_FEATURE_RAY_TRACING_ARCH_values[RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; +static const IMG_UINT16 
aui16_RGX_FEATURE_RAY_TRACING_ARCH_values[RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 3, }; static const IMG_UINT16 aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values[RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16384, 32768, }; -static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; -static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; -static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, }; static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1024, }; -static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 128, 256, 512, 2048, }; +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 128, 256, 512, 1024, 1536, 2048, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_SPU0_RAC_PRESENT_values[RGX_FEATURE_SPU0_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_SPU1_RAC_PRESENT_values[RGX_FEATURE_SPU1_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_SPU2_RAC_PRESENT_values[RGX_FEATURE_SPU2_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_SPU3_RAC_PRESENT_values[RGX_FEATURE_SPU3_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_TB_GPU_COUNT_values[RGX_FEATURE_TB_GPU_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; + +static const IMG_UINT16 aui16_RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, }; static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, }; @@ -143,10 +159,11 @@ static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h *****************************************************************************/ -static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { +static const void * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values, aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values, aui16_RGX_FEATURE_ECC_RAMS_values, 
+ aui16_RGX_FEATURE_FAULT_DECODE_VERSION_values, aui16_RGX_FEATURE_FBCDC_values, aui16_RGX_FEATURE_FBCDC_ALGORITHM_values, aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, @@ -168,6 +185,7 @@ static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX aui16_RGX_FEATURE_NUM_SPU_values, aui16_RGX_FEATURE_PBE_PER_SPU_values, aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, + aui16_RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_values, aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values, aui16_RGX_FEATURE_RAY_TRACING_ARCH_values, aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values, @@ -176,6 +194,12 @@ static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX aui16_RGX_FEATURE_SLC_BANKS_values, aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, + aui16_RGX_FEATURE_SPU0_RAC_PRESENT_values, + aui16_RGX_FEATURE_SPU1_RAC_PRESENT_values, + aui16_RGX_FEATURE_SPU2_RAC_PRESENT_values, + aui16_RGX_FEATURE_SPU3_RAC_PRESENT_values, + aui16_RGX_FEATURE_TB_GPU_COUNT_values, + aui16_RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_values, aui16_RGX_FEATURE_TILE_SIZE_X_values, aui16_RGX_FEATURE_TILE_SIZE_Y_values, aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, @@ -192,6 +216,7 @@ static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX, RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX, + RGX_FEATURE_FAULT_DECODE_VERSION_MAX_VALUE_IDX, RGX_FEATURE_FBCDC_MAX_VALUE_IDX, RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX, RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, @@ -213,6 +238,7 @@ static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX, RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX, RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, + RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX, RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX, RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX, RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX, @@ -221,52 +247,69 @@ static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, + RGX_FEATURE_SPU0_RAC_PRESENT_MAX_VALUE_IDX, + RGX_FEATURE_SPU1_RAC_PRESENT_MAX_VALUE_IDX, + RGX_FEATURE_SPU2_RAC_PRESENT_MAX_VALUE_IDX, + RGX_FEATURE_SPU3_RAC_PRESENT_MAX_VALUE_IDX, + RGX_FEATURE_TB_GPU_COUNT_MAX_VALUE_IDX, + RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX, RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX, RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, }; +#define RGX_FEATURE_VALUE_TYPE_UINT16 (0x0000U) +#define RGX_FEATURE_VALUE_TYPE_UINT32 (0x8000U) +#define RGX_FEATURE_TYPE_BIT_SHIFT 14 /****************************************************************************** * Bit-positions for features with values *****************************************************************************/ static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { - (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ - (2U), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_POS */ - (4U), /* RGX_FEATURE_ECC_RAMS_POS */ - (6U), /* RGX_FEATURE_FBCDC_POS */ - (8U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ - (10U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ - (12U), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */ - (14U), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */ - (16U), /* RGX_FEATURE_HOST_SECURITY_VERSION_POS */ - (18U), /* RGX_FEATURE_LAYOUT_MARS_POS */ - (20U), /* 
RGX_FEATURE_MAX_TPU_PER_SPU_POS */ - (22U), /* RGX_FEATURE_META_POS */ - (24U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */ - (26U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */ - (28U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ - (30U), /* RGX_FEATURE_MMU_VERSION_POS */ - (32U), /* RGX_FEATURE_NUM_CLUSTERS_POS */ - (35U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ - (38U), /* RGX_FEATURE_NUM_ISP_PER_SPU_POS */ - (40U), /* RGX_FEATURE_NUM_MEMBUS_POS */ - (42U), /* RGX_FEATURE_NUM_OSIDS_POS */ - (44U), /* RGX_FEATURE_NUM_SPU_POS */ - (46U), /* RGX_FEATURE_PBE_PER_SPU_POS */ - (48U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ - (50U), /* RGX_FEATURE_POWER_ISLAND_VERSION_POS */ - (52U), /* RGX_FEATURE_RAY_TRACING_ARCH_POS */ - (54U), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_POS */ - (56U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ - (58U), /* RGX_FEATURE_SCALABLE_VCE_POS */ - (60U), /* RGX_FEATURE_SLC_BANKS_POS */ - (64U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ - (66U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ - (69U), /* RGX_FEATURE_TILE_SIZE_X_POS */ - (71U), /* RGX_FEATURE_TILE_SIZE_Y_POS */ - (73U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ + (0U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ + (2U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_POS */ + (4U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_ECC_RAMS_POS */ + (6U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FAULT_DECODE_VERSION_POS */ + (9U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_POS */ + (11U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ + (13U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ + (15U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */ + (17U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */ + (19U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_HOST_SECURITY_VERSION_POS */ + (22U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_LAYOUT_MARS_POS */ + (24U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_MAX_TPU_PER_SPU_POS */ + (27U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_POS */ + (29U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_BANKS_POS */ + (31U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_SIZE_POS */ + (33U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ + (35U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_MMU_VERSION_POS */ + (37U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_CLUSTERS_POS */ + (41U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ + (45U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_PER_SPU_POS */ + (48U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_MEMBUS_POS */ + (51U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_OSIDS_POS */ + (53U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_SPU_POS */ + (56U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PBE_PER_SPU_POS */ + (58U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ + (60U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_POS */ + (64U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_POWER_ISLAND_VERSION_POS */ + (67U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_RAY_TRACING_ARCH_POS */ + (69U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_RENDER_TARGET_XY_MAX_POS */ + (71U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ + 
(74U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_VCE_POS */ + (77U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_BANKS_POS */ + (80U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ + (82U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ + (85U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU0_RAC_PRESENT_POS */ + (87U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU1_RAC_PRESENT_POS */ + (89U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU2_RAC_PRESENT_POS */ + (91U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU3_RAC_PRESENT_POS */ + (93U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TB_GPU_COUNT_POS */ + (96U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_POS */ + (99U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_X_POS */ + (101U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_Y_POS */ + (103U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ }; @@ -278,38 +321,46 @@ static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */ (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_BIT_MASK */ (IMG_UINT64_C(0x0000000000000030)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */ - (IMG_UINT64_C(0x00000000000000C0)), /* RGX_FEATURE_FBCDC_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000300)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000C00)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ - (IMG_UINT64_C(0x0000000000003000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */ - (IMG_UINT64_C(0x000000000000C000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000030000)), /* RGX_FEATURE_HOST_SECURITY_VERSION_BIT_MASK */ - (IMG_UINT64_C(0x00000000000C0000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000300000)), /* RGX_FEATURE_MAX_TPU_PER_SPU_BIT_MASK */ - (IMG_UINT64_C(0x0000000000C00000)), /* RGX_FEATURE_META_BIT_MASK */ - (IMG_UINT64_C(0x0000000003000000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ - (IMG_UINT64_C(0x000000000C000000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ - (IMG_UINT64_C(0x0000000030000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ - (IMG_UINT64_C(0x00000000C0000000)), /* RGX_FEATURE_MMU_VERSION_BIT_MASK */ - (IMG_UINT64_C(0x0000000700000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ - (IMG_UINT64_C(0x0000003800000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ - (IMG_UINT64_C(0x000000C000000000)), /* RGX_FEATURE_NUM_ISP_PER_SPU_BIT_MASK */ - (IMG_UINT64_C(0x0000030000000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */ - (IMG_UINT64_C(0x00000C0000000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ - (IMG_UINT64_C(0x0000300000000000)), /* RGX_FEATURE_NUM_SPU_BIT_MASK */ - (IMG_UINT64_C(0x0000C00000000000)), /* RGX_FEATURE_PBE_PER_SPU_BIT_MASK */ - (IMG_UINT64_C(0x0003000000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ - (IMG_UINT64_C(0x000C000000000000)), /* RGX_FEATURE_POWER_ISLAND_VERSION_BIT_MASK */ - (IMG_UINT64_C(0x0030000000000000)), /* RGX_FEATURE_RAY_TRACING_ARCH_BIT_MASK */ - (IMG_UINT64_C(0x00C0000000000000)), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_BIT_MASK */ - (IMG_UINT64_C(0x0300000000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ - (IMG_UINT64_C(0x0C00000000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ - (IMG_UINT64_C(0x7000000000000000)), /* 
RGX_FEATURE_SLC_BANKS_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ - (IMG_UINT64_C(0x000000000000001C)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000060)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000180)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ - (IMG_UINT64_C(0x0000000000000600)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x00000000000001C0)), /* RGX_FEATURE_FAULT_DECODE_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000600)), /* RGX_FEATURE_FBCDC_BIT_MASK */ + (IMG_UINT64_C(0x0000000000001800)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ + (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ + (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000380000)), /* RGX_FEATURE_HOST_SECURITY_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000000000C00000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ + (IMG_UINT64_C(0x0000000007000000)), /* RGX_FEATURE_MAX_TPU_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000000018000000)), /* RGX_FEATURE_META_BIT_MASK */ + (IMG_UINT64_C(0x0000000060000000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0000000180000000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ + (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_MMU_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x000001E000000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ + (IMG_UINT64_C(0x00001E0000000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ + (IMG_UINT64_C(0x0000E00000000000)), /* RGX_FEATURE_NUM_ISP_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0007000000000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */ + (IMG_UINT64_C(0x0018000000000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ + (IMG_UINT64_C(0x00E0000000000000)), /* RGX_FEATURE_NUM_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0300000000000000)), /* RGX_FEATURE_PBE_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0C00000000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ + (IMG_UINT64_C(0x7000000000000000)), /* RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000007)), /* RGX_FEATURE_POWER_ISLAND_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000018)), /* RGX_FEATURE_RAY_TRACING_ARCH_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000060)), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000380)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ + (IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ + (IMG_UINT64_C(0x000000000000E000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000030000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x00000000001C0000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ + (IMG_UINT64_C(0x0000000000600000)), /* RGX_FEATURE_SPU0_RAC_PRESENT_BIT_MASK */ + (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_SPU1_RAC_PRESENT_BIT_MASK */ + (IMG_UINT64_C(0x0000000006000000)), /* RGX_FEATURE_SPU2_RAC_PRESENT_BIT_MASK */ + (IMG_UINT64_C(0x0000000018000000)), /* RGX_FEATURE_SPU3_RAC_PRESENT_BIT_MASK */ + (IMG_UINT64_C(0x00000000E0000000)), /* RGX_FEATURE_TB_GPU_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x0000000700000000)), /* RGX_FEATURE_TDM_CONTROL_STREAM_FORMAT_BIT_MASK */ + (IMG_UINT64_C(0x0000001800000000)), /* 
RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ + (IMG_UINT64_C(0x0000006000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ + (IMG_UINT64_C(0x0000018000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ }; @@ -320,17 +371,30 @@ static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { static const IMG_UINT64 gaFeatures[][4]= { - { IMG_UINT64_C(0x001b000000fe0002), IMG_UINT64_C(0x0276bcf0f6b177ca), IMG_UINT64_C(0x1555958a65555555), IMG_UINT64_C(0x00000000000002a5) }, /* 27.0.254.2 */ - { IMG_UINT64_C(0x001e000001980065), IMG_UINT64_C(0x027efef0ffb3ffcb), IMG_UINT64_C(0x15555549a5555559), IMG_UINT64_C(0x00000000000002a5) }, /* 30.0.408.101 */ - { IMG_UINT64_C(0x001e000003300014), IMG_UINT64_C(0x027efef0ffb3ffcb), IMG_UINT64_C(0x25555592a5655559), IMG_UINT64_C(0x00000000000002a9) }, /* 30.0.816.20 */ - { IMG_UINT64_C(0x001e000006600001), IMG_UINT64_C(0x027efef0ffb3ffcb), IMG_UINT64_C(0x3a55669ba5655559), IMG_UINT64_C(0x00000000000002ad) }, /* 30.0.1632.1 */ - { IMG_UINT64_C(0x0023000001980017), IMG_UINT64_C(0x027efef0ffbbffcf), IMG_UINT64_C(0x15995549a555a559), IMG_UINT64_C(0x00000000000002a5) }, /* 35.0.408.23 */ - { IMG_UINT64_C(0x0023000001980021), IMG_UINT64_C(0x03fffef4ffbbffcf), IMG_UINT64_C(0x159955499515a569), IMG_UINT64_C(0x00000000000002a5) }, /* 35.0.408.33 */ - { IMG_UINT64_C(0x0023000001980022), IMG_UINT64_C(0x03fffef4ffbbffcf), IMG_UINT64_C(0x159955499515a569), IMG_UINT64_C(0x00000000000002a5) }, /* 35.0.408.34 */ - { IMG_UINT64_C(0x0023000001980065), IMG_UINT64_C(0x027efef0ffb3ffcb), IMG_UINT64_C(0x15555549a5555559), IMG_UINT64_C(0x00000000000002a5) }, /* 35.0.408.101 */ - { IMG_UINT64_C(0x0023000006600015), IMG_UINT64_C(0x027efef0ffbbffcf), IMG_UINT64_C(0x3a99669ba565a559), IMG_UINT64_C(0x00000000000002ad) }, /* 35.0.1632.21 */ - { IMG_UINT64_C(0x0023000006600017), IMG_UINT64_C(0x027efef0ffbbffcf), IMG_UINT64_C(0x3a99669ba565a559), IMG_UINT64_C(0x00000000000002ad) }, /* 35.0.1632.23 */ - { IMG_UINT64_C(0x0023000006600022), IMG_UINT64_C(0x03fffef4ffbbffcf), IMG_UINT64_C(0x3a99669b9525a569), IMG_UINT64_C(0x00000000000002b1) }, /* 35.0.1632.34 */ + { IMG_UINT64_C(0x001b000000fe0002), IMG_UINT64_C(0x1daad98f65952dca), IMG_UINT64_C(0x1629424ca94aaa55), IMG_UINT64_C(0x000000a92aa524a9) }, /* 27.0.254.2 */ + { IMG_UINT64_C(0x001e000001980065), IMG_UINT64_C(0x1fbaf98fedbd3dcb), IMG_UINT64_C(0x15292234a94aaa59), IMG_UINT64_C(0x000000a92aa524a9) }, /* 30.0.408.101 */ + { IMG_UINT64_C(0x001e000003300014), IMG_UINT64_C(0x1fbaf98fedbd3dcb), IMG_UINT64_C(0x15294454aa4aaa59), IMG_UINT64_C(0x000000a92aa944a9) }, /* 30.0.816.20 */ + { IMG_UINT64_C(0x0023000001980017), IMG_UINT64_C(0x1fbaf98fedfd3dcf), IMG_UINT64_C(0x15292234a94d2a59), IMG_UINT64_C(0x000000aa6aa524ca) }, /* 35.0.408.23 */ + { IMG_UINT64_C(0x0023000001980021), IMG_UINT64_C(0xfffaf9afedfd3dcf), IMG_UINT64_C(0x15292232a14d2aa9), IMG_UINT64_C(0x000000aa2aa524ca) }, /* 35.0.408.33 */ + { IMG_UINT64_C(0x0023000001980022), IMG_UINT64_C(0xfffaf9afedfd3dcf), IMG_UINT64_C(0x15292232a14d2aa9), IMG_UINT64_C(0x000000aa4aa524ca) }, /* 35.0.408.34 */ + { IMG_UINT64_C(0x0023000001980065), IMG_UINT64_C(0x1fbaf98fedbd3dcb), IMG_UINT64_C(0x15292234a94aaa59), IMG_UINT64_C(0x000000a92aa524a9) }, /* 35.0.408.101 */ + { IMG_UINT64_C(0x0023000006600015), IMG_UINT64_C(0x1fbaf98fedfd3dcf), IMG_UINT64_C(0x154a4894aa4d2a59), IMG_UINT64_C(0x000000aa4aad694a) }, /* 35.0.1632.21 */ + { IMG_UINT64_C(0x0023000006600017), IMG_UINT64_C(0x1fbaf98fedfd3dcf), IMG_UINT64_C(0x154a4894aa4d2a59), IMG_UINT64_C(0x000000aa6aad694a) }, /* 35.0.1632.23 */ + { 
IMG_UINT64_C(0x0023000006600022), IMG_UINT64_C(0x7ffaf9afedfd3dcf), IMG_UINT64_C(0x154a4892a24d2aa9), IMG_UINT64_C(0x000000aa4ab9694a) }, /* 35.0.1632.34 */ + { IMG_UINT64_C(0x0023000006600023), IMG_UINT64_C(0xfffaf9afedfd3dcf), IMG_UINT64_C(0x154a4892a24d2aa9), IMG_UINT64_C(0x000000aa6ab9694a) }, /* 35.0.1632.35 */ + { IMG_UINT64_C(0x0026000009900192), IMG_UINT64_C(0x1fbbf9cfedfd3ddf), IMG_UINT64_C(0x156c4ab4aa4d2a59), IMG_UINT64_C(0x000000aa2d59ad52) }, /* 38.0.2448.402 */ + { IMG_UINT64_C(0x00460000099001a2), IMG_UINT64_C(0x1fbbf9cfedfd3dff), IMG_UINT64_C(0x156c4ab4aa553459), IMG_UINT64_C(0x000000aa2d59ad53) }, /* 70.0.2448.418 */ + { IMG_UINT64_C(0x0046000009900411), IMG_UINT64_C(0x1fbbf9cfedfd3dff), IMG_UINT64_C(0x156b4ab4aa5d3459), IMG_UINT64_C(0x000000aa2ab58d4b) }, /* 70.0.2448.1041 */ + { IMG_UINT64_C(0x0046000009900412), IMG_UINT64_C(0x1fbbf9cfedfd3dff), IMG_UINT64_C(0x156b4ab4aa5d3459), IMG_UINT64_C(0x000000aa2ab58d4b) }, /* 70.0.2448.1042 */ + { IMG_UINT64_C(0x0046000009900550), IMG_UINT64_C(0x1fbbf9cfedfd3dff), IMG_UINT64_C(0x156c4ab4aa5d3459), IMG_UINT64_C(0x000000aa2d59ad53) }, /* 70.0.2448.1360 */ + { IMG_UINT64_C(0x00470000099004ba), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x254b6ab2a35d345a), IMG_UINT64_C(0x000000ab2b558954) }, /* 71.0.2448.1210 */ + { IMG_UINT64_C(0x00470000099004bb), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x254b6ab2a365345a), IMG_UINT64_C(0x000000ab2b558954) }, /* 71.0.2448.1211 */ + { IMG_UINT64_C(0x00470000099004bc), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x254b6ab2a365345a), IMG_UINT64_C(0x000000ab2b558954) }, /* 71.0.2448.1212 */ + { IMG_UINT64_C(0x004700000e580820), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x256c6ef2a365345a), IMG_UINT64_C(0x000000ab4d59ad4c) }, /* 71.0.3672.2080 */ + { IMG_UINT64_C(0x004700000e580821), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x356c6ef2a365345a), IMG_UINT64_C(0x000000ab4d59ad4e) }, /* 71.0.3672.2081 */ + { IMG_UINT64_C(0x004700000e580822), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x356c6ef2a365345a), IMG_UINT64_C(0x000000ab4d59ad4e) }, /* 71.0.3672.2082 */ + { IMG_UINT64_C(0x004700000e580823), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x356c6ef2a365345a), IMG_UINT64_C(0x000000ab4d59ad4e) }, /* 71.0.3672.2083 */ + { IMG_UINT64_C(0x004700000e580824), IMG_UINT64_C(0x1fbbe9efedfdbdff), IMG_UINT64_C(0x356c6ef2a365345a), IMG_UINT64_C(0x000000ab4d59ad4e) }, /* 71.0.3672.2084 */ }; /****************************************************************************** @@ -340,26 +404,44 @@ static const IMG_UINT64 gaFeatures[][4]= static const IMG_UINT64 gaErnsBrns[][2]= { - { IMG_UINT64_C(0x001b000500fe0002), IMG_UINT64_C(0x0000000000000011) }, /* 27.5.254.2 */ + { IMG_UINT64_C(0x001b000500fe0002), IMG_UINT64_C(0x0000000000000009) }, /* 27.5.254.2 */ { IMG_UINT64_C(0x001b000700fe0002), IMG_UINT64_C(0x0000000000000001) }, /* 27.7.254.2 */ { IMG_UINT64_C(0x001b000800fe0002), IMG_UINT64_C(0x0000000000000001) }, /* 27.8.254.2 */ - { IMG_UINT64_C(0x001e000301980065), IMG_UINT64_C(0x0000000000000025) }, /* 30.3.408.101 */ - { IMG_UINT64_C(0x001e000303300014), IMG_UINT64_C(0x0000000000000025) }, /* 30.3.816.20 */ - { IMG_UINT64_C(0x001e000506600001), IMG_UINT64_C(0x0000000000000025) }, /* 30.5.1632.1 */ - { IMG_UINT64_C(0x0023000201980021), IMG_UINT64_C(0x0000000000000025) }, /* 35.2.408.33 */ - { IMG_UINT64_C(0x0023000201980022), IMG_UINT64_C(0x0000000000000025) }, /* 35.2.408.34 */ - { IMG_UINT64_C(0x0023000206600015), IMG_UINT64_C(0x000000000000002d) }, /* 35.2.1632.21 */ - { 
IMG_UINT64_C(0x0023000206600017), IMG_UINT64_C(0x000000000000002d) }, /* 35.2.1632.23 */ - { IMG_UINT64_C(0x0023000206600022), IMG_UINT64_C(0x0000000000000025) }, /* 35.2.1632.34 */ - { IMG_UINT64_C(0x0023000301980065), IMG_UINT64_C(0x0000000000000025) }, /* 35.3.408.101 */ - { IMG_UINT64_C(0x0023000306600017), IMG_UINT64_C(0x0000000000000025) }, /* 35.3.1632.23 */ - { IMG_UINT64_C(0x0023000406600017), IMG_UINT64_C(0x0000000000000025) }, /* 35.4.1632.23 */ - { IMG_UINT64_C(0x0023000501980017), IMG_UINT64_C(0x0000000000000025) }, /* 35.5.408.23 */ + { IMG_UINT64_C(0x001e000301980065), IMG_UINT64_C(0x0000000000000053) }, /* 30.3.408.101 */ + { IMG_UINT64_C(0x001e000303300014), IMG_UINT64_C(0x0000000000000053) }, /* 30.3.816.20 */ + { IMG_UINT64_C(0x0023000201980022), IMG_UINT64_C(0x0000000000000053) }, /* 35.2.408.34 */ + { IMG_UINT64_C(0x0023000206600015), IMG_UINT64_C(0x0000000000000057) }, /* 35.2.1632.21 */ + { IMG_UINT64_C(0x0023000206600017), IMG_UINT64_C(0x0000000000000057) }, /* 35.2.1632.23 */ + { IMG_UINT64_C(0x0023000206600022), IMG_UINT64_C(0x0000000000000053) }, /* 35.2.1632.34 */ + { IMG_UINT64_C(0x0023000206600023), IMG_UINT64_C(0x0000000000000053) }, /* 35.2.1632.35 */ + { IMG_UINT64_C(0x0023000301980021), IMG_UINT64_C(0x0000000000000053) }, /* 35.3.408.33 */ + { IMG_UINT64_C(0x0023000301980022), IMG_UINT64_C(0x0000000000000053) }, /* 35.3.408.34 */ + { IMG_UINT64_C(0x0023000301980065), IMG_UINT64_C(0x0000000000000053) }, /* 35.3.408.101 */ + { IMG_UINT64_C(0x0023000306600017), IMG_UINT64_C(0x0000000000000053) }, /* 35.3.1632.23 */ + { IMG_UINT64_C(0x0023000406600017), IMG_UINT64_C(0x0000000000000053) }, /* 35.4.1632.23 */ + { IMG_UINT64_C(0x0023000501980017), IMG_UINT64_C(0x0000000000000053) }, /* 35.5.408.23 */ + { IMG_UINT64_C(0x0026000409900192), IMG_UINT64_C(0x0000000000000073) }, /* 38.4.2448.402 */ + { IMG_UINT64_C(0x0026000609900192), IMG_UINT64_C(0x0000000000000073) }, /* 38.6.2448.402 */ + { IMG_UINT64_C(0x00460002099001a2), IMG_UINT64_C(0x0000000000000003) }, /* 70.2.2448.418 */ + { IMG_UINT64_C(0x0046000209900411), IMG_UINT64_C(0x0000000000000003) }, /* 70.2.2448.1041 */ + { IMG_UINT64_C(0x0046000209900412), IMG_UINT64_C(0x0000000000000003) }, /* 70.2.2448.1042 */ + { IMG_UINT64_C(0x0046000209900550), IMG_UINT64_C(0x0000000000000003) }, /* 70.2.2448.1360 */ + { IMG_UINT64_C(0x0046000309900412), IMG_UINT64_C(0x0000000000000003) }, /* 70.3.2448.1042 */ + { IMG_UINT64_C(0x0046000309900550), IMG_UINT64_C(0x0000000000000003) }, /* 70.3.2448.1360 */ + { IMG_UINT64_C(0x00470002099004ba), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.2448.1210 */ + { IMG_UINT64_C(0x00470002099004bb), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.2448.1211 */ + { IMG_UINT64_C(0x00470002099004bc), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.2448.1212 */ + { IMG_UINT64_C(0x004700020e580820), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.3672.2080 */ + { IMG_UINT64_C(0x004700020e580821), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.3672.2081 */ + { IMG_UINT64_C(0x004700020e580822), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.3672.2082 */ + { IMG_UINT64_C(0x004700020e580823), IMG_UINT64_C(0x0000000000000083) }, /* 71.2.3672.2083 */ + { IMG_UINT64_C(0x004700030e580823), IMG_UINT64_C(0x0000000000000083) }, /* 71.3.3672.2083 */ + { IMG_UINT64_C(0x004700030e580824), IMG_UINT64_C(0x0000000000000083) }, /* 71.3.3672.2084 */ }; #if defined(DEBUG) -#define FEATURE_NO_VALUES_NAMES_MAX_IDX (58) +#define FEATURE_NO_VALUES_NAMES_MAX_IDX (65U) static const IMG_CHAR * const 
gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] = { @@ -372,22 +454,26 @@ static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_ "CLUSTER_GROUPING", "COMPUTE", "COMPUTE_MORTON_CAPABLE", + "COMPUTE_ONLY", "COMPUTE_OVERLAP", "COMPUTE_OVERLAP_WITH_BARRIERS", "COMPUTE_SLC_MMU_AUTO_CACHE_OPS", "COREID_PER_OS", + "CR_PARITY", + "DIVANO_TOP_INFRASTRUCTURE", "DUST_POWER_ISLAND_S7", + "ERYX_TOP_INFRASTRUCTURE", "FASTRENDER_DM", "FRAG_SLC_MMU_AUTO_CACHE_OPS", "GEOMETRY_BIF_ARBITER", "GEOM_SLC_MMU_AUTO_CACHE_OPS", - "GPU_CPU_COHERENCY", "GPU_MULTICORE_SUPPORT", "GPU_VIRTUALISATION", "GS_RTA_SUPPORT", - "HYPERVISOR_MMU", + "IDLE_CYCLE_STEALING", "META_DMA", "META_REGISTER_UNPACKED_ACCESSES", + "MH_PARITY", "PBE_CHECKSUM_2D", "PBVNC_COREID_REG", "PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE", @@ -395,19 +481,21 @@ static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_ "PERFBUS", "PERF_COUNTER_BATCH", "PM_BYTE_ALIGNED_BASE_ADDRESSES", - "PM_MMUSTACK", - "PM_MMU_VFP", + "RISCV_DUAL_LOCKSTEP", "RISCV_FW_PROCESSOR", "RT_RAC_PER_SPU", "S7_CACHE_HIERARCHY", "S7_TOP_INFRASTRUCTURE", + "SAFETY_IRQ", + "SAFETY_SELF_TEST", "SCALABLE_VDM_GPP", "SIGNAL_SNOOPING", - "SLC_FAULT_ACCESS_ADDR_PHYS", "SLC_SIZE_ADJUSTMENT", "SLC_VIVT", "SOC_TIMER", + "SPU_ARCH_CLOCK_GATING", "SYS_BUS_SECURE_RESET", + "SYS_PARITY", "TDM_PDS_CHECKSUM", "TDM_SLC_MMU_AUTO_CACHE_OPS", "TESSELLATION", @@ -420,20 +508,22 @@ static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_ "VDM_OBJECT_LEVEL_LLS", "WATCHDOG_TIMER", "WORKGROUP_PROTECTION", + "WORKGROUP_PROTECTION_SMP", "ZLS_CHECKSUM", }; -#define ERNSBRNS_IDS_MAX_IDX (7) +#define ERNSBRNS_IDS_MAX_IDX (8U) static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = { 65104, - 66927, 69700, 71157, 71422, 71960, 72143, + 72144, + 74812, }; #endif /* defined(DEBUG) */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_cr_defs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_cr_defs_km.h index 72cdb95fa62c..a9edab2f32e1 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_cr_defs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgx_cr_defs_km.h @@ -58,7550 +58,10272 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "img_defs.h" -#define RGX_CR_DEFS_KM_REVISION 165 +#define RGX_CR_DEFS_KM_REVISION 217 /* Register RGX_CR_USC_INDIRECT */ -#define RGX_CR_USC_INDIRECT (0x8000U) -#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_USC_INDIRECT (0x8000U) +#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_MERCER_INDIRECT */ -#define RGX_CR_MERCER_INDIRECT (0x8238U) -#define RGX_CR_MERCER_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_MERCER_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_MERCER_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_MERCER_INDIRECT (0x8238U) +#define RGX_CR_MERCER_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MERCER_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_MERCER_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_PBE_INDIRECT */ -#define RGX_CR_PBE_INDIRECT (0x83E0U) -#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_PBE_INDIRECT (0x83E0U) +#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_PBE_SHARED_INDIRECT */ -#define RGX_CR_PBE_SHARED_INDIRECT (0x8388U) -#define RGX_CR_PBE_SHARED_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) +#define RGX_CR_PBE_SHARED_INDIRECT (0x8388U) +#define RGX_CR_PBE_SHARED_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) /* Register RGX_CR_ISP_INDIRECT */ -#define RGX_CR_ISP_INDIRECT (0x83F8U) -#define RGX_CR_ISP_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_ISP_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_ISP_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_ISP_INDIRECT (0x83F8U) +#define RGX_CR_ISP_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_ISP_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_ISP_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_TPU_INDIRECT */ -#define RGX_CR_TPU_INDIRECT (0x83E8U) -#define RGX_CR_TPU_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_TPU_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_TPU_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_TPU_INDIRECT (0x83E8U) +#define RGX_CR_TPU_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_TPU_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TPU_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_SWIFT_INDIRECT */ -#define RGX_CR_SWIFT_INDIRECT (0x8308U) -#define RGX_CR_SWIFT_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_SWIFT_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_SWIFT_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_SWIFT_INDIRECT (0x8308U) +#define RGX_CR_SWIFT_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_SWIFT_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_SWIFT_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_TEXAS_INDIRECT */ -#define 
RGX_CR_TEXAS_INDIRECT (0x8390U) -#define RGX_CR_TEXAS_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_TEXAS_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_TEXAS_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) +#define RGX_CR_TEXAS_INDIRECT (0x8390U) +#define RGX_CR_TEXAS_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TEXAS_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) /* Register RGX_CR_RAC_INDIRECT */ -#define RGX_CR_RAC_INDIRECT (0x8398U) -#define RGX_CR_RAC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_RAC_INDIRECT_ADDRESS_SHIFT (0U) -#define RGX_CR_RAC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_RAC_INDIRECT (0x8398U) +#define RGX_CR_RAC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_RAC_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_RAC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_CLK_CTRL0 */ -#define RGX_CR_CLK_CTRL0 (0x0000U) -#define RGX_CR_CLK_CTRL0_MASKFULL (IMG_UINT64_C(0xFFCF0303FF3F3303)) -#define RGX_CR_CLK_CTRL0_BIF_TEXAS_SHIFT (62U) -#define RGX_CR_CLK_CTRL0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_CLK_CTRL0_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) -#define RGX_CR_CLK_CTRL0_FBCACHE_SHIFT (60U) -#define RGX_CR_CLK_CTRL0_FBCACHE_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_FBCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_FBCACHE_ON (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_CLK_CTRL0_FBCACHE_AUTO (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_CLK_CTRL0_FBC_SHIFT (58U) -#define RGX_CR_CLK_CTRL0_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_FBC_ON (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_CLK_CTRL0_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) -#define RGX_CR_CLK_CTRL0_FBDC_SHIFT (56U) -#define RGX_CR_CLK_CTRL0_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_CLK_CTRL0_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_CLK_CTRL0_FBM_SHIFT (54U) -#define RGX_CR_CLK_CTRL0_FBM_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_FBM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_FBM_ON (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_CLK_CTRL0_FBM_AUTO (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_CLK_CTRL0_PBE_SHIFT (50U) -#define RGX_CR_CLK_CTRL0_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_PBE_ON (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_CLK_CTRL0_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_CLK_CTRL0_MCU_L1_SHIFT (48U) -#define RGX_CR_CLK_CTRL0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_CLK_CTRL0_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_CLK_CTRL0_BIF_SHIFT (40U) -#define RGX_CR_CLK_CTRL0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) 
-#define RGX_CR_CLK_CTRL0_BIF_ON (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_CLK_CTRL0_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_CLK_CTRL0_TAG_SHIFT (32U) -#define RGX_CR_CLK_CTRL0_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) -#define RGX_CR_CLK_CTRL0_TAG_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_TAG_ON (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CLK_CTRL0_TAG_AUTO (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_CLK_CTRL0_MADD_SHIFT (30U) -#define RGX_CR_CLK_CTRL0_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) -#define RGX_CR_CLK_CTRL0_MADD_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_MADD_ON (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_CLK_CTRL0_MADD_AUTO (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CLK_CTRL0_TF_SHIFT (28U) -#define RGX_CR_CLK_CTRL0_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) -#define RGX_CR_CLK_CTRL0_TF_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_TF_ON (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_CTRL0_TF_AUTO (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_CTRL0_MCU_L0_SHIFT (26U) -#define RGX_CR_CLK_CTRL0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) -#define RGX_CR_CLK_CTRL0_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_CLK_CTRL0_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_CTRL0_TPU_SHIFT (24U) -#define RGX_CR_CLK_CTRL0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -#define RGX_CR_CLK_CTRL0_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_TPU_ON (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_CLK_CTRL0_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_CLK_CTRL0_USC_SHIFT (20U) -#define RGX_CR_CLK_CTRL0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -#define RGX_CR_CLK_CTRL0_USC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_USC_ON (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_CLK_CTRL0_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_CLK_CTRL0_SLC_BANK_SHIFT (18U) -#define RGX_CR_CLK_CTRL0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -#define RGX_CR_CLK_CTRL0_SLC_BANK_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_SLC_BANK_ON (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_CLK_CTRL0_SLC_BANK_AUTO (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_CLK_CTRL0_SLC_SHIFT (16U) -#define RGX_CR_CLK_CTRL0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -#define RGX_CR_CLK_CTRL0_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_SLC_ON (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_CLK_CTRL0_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_CLK_CTRL0_PDS_SHIFT (12U) -#define RGX_CR_CLK_CTRL0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -#define RGX_CR_CLK_CTRL0_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_PDS_ON (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_CLK_CTRL0_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_CLK_CTRL0_PM_SHIFT (8U) -#define RGX_CR_CLK_CTRL0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_CR_CLK_CTRL0_PM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_PM_ON (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_CTRL0_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_CTRL0_ISP_SHIFT (0U) -#define RGX_CR_CLK_CTRL0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -#define RGX_CR_CLK_CTRL0_ISP_OFF 
(IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL0_ISP_ON (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_CLK_CTRL0_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL0 (0x0000U) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__MASKFULL (IMG_UINT64_C(0xFFCF0303FF3F333F)) +#define RGX_CR_CLK_CTRL0_MASKFULL (IMG_UINT64_C(0xFFCF0303FF3F3303)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_SHIFT (62U) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS__PIPEDM_GT0__ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_SHIFT (60U) +#define RGX_CR_CLK_CTRL0_FBCACHE_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE__PIPEDM_GT0__ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_SHIFT (58U) +#define RGX_CR_CLK_CTRL0_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL0_FBC__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC__PIPEDM_GT0__ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL0_FBC__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_SHIFT (56U) +#define RGX_CR_CLK_CTRL0_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC__PIPEDM_GT0__ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_SHIFT (54U) +#define RGX_CR_CLK_CTRL0_FBM_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL0_FBM__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBM__PIPEDM_GT0__ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL0_FBM__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_SHIFT (50U) +#define RGX_CR_CLK_CTRL0_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_ON (IMG_UINT64_C(0x0004000000000000)) +#define 
RGX_CR_CLK_CTRL0_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL0_PBE__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PBE__PIPEDM_GT0__ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL0_PBE__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_SHIFT (48U) +#define RGX_CR_CLK_CTRL0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1__PIPEDM_GT0__ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_SHIFT (40U) +#define RGX_CR_CLK_CTRL0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL0_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL0_BIF__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL0_BIF__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL0_TAG_SHIFT (32U) +#define RGX_CR_CLK_CTRL0_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_TAG_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TAG_ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_CTRL0_TAG_AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_CTRL0_TAG__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TAG__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_CTRL0_TAG__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_CTRL0_MADD_SHIFT (30U) +#define RGX_CR_CLK_CTRL0_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) +#define RGX_CR_CLK_CTRL0_MADD_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MADD_ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_CTRL0_MADD_AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_CTRL0_MADD__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MADD__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_CTRL0_MADD__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_CTRL0_TF_SHIFT (28U) +#define RGX_CR_CLK_CTRL0_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL0_TF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TF_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL0_TF_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL0_TF__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TF__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL0_TF__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_SHIFT (26U) +#define RGX_CR_CLK_CTRL0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL0_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0__PIPEDM_GT0__OFF 
(IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL0_TPU_SHIFT (24U) +#define RGX_CR_CLK_CTRL0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL0_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TPU_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL0_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL0_TPU__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TPU__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL0_TPU__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL0_USC_SHIFT (20U) +#define RGX_CR_CLK_CTRL0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL0_USC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_USC_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL0_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL0_USC__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_USC__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL0_USC__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_SHIFT (18U) +#define RGX_CR_CLK_CTRL0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL0_SLC_SHIFT (16U) +#define RGX_CR_CLK_CTRL0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL0_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL0_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL0_SLC__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL0_SLC__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL0_PDS_SHIFT (12U) +#define RGX_CR_CLK_CTRL0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL0_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PDS_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL0_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL0_PDS__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PDS__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL0_PDS__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL0_PM_SHIFT (8U) +#define RGX_CR_CLK_CTRL0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL0_PM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL0_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL0_PM__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PM__PIPEDM_GT0__ON 
(IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL0_PM__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__TPU2_LCTRL_SHIFT (4U) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__TPU2_LCTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__TPU2_LCTRL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__TPU2_LCTRL_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__TPU2_LCTRL_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_BE_SHIFT (2U) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_BE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_BE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_BE_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_BE_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL0_ISP_SHIFT (0U) +#define RGX_CR_CLK_CTRL0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL0_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_ISP_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL0_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL0_ISP__PIPEDM_GT0__OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_ISP__PIPEDM_GT0__ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL0_ISP__PIPEDM_GT0__AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_FE_SHIFT (0U) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_FE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_FE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_FE_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ISP_FE_AUTO (IMG_UINT64_C(0x0000000000000002)) /* Register RGX_CR_CLK_STATUS0 */ -#define RGX_CR_CLK_STATUS0 (0x0008U) -#define RGX_CR_CLK_STATUS0_MASKFULL (IMG_UINT64_C(0x00000001BF107F51)) -#define RGX_CR_CLK_STATUS0_MCU_L0_SHIFT (32U) -#define RGX_CR_CLK_STATUS0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_CLK_STATUS0_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS0_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CLK_STATUS0_BIF_TEXAS_SHIFT (31U) -#define RGX_CR_CLK_STATUS0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_CLK_STATUS0_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS0_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CLK_STATUS0_FBCACHE_SHIFT (29U) -#define RGX_CR_CLK_STATUS0_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_CLK_STATUS0_FBCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS0_FBCACHE_RUNNING (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_STATUS0_FBC_SHIFT (28U) -#define RGX_CR_CLK_STATUS0_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_CLK_STATUS0_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS0_FBC_RUNNING (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_STATUS0_FBDC_SHIFT (27U) -#define RGX_CR_CLK_STATUS0_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_CLK_STATUS0_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS0_FBDC_RUNNING (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_STATUS0_FBM_SHIFT (26U) -#define RGX_CR_CLK_STATUS0_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) 
-#define RGX_CR_CLK_STATUS0_FBM_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_FBM_RUNNING (IMG_UINT64_C(0x0000000004000000))
-#define RGX_CR_CLK_STATUS0_PBE_SHIFT (25U)
-#define RGX_CR_CLK_STATUS0_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
-#define RGX_CR_CLK_STATUS0_PBE_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000))
-#define RGX_CR_CLK_STATUS0_MCU_L1_SHIFT (24U)
-#define RGX_CR_CLK_STATUS0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
-#define RGX_CR_CLK_STATUS0_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000))
-#define RGX_CR_CLK_STATUS0_BIF_SHIFT (20U)
-#define RGX_CR_CLK_STATUS0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
-#define RGX_CR_CLK_STATUS0_BIF_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000))
-#define RGX_CR_CLK_STATUS0_TF_SHIFT (14U)
-#define RGX_CR_CLK_STATUS0_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
-#define RGX_CR_CLK_STATUS0_TF_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_TF_RUNNING (IMG_UINT64_C(0x0000000000004000))
-#define RGX_CR_CLK_STATUS0_MADD_SHIFT (13U)
-#define RGX_CR_CLK_STATUS0_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
-#define RGX_CR_CLK_STATUS0_MADD_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_MADD_RUNNING (IMG_UINT64_C(0x0000000000002000))
-#define RGX_CR_CLK_STATUS0_TPU_SHIFT (12U)
-#define RGX_CR_CLK_STATUS0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
-#define RGX_CR_CLK_STATUS0_TPU_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000))
-#define RGX_CR_CLK_STATUS0_TAG_SHIFT (11U)
-#define RGX_CR_CLK_STATUS0_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
-#define RGX_CR_CLK_STATUS0_TAG_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_TAG_RUNNING (IMG_UINT64_C(0x0000000000000800))
-#define RGX_CR_CLK_STATUS0_USC_SHIFT (10U)
-#define RGX_CR_CLK_STATUS0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
-#define RGX_CR_CLK_STATUS0_USC_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_USC_RUNNING (IMG_UINT64_C(0x0000000000000400))
-#define RGX_CR_CLK_STATUS0_SLC_BANK_SHIFT (9U)
-#define RGX_CR_CLK_STATUS0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
-#define RGX_CR_CLK_STATUS0_SLC_BANK_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_SLC_BANK_RUNNING (IMG_UINT64_C(0x0000000000000200))
-#define RGX_CR_CLK_STATUS0_SLC_SHIFT (8U)
-#define RGX_CR_CLK_STATUS0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
-#define RGX_CR_CLK_STATUS0_SLC_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100))
-#define RGX_CR_CLK_STATUS0_PDS_SHIFT (6U)
-#define RGX_CR_CLK_STATUS0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
-#define RGX_CR_CLK_STATUS0_PDS_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040))
-#define RGX_CR_CLK_STATUS0_PM_SHIFT (4U)
-#define RGX_CR_CLK_STATUS0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
-#define RGX_CR_CLK_STATUS0_PM_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_PM_RUNNING (IMG_UINT64_C(0x0000000000000010))
-#define RGX_CR_CLK_STATUS0_ISP_SHIFT (0U)
-#define RGX_CR_CLK_STATUS0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
-#define RGX_CR_CLK_STATUS0_ISP_GATED (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_STATUS0_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_STATUS0 (0x0008U)
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__MASKFULL (IMG_UINT64_C(0x00000001BF107F53))
+#define RGX_CR_CLK_STATUS0_MASKFULL (IMG_UINT64_C(0x00000001BF107F51))
+#define RGX_CR_CLK_STATUS0_MCU_L0_SHIFT (32U)
+#define RGX_CR_CLK_STATUS0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS0_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS0_MCU_L0__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MCU_L0__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_SHIFT (31U)
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS0_FBCACHE_SHIFT (29U)
+#define RGX_CR_CLK_STATUS0_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS0_FBCACHE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBCACHE_RUNNING (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS0_FBCACHE__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBCACHE__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS0_FBC_SHIFT (28U)
+#define RGX_CR_CLK_STATUS0_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS0_FBC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBC_RUNNING (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS0_FBC__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBC__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS0_FBDC_SHIFT (27U)
+#define RGX_CR_CLK_STATUS0_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS0_FBDC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBDC_RUNNING (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS0_FBDC__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBDC__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS0_FBM_SHIFT (26U)
+#define RGX_CR_CLK_STATUS0_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS0_FBM_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBM_RUNNING (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS0_FBM__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBM__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS0_PBE_SHIFT (25U)
+#define RGX_CR_CLK_STATUS0_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS0_PBE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS0_PBE__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PBE__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS0_MCU_L1_SHIFT (24U)
+#define RGX_CR_CLK_STATUS0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS0_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS0_MCU_L1__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MCU_L1__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS0_BIF_SHIFT (20U)
+#define RGX_CR_CLK_STATUS0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS0_BIF_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS0_BIF__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_BIF__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS0_TF_SHIFT (14U)
+#define RGX_CR_CLK_STATUS0_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS0_TF_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TF_RUNNING (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS0_TF__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TF__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS0_MADD_SHIFT (13U)
+#define RGX_CR_CLK_STATUS0_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS0_MADD_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MADD_RUNNING (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS0_MADD__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MADD__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS0_TPU_SHIFT (12U)
+#define RGX_CR_CLK_STATUS0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS0_TPU_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS0_TPU__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TPU__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS0_TAG_SHIFT (11U)
+#define RGX_CR_CLK_STATUS0_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_CLK_STATUS0_TAG_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TAG_RUNNING (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_STATUS0_TAG__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TAG__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_STATUS0_USC_SHIFT (10U)
+#define RGX_CR_CLK_STATUS0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS0_USC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_USC_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS0_USC__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_USC__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS0_SLC_BANK_SHIFT (9U)
+#define RGX_CR_CLK_STATUS0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS0_SLC_BANK_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_SLC_BANK_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS0_SLC_BANK__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_SLC_BANK__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS0_SLC_SHIFT (8U)
+#define RGX_CR_CLK_STATUS0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS0_SLC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS0_SLC__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_SLC__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS0_PDS_SHIFT (6U)
+#define RGX_CR_CLK_STATUS0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS0_PDS_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS0_PDS__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PDS__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS0_PM_SHIFT (4U)
+#define RGX_CR_CLK_STATUS0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS0_PM_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PM_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS0_PM__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PM__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_BE_SHIFT (1U)
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_BE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_BE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_BE_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS0_ISP_SHIFT (0U)
+#define RGX_CR_CLK_STATUS0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS0_ISP_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_STATUS0_ISP__PIPEDM_GT0__GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_ISP__PIPEDM_GT0__RUNNING (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_FE_SHIFT (0U)
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_FE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_FE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0__PIPEDM_GT0__ISP_FE_RUNNING (IMG_UINT64_C(0x0000000000000001))
 /* Register RGX_CR_CORE_ID */
-#define RGX_CR_CORE_ID (0x0020U)
-#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
-#define RGX_CR_CORE_ID_BRANCH_ID_SHIFT (48U)
-#define RGX_CR_CORE_ID_BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
-#define RGX_CR_CORE_ID_VERSION_ID_SHIFT (32U)
-#define RGX_CR_CORE_ID_VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
-#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
-#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
-#define RGX_CR_CORE_ID_CONFIG_ID_SHIFT (0U)
-#define RGX_CR_CORE_ID_CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+#define RGX_CR_CORE_ID (0x0020U)
+#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID_BRANCH_ID_SHIFT (48U)
+#define RGX_CR_CORE_ID_BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID_VERSION_ID_SHIFT (32U)
+#define RGX_CR_CORE_ID_VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID_CONFIG_ID_SHIFT (0U)
+#define RGX_CR_CORE_ID_CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
 /* Register RGX_CR_SPU_ENABLE */
-#define RGX_CR_SPU_ENABLE (0x0050U)
-#define RGX_CR_SPU_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
-#define RGX_CR_SPU_ENABLE_ENABLE_SHIFT (0U)
-#define RGX_CR_SPU_ENABLE_ENABLE_CLRMSK (0x00000000U)
+#define RGX_CR_SPU_ENABLE (0x0050U)
+#define RGX_CR_SPU_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SPU_ENABLE_ENABLE_SHIFT (0U)
+#define RGX_CR_SPU_ENABLE_ENABLE_CLRMSK (0x00000000U)
 /* Register RGX_CR_SOC_TIMER_GRAY */
-#define RGX_CR_SOC_TIMER_GRAY (0x00E0U)
-#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
-#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U)
-#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_SOC_TIMER_GRAY (0x00E0U)
+#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U)
+#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000))
 /* Register RGX_CR_SOC_TIMER_BINARY */
-#define RGX_CR_SOC_TIMER_BINARY (0x00E8U)
-#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
-#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U)
-#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_SOC_TIMER_BINARY (0x00E8U)
+#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U)
+#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000))
 /* Register RGX_CR_CLK_CTRL1 */
-#define RGX_CR_CLK_CTRL1 (0x0080U)
-#define RGX_CR_CLK_CTRL1_MASKFULL (IMG_UINT64_C(0xFFFC3F3FFFCFFFFF))
-#define RGX_CR_CLK_CTRL1_BSC_SHIFT (62U)
-#define RGX_CR_CLK_CTRL1_BSC_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
-#define RGX_CR_CLK_CTRL1_BSC_OFF (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_CTRL1_BSC_ON (IMG_UINT64_C(0x4000000000000000))
-#define RGX_CR_CLK_CTRL1_BSC_AUTO (IMG_UINT64_C(0x8000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_SHIFT (60U)
-#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_OFF (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_ON (IMG_UINT64_C(0x1000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_AUTO (IMG_UINT64_C(0x2000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_SHIFT (58U)
-#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_OFF (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_ON (IMG_UINT64_C(0x0400000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_AUTO (IMG_UINT64_C(0x0800000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_SHIFT (56U)
-#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_OFF (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_ON (IMG_UINT64_C(0x0100000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_AUTO (IMG_UINT64_C(0x0200000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_SHIFT (54U)
-#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_OFF (IMG_UINT64_C(0x0000000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_ON (IMG_UINT64_C(0x0040000000000000))
-#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_AUTO (IMG_UINT64_C(0x0080000000000000))
-#define RGX_CR_CLK_CTRL1_PSB_SHIFT (52U) -#define RGX_CR_CLK_CTRL1_PSB_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_PSB_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_PSB_ON (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_CLK_CTRL1_PSB_AUTO (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_SHIFT (50U) -#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_ON (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_AUTO (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_CLK_CTRL1_CDM_PIPE_SHIFT (44U) -#define RGX_CR_CLK_CTRL1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_CDM_PIPE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_CDM_PIPE_ON (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_CLK_CTRL1_CDM_PIPE_AUTO (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_SHIFT (42U) -#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_ON (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_AUTO (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_CLK_CTRL1_TCU_L1_SHIFT (40U) -#define RGX_CR_CLK_CTRL1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_TCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_TCU_L1_ON (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_CLK_CTRL1_TCU_L1_AUTO (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_CLK_CTRL1_TDM_SHIFT (36U) -#define RGX_CR_CLK_CTRL1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_TDM_ON (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_CLK_CTRL1_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_CLK_CTRL1_ASTC_SHIFT (34U) -#define RGX_CR_CLK_CTRL1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) -#define RGX_CR_CLK_CTRL1_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_CLK_CTRL1_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_CLK_CTRL1_IPF_SHIFT (32U) -#define RGX_CR_CLK_CTRL1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) -#define RGX_CR_CLK_CTRL1_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_IPF_ON (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CLK_CTRL1_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_CLK_CTRL1_COMPUTE_SHIFT (30U) -#define RGX_CR_CLK_CTRL1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) -#define RGX_CR_CLK_CTRL1_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_CLK_CTRL1_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CLK_CTRL1_PIXEL_SHIFT (28U) -#define RGX_CR_CLK_CTRL1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) -#define RGX_CR_CLK_CTRL1_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_CTRL1_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_CTRL1_VERTEX_SHIFT (26U) -#define RGX_CR_CLK_CTRL1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) -#define RGX_CR_CLK_CTRL1_VERTEX_OFF 
(IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_CLK_CTRL1_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_CTRL1_TPF_SHIFT (24U) -#define RGX_CR_CLK_CTRL1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -#define RGX_CR_CLK_CTRL1_TPF_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_TPF_ON (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_CLK_CTRL1_TPF_AUTO (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_CLK_CTRL1_GEO_VERTEX_SHIFT (22U) -#define RGX_CR_CLK_CTRL1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) -#define RGX_CR_CLK_CTRL1_GEO_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_GEO_VERTEX_ON (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CLK_CTRL1_GEO_VERTEX_AUTO (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_CLK_CTRL1_GEO_SHARED_SHIFT (18U) -#define RGX_CR_CLK_CTRL1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -#define RGX_CR_CLK_CTRL1_GEO_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_GEO_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_CLK_CTRL1_GEO_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_CLK_CTRL1_GEO_TESS_SHIFT (16U) -#define RGX_CR_CLK_CTRL1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -#define RGX_CR_CLK_CTRL1_GEO_TESS_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_GEO_TESS_ON (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_CLK_CTRL1_GEO_TESS_AUTO (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_SHIFT (14U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_ON (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_AUTO (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_SHIFT (12U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_ON (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_AUTO (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_SHIFT (10U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_ON (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_AUTO (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_SHIFT (8U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_ON (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_AUTO (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_SHIFT (6U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_ON (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_AUTO (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_SHIFT (4U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_OFF 
(IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_ON (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_AUTO (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_SHIFT (2U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_ON (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_AUTO (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_SHIFT (0U) -#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_OFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_ON (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_CTRL1 (0x0080U) +#define RGX_CR_CLK_CTRL1_MASKFULL (IMG_UINT64_C(0xFFFC3F3FFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_BSC_SHIFT (62U) +#define RGX_CR_CLK_CTRL1_BSC_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_BSC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_BSC_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL1_BSC_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_SHIFT (60U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_SHIFT (58U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_SHIFT (56U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_SHIFT (54U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_SHIFT (52U) +#define RGX_CR_CLK_CTRL1_PSB_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_PSB_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_AUTO (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_SHIFT (50U) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define 
RGX_CR_CLK_CTRL1_CDM_PIPE_SHIFT (44U) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_ON (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_SHIFT (42U) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_SHIFT (40U) +#define RGX_CR_CLK_CTRL1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL1_TDM_SHIFT (36U) +#define RGX_CR_CLK_CTRL1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TDM_ON (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_CLK_CTRL1_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_CLK_CTRL1_ASTC_SHIFT (34U) +#define RGX_CR_CLK_CTRL1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) +#define RGX_CR_CLK_CTRL1_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_CLK_CTRL1_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_CLK_CTRL1_IPF_SHIFT (32U) +#define RGX_CR_CLK_CTRL1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_IPF_ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_CTRL1_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_SHIFT (30U) +#define RGX_CR_CLK_CTRL1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) +#define RGX_CR_CLK_CTRL1_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_SHIFT (28U) +#define RGX_CR_CLK_CTRL1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL1_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_SHIFT (26U) +#define RGX_CR_CLK_CTRL1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL1_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL1_TPF_SHIFT (24U) +#define RGX_CR_CLK_CTRL1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL1_TPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TPF_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL1_TPF_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_SHIFT (22U) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_CTRL1_GEO_VERTEX_ON (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_AUTO (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_LMS_SHIFT (20U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_LMS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_LMS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_LMS_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_LMS_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_SHIFT (18U) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_SHIFT (16U) +#define RGX_CR_CLK_CTRL1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_SHIFT (14U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_SHIFT (12U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_SHIFT (10U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_SHIFT (8U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_SHIFT (6U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_SHIFT (4U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_SHIFT (2U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define 
RGX_CR_CLK_CTRL1_USC_PIPE_CPX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_SHIFT (0U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_AUTO (IMG_UINT64_C(0x0000000000000002)) /* Register RGX_CR_CLK_STATUS1 */ -#define RGX_CR_CLK_STATUS1 (0x0088U) -#define RGX_CR_CLK_STATUS1_MASKFULL (IMG_UINT64_C(0x00000000FFFE77FB)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_SHIFT (31U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_RUNNING (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_SHIFT (30U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_RUNNING (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_SHIFT (29U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_RUNNING (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_SHIFT (28U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_RUNNING (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_CLK_STATUS1_BSC_SHIFT (27U) -#define RGX_CR_CLK_STATUS1_BSC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_CLK_STATUS1_BSC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_BSC_RUNNING (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_CLK_STATUS1_PSB_SHIFT (26U) -#define RGX_CR_CLK_STATUS1_PSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_CLK_STATUS1_PSB_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_PSB_RUNNING (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_SHIFT (25U) -#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_RUNNING (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_SHIFT (24U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_RUNNING (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_SHIFT (23U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_RUNNING (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_SHIFT (22U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) 
-#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_RUNNING (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_SHIFT (21U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_RUNNING (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_SHIFT (20U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_RUNNING (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_SHIFT (19U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_RUNNING (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_SHIFT (18U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_RUNNING (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_SHIFT (17U) -#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_RUNNING (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_CLK_STATUS1_CDM_PIPE_SHIFT (14U) -#define RGX_CR_CLK_STATUS1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_CLK_STATUS1_CDM_PIPE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_CDM_PIPE_RUNNING (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_CLK_STATUS1_TCU_L1_SHIFT (13U) -#define RGX_CR_CLK_STATUS1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_CLK_STATUS1_TCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_TCU_L1_RUNNING (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_SHIFT (12U) -#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_RUNNING (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_CLK_STATUS1_TDM_SHIFT (10U) -#define RGX_CR_CLK_STATUS1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_CLK_STATUS1_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_CLK_STATUS1_IPF_SHIFT (9U) -#define RGX_CR_CLK_STATUS1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_CLK_STATUS1_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_CLK_STATUS1_COMPUTE_SHIFT (8U) -#define RGX_CR_CLK_STATUS1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_CLK_STATUS1_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_CLK_STATUS1_ASTC_SHIFT (7U) -#define RGX_CR_CLK_STATUS1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_CLK_STATUS1_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) -#define 
RGX_CR_CLK_STATUS1_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_CLK_STATUS1_PIXEL_SHIFT (6U) -#define RGX_CR_CLK_STATUS1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_CLK_STATUS1_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_CLK_STATUS1_VERTEX_SHIFT (5U) -#define RGX_CR_CLK_STATUS1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_CLK_STATUS1_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_CLK_STATUS1_TPF_SHIFT (4U) -#define RGX_CR_CLK_STATUS1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_CLK_STATUS1_TPF_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_TPF_RUNNING (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_CLK_STATUS1_GEO_SHARED_SHIFT (3U) -#define RGX_CR_CLK_STATUS1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_CLK_STATUS1_GEO_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_GEO_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_CLK_STATUS1_GEO_VERTEX_SHIFT (1U) -#define RGX_CR_CLK_STATUS1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_CLK_STATUS1_GEO_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_GEO_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_CLK_STATUS1_GEO_TESS_SHIFT (0U) -#define RGX_CR_CLK_STATUS1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_CLK_STATUS1_GEO_TESS_GATED (IMG_UINT64_C(0x0000000000000000)) -#define RGX_CR_CLK_STATUS1_GEO_TESS_RUNNING (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_STATUS1 (0x0088U) +#define RGX_CR_CLK_STATUS1_MASKFULL (IMG_UINT64_C(0x00000007FFFF77FB)) +#define RGX_CR_CLK_STATUS1_TPU2_LCTRL_SHIFT (34U) +#define RGX_CR_CLK_STATUS1_TPU2_LCTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_CLK_STATUS1_TPU2_LCTRL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TPU2_LCTRL_RUNNING (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_CLK_STATUS1_TILING_SHIFT (33U) +#define RGX_CR_CLK_STATUS1_TILING_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_CLK_STATUS1_TILING_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TILING_RUNNING (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_STATUS1_GEO_VDM_SHIFT (32U) +#define RGX_CR_CLK_STATUS1_GEO_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CLK_STATUS1_GEO_VDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_VDM_RUNNING (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_SHIFT (31U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_SHIFT (30U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_RUNNING (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_SHIFT (29U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_GATED (IMG_UINT64_C(0x0000000000000000)) 
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_SHIFT (28U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_STATUS1_BSC_SHIFT (27U) +#define RGX_CR_CLK_STATUS1_BSC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_CLK_STATUS1_BSC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_BSC_RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS1_PSB_SHIFT (26U) +#define RGX_CR_CLK_STATUS1_PSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_CLK_STATUS1_PSB_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_PSB_RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_SHIFT (25U) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_SHIFT (24U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_SHIFT (23U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_RUNNING (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_SHIFT (22U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_RUNNING (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_SHIFT (21U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_RUNNING (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_SHIFT (20U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_SHIFT (19U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_RUNNING (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_SHIFT (18U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_RUNNING (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_SHIFT (17U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_GATED 
(IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_RUNNING (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_LMS_SHIFT (16U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_LMS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_LMS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_LMS_RUNNING (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_SHIFT (14U) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_RUNNING (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_STATUS1_TCU_L1_SHIFT (13U) +#define RGX_CR_CLK_STATUS1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_CLK_STATUS1_TCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TCU_L1_RUNNING (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_SHIFT (12U) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS1_TDM_SHIFT (10U) +#define RGX_CR_CLK_STATUS1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_STATUS1_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS1_IPF_SHIFT (9U) +#define RGX_CR_CLK_STATUS1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_STATUS1_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS1_COMPUTE_SHIFT (8U) +#define RGX_CR_CLK_STATUS1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_STATUS1_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS1_ASTC_SHIFT (7U) +#define RGX_CR_CLK_STATUS1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_STATUS1_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS1_PIXEL_SHIFT (6U) +#define RGX_CR_CLK_STATUS1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_STATUS1_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS1_VERTEX_SHIFT (5U) +#define RGX_CR_CLK_STATUS1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_STATUS1_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS1_TPF_SHIFT (4U) +#define RGX_CR_CLK_STATUS1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS1_TPF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TPF_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_SHIFT (3U) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_SHIFT (1U) +#define 
RGX_CR_CLK_STATUS1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS1_GEO_TESS_SHIFT (0U) +#define RGX_CR_CLK_STATUS1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS1_GEO_TESS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_TESS_RUNNING (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_CLK_CTRL2 +*/ +#define RGX_CR_CLK_CTRL2 (0x0090U) +#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_CLK_CTRL2_TILING_SHIFT (28U) +#define RGX_CR_CLK_CTRL2_TILING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL2_TILING_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_TILING_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL2_TILING_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL2_GEO_VDM_SHIFT (26U) +#define RGX_CR_CLK_CTRL2_GEO_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL2_GEO_VDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_GEO_VDM_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL2_GEO_VDM_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL2_RTS_SHIFT (24U) +#define RGX_CR_CLK_CTRL2_RTS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL2_RTS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_RTS_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL2_RTS_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL2_PCG_SHIFT (22U) +#define RGX_CR_CLK_CTRL2_PCG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) +#define RGX_CR_CLK_CTRL2_PCG_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_PCG_ON (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_CTRL2_PCG_AUTO (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_CTRL2_BPS_SHIFT (20U) +#define RGX_CR_CLK_CTRL2_BPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL2_BPS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_BPS_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL2_BPS_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL2_RS_SHIFT (18U) +#define RGX_CR_CLK_CTRL2_RS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL2_RS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_RS_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL2_RS_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL2_RRC_SHIFT (16U) +#define RGX_CR_CLK_CTRL2_RRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL2_RRC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_RRC_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL2_RRC_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL2_URI_SHIFT (14U) +#define RGX_CR_CLK_CTRL2_URI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_CLK_CTRL2_URI_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_URI_ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL2_URI_AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL2_RCE_SHIFT (12U) +#define RGX_CR_CLK_CTRL2_RCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL2_RCE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_RCE_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL2_RCE_AUTO 
(IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL2_ASC_SHIFT (10U) +#define RGX_CR_CLK_CTRL2_ASC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL2_ASC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_ASC_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL2_ASC_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL2_FP_SHIFT (8U) +#define RGX_CR_CLK_CTRL2_FP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL2_FP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_FP_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL2_FP_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL2_INT_SHIFT (6U) +#define RGX_CR_CLK_CTRL2_INT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_CLK_CTRL2_INT_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_INT_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL2_INT_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL2_YUV_SHIFT (4U) +#define RGX_CR_CLK_CTRL2_YUV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL2_YUV_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_YUV_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL2_YUV_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_DOT8_SHIFT (2U) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_DOT8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_DOT8_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_DOT8_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_DOT8_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_FOP_SHIFT (0U) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_FOP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_FOP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_FOP_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL2_USC_PIPE_PAP_FOP_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_CLK_STATUS2 +*/ +#define RGX_CR_CLK_STATUS2 (0x0098U) +#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) +#define RGX_CR_CLK_STATUS2_RTS_SHIFT (12U) +#define RGX_CR_CLK_STATUS2_RTS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_CLK_STATUS2_RTS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_RTS_RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS2_PCG_SHIFT (11U) +#define RGX_CR_CLK_STATUS2_PCG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_CLK_STATUS2_PCG_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_PCG_RUNNING (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_STATUS2_BPS_SHIFT (10U) +#define RGX_CR_CLK_STATUS2_BPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_STATUS2_BPS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_BPS_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS2_RS_SHIFT (9U) +#define RGX_CR_CLK_STATUS2_RS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_STATUS2_RS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_RS_RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS2_RRC_SHIFT (8U) +#define RGX_CR_CLK_STATUS2_RRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_STATUS2_RRC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_RRC_RUNNING 
(IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS2_URI_SHIFT (7U) +#define RGX_CR_CLK_STATUS2_URI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_STATUS2_URI_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_URI_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS2_RCE_SHIFT (6U) +#define RGX_CR_CLK_STATUS2_RCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_STATUS2_RCE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_RCE_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS2_ASC_SHIFT (5U) +#define RGX_CR_CLK_STATUS2_ASC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_STATUS2_ASC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_ASC_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS2_FP_SHIFT (4U) +#define RGX_CR_CLK_STATUS2_FP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS2_FP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_FP_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS2_INT_SHIFT (3U) +#define RGX_CR_CLK_STATUS2_INT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_STATUS2_INT_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_INT_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS2_YUV_SHIFT (2U) +#define RGX_CR_CLK_STATUS2_YUV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_CLK_STATUS2_YUV_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_YUV_RUNNING (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_DOT8_SHIFT (1U) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_DOT8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_DOT8_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_DOT8_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_FOP_SHIFT (0U) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_FOP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_FOP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS2_USC_PIPE_PAP_FOP_RUNNING (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SOFT_RESET */ -#define RGX_CR_SOFT_RESET (0x0100U) -#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x03FFFFE0007BDEFF)) -#define RGX_CR_SOFT_RESET_RAC_SHIFT (57U) -#define RGX_CR_SOFT_RESET_RAC_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_RAC_EN (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_SOFT_RESET_GEO_TESS_SHIFT (56U) -#define RGX_CR_SOFT_RESET_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_GEO_TESS_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_SOFT_RESET_INT_SHIFT (55U) -#define RGX_CR_SOFT_RESET_INT_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_INT_EN (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_SOFT_RESET_FP_SHIFT (54U) -#define RGX_CR_SOFT_RESET_FP_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FP_EN (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_SOFT_RESET_YUV_SHIFT (53U) -#define RGX_CR_SOFT_RESET_YUV_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_YUV_EN (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_SOFT_RESET_PSB_SHIFT (52U) -#define RGX_CR_SOFT_RESET_PSB_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_PSB_EN (IMG_UINT64_C(0x0010000000000000)) -#define 
RGX_CR_SOFT_RESET_ASC_SHIFT (51U) -#define RGX_CR_SOFT_RESET_ASC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_ASC_EN (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_SOFT_RESET_RCE_SHIFT (50U) -#define RGX_CR_SOFT_RESET_RCE_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_RCE_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_SOFT_RESET_BSC_SHIFT (49U) -#define RGX_CR_SOFT_RESET_BSC_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BSC_EN (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_SHIFT (48U) -#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_EN (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_SOFT_RESET_USC_L2ICACHE_SHIFT (47U) -#define RGX_CR_SOFT_RESET_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_USC_L2ICACHE_EN (IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_SOFT_RESET_TCU_L1_SHIFT (46U) -#define RGX_CR_SOFT_RESET_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_TCU_L1_EN (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (45U) -#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_SOFT_RESET_BIF_JONES_SHIFT (44U) -#define RGX_CR_SOFT_RESET_BIF_JONES_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_BIF_JONES_EN (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_SOFT_RESET_SLC_SHIFT (43U) -#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_SOFT_RESET_FBCACHE_SHIFT (42U) -#define RGX_CR_SOFT_RESET_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FBCACHE_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_SOFT_RESET_FBM_SHIFT (41U) -#define RGX_CR_SOFT_RESET_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FBM_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_SOFT_RESET_FBDC_SHIFT (40U) -#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_SOFT_RESET_FBC_SHIFT (39U) -#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000008000000000)) -#define RGX_CR_SOFT_RESET_PM_SHIFT (38U) -#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (37U) -#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_SOFT_RESET_TAG_SHIFT (22U) -#define RGX_CR_SOFT_RESET_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_SOFT_RESET_TAG_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_SOFT_RESET_TF_SHIFT (21U) -#define RGX_CR_SOFT_RESET_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_SOFT_RESET_TF_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SOFT_RESET_MADD_SHIFT (20U) -#define RGX_CR_SOFT_RESET_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_SOFT_RESET_MADD_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) -#define RGX_CR_SOFT_RESET_PBE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) -#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_SOFT_RESET_CDM_PIPE_SHIFT (16U) -#define RGX_CR_SOFT_RESET_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_SOFT_RESET_CDM_PIPE_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_SOFT_RESET_TDM_SHIFT (15U) -#define RGX_CR_SOFT_RESET_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_SOFT_RESET_TDM_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_SOFT_RESET_ASTC_SHIFT (14U) -#define RGX_CR_SOFT_RESET_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_SOFT_RESET_ASTC_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) -#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) -#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_SOFT_RESET_TPF_SHIFT (10U) -#define RGX_CR_SOFT_RESET_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_SOFT_RESET_TPF_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_SOFT_RESET_IPF_SHIFT (9U) -#define RGX_CR_SOFT_RESET_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_SOFT_RESET_IPF_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_SOFT_RESET_GEO_SHARED_SHIFT (7U) -#define RGX_CR_SOFT_RESET_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_SOFT_RESET_GEO_SHARED_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_SOFT_RESET_GEO_VERTEX_SHIFT (6U) -#define RGX_CR_SOFT_RESET_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_SOFT_RESET_GEO_VERTEX_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_SOFT_RESET_PIXEL_SHIFT (5U) -#define RGX_CR_SOFT_RESET_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_SOFT_RESET_PIXEL_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_SOFT_RESET_COMPUTE_SHIFT (4U) -#define RGX_CR_SOFT_RESET_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_SOFT_RESET_COMPUTE_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) -#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) -#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_SOFT_RESET_VERTEX_SHIFT (1U) -#define RGX_CR_SOFT_RESET_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_SOFT_RESET_VERTEX_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) -#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SOFT_RESET (0x0100U) +#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x1FFFFFE0007BDEFF)) +#define RGX_CR_SOFT_RESET_TPU2_LCTRL_SHIFT (60U) +#define RGX_CR_SOFT_RESET_TPU2_LCTRL_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TPU2_LCTRL_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_SOFT_RESET_TILING_SHIFT (59U) +#define 
RGX_CR_SOFT_RESET_TILING_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TILING_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SOFT_RESET_GEO_VDM_SHIFT (58U) +#define RGX_CR_SOFT_RESET_GEO_VDM_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GEO_VDM_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SOFT_RESET_RAC_SHIFT (57U) +#define RGX_CR_SOFT_RESET_RAC_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_RAC_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SOFT_RESET_GEO_TESS_SHIFT (56U) +#define RGX_CR_SOFT_RESET_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GEO_TESS_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SOFT_RESET_INT_SHIFT (55U) +#define RGX_CR_SOFT_RESET_INT_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_INT_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SOFT_RESET_FP_SHIFT (54U) +#define RGX_CR_SOFT_RESET_FP_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FP_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SOFT_RESET_YUV_SHIFT (53U) +#define RGX_CR_SOFT_RESET_YUV_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_YUV_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SOFT_RESET_PSB_SHIFT (52U) +#define RGX_CR_SOFT_RESET_PSB_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PSB_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SOFT_RESET_ASC_SHIFT (51U) +#define RGX_CR_SOFT_RESET_ASC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_ASC_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SOFT_RESET_RCE_SHIFT (50U) +#define RGX_CR_SOFT_RESET_RCE_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_RCE_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SOFT_RESET_BSC_SHIFT (49U) +#define RGX_CR_SOFT_RESET_BSC_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BSC_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_SHIFT (48U) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_SHIFT (47U) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SOFT_RESET_TCU_L1_SHIFT (46U) +#define RGX_CR_SOFT_RESET_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TCU_L1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (45U) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SOFT_RESET_BIF_JONES_SHIFT (44U) +#define RGX_CR_SOFT_RESET_BIF_JONES_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_JONES_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SOFT_RESET_SLC_SHIFT (43U) +#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SOFT_RESET_FBCACHE_SHIFT (42U) +#define RGX_CR_SOFT_RESET_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBCACHE_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SOFT_RESET_FBM_SHIFT (41U) +#define RGX_CR_SOFT_RESET_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define 
RGX_CR_SOFT_RESET_FBM_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SOFT_RESET_FBDC_SHIFT (40U) +#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SOFT_RESET_FBC_SHIFT (39U) +#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SOFT_RESET_PM_SHIFT (38U) +#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (37U) +#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SOFT_RESET_TAG_SHIFT (22U) +#define RGX_CR_SOFT_RESET_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SOFT_RESET_TAG_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SOFT_RESET_TF_SHIFT (21U) +#define RGX_CR_SOFT_RESET_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SOFT_RESET_TF_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SOFT_RESET_MADD_SHIFT (20U) +#define RGX_CR_SOFT_RESET_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SOFT_RESET_MADD_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) +#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) +#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SOFT_RESET_CDM_PIPE_SHIFT (16U) +#define RGX_CR_SOFT_RESET_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SOFT_RESET_CDM_PIPE_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SOFT_RESET_TDM_SHIFT (15U) +#define RGX_CR_SOFT_RESET_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SOFT_RESET_TDM_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SOFT_RESET_ASTC_SHIFT (14U) +#define RGX_CR_SOFT_RESET_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SOFT_RESET_ASTC_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) +#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) +#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SOFT_RESET_TPF_SHIFT (10U) +#define RGX_CR_SOFT_RESET_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SOFT_RESET_TPF_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SOFT_RESET_IPF_SHIFT (9U) +#define RGX_CR_SOFT_RESET_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SOFT_RESET_IPF_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SOFT_RESET_GEO_SHARED_SHIFT (7U) +#define RGX_CR_SOFT_RESET_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SOFT_RESET_GEO_SHARED_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_SHIFT (6U) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SOFT_RESET_PIXEL_SHIFT (5U) +#define RGX_CR_SOFT_RESET_PIXEL_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SOFT_RESET_PIXEL_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SOFT_RESET_COMPUTE_SHIFT (4U) +#define RGX_CR_SOFT_RESET_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SOFT_RESET_COMPUTE_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) +#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) +#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SOFT_RESET_VERTEX_SHIFT (1U) +#define RGX_CR_SOFT_RESET_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SOFT_RESET_VERTEX_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) +#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_SOFT_RESET_SPU */ -#define RGX_CR_SOFT_RESET_SPU (0x0108U) -#define RGX_CR_SOFT_RESET_SPU_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SOFT_RESET_SPU_SPU31_SHIFT (31U) -#define RGX_CR_SOFT_RESET_SPU_SPU31_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU31_EN (0x80000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU30_SHIFT (30U) -#define RGX_CR_SOFT_RESET_SPU_SPU30_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU30_EN (0x40000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU29_SHIFT (29U) -#define RGX_CR_SOFT_RESET_SPU_SPU29_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU29_EN (0x20000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU28_SHIFT (28U) -#define RGX_CR_SOFT_RESET_SPU_SPU28_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU28_EN (0x10000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU27_SHIFT (27U) -#define RGX_CR_SOFT_RESET_SPU_SPU27_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU27_EN (0x08000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU26_SHIFT (26U) -#define RGX_CR_SOFT_RESET_SPU_SPU26_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU26_EN (0x04000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU25_SHIFT (25U) -#define RGX_CR_SOFT_RESET_SPU_SPU25_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU25_EN (0x02000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU24_SHIFT (24U) -#define RGX_CR_SOFT_RESET_SPU_SPU24_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU24_EN (0x01000000U) -#define RGX_CR_SOFT_RESET_SPU_SPU23_SHIFT (23U) -#define RGX_CR_SOFT_RESET_SPU_SPU23_CLRMSK (0xFF7FFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU23_EN (0x00800000U) -#define RGX_CR_SOFT_RESET_SPU_SPU22_SHIFT (22U) -#define RGX_CR_SOFT_RESET_SPU_SPU22_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU22_EN (0x00400000U) -#define RGX_CR_SOFT_RESET_SPU_SPU21_SHIFT (21U) -#define RGX_CR_SOFT_RESET_SPU_SPU21_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU21_EN (0x00200000U) -#define RGX_CR_SOFT_RESET_SPU_SPU20_SHIFT (20U) -#define RGX_CR_SOFT_RESET_SPU_SPU20_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU20_EN (0x00100000U) -#define RGX_CR_SOFT_RESET_SPU_SPU19_SHIFT (19U) -#define RGX_CR_SOFT_RESET_SPU_SPU19_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU19_EN (0x00080000U) -#define RGX_CR_SOFT_RESET_SPU_SPU18_SHIFT (18U) -#define RGX_CR_SOFT_RESET_SPU_SPU18_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU18_EN (0x00040000U) -#define RGX_CR_SOFT_RESET_SPU_SPU17_SHIFT (17U) -#define 
RGX_CR_SOFT_RESET_SPU_SPU17_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU17_EN (0x00020000U) -#define RGX_CR_SOFT_RESET_SPU_SPU16_SHIFT (16U) -#define RGX_CR_SOFT_RESET_SPU_SPU16_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU16_EN (0x00010000U) -#define RGX_CR_SOFT_RESET_SPU_SPU15_SHIFT (15U) -#define RGX_CR_SOFT_RESET_SPU_SPU15_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU15_EN (0x00008000U) -#define RGX_CR_SOFT_RESET_SPU_SPU14_SHIFT (14U) -#define RGX_CR_SOFT_RESET_SPU_SPU14_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU14_EN (0x00004000U) -#define RGX_CR_SOFT_RESET_SPU_SPU13_SHIFT (13U) -#define RGX_CR_SOFT_RESET_SPU_SPU13_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU13_EN (0x00002000U) -#define RGX_CR_SOFT_RESET_SPU_SPU12_SHIFT (12U) -#define RGX_CR_SOFT_RESET_SPU_SPU12_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU12_EN (0x00001000U) -#define RGX_CR_SOFT_RESET_SPU_SPU11_SHIFT (11U) -#define RGX_CR_SOFT_RESET_SPU_SPU11_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_SOFT_RESET_SPU_SPU11_EN (0x00000800U) -#define RGX_CR_SOFT_RESET_SPU_SPU10_SHIFT (10U) -#define RGX_CR_SOFT_RESET_SPU_SPU10_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU10_EN (0x00000400U) -#define RGX_CR_SOFT_RESET_SPU_SPU9_SHIFT (9U) -#define RGX_CR_SOFT_RESET_SPU_SPU9_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU9_EN (0x00000200U) -#define RGX_CR_SOFT_RESET_SPU_SPU8_SHIFT (8U) -#define RGX_CR_SOFT_RESET_SPU_SPU8_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_SOFT_RESET_SPU_SPU8_EN (0x00000100U) -#define RGX_CR_SOFT_RESET_SPU_SPU7_SHIFT (7U) -#define RGX_CR_SOFT_RESET_SPU_SPU7_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_SOFT_RESET_SPU_SPU7_EN (0x00000080U) -#define RGX_CR_SOFT_RESET_SPU_SPU6_SHIFT (6U) -#define RGX_CR_SOFT_RESET_SPU_SPU6_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_SOFT_RESET_SPU_SPU6_EN (0x00000040U) -#define RGX_CR_SOFT_RESET_SPU_SPU5_SHIFT (5U) -#define RGX_CR_SOFT_RESET_SPU_SPU5_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_SOFT_RESET_SPU_SPU5_EN (0x00000020U) -#define RGX_CR_SOFT_RESET_SPU_SPU4_SHIFT (4U) -#define RGX_CR_SOFT_RESET_SPU_SPU4_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_SOFT_RESET_SPU_SPU4_EN (0x00000010U) -#define RGX_CR_SOFT_RESET_SPU_SPU3_SHIFT (3U) -#define RGX_CR_SOFT_RESET_SPU_SPU3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SOFT_RESET_SPU_SPU3_EN (0x00000008U) -#define RGX_CR_SOFT_RESET_SPU_SPU2_SHIFT (2U) -#define RGX_CR_SOFT_RESET_SPU_SPU2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SOFT_RESET_SPU_SPU2_EN (0x00000004U) -#define RGX_CR_SOFT_RESET_SPU_SPU1_SHIFT (1U) -#define RGX_CR_SOFT_RESET_SPU_SPU1_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SOFT_RESET_SPU_SPU1_EN (0x00000002U) -#define RGX_CR_SOFT_RESET_SPU_SPU0_SHIFT (0U) -#define RGX_CR_SOFT_RESET_SPU_SPU0_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SOFT_RESET_SPU_SPU0_EN (0x00000001U) +#define RGX_CR_SOFT_RESET_SPU (0x0108U) +#define RGX_CR_SOFT_RESET_SPU_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SOFT_RESET_SPU_SPU31_SHIFT (31U) +#define RGX_CR_SOFT_RESET_SPU_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU31_EN (0x80000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU30_SHIFT (30U) +#define RGX_CR_SOFT_RESET_SPU_SPU30_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU30_EN (0x40000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU29_SHIFT (29U) +#define RGX_CR_SOFT_RESET_SPU_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU29_EN (0x20000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU28_SHIFT (28U) +#define RGX_CR_SOFT_RESET_SPU_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU28_EN 
(0x10000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU27_SHIFT (27U) +#define RGX_CR_SOFT_RESET_SPU_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU27_EN (0x08000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU26_SHIFT (26U) +#define RGX_CR_SOFT_RESET_SPU_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU26_EN (0x04000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU25_SHIFT (25U) +#define RGX_CR_SOFT_RESET_SPU_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU25_EN (0x02000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU24_SHIFT (24U) +#define RGX_CR_SOFT_RESET_SPU_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU24_EN (0x01000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU23_SHIFT (23U) +#define RGX_CR_SOFT_RESET_SPU_SPU23_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU23_EN (0x00800000U) +#define RGX_CR_SOFT_RESET_SPU_SPU22_SHIFT (22U) +#define RGX_CR_SOFT_RESET_SPU_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU22_EN (0x00400000U) +#define RGX_CR_SOFT_RESET_SPU_SPU21_SHIFT (21U) +#define RGX_CR_SOFT_RESET_SPU_SPU21_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU21_EN (0x00200000U) +#define RGX_CR_SOFT_RESET_SPU_SPU20_SHIFT (20U) +#define RGX_CR_SOFT_RESET_SPU_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU20_EN (0x00100000U) +#define RGX_CR_SOFT_RESET_SPU_SPU19_SHIFT (19U) +#define RGX_CR_SOFT_RESET_SPU_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU19_EN (0x00080000U) +#define RGX_CR_SOFT_RESET_SPU_SPU18_SHIFT (18U) +#define RGX_CR_SOFT_RESET_SPU_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU18_EN (0x00040000U) +#define RGX_CR_SOFT_RESET_SPU_SPU17_SHIFT (17U) +#define RGX_CR_SOFT_RESET_SPU_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU17_EN (0x00020000U) +#define RGX_CR_SOFT_RESET_SPU_SPU16_SHIFT (16U) +#define RGX_CR_SOFT_RESET_SPU_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU16_EN (0x00010000U) +#define RGX_CR_SOFT_RESET_SPU_SPU15_SHIFT (15U) +#define RGX_CR_SOFT_RESET_SPU_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU15_EN (0x00008000U) +#define RGX_CR_SOFT_RESET_SPU_SPU14_SHIFT (14U) +#define RGX_CR_SOFT_RESET_SPU_SPU14_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU14_EN (0x00004000U) +#define RGX_CR_SOFT_RESET_SPU_SPU13_SHIFT (13U) +#define RGX_CR_SOFT_RESET_SPU_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU13_EN (0x00002000U) +#define RGX_CR_SOFT_RESET_SPU_SPU12_SHIFT (12U) +#define RGX_CR_SOFT_RESET_SPU_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU12_EN (0x00001000U) +#define RGX_CR_SOFT_RESET_SPU_SPU11_SHIFT (11U) +#define RGX_CR_SOFT_RESET_SPU_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SOFT_RESET_SPU_SPU11_EN (0x00000800U) +#define RGX_CR_SOFT_RESET_SPU_SPU10_SHIFT (10U) +#define RGX_CR_SOFT_RESET_SPU_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU10_EN (0x00000400U) +#define RGX_CR_SOFT_RESET_SPU_SPU9_SHIFT (9U) +#define RGX_CR_SOFT_RESET_SPU_SPU9_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU9_EN (0x00000200U) +#define RGX_CR_SOFT_RESET_SPU_SPU8_SHIFT (8U) +#define RGX_CR_SOFT_RESET_SPU_SPU8_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU8_EN (0x00000100U) +#define RGX_CR_SOFT_RESET_SPU_SPU7_SHIFT (7U) +#define RGX_CR_SOFT_RESET_SPU_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SOFT_RESET_SPU_SPU7_EN (0x00000080U) +#define RGX_CR_SOFT_RESET_SPU_SPU6_SHIFT (6U) +#define RGX_CR_SOFT_RESET_SPU_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SOFT_RESET_SPU_SPU6_EN (0x00000040U) +#define 
RGX_CR_SOFT_RESET_SPU_SPU5_SHIFT (5U) +#define RGX_CR_SOFT_RESET_SPU_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SOFT_RESET_SPU_SPU5_EN (0x00000020U) +#define RGX_CR_SOFT_RESET_SPU_SPU4_SHIFT (4U) +#define RGX_CR_SOFT_RESET_SPU_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SOFT_RESET_SPU_SPU4_EN (0x00000010U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_SHIFT (3U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_EN (0x00000008U) +#define RGX_CR_SOFT_RESET_SPU_SPU2_SHIFT (2U) +#define RGX_CR_SOFT_RESET_SPU_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOFT_RESET_SPU_SPU2_EN (0x00000004U) +#define RGX_CR_SOFT_RESET_SPU_SPU1_SHIFT (1U) +#define RGX_CR_SOFT_RESET_SPU_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SOFT_RESET_SPU_SPU1_EN (0x00000002U) +#define RGX_CR_SOFT_RESET_SPU_SPU0_SHIFT (0U) +#define RGX_CR_SOFT_RESET_SPU_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SOFT_RESET_SPU_SPU0_EN (0x00000001U) /* Register RGX_CR_MULTICORE_EVENT_REDUCE */ -#define RGX_CR_MULTICORE_EVENT_REDUCE (0x2428U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC0FFFFF)) -#define RGX_CR_MULTICORE_EVENT_REDUCE_MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_SHIFT (31U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_EN (0x80000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE1_SHIFT (31U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE1_EN (0x80000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE0_SHIFT (30U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE0_EN (0x40000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE1_SHIFT (29U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE1_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE1_EN (0x20000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_SHIFT (28U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_EN (0x10000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE0_SHIFT (28U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE0_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE0_EN (0x10000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_SHIFT (27U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_EN (0x08000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BARRIER_HIT_SHIFT (26U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BARRIER_HIT_EN (0x04000000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_SHIFT (19U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_EN (0x00080000U) -#define 
RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_SHIFT (18U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_EN (0x00040000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__WDT_TIMEOUT_SHIFT (18U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__WDT_TIMEOUT_EN (0x00040000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_SHIFT (14U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_EN (0x00004000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_SHIFT (13U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_SHIFT (12U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_SHIFT (11U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_SHIFT (10U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_EN (0x00000400U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_SHIFT (8U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_EN (0x00000100U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_DONE1_SHIFT (8U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_DONE1_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_DONE1_EN (0x00000100U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_SHIFT (7U) 
-#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_DONE0_SHIFT (7U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_DONE0_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_DONE0_EN (0x00000080U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_EN (0x00000040U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_SHIFT (5U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__TA_TERMINATE_SHIFT (5U) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__TA_TERMINATE_EN (0x00000020U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_SHIFT (4U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_EN (0x00000010U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE1_SHIFT (4U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE1_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE1_EN (0x00000010U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_SHIFT (3U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_EN (0x00000008U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE0_SHIFT (3U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE0_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE0_EN (0x00000008U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE1_SHIFT (2U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE1_EN (0x00000004U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_SHIFT (1U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_EN (0x00000002U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE0_SHIFT (1U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE0_EN (0x00000002U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_SHIFT (0U) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_EN (0x00000001U) +#define RGX_CR_MULTICORE_EVENT_REDUCE (0x2428U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FD7FFFFF)) +#define RGX_CR_MULTICORE_EVENT_REDUCE_MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_SHIFT (31U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_EN (0x80000000U) +#define 
RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TWO_D_DONE1_SHIFT (31U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TWO_D_DONE1_EN (0x80000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TWO_D_DONE0_SHIFT (30U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TWO_D_DONE0_EN (0x40000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__RAY_DONE1_SHIFT (29U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__RAY_DONE1_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__RAY_DONE1_EN (0x20000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_SHIFT (28U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_EN (0x10000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__RAY_DONE0_SHIFT (28U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__RAY_DONE0_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__RAY_DONE0_EN (0x10000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TDM_BARRIER_HIT_SHIFT (26U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TDM_BARRIER_HIT_EN (0x04000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FAULT_CORRECTABLE_SHIFT (24U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FAULT_CORRECTABLE_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FAULT_CORRECTABLE_EN (0x01000000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FAULT_LATENT_SHIFT (22U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FAULT_LATENT_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FAULT_LATENT_EN (0x00400000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_BE_DONE1_SHIFT (21U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_BE_DONE1_EN (0x00200000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_BE_DONE0_SHIFT (20U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_BE_DONE0_EN (0x00100000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_SHIFT (19U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_EN (0x00080000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) +#define 
RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_SHIFT (18U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_EN (0x00040000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__WDT_TIMEOUT_SHIFT (18U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__WDT_TIMEOUT_EN (0x00040000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_SHIFT (14U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_EN (0x00004000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_SHIFT (13U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_SHIFT (12U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_SHIFT (11U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_EN (0x00000100U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_PM_BE_DONE1_SHIFT (8U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_PM_BE_DONE1_EN (0x00000100U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define 
RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_PM_BE_DONE0_SHIFT (7U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__FRAG_PM_BE_DONE0_EN (0x00000080U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_SHIFT (5U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TA_TERMINATE_SHIFT (5U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__TA_TERMINATE_EN (0x00000020U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__GEOM_DONE1_SHIFT (4U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__GEOM_DONE1_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__GEOM_DONE1_EN (0x00000010U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__GEOM_DONE0_SHIFT (3U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__GEOM_DONE0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__GEOM_DONE0_EN (0x00000008U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__COMPUTE_DONE1_SHIFT (2U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__COMPUTE_DONE1_EN (0x00000004U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_SHIFT (1U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_EN (0x00000002U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__COMPUTE_DONE0_SHIFT (1U) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MULTICORE_EVENT_REDUCE__PIPEDM_GT0__COMPUTE_DONE0_EN (0x00000002U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_SHIFT (0U) +#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_EN (0x00000001U) /* Register RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON */ -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0x24B0U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -#define 
RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0x24B0U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MULTICORE_TDM_CTRL_COMMON */ -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON (0x24B8U) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON (0x24B8U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON */ -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0x24C0U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0x24C0U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MULTICORE_BROADCAST */ -#define RGX_CR_MULTICORE_BROADCAST (0x24E0U) -#define RGX_CR_MULTICORE_BROADCAST_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_SHIFT (8U) 
-#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_CLRMSK (0xFFFF00FFU)
-#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_SHIFT (0U)
-#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_CLRMSK (0xFFFFFF00U)
+#define RGX_CR_MULTICORE_BROADCAST (0x24E0U)
+#define RGX_CR_MULTICORE_BROADCAST_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_SHIFT (8U)
+#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_CLRMSK (0xFFFF00FFU)
+#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_SHIFT (0U)
+#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_CLRMSK (0xFFFFFF00U)
 /* Register RGX_CR_MULTICORE */
-#define RGX_CR_MULTICORE (0x24E8U)
-#define RGX_CR_MULTICORE_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF))
-#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_SHIFT (27U)
-#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_CLRMSK (IMG_UINT64_C(0xFFFFFFF807FFFFFF))
-#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT (3U)
-#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8000007))
-#define RGX_CR_MULTICORE_ID_SHIFT (0U)
-#define RGX_CR_MULTICORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8))
+#define RGX_CR_MULTICORE (0x24E8U)
+#define RGX_CR_MULTICORE_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_SHIFT (27U)
+#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_CLRMSK (IMG_UINT64_C(0xFFFFFFF807FFFFFF))
+#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT (3U)
+#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8000007))
+#define RGX_CR_MULTICORE_ID_SHIFT (0U)
+#define RGX_CR_MULTICORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8))
 /* Register RGX_CR_MULTICORE_SYSTEM */
-#define RGX_CR_MULTICORE_SYSTEM (0x24F0U)
-#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F))
-#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U)
-#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MULTICORE_SYSTEM (0x24F0U)
+#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U)
+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U)
 /* Register RGX_CR_MULTICORE_DOMAIN */
-#define RGX_CR_MULTICORE_DOMAIN (0x24F8U)
-#define RGX_CR_MULTICORE_DOMAIN_MASKFULL (IMG_UINT64_C(0x000000000000000F))
-#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT (0U)
-#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MULTICORE_DOMAIN (0x24F8U)
+#define RGX_CR_MULTICORE_DOMAIN_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT (0U)
+#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_MMU_PT_PARITY_CLEAR
+*/
+#define RGX_CR_MMU_PT_PARITY_CLEAR (0x0110U)
+#define RGX_CR_MMU_PT_PARITY_CLEAR_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_PT_PARITY_CLEAR_CTXTN_SHIFT (0U)
+#define RGX_CR_MMU_PT_PARITY_CLEAR_CTXTN_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_MMU_PT_PARITY_STATUS
+*/
+#define RGX_CR_MMU_PT_PARITY_STATUS (0x0120U)
+#define RGX_CR_MMU_PT_PARITY_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_PT_PARITY_STATUS_CTXTN_SHIFT (0U)
+#define RGX_CR_MMU_PT_PARITY_STATUS_CTXTN_CLRMSK (IMG_UINT64_C(0x0000000000000000))
 /* Register RGX_CR_EVENT_ENABLE */
-#define RGX_CR_EVENT_ENABLE (0x0128U)
-#define RGX_CR_EVENT_ENABLE__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
-#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF))
-#define
RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC0FFFFF)) -#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_ENABLE_TDM_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_ENABLE_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_ENABLE_TWO_D_DONE1_SHIFT (31U) -#define RGX_CR_EVENT_ENABLE_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TWO_D_DONE1_EN (0x80000000U) -#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_EVENT_ENABLE_TWO_D_DONE0_SHIFT (30U) -#define RGX_CR_EVENT_ENABLE_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TWO_D_DONE0_EN (0x40000000U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_EVENT_ENABLE_RAY_DONE1_SHIFT (29U) -#define RGX_CR_EVENT_ENABLE_RAY_DONE1_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_RAY_DONE1_EN (0x20000000U) -#define RGX_CR_EVENT_ENABLE_RCE_FINISHED_SHIFT (28U) -#define RGX_CR_EVENT_ENABLE_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_RCE_FINISHED_EN (0x10000000U) -#define RGX_CR_EVENT_ENABLE_RAY_DONE0_SHIFT (28U) -#define RGX_CR_EVENT_ENABLE_RAY_DONE0_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_ENABLE_RAY_DONE0_EN (0x10000000U) -#define RGX_CR_EVENT_ENABLE_RCE_BARRIER_HIT_SHIFT (27U) -#define RGX_CR_EVENT_ENABLE_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_EVENT_ENABLE_RCE_BARRIER_HIT_EN (0x08000000U) -#define RGX_CR_EVENT_ENABLE_TDM_BARRIER_HIT_SHIFT (26U) -#define RGX_CR_EVENT_ENABLE_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_BARRIER_HIT_EN (0x04000000U) -#define RGX_CR_EVENT_ENABLE_WDT_TIMEOUT_SHIFT (19U) -#define RGX_CR_EVENT_ENABLE_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_ENABLE_WDT_TIMEOUT_EN (0x00080000U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) -#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_ENABLE_CDM_BARRIER_HIT_SHIFT (18U) -#define RGX_CR_EVENT_ENABLE_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_ENABLE_CDM_BARRIER_HIT_EN (0x00040000U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_SHIFT (18U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_EN (0x00040000U) -#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) -#define 
RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) -#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_EVENT_ENABLE_FAULT_FW_SHIFT (14U) -#define RGX_CR_EVENT_ENABLE_FAULT_FW_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_EVENT_ENABLE_FAULT_FW_EN (0x00004000U) -#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U) -#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U) -#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U) -#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U) -#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U) -#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_SHIFT (8U) -#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_EN (0x00000100U) -#define RGX_CR_EVENT_ENABLE_FRAG_DONE1_SHIFT (8U) -#define RGX_CR_EVENT_ENABLE_FRAG_DONE1_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_ENABLE_FRAG_DONE1_EN (0x00000100U) -#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U) -#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_EVENT_ENABLE_FRAG_DONE0_SHIFT (7U) -#define RGX_CR_EVENT_ENABLE_FRAG_DONE0_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_ENABLE_FRAG_DONE0_EN (0x00000080U) -#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) -#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U) -#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_SHIFT (5U) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_EN (0x00000020U) -#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U) -#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -#define 
RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U) -#define RGX_CR_EVENT_ENABLE_IPP_END_RENDER_SENT_SHIFT (4U) -#define RGX_CR_EVENT_ENABLE_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_ENABLE_IPP_END_RENDER_SENT_EN (0x00000010U) -#define RGX_CR_EVENT_ENABLE_GEOM_DONE1_SHIFT (4U) -#define RGX_CR_EVENT_ENABLE_GEOM_DONE1_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_ENABLE_GEOM_DONE1_EN (0x00000010U) -#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_SHIFT (3U) -#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_EN (0x00000008U) -#define RGX_CR_EVENT_ENABLE_GEOM_DONE0_SHIFT (3U) -#define RGX_CR_EVENT_ENABLE_GEOM_DONE0_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_ENABLE_GEOM_DONE0_EN (0x00000008U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE1_SHIFT (2U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE1_EN (0x00000004U) -#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U) -#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U) -#define RGX_CR_EVENT_ENABLE_TE_END_SHIFT (1U) -#define RGX_CR_EVENT_ENABLE_TE_END_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_ENABLE_TE_END_EN (0x00000002U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE0_SHIFT (1U) -#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE0_EN (0x00000002U) -#define RGX_CR_EVENT_ENABLE_FAULT_GPU_SHIFT (0U) -#define RGX_CR_EVENT_ENABLE_FAULT_GPU_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_EVENT_ENABLE_FAULT_GPU_EN (0x00000001U) +#define RGX_CR_EVENT_ENABLE (0x0128U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FD7FFFFF)) +#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_SHIFT (31U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_EN (0x80000000U) +#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_SHIFT (30U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_EN (0x40000000U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define 
RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_SHIFT (29U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_EN (0x20000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_SHIFT (28U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_EN (0x10000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_SHIFT (28U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_EN (0x10000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_SHIFT (26U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_EN (0x04000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_SHIFT (24U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_EN (0x01000000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_SHIFT (22U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_EN (0x00400000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_SHIFT (21U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_EN (0x00200000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_SHIFT (20U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_EN (0x00100000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_SHIFT (19U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_EN (0x00080000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) +#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_SHIFT (18U) +#define 
RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_EN (0x00040000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_SHIFT (18U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_EN (0x00040000U) +#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) +#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_ENABLE_FAULT_FW_SHIFT (14U) +#define RGX_CR_EVENT_ENABLE_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_ENABLE_FAULT_FW_EN (0x00004000U) +#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_EN (0x00000100U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_SHIFT (8U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_EN (0x00000100U) +#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_SHIFT (7U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_EN (0x00000080U) +#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U) 
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) +#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_SHIFT (5U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_EN (0x00000020U) +#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_SHIFT (4U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_EN (0x00000010U) +#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_SHIFT (3U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_EN (0x00000008U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_SHIFT (2U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_EN (0x00000004U) +#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__TE_END_SHIFT (1U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ1__TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_SHIFT (1U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_SHIFT (1U) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_ENABLE__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_EN (0x00000002U) 
+#define RGX_CR_EVENT_ENABLE_FAULT_GPU_SHIFT (0U) +#define RGX_CR_EVENT_ENABLE_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_ENABLE_FAULT_GPU_EN (0x00000001U) /* Register RGX_CR_EVENT_STATUS */ -#define RGX_CR_EVENT_STATUS (0x0130U) -#define RGX_CR_EVENT_STATUS__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC0FFFFF)) -#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_STATUS_TDM_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_STATUS_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_STATUS_TWO_D_DONE1_SHIFT (31U) -#define RGX_CR_EVENT_STATUS_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_STATUS_TWO_D_DONE1_EN (0x80000000U) -#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_EVENT_STATUS_TWO_D_DONE0_SHIFT (30U) -#define RGX_CR_EVENT_STATUS_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_STATUS_TWO_D_DONE0_EN (0x40000000U) -#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_EVENT_STATUS_RAY_DONE1_SHIFT (29U) -#define RGX_CR_EVENT_STATUS_RAY_DONE1_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_STATUS_RAY_DONE1_EN (0x20000000U) -#define RGX_CR_EVENT_STATUS_RCE_FINISHED_SHIFT (28U) -#define RGX_CR_EVENT_STATUS_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_STATUS_RCE_FINISHED_EN (0x10000000U) -#define RGX_CR_EVENT_STATUS_RAY_DONE0_SHIFT (28U) -#define RGX_CR_EVENT_STATUS_RAY_DONE0_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_STATUS_RAY_DONE0_EN (0x10000000U) -#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_SHIFT (27U) -#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_EN (0x08000000U) -#define RGX_CR_EVENT_STATUS_TDM_BARRIER_HIT_SHIFT (26U) -#define RGX_CR_EVENT_STATUS_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_BARRIER_HIT_EN (0x04000000U) -#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_SHIFT (19U) -#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_EN (0x00080000U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) -#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_SHIFT (18U) -#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_EN (0x00040000U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_SHIFT (18U) -#define 
RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_EN (0x00040000U) -#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) -#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) -#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT (14U) -#define RGX_CR_EVENT_STATUS_FAULT_FW_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_EVENT_STATUS_FAULT_FW_EN (0x00004000U) -#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) -#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) -#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) -#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) -#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) -#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_SHIFT (8U) -#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_EN (0x00000100U) -#define RGX_CR_EVENT_STATUS_FRAG_DONE1_SHIFT (8U) -#define RGX_CR_EVENT_STATUS_FRAG_DONE1_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_STATUS_FRAG_DONE1_EN (0x00000100U) -#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) -#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_EVENT_STATUS_FRAG_DONE0_SHIFT (7U) -#define RGX_CR_EVENT_STATUS_FRAG_DONE0_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_STATUS_FRAG_DONE0_EN (0x00000080U) -#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) -#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) -#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) 
-#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_SHIFT (5U) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_EN (0x00000020U) -#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) -#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) -#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_SHIFT (4U) -#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_EN (0x00000010U) -#define RGX_CR_EVENT_STATUS_GEOM_DONE1_SHIFT (4U) -#define RGX_CR_EVENT_STATUS_GEOM_DONE1_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_STATUS_GEOM_DONE1_EN (0x00000010U) -#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_SHIFT (3U) -#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_EN (0x00000008U) -#define RGX_CR_EVENT_STATUS_GEOM_DONE0_SHIFT (3U) -#define RGX_CR_EVENT_STATUS_GEOM_DONE0_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_STATUS_GEOM_DONE0_EN (0x00000008U) -#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_EVENT_STATUS_COMPUTE_DONE1_SHIFT (2U) -#define RGX_CR_EVENT_STATUS_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_STATUS_COMPUTE_DONE1_EN (0x00000004U) -#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) -#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) -#define RGX_CR_EVENT_STATUS_TE_END_SHIFT (1U) -#define RGX_CR_EVENT_STATUS_TE_END_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_STATUS_TE_END_EN (0x00000002U) -#define RGX_CR_EVENT_STATUS_COMPUTE_DONE0_SHIFT (1U) -#define RGX_CR_EVENT_STATUS_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_STATUS_COMPUTE_DONE0_EN (0x00000002U) -#define RGX_CR_EVENT_STATUS_FAULT_GPU_SHIFT (0U) -#define RGX_CR_EVENT_STATUS_FAULT_GPU_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_EVENT_STATUS_FAULT_GPU_EN (0x00000001U) +#define RGX_CR_EVENT_STATUS (0x0130U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FD7FFFFF)) +#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_SHIFT (31U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN 
(0x40000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_SHIFT (30U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_EN (0x40000000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_SHIFT (29U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_EN (0x20000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_SHIFT (28U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_EN (0x10000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_SHIFT (28U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_EN (0x10000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_SHIFT (26U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_EN (0x04000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_SHIFT (24U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_EN (0x01000000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_SHIFT (22U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_EN (0x00400000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_SHIFT (21U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_EN (0x00200000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_SHIFT (20U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_EN (0x00100000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_SHIFT (19U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) 
+#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_SHIFT (18U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_SHIFT (18U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT (14U) +#define RGX_CR_EVENT_STATUS_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_STATUS_FAULT_FW_EN (0x00004000U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_EN (0x00000100U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_SHIFT (8U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_EN (0x00000100U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) +#define 
RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_SHIFT (7U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_EN (0x00000080U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_SHIFT (5U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_EN (0x00000020U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_SHIFT (4U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_SHIFT (3U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_EN (0x00000008U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_SHIFT (2U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_EN (0x00000004U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__TE_END_SHIFT (1U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ1__TE_END_EN (0x00000002U) +#define 
RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_SHIFT (1U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_SHIFT (1U) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_SHIFT (0U) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_EN (0x00000001U) /* Register RGX_CR_EVENT_CLEAR */ -#define RGX_CR_EVENT_CLEAR (0x0138U) -#define RGX_CR_EVENT_CLEAR__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC0FFFFF)) -#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_SHIFT (31U) -#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_EN (0x80000000U) -#define RGX_CR_EVENT_CLEAR_TWO_D_DONE1_SHIFT (31U) -#define RGX_CR_EVENT_CLEAR_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TWO_D_DONE1_EN (0x80000000U) -#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U) -#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U) -#define RGX_CR_EVENT_CLEAR_TWO_D_DONE0_SHIFT (30U) -#define RGX_CR_EVENT_CLEAR_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TWO_D_DONE0_EN (0x40000000U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -#define RGX_CR_EVENT_CLEAR_RAY_DONE1_SHIFT (29U) -#define RGX_CR_EVENT_CLEAR_RAY_DONE1_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_RAY_DONE1_EN (0x20000000U) -#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_SHIFT (28U) -#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_EN (0x10000000U) -#define RGX_CR_EVENT_CLEAR_RAY_DONE0_SHIFT (28U) -#define RGX_CR_EVENT_CLEAR_RAY_DONE0_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_EVENT_CLEAR_RAY_DONE0_EN (0x10000000U) -#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_SHIFT (27U) -#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_EN (0x08000000U) -#define RGX_CR_EVENT_CLEAR_TDM_BARRIER_HIT_SHIFT (26U) -#define RGX_CR_EVENT_CLEAR_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_BARRIER_HIT_EN (0x04000000U) -#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_SHIFT (19U) -#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_EN (0x00080000U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) -#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -#define 
RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_SHIFT (18U) -#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_EN (0x00040000U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_SHIFT (18U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_EN (0x00040000U) -#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) -#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U) -#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U) -#define RGX_CR_EVENT_CLEAR_FAULT_FW_SHIFT (14U) -#define RGX_CR_EVENT_CLEAR_FAULT_FW_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_EVENT_CLEAR_FAULT_FW_EN (0x00004000U) -#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U) -#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U) -#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U) -#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U) -#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U) -#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U) -#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U) -#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U) -#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U) -#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U) -#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_SHIFT (8U) -#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_EN (0x00000100U) -#define RGX_CR_EVENT_CLEAR_FRAG_DONE1_SHIFT (8U) -#define RGX_CR_EVENT_CLEAR_FRAG_DONE1_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_EVENT_CLEAR_FRAG_DONE1_EN (0x00000100U) -#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U) -#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U) -#define RGX_CR_EVENT_CLEAR_FRAG_DONE0_SHIFT (7U) -#define RGX_CR_EVENT_CLEAR_FRAG_DONE0_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_EVENT_CLEAR_FRAG_DONE0_EN (0x00000080U) -#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U) -#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U) -#define 
RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) -#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U) -#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_SHIFT (5U) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_EN (0x00000020U) -#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U) -#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U) -#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_SHIFT (4U) -#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_EN (0x00000010U) -#define RGX_CR_EVENT_CLEAR_GEOM_DONE1_SHIFT (4U) -#define RGX_CR_EVENT_CLEAR_GEOM_DONE1_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_EVENT_CLEAR_GEOM_DONE1_EN (0x00000010U) -#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_SHIFT (3U) -#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_EN (0x00000008U) -#define RGX_CR_EVENT_CLEAR_GEOM_DONE0_SHIFT (3U) -#define RGX_CR_EVENT_CLEAR_GEOM_DONE0_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_EVENT_CLEAR_GEOM_DONE0_EN (0x00000008U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE1_SHIFT (2U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE1_EN (0x00000004U) -#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U) -#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U) -#define RGX_CR_EVENT_CLEAR_TE_END_SHIFT (1U) -#define RGX_CR_EVENT_CLEAR_TE_END_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_CLEAR_TE_END_EN (0x00000002U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE0_SHIFT (1U) -#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE0_EN (0x00000002U) -#define RGX_CR_EVENT_CLEAR_FAULT_GPU_SHIFT (0U) -#define RGX_CR_EVENT_CLEAR_FAULT_GPU_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_EVENT_CLEAR_FAULT_GPU_EN (0x00000001U) +#define RGX_CR_EVENT_CLEAR (0x0138U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FD7FFFFF)) +#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__TDM_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_SHIFT (31U) +#define 
RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE1_EN (0x80000000U) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_SHIFT (30U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TWO_D_DONE0_EN (0x40000000U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_SHIFT (29U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE1_EN (0x20000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_SHIFT (28U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_FINISHED_EN (0x10000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_SHIFT (28U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RAY_DONE0_EN (0x10000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_SHIFT (26U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TDM_BARRIER_HIT_EN (0x04000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_SHIFT (24U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_CORRECTABLE_EN (0x01000000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_SHIFT (22U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FAULT_LATENT_EN (0x00400000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_SHIFT (21U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE1_EN (0x00200000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_SHIFT (20U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_BE_DONE0_EN (0x00100000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_SHIFT (19U) +#define 
RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_EN (0x00080000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U) +#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__CDM_BARRIER_HIT_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_SHIFT (17U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_CLEAR_FAULT_FW_SHIFT (14U) +#define RGX_CR_EVENT_CLEAR_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_CLEAR_FAULT_FW_EN (0x00004000U) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_EN (0x00000100U) +#define 
RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_SHIFT (8U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE1_EN (0x00000100U) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_SHIFT (7U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__FRAG_PM_BE_DONE0_EN (0x00000080U) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_SHIFT (5U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__TA_TERMINATE_EN (0x00000020U) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE1_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_SHIFT (3U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__GEOM_DONE0_EN (0x00000008U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_SHIFT (2U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE1_EN (0x00000004U) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define 
RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__TE_END_SHIFT (1U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ1__TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_SHIFT (1U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_EQ0__TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_SHIFT (1U) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_CLEAR__AXT_IF_EQ2_AND_PIPEDM_GT0__COMPUTE_DONE0_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR_FAULT_GPU_SHIFT (0U) +#define RGX_CR_EVENT_CLEAR_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_CLEAR_FAULT_GPU_EN (0x00000001U) /* Register RGX_CR_TIMER */ -#define RGX_CR_TIMER (0x0160U) -#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) -#define RGX_CR_TIMER_BIT31_SHIFT (63U) -#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) -#define RGX_CR_TIMER_VALUE_SHIFT (0U) -#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) +#define RGX_CR_TIMER (0x0160U) +#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_SHIFT (63U) +#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_TIMER_VALUE_SHIFT (0U) +#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) /* Register RGX_CR_FAULT_FW_STATUS */ -#define RGX_CR_FAULT_FW_STATUS (0x0170U) -#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x000000000007000F)) -#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_SHIFT (18U) -#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_EN (0x00040000U) -#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_SHIFT (17U) -#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_EN (0x00020000U) -#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT (16U) -#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_EN (0x00010000U) -#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_SHIFT (3U) -#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_EN (0x00000008U) -#define RGX_CR_FAULT_FW_STATUS_META_DETECT_SHIFT (2U) -#define RGX_CR_FAULT_FW_STATUS_META_DETECT_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_FAULT_FW_STATUS_META_DETECT_EN (0x00000004U) -#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_SHIFT (1U) -#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_EN (0x00000002U) -#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT (0U) -#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_EN (0x00000001U) +#define RGX_CR_FAULT_FW_STATUS (0x0170U) +#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000F007F)) +#define RGX_CR_FAULT_FW_STATUS_SLC_CHEST_CORRECT_SHIFT (19U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CHEST_CORRECT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FAULT_FW_STATUS_SLC_CHEST_CORRECT_EN (0x00080000U) +#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_SHIFT (18U) +#define 
RGX_CR_FAULT_FW_STATUS_META_CORRECT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_EN (0x00040000U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_SHIFT (17U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_EN (0x00020000U) +#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CHEST_DETECT_SHIFT (6U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CHEST_DETECT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_FW_STATUS_SLC_CHEST_DETECT_EN (0x00000040U) +#define RGX_CR_FAULT_FW_STATUS_MMU_PTE_PARITY_DETECT_SHIFT (5U) +#define RGX_CR_FAULT_FW_STATUS_MMU_PTE_PARITY_DETECT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_FW_STATUS_MMU_PTE_PARITY_DETECT_EN (0x00000020U) +#define RGX_CR_FAULT_FW_STATUS_PARITY_DETECT_SHIFT (4U) +#define RGX_CR_FAULT_FW_STATUS_PARITY_DETECT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_FW_STATUS_PARITY_DETECT_EN (0x00000010U) +#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_SHIFT (3U) +#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_EN (0x00000008U) +#define RGX_CR_FAULT_FW_STATUS_META_DETECT_SHIFT (2U) +#define RGX_CR_FAULT_FW_STATUS_META_DETECT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_FW_STATUS_META_DETECT_EN (0x00000004U) +#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_SHIFT (1U) +#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_EN (0x00000002U) +#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_EN (0x00000001U) /* Register RGX_CR_FAULT_FW_CLEAR */ -#define RGX_CR_FAULT_FW_CLEAR (0x0178U) -#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x000000000007000F)) -#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_SHIFT (18U) -#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_EN (0x00040000U) -#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_SHIFT (17U) -#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_EN (0x00020000U) -#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_SHIFT (16U) -#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_EN (0x00010000U) -#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_SHIFT (3U) -#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_EN (0x00000008U) -#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_SHIFT (2U) -#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_EN (0x00000004U) -#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_SHIFT (1U) -#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_EN (0x00000002U) -#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_SHIFT (0U) -#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_EN (0x00000001U) +#define RGX_CR_FAULT_FW_CLEAR (0x0178U) +#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000000F007F)) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CHEST_CORRECT_SHIFT (19U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CHEST_CORRECT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CHEST_CORRECT_EN (0x00080000U) +#define 
RGX_CR_FAULT_FW_CLEAR_META_CORRECT_SHIFT (18U) +#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_EN (0x00040000U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_SHIFT (17U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_EN (0x00020000U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CHEST_DETECT_SHIFT (6U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CHEST_DETECT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CHEST_DETECT_EN (0x00000040U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_PTE_PARITY_DETECT_SHIFT (5U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_PTE_PARITY_DETECT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_FW_CLEAR_MMU_PTE_PARITY_DETECT_EN (0x00000020U) +#define RGX_CR_FAULT_FW_CLEAR_PARITY_DETECT_SHIFT (4U) +#define RGX_CR_FAULT_FW_CLEAR_PARITY_DETECT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_FW_CLEAR_PARITY_DETECT_EN (0x00000010U) +#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_SHIFT (3U) +#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_EN (0x00000008U) +#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_SHIFT (2U) +#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_EN (0x00000004U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_SHIFT (1U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_EN (0x00000002U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_GPU_STATUS +*/ +#define RGX_CR_FAULT_GPU_STATUS (0x0180U) +#define RGX_CR_FAULT_GPU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000001000F)) +#define RGX_CR_FAULT_GPU_STATUS_REST_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_GPU_STATUS_REST_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_GPU_STATUS_REST_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_GPU_STATUS_REST_LATENT_FAULT_SHIFT (3U) +#define RGX_CR_FAULT_GPU_STATUS_REST_LATENT_FAULT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_GPU_STATUS_REST_LATENT_FAULT_EN (0x00000008U) +#define RGX_CR_FAULT_GPU_STATUS_REST_PARITY_SHIFT (2U) +#define RGX_CR_FAULT_GPU_STATUS_REST_PARITY_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_GPU_STATUS_REST_PARITY_EN (0x00000004U) +#define RGX_CR_FAULT_GPU_STATUS_REST_LATENT_SHIFT (1U) +#define RGX_CR_FAULT_GPU_STATUS_REST_LATENT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_GPU_STATUS_REST_LATENT_EN (0x00000002U) +#define RGX_CR_FAULT_GPU_STATUS_REST_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_GPU_STATUS_REST_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_GPU_STATUS_REST_DETECT_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_GPU_CLEAR +*/ +#define RGX_CR_FAULT_GPU_CLEAR (0x0188U) +#define RGX_CR_FAULT_GPU_CLEAR_MASKFULL (IMG_UINT64_C(0x000000000001000F)) +#define RGX_CR_FAULT_GPU_CLEAR_REST_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_GPU_CLEAR_REST_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_LATENT_FAULT_SHIFT (3U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_LATENT_FAULT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_LATENT_FAULT_EN (0x00000008U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_PARITY_SHIFT (2U) +#define 
RGX_CR_FAULT_GPU_CLEAR_REST_PARITY_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_GPU_CLEAR_REST_PARITY_EN (0x00000004U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_LATENT_SHIFT (1U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_LATENT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_GPU_CLEAR_REST_LATENT_EN (0x00000002U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_GPU_CLEAR_REST_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_GPU_CLEAR_REST_DETECT_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS +*/ +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS (0x1220U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_MASKFULL (IMG_UINT64_C(0x0000003FFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CDM_FAULT_DS_SHIFT (37U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CDM_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CDM_FAULT_DS_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DCE_FAULT_DS_SHIFT (36U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DCE_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DCE_FAULT_DS_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TDM_FAULT_DS_SHIFT (35U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TDM_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TDM_FAULT_DS_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_IPP_FAULT_DS_SHIFT (34U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_IPP_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_IPP_FAULT_DS_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TILING_FAULT_DS_SHIFT (33U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TILING_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TILING_FAULT_DS_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_LOCKSTEP_RED_SHIFT (32U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_LOCKSTEP_RED_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_LOCKSTEP_RED_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_CR_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_CR_PARITY_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_CR_BUS_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_CR_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_CR_BUS_PARITY_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CR_BUS_PARITY_SHIFT (29U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CR_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CR_BUS_PARITY_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_GPU_IRQ_CR_PARITY_SHIFT (28U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_GPU_IRQ_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_GPU_IRQ_CR_PARITY_EN (IMG_UINT64_C(0x0000000010000000)) +#define 
RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_FILTER_PARITY_SHIFT (27U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_FILTER_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_FILTER_PARITY_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_PARITY_SHIFT (26U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_PARITY_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_EXTERNAL_AXI_MEM_BUS_PARITY_SHIFT (25U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_EXTERNAL_AXI_MEM_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_EXTERNAL_AXI_MEM_BUS_PARITY_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_PARITY_SHIFT (24U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_PARITY_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_PARITY_SHIFT (23U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_PARITY_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DFU_PARITY_SHIFT (22U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DFU_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DFU_PARITY_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SOCIF_DELAY_PARITY_SHIFT (21U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SOCIF_DELAY_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SOCIF_DELAY_PARITY_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_AXI_BUS_PARITY_SHIFT (20U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_AXI_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_AXI_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BUS_PARITY_SHIFT (19U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_IRQ_AXI_BUS_PARITY_SHIFT (18U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_IRQ_AXI_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_IRQ_AXI_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_IRQ_BUS_PARITY_SHIFT (17U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_IRQ_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_IRQ_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CR_PARITY_SHIFT (16U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CR_PARITY_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_CR_PARITY_SHIFT 
(15U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SAFETY_CR_PARITY_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_LOCKSTEP_SHIFT (14U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_LOCKSTEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_LOCKSTEP_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TEXAS_SHIFT (13U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TEXAS_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_DMA_SHIFT (12U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FW_DMA_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FBSC_SHIFT (11U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FBSC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FBSC_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FBHC_SHIFT (10U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FBHC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_FBHC_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DCE_SHIFT (9U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_DCE_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TDM_SHIFT (8U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TDM_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_SHIFT (7U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_BIF_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_GARTEN_SHIFT (6U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_GARTEN_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TE3_SHIFT (5U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TE3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_TE3_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_MMU_SHIFT (4U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_MMU_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_ACE_SHIFT (3U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_ACE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_ACE_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_SHIFT (2U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_SLC_EN 
(IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_FBCDC_SHIFT (1U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_FBCDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_FBCDC_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_SLC_SHIFT (0U) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_FAULT_JONES_REDUCER_DETECT_STATUS_CHEST_SLC_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_FAULT_JONES_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE (0x1230U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_MASKFULL (IMG_UINT64_C(0x0000003FFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CDM_FAULT_DS_SHIFT (37U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CDM_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CDM_FAULT_DS_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DCE_FAULT_DS_SHIFT (36U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DCE_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DCE_FAULT_DS_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TDM_FAULT_DS_SHIFT (35U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TDM_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TDM_FAULT_DS_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_IPP_FAULT_DS_SHIFT (34U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_IPP_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_IPP_FAULT_DS_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TILING_FAULT_DS_SHIFT (33U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TILING_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TILING_FAULT_DS_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_LOCKSTEP_RED_SHIFT (32U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_LOCKSTEP_RED_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_LOCKSTEP_RED_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_CR_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_CR_PARITY_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_CR_BUS_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_CR_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_CR_BUS_PARITY_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CR_BUS_PARITY_SHIFT (29U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CR_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CR_BUS_PARITY_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_GPU_IRQ_CR_PARITY_SHIFT (28U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_GPU_IRQ_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_GPU_IRQ_CR_PARITY_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_FILTER_PARITY_SHIFT (27U) +#define 
RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_FILTER_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_FILTER_PARITY_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_PARITY_SHIFT (26U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_PARITY_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_EXTERNAL_AXI_MEM_BUS_PARITY_SHIFT (25U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_EXTERNAL_AXI_MEM_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_EXTERNAL_AXI_MEM_BUS_PARITY_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_PARITY_SHIFT (24U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_PARITY_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_PARITY_SHIFT (23U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_PARITY_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DFU_PARITY_SHIFT (22U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DFU_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DFU_PARITY_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SOCIF_DELAY_PARITY_SHIFT (21U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SOCIF_DELAY_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SOCIF_DELAY_PARITY_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_AXI_BUS_PARITY_SHIFT (20U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_AXI_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_AXI_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BUS_PARITY_SHIFT (19U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_IRQ_AXI_BUS_PARITY_SHIFT (18U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_IRQ_AXI_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_IRQ_AXI_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_IRQ_BUS_PARITY_SHIFT (17U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_IRQ_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_IRQ_BUS_PARITY_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CR_PARITY_SHIFT (16U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CR_PARITY_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_CR_PARITY_SHIFT (15U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SAFETY_CR_PARITY_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_LOCKSTEP_SHIFT (14U) +#define 
RGX_CR_FAULT_JONES_REDUCER_DISABLE_LOCKSTEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_LOCKSTEP_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TEXAS_SHIFT (13U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TEXAS_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_DMA_SHIFT (12U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FW_DMA_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FBSC_SHIFT (11U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FBSC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FBSC_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FBHC_SHIFT (10U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FBHC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_FBHC_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DCE_SHIFT (9U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_DCE_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TDM_SHIFT (8U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TDM_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_SHIFT (7U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_BIF_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_GARTEN_SHIFT (6U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_GARTEN_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TE3_SHIFT (5U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TE3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_TE3_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_MMU_SHIFT (4U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_MMU_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_ACE_SHIFT (3U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_ACE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_ACE_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_SHIFT (2U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_SLC_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_FBCDC_SHIFT (1U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_FBCDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_FBCDC_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_SLC_SHIFT (0U) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_FAULT_JONES_REDUCER_DISABLE_CHEST_SLC_EN (IMG_UINT64_C(0x0000000000000001)) 
+ + +/* + Register RGX_CR_FAULT_TEXAS_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE (0x1248U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MASKFULL (IMG_UINT64_C(0x0000077FFFFFFC0F)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDS_FAULT_DS_SHIFT (42U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDS_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDS_FAULT_DS_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_CDM_PIPE_FAULT_DS_SHIFT (41U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_CDM_PIPE_FAULT_DS_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_CDM_PIPE_FAULT_DS_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_SOCIF_DELAY_PARITY_SHIFT (40U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_SOCIF_DELAY_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_SOCIF_DELAY_PARITY_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDSRW_CACHE_PARITY_SHIFT (38U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDSRW_CACHE_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDSRW_CACHE_PARITY_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MCU_L1_PARITY_SHIFT (37U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MCU_L1_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MCU_L1_PARITY_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS0_PARITY_SHIFT (36U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS0_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS0_PARITY_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS1_PARITY_SHIFT (35U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS1_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS1_PARITY_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TEXAS_BUS_PARITY_SHIFT (34U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TEXAS_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TEXAS_BUS_PARITY_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHARED_BUS_PARITY_SHIFT (33U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHARED_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHARED_BUS_PARITY_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_BUS_PARITY_SHIFT (32U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_BUS_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_BUS_PARITY_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_CR_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_CR_PARITY_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHARED_CR_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHARED_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHARED_CR_PARITY_EN (IMG_UINT64_C(0x0000000040000000)) +#define 
RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TEXAS_CR_PARITY_SHIFT (29U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TEXAS_CR_PARITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TEXAS_CR_PARITY_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_SWIFT_SHIFT (28U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_SWIFT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_SWIFT_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MERCER_SHIFT (27U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MERCER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MERCER_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS0_SHIFT (26U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS0_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS1_SHIFT (25U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_BIF_TEXAS1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDSRW_CACHE_SHIFT (24U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDSRW_CACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDSRW_CACHE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MCU_L1_SHIFT (23U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_MCU_L1_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VDM_SHIFT (22U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VDM_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IFA_SHIFT (21U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IFA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IFA_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_UVB_SHIFT (20U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_UVB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_UVB_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PPP_SHIFT (19U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PPP_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_CLIPPER_SHIFT (18U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_CLIPPER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_CLIPPER_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VBG_SHIFT (17U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VBG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VBG_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VBS_SHIFT (16U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VBS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VBS_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VCE_SHIFT (15U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VCE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_VCE_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PRIM_BUFFER_SHIFT (14U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PRIM_BUFFER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PRIM_BUFFER_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TESS_SHIFT (13U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TESS_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IPF_SHIFT (12U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IPF_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IFPU_SHIFT (11U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IFPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_IFPU_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TPF_SHIFT (10U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_TPF_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDS_SHIFT (3U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PDS_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_SHIFT (2U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PBE_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_USC_L2ICACHE_SHIFT (1U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_USC_L2ICACHE_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PSB_SHIFT (0U) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_FAULT_TEXAS_REDUCER_DISABLE_PSB_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_FAULT_SWIFT_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE (0x1260U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_MASKFULL (IMG_UINT64_C(0x00000000FFC007FF)) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BUS_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BUS_PARITY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BUS_PARITY_EN (0x80000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_SWIFT_CR_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_SWIFT_CR_PARITY_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_SWIFT_CR_PARITY_EN (0x40000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_CR_PARITY_SHIFT (29U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_CR_PARITY_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_CR_PARITY_EN (0x20000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_CR_PARITY_SHIFT (28U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_CR_PARITY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_CR_PARITY_EN (0x10000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_LATENT_SHIFT (27U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_LATENT_CLRMSK (0xF7FFFFFFU) +#define 
RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_LATENT_EN (0x08000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ICS_FAULT_SHIFT (26U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ICS_FAULT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ICS_FAULT_EN (0x04000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BSC_PARITY_SHIFT (25U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BSC_PARITY_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BSC_PARITY_EN (0x02000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TCU_L1_PARITY_SHIFT (24U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TCU_L1_PARITY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TCU_L1_PARITY_EN (0x01000000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_DEMUX_PARITY_SHIFT (23U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_DEMUX_PARITY_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_DEMUX_PARITY_EN (0x00800000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_SOCIF_DELAY_PARITY_SHIFT (22U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_SOCIF_DELAY_PARITY_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_SOCIF_DELAY_PARITY_EN (0x00400000U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TCU_SHIFT (10U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TCU_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TCU_EN (0x00000400U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_TF_SHIFT (9U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_TF_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_TF_EN (0x00000200U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_TAG_SHIFT (8U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_TAG_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_TAG_EN (0x00000100U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_MADD_SHIFT (7U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_MADD_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_TPU_MADD_EN (0x00000080U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BSC_SHIFT (6U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BSC_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_BSC_EN (0x00000040U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_IDEC_SHIFT (5U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_IDEC_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_IDEC_EN (0x00000020U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_CT_SHIFT (4U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_CT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_CT_EN (0x00000010U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_DEPTHSORT_SHIFT (3U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_DEPTHSORT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_DEPTHSORT_EN (0x00000008U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_TAGSORT_LUT_SHIFT (2U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_TAGSORT_LUT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_TAGSORT_LUT_EN (0x00000004U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_TAGSORT_BUFFER_SHIFT (1U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_TAGSORT_BUFFER_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_TAGSORT_BUFFER_EN (0x00000002U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_ZLS_SHIFT (0U) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_ZLS_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_SWIFT_REDUCER_DISABLE_ISP_ZLS_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS +*/ +#define 
RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS (0x1268U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000F000001F)) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_BUS_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_BUS_PARITY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_BUS_PARITY_EN (0x80000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_SOCIF_DELAY_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_SOCIF_DELAY_PARITY_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_SOCIF_DELAY_PARITY_EN (0x40000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_CR_PARITY_SHIFT (29U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_CR_PARITY_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_CR_PARITY_EN (0x20000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MH_PARITY_SHIFT (28U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MH_PARITY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MH_PARITY_EN (0x10000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ICS_LATENT_SHIFT (4U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ICS_LATENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ICS_LATENT_EN (0x00000010U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ICS_FAULT_SHIFT (3U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ICS_FAULT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ICS_FAULT_EN (0x00000008U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ECC_SHIFT (2U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ECC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_ECC_EN (0x00000004U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_SHIFT (1U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_USC_EN (0x00000002U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MCU_L0_SHIFT (0U) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MCU_L0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_MERCER_REDUCER_DETECT_STATUS_MCU_L0_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_MERCER_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE (0x1278U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MASKFULL (IMG_UINT64_C(0x00000000F000001F)) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_BUS_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_BUS_PARITY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_BUS_PARITY_EN (0x80000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_SOCIF_DELAY_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_SOCIF_DELAY_PARITY_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_SOCIF_DELAY_PARITY_EN (0x40000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_CR_PARITY_SHIFT (29U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_CR_PARITY_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_CR_PARITY_EN (0x20000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MH_PARITY_SHIFT (28U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MH_PARITY_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MH_PARITY_EN (0x10000000U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ICS_LATENT_SHIFT (4U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ICS_LATENT_CLRMSK (0xFFFFFFEFU) +#define 
RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ICS_LATENT_EN (0x00000010U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ICS_FAULT_SHIFT (3U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ICS_FAULT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ICS_FAULT_EN (0x00000008U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ECC_SHIFT (2U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ECC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_ECC_EN (0x00000004U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_SHIFT (1U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_USC_EN (0x00000002U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MCU_L0_SHIFT (0U) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MCU_L0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_MERCER_REDUCER_DISABLE_MCU_L0_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS +*/ +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS (0x1280U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFC1FFF)) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_BUS_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_BUS_PARITY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_BUS_PARITY_EN (0x80000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CR_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CR_PARITY_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CR_PARITY_EN (0x40000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PAP_LATENT_SHIFT (29U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PAP_LATENT_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PAP_LATENT_EN (0x20000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SAP_LATENT_SHIFT (28U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SAP_LATENT_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SAP_LATENT_EN (0x10000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CPX_LATENT_SHIFT (27U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CPX_LATENT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CPX_LATENT_EN (0x08000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MOV_LATENT_SHIFT (26U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MOV_LATENT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MOV_LATENT_EN (0x04000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PAP_ICS_FAULT_SHIFT (25U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PAP_ICS_FAULT_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PAP_ICS_FAULT_EN (0x02000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SAP_ICS_FAULT_SHIFT (24U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SAP_ICS_FAULT_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SAP_ICS_FAULT_EN (0x01000000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CPX_ICS_FAULT_SHIFT (23U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CPX_ICS_FAULT_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CPX_ICS_FAULT_EN (0x00800000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MOV_ICS_FAULT_SHIFT (22U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MOV_ICS_FAULT_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_MOV_ICS_FAULT_EN (0x00400000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_DMX_PARITY_FAULT_SHIFT (21U) +#define 
RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_DMX_PARITY_FAULT_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_DMX_PARITY_FAULT_EN (0x00200000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_US_PARITY_FAULT_SHIFT (20U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_US_PARITY_FAULT_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_US_PARITY_FAULT_EN (0x00100000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SCHED_FAULT_DS_SHIFT (19U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SCHED_FAULT_DS_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SCHED_FAULT_DS_EN (0x00080000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ICTRL_FAULT_DS_SHIFT (18U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ICTRL_FAULT_DS_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ICTRL_FAULT_DS_EN (0x00040000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PRED_STORE_SHIFT (12U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PRED_STORE_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_PRED_STORE_EN (0x00001000U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_DMX_SHIFT (11U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_DMX_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_DMX_EN (0x00000800U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_TPX_TS_SHIFT (10U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_TPX_TS_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_TPX_TS_EN (0x00000400U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_TPX_CRB_SHIFT (9U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_TPX_CRB_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_TPX_CRB_EN (0x00000200U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ICTRL_IF_SHIFT (8U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ICTRL_IF_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ICTRL_IF_EN (0x00000100U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_US_SHIFT (7U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_US_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_US_EN (0x00000080U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CEU_SHIFT (6U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CEU_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_CEU_EN (0x00000040U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SCHED_TS_SHIFT (5U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SCHED_TS_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SCHED_TS_EN (0x00000020U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_IRS_SHIFT (4U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_IRS_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_IRS_EN (0x00000010U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_LMS_SHIFT (3U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_LMS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_LMS_EN (0x00000008U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SHS_SHIFT (2U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SHS_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_SHS_EN (0x00000004U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_COEFF_STORE_SHIFT (1U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_COEFF_STORE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_COEFF_STORE_EN (0x00000002U) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ITR_SHIFT (0U) +#define 
RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ITR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_USC_REDUCER_DETECT_STATUS_ITR_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_USC_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_USC_REDUCER_DISABLE (0x1290U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFC1FFF)) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_BUS_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_BUS_PARITY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_BUS_PARITY_EN (0x80000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CR_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CR_PARITY_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CR_PARITY_EN (0x40000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PAP_LATENT_SHIFT (29U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PAP_LATENT_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PAP_LATENT_EN (0x20000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SAP_LATENT_SHIFT (28U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SAP_LATENT_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SAP_LATENT_EN (0x10000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CPX_LATENT_SHIFT (27U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CPX_LATENT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CPX_LATENT_EN (0x08000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MOV_LATENT_SHIFT (26U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MOV_LATENT_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MOV_LATENT_EN (0x04000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PAP_ICS_FAULT_SHIFT (25U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PAP_ICS_FAULT_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PAP_ICS_FAULT_EN (0x02000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SAP_ICS_FAULT_SHIFT (24U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SAP_ICS_FAULT_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SAP_ICS_FAULT_EN (0x01000000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CPX_ICS_FAULT_SHIFT (23U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CPX_ICS_FAULT_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CPX_ICS_FAULT_EN (0x00800000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MOV_ICS_FAULT_SHIFT (22U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MOV_ICS_FAULT_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_MOV_ICS_FAULT_EN (0x00400000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_DMX_PARITY_FAULT_SHIFT (21U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_DMX_PARITY_FAULT_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_DMX_PARITY_FAULT_EN (0x00200000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_US_PARITY_FAULT_SHIFT (20U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_US_PARITY_FAULT_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_US_PARITY_FAULT_EN (0x00100000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SCHED_FAULT_DS_SHIFT (19U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SCHED_FAULT_DS_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SCHED_FAULT_DS_EN (0x00080000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ICTRL_FAULT_DS_SHIFT (18U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ICTRL_FAULT_DS_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ICTRL_FAULT_DS_EN (0x00040000U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PRED_STORE_SHIFT (12U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PRED_STORE_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_PRED_STORE_EN (0x00001000U) 
+#define RGX_CR_FAULT_USC_REDUCER_DISABLE_DMX_SHIFT (11U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_DMX_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_DMX_EN (0x00000800U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_TPX_TS_SHIFT (10U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_TPX_TS_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_TPX_TS_EN (0x00000400U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_TPX_CRB_SHIFT (9U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_TPX_CRB_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_TPX_CRB_EN (0x00000200U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ICTRL_IF_SHIFT (8U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ICTRL_IF_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ICTRL_IF_EN (0x00000100U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_US_SHIFT (7U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_US_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_US_EN (0x00000080U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CEU_SHIFT (6U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CEU_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_CEU_EN (0x00000040U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SCHED_TS_SHIFT (5U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SCHED_TS_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SCHED_TS_EN (0x00000020U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_IRS_SHIFT (4U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_IRS_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_IRS_EN (0x00000010U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_LMS_SHIFT (3U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_LMS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_LMS_EN (0x00000008U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SHS_SHIFT (2U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SHS_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_SHS_EN (0x00000004U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_COEFF_STORE_SHIFT (1U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_COEFF_STORE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_COEFF_STORE_EN (0x00000002U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ITR_SHIFT (0U) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ITR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_USC_REDUCER_DISABLE_ITR_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_CHEST_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE (0x12A8U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_CR_BUS_PARITY_SHIFT (3U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_CR_BUS_PARITY_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_CR_BUS_PARITY_EN (0x00000008U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_CR_PARITY_SHIFT (2U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_CR_PARITY_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_CR_PARITY_EN (0x00000004U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_BANK_PARITY_SHIFT (1U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_BANK_PARITY_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_BANK_PARITY_EN (0x00000002U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_OWDB_PARITY_SHIFT (0U) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_OWDB_PARITY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_CHEST_REDUCER_DISABLE_OWDB_PARITY_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_SLC_REDUCER_DISABLE +*/ +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE (0x1328U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MASKFULL 
(IMG_UINT64_C(0x00000000C00007FF)) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_BUS_PARITY_SHIFT (31U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_BUS_PARITY_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_BUS_PARITY_EN (0x80000000U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_CR_PARITY_SHIFT (30U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_CR_PARITY_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_CR_PARITY_EN (0x40000000U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPD_CACHE_SHIFT (10U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPD_CACHE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPD_CACHE_EN (0x00000400U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPC_CACHE_SHIFT (9U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPC_CACHE_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPC_CACHE_EN (0x00000200U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPT_CACHE_SHIFT (8U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPT_CACHE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_HMMU_HPT_CACHE_EN (0x00000100U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PT_DATA_STORE_SHIFT (7U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PT_DATA_STORE_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PT_DATA_STORE_EN (0x00000080U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PT_CAM_SHIFT (6U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PT_CAM_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PT_CAM_EN (0x00000040U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_INST_QUEUE_SHIFT (5U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_INST_QUEUE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_INST_QUEUE_EN (0x00000020U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PDPC_DATA_SHIFT (4U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PDPC_DATA_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PDPC_DATA_EN (0x00000010U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PDPC_CAM_SHIFT (3U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PDPC_CAM_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_MMU4_PDPC_CAM_EN (0x00000008U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_SPFILTER_SHIFT (2U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_SPFILTER_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_SPFILTER_EN (0x00000004U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_CCM_SHIFT (1U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_CCM_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_CCM_EN (0x00000002U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_IWDB_SHIFT (0U) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_IWDB_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_SLC_REDUCER_DISABLE_SLC_IWDB_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_REDUCER_CLEAR +*/ +#define RGX_CR_FAULT_REDUCER_CLEAR (0x1330U) +#define RGX_CR_FAULT_REDUCER_CLEAR_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_FAULT_REDUCER_CLEAR_SLC_SHIFT (6U) +#define RGX_CR_FAULT_REDUCER_CLEAR_SLC_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FAULT_REDUCER_CLEAR_SLC_EN (0x00000040U) +#define RGX_CR_FAULT_REDUCER_CLEAR_CHEST_SHIFT (5U) +#define RGX_CR_FAULT_REDUCER_CLEAR_CHEST_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FAULT_REDUCER_CLEAR_CHEST_EN (0x00000020U) +#define RGX_CR_FAULT_REDUCER_CLEAR_JONES_SHIFT (4U) +#define RGX_CR_FAULT_REDUCER_CLEAR_JONES_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FAULT_REDUCER_CLEAR_JONES_EN (0x00000010U) +#define 
RGX_CR_FAULT_REDUCER_CLEAR_TEXAS_SHIFT (3U) +#define RGX_CR_FAULT_REDUCER_CLEAR_TEXAS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_REDUCER_CLEAR_TEXAS_EN (0x00000008U) +#define RGX_CR_FAULT_REDUCER_CLEAR_SWIFT_SHIFT (2U) +#define RGX_CR_FAULT_REDUCER_CLEAR_SWIFT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_REDUCER_CLEAR_SWIFT_EN (0x00000004U) +#define RGX_CR_FAULT_REDUCER_CLEAR_MERCER_SHIFT (1U) +#define RGX_CR_FAULT_REDUCER_CLEAR_MERCER_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_REDUCER_CLEAR_MERCER_EN (0x00000002U) +#define RGX_CR_FAULT_REDUCER_CLEAR_USC_SHIFT (0U) +#define RGX_CR_FAULT_REDUCER_CLEAR_USC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_REDUCER_CLEAR_USC_EN (0x00000001U) + + +/* + Register RGX_CR_FIRMWARE_PROCESSOR_LS +*/ +#define RGX_CR_FIRMWARE_PROCESSOR_LS (0x01A0U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS__RDL__MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FIRMWARE_PROCESSOR_LS__RDL__SELF_TEST_ENABLE_SHIFT (2U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS__RDL__SELF_TEST_ENABLE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_SHIFT (0U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FIRMWARE_PROCESSOR_LS__RDL__ENABLE_SHIFT (0U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS__RDL__ENABLE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_FIRMWARE_PROCESSOR_LS_STATUS +*/ +#define RGX_CR_FIRMWARE_PROCESSOR_LS_STATUS (0x01A8U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_STATUS_SELF_TEST_DONE_SHIFT (0U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_STATUS_SELF_TEST_DONE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_JONES_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN (0x8C30U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SLC_CHEST_SHIFT (22U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SLC_CHEST_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SLC_CHEST_EN (0x00400000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DCLS_RED_SHIFT (21U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DCLS_RED_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DCLS_RED_EN (0x00200000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DCLS_PRI_SHIFT (20U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DCLS_PRI_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DCLS_PRI_EN (0x00100000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_BIF_FILTER_SHIFT (19U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_BIF_FILTER_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_BIF_FILTER_EN (0x00080000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PT_PARITY_SHIFT (18U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PT_PARITY_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PT_PARITY_EN (0x00040000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_JONES_REGBANK_SHIFT (17U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_JONES_REGBANK_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_JONES_REGBANK_EN (0x00020000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SI_AXI2IMG_SHIFT (16U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SI_AXI2IMG_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SI_AXI2IMG_EN (0x00010000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_REDUCER_SHIFT (15U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_REDUCER_CLRMSK (0xFFFF7FFFU) +#define 
RGX_CR_JONES_SAFETY_SELF_TEST_EN_REDUCER_EN (0x00008000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_AXI_R_SHIFT (14U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_AXI_R_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_AXI_R_EN (0x00004000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DFU_SHIFT (13U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DFU_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_DFU_EN (0x00002000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SAFETY_IRQ_SHIFT (12U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SAFETY_IRQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SAFETY_IRQ_EN (0x00001000U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_GPU_IRQ_SHIFT (11U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_GPU_IRQ_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_GPU_IRQ_EN (0x00000800U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_AXI2IMG_SHIFT (10U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_AXI2IMG_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_AXI2IMG_EN (0x00000400U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SOCIF_SHIFT (9U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SOCIF_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SOCIF_EN (0x00000200U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_GARTEN_SHIFT (8U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_GARTEN_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_TDM_SHIFT (7U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_TDM_EN (0x00000080U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_VERTEX_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PM_SHIFT (2U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_PM_EN (0x00000004U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_BIF_SHIFT (1U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_BIF_EN (0x00000002U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SLC_SHIFT (0U) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_SAFETY_SELF_TEST_EN_SLC_EN (0x00000001U) + + +/* + Register RGX_CR_TEXAS_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_TEXAS_SAFETY_SELF_TEST_EN (0x8C38U) +#define RGX_CR_TEXAS_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TEXAS_SAFETY_SELF_TEST_EN_SPU_SHIFT (0U) +#define RGX_CR_TEXAS_SAFETY_SELF_TEST_EN_SPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) + + +/* + Register RGX_CR_SWIFT_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN (0x8C40U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define 
RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU31_SWIFT0_SHIFT (31U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU31_SWIFT0_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU31_SWIFT0_EN (0x80000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU30_SWIFT0_SHIFT (30U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU30_SWIFT0_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU30_SWIFT0_EN (0x40000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU29_SWIFT0_SHIFT (29U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU29_SWIFT0_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU29_SWIFT0_EN (0x20000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU28_SWIFT0_SHIFT (28U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU28_SWIFT0_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU28_SWIFT0_EN (0x10000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU27_SWIFT0_SHIFT (27U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU27_SWIFT0_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU27_SWIFT0_EN (0x08000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU26_SWIFT0_SHIFT (26U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU26_SWIFT0_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU26_SWIFT0_EN (0x04000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU25_SWIFT0_SHIFT (25U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU25_SWIFT0_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU25_SWIFT0_EN (0x02000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU24_SWIFT0_SHIFT (24U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU24_SWIFT0_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU24_SWIFT0_EN (0x01000000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU23_SWIFT0_SHIFT (23U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU23_SWIFT0_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU23_SWIFT0_EN (0x00800000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU22_SWIFT0_SHIFT (22U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU22_SWIFT0_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU22_SWIFT0_EN (0x00400000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU21_SWIFT0_SHIFT (21U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU21_SWIFT0_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU21_SWIFT0_EN (0x00200000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU20_SWIFT0_SHIFT (20U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU20_SWIFT0_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU20_SWIFT0_EN (0x00100000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU19_SWIFT0_SHIFT (19U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU19_SWIFT0_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU19_SWIFT0_EN (0x00080000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU18_SWIFT0_SHIFT (18U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU18_SWIFT0_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU18_SWIFT0_EN (0x00040000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU17_SWIFT0_SHIFT (17U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU17_SWIFT0_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU17_SWIFT0_EN (0x00020000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU16_SWIFT0_SHIFT (16U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU16_SWIFT0_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU16_SWIFT0_EN (0x00010000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU15_SWIFT0_SHIFT (15U) +#define 
RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU15_SWIFT0_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU15_SWIFT0_EN (0x00008000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU14_SWIFT0_SHIFT (14U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU14_SWIFT0_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU14_SWIFT0_EN (0x00004000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU13_SWIFT0_SHIFT (13U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU13_SWIFT0_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU13_SWIFT0_EN (0x00002000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU12_SWIFT0_SHIFT (12U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU12_SWIFT0_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU12_SWIFT0_EN (0x00001000U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU11_SWIFT0_SHIFT (11U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU11_SWIFT0_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU11_SWIFT0_EN (0x00000800U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU10_SWIFT0_SHIFT (10U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU10_SWIFT0_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU10_SWIFT0_EN (0x00000400U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU9_SWIFT0_SHIFT (9U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU9_SWIFT0_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU9_SWIFT0_EN (0x00000200U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU8_SWIFT0_SHIFT (8U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU8_SWIFT0_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU8_SWIFT0_EN (0x00000100U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU7_SWIFT0_SHIFT (7U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU7_SWIFT0_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU7_SWIFT0_EN (0x00000080U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU6_SWIFT0_SHIFT (6U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU6_SWIFT0_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU6_SWIFT0_EN (0x00000040U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU5_SWIFT0_SHIFT (5U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU5_SWIFT0_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU5_SWIFT0_EN (0x00000020U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU4_SWIFT0_SHIFT (4U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU4_SWIFT0_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU4_SWIFT0_EN (0x00000010U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU3_SWIFT0_SHIFT (3U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU3_SWIFT0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU3_SWIFT0_EN (0x00000008U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU2_SWIFT0_SHIFT (2U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU2_SWIFT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU2_SWIFT0_EN (0x00000004U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU1_SWIFT0_SHIFT (1U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU1_SWIFT0_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU1_SWIFT0_EN (0x00000002U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU0_SWIFT0_SHIFT (0U) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU0_SWIFT0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SWIFT_SAFETY_SELF_TEST_EN_SPU0_SWIFT0_EN (0x00000001U) + + +/* + Register RGX_CR_MERCER_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN (0x8C48U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define 
RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER2_SHIFT (62U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER1_SHIFT (61U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER0_SHIFT (60U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU20_MERCER0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER2_SHIFT (59U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER1_SHIFT (58U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER0_SHIFT (57U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU19_MERCER0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER2_SHIFT (56U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER1_SHIFT (55U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER0_SHIFT (54U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU18_MERCER0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER2_SHIFT (53U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER1_SHIFT (52U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER0_SHIFT (51U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU17_MERCER0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER2_SHIFT (50U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER1_SHIFT (49U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER1_CLRMSK 
(IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER0_SHIFT (48U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU16_MERCER0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER2_SHIFT (47U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER1_SHIFT (46U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER0_SHIFT (45U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU15_MERCER0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER2_SHIFT (44U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER1_SHIFT (43U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER0_SHIFT (42U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU14_MERCER0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER2_SHIFT (41U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER1_SHIFT (40U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER0_SHIFT (39U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU13_MERCER0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER2_SHIFT (38U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER1_SHIFT (37U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER0_SHIFT (36U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU12_MERCER0_EN (IMG_UINT64_C(0x0000001000000000)) 
+#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER2_SHIFT (35U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER1_SHIFT (34U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER0_SHIFT (33U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU11_MERCER0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER2_SHIFT (32U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER1_SHIFT (31U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER0_SHIFT (30U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU10_MERCER0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER2_SHIFT (29U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER1_SHIFT (28U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER0_SHIFT (27U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU9_MERCER0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER2_SHIFT (26U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER1_SHIFT (25U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER0_SHIFT (24U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU8_MERCER0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER2_SHIFT (23U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER1_SHIFT (22U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER1_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER0_SHIFT (21U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU7_MERCER0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER2_SHIFT (20U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER1_SHIFT (19U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER0_SHIFT (18U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU6_MERCER0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER2_SHIFT (17U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER1_SHIFT (16U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER0_SHIFT (15U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU5_MERCER0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER2_SHIFT (14U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER1_SHIFT (13U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER0_SHIFT (12U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU4_MERCER0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER2_SHIFT (11U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER1_SHIFT (10U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER0_SHIFT (9U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU3_MERCER0_EN (IMG_UINT64_C(0x0000000000000200)) +#define 
RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER2_SHIFT (8U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER1_SHIFT (7U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER0_SHIFT (6U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU2_MERCER0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER2_SHIFT (5U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER1_SHIFT (4U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER0_SHIFT (3U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU1_MERCER0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER2_SHIFT (2U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER1_SHIFT (1U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER0_SHIFT (0U) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MERCER_SAFETY_SELF_TEST_EN_SPU0_MERCER0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_USC3_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN (0x8C60U) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_REGBANK_SHIFT (1U) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_REGBANK_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_REGBANK_EN (0x00000002U) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_REDUCER_SHIFT (0U) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_REDUCER_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_USC3_SAFETY_SELF_TEST_EN_REDUCER_EN (0x00000001U) + + +/* + Register RGX_CR_TPU_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN (0x8C68U) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_REGBANK_SHIFT (1U) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_REGBANK_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_REGBANK_EN (0x00000002U) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_REDUCER_SHIFT (0U) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_REDUCER_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TPU_SAFETY_SELF_TEST_EN_REDUCER_EN (0x00000001U) + + +/* + Register RGX_CR_ISP_SAFETY_SELF_TEST_EN +*/ +#define 
RGX_CR_ISP_SAFETY_SELF_TEST_EN (0x8C70U) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_REGBANK_SHIFT (1U) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_REGBANK_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_REGBANK_EN (0x00000002U) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_REDUCER_SHIFT (0U) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_REDUCER_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ISP_SAFETY_SELF_TEST_EN_REDUCER_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_SAFETY_SELF_TEST_EN +*/ +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN (0x8C78U) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_REGBANK_SHIFT (1U) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_REGBANK_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_REGBANK_EN (0x00000002U) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_REDUCER_SHIFT (0U) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_REDUCER_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_SAFETY_SELF_TEST_EN_REDUCER_EN (0x00000001U) + + +/* + Register RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE +*/ +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE (0x2458U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_CHEST_REDUCER_SHIFT (3U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_CHEST_REDUCER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_CHEST_REDUCER_EN (0x00000008U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_CHEST_REGBANK_SHIFT (2U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_CHEST_REGBANK_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_CHEST_REGBANK_EN (0x00000004U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_BANK_SHIFT (1U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_BANK_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_BANK_EN (0x00000002U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_OWDB_SHIFT (0U) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_OWDB_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CHEST_PARITY_SELF_TEST_ENABLE_OWDB_EN (0x00000001U) /* Register RGX_CR_JONES_RAM_STATUS */ -#define RGX_CR_JONES_RAM_STATUS (0x1148U) -#define RGX_CR_JONES_RAM_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_JONES_RAM_STATUS_GARTEN_SHIFT (8U) -#define RGX_CR_JONES_RAM_STATUS_GARTEN_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_JONES_RAM_STATUS_GARTEN_EN (0x00000100U) -#define RGX_CR_JONES_RAM_STATUS_TDM_SHIFT (7U) -#define RGX_CR_JONES_RAM_STATUS_TDM_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_JONES_RAM_STATUS_TDM_EN (0x00000080U) -#define RGX_CR_JONES_RAM_STATUS_VERTEX_SHIFT (6U) -#define RGX_CR_JONES_RAM_STATUS_VERTEX_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_JONES_RAM_STATUS_VERTEX_EN (0x00000040U) -#define RGX_CR_JONES_RAM_STATUS_PIXEL_SHIFT (5U) -#define RGX_CR_JONES_RAM_STATUS_PIXEL_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_JONES_RAM_STATUS_PIXEL_EN (0x00000020U) -#define RGX_CR_JONES_RAM_STATUS_COMPUTE_SHIFT (4U) -#define RGX_CR_JONES_RAM_STATUS_COMPUTE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_JONES_RAM_STATUS_COMPUTE_EN (0x00000010U) -#define RGX_CR_JONES_RAM_STATUS_FBCDC_SHIFT (3U) -#define RGX_CR_JONES_RAM_STATUS_FBCDC_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_JONES_RAM_STATUS_FBCDC_EN (0x00000008U) -#define RGX_CR_JONES_RAM_STATUS_PM_SHIFT (2U) -#define RGX_CR_JONES_RAM_STATUS_PM_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_JONES_RAM_STATUS_PM_EN (0x00000004U) -#define RGX_CR_JONES_RAM_STATUS_BIF_SHIFT (1U) -#define 
RGX_CR_JONES_RAM_STATUS_BIF_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_JONES_RAM_STATUS_BIF_EN (0x00000002U) -#define RGX_CR_JONES_RAM_STATUS_SLC_SHIFT (0U) -#define RGX_CR_JONES_RAM_STATUS_SLC_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_JONES_RAM_STATUS_SLC_EN (0x00000001U) +#define RGX_CR_JONES_RAM_STATUS (0x1148U) +#define RGX_CR_JONES_RAM_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000003FF)) +#define RGX_CR_JONES_RAM_STATUS_SLC_CHEST_SHIFT (9U) +#define RGX_CR_JONES_RAM_STATUS_SLC_CHEST_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_RAM_STATUS_SLC_CHEST_EN (0x00000200U) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_SHIFT (8U) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_RAM_STATUS_TDM_SHIFT (7U) +#define RGX_CR_JONES_RAM_STATUS_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_RAM_STATUS_TDM_EN (0x00000080U) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_RAM_STATUS_PM_SHIFT (2U) +#define RGX_CR_JONES_RAM_STATUS_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_RAM_STATUS_PM_EN (0x00000004U) +#define RGX_CR_JONES_RAM_STATUS_BIF_SHIFT (1U) +#define RGX_CR_JONES_RAM_STATUS_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_RAM_STATUS_BIF_EN (0x00000002U) +#define RGX_CR_JONES_RAM_STATUS_SLC_SHIFT (0U) +#define RGX_CR_JONES_RAM_STATUS_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_RAM_STATUS_SLC_EN (0x00000001U) /* Register RGX_CR_JONES_RAM_INIT_KICK */ -#define RGX_CR_JONES_RAM_INIT_KICK (0x1158U) -#define RGX_CR_JONES_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_SHIFT (8U) -#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN (0x00000100U) -#define RGX_CR_JONES_RAM_INIT_KICK_TDM_SHIFT (7U) -#define RGX_CR_JONES_RAM_INIT_KICK_TDM_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_JONES_RAM_INIT_KICK_TDM_EN (0x00000080U) -#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_SHIFT (6U) -#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_EN (0x00000040U) -#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_SHIFT (5U) -#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_EN (0x00000020U) -#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_SHIFT (4U) -#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_EN (0x00000010U) -#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_SHIFT (3U) -#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_EN (0x00000008U) -#define RGX_CR_JONES_RAM_INIT_KICK_PM_SHIFT (2U) -#define RGX_CR_JONES_RAM_INIT_KICK_PM_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_JONES_RAM_INIT_KICK_PM_EN (0x00000004U) -#define RGX_CR_JONES_RAM_INIT_KICK_BIF_SHIFT (1U) -#define RGX_CR_JONES_RAM_INIT_KICK_BIF_CLRMSK (0xFFFFFFFDU) -#define 
RGX_CR_JONES_RAM_INIT_KICK_BIF_EN (0x00000002U) -#define RGX_CR_JONES_RAM_INIT_KICK_SLC_SHIFT (0U) -#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_JONES_RAM_INIT_KICK_SLC_EN (0x00000001U) +#define RGX_CR_JONES_RAM_INIT_KICK (0x1158U) +#define RGX_CR_JONES_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x00000000000003FF)) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CHEST_SHIFT (9U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CHEST_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CHEST_EN (0x00000200U) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_SHIFT (8U) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_SHIFT (7U) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_EN (0x00000080U) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_SHIFT (2U) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_EN (0x00000004U) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_SHIFT (1U) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_EN (0x00000002U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_SHIFT (0U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_EN (0x00000001U) + + +/* + Register RGX_CR_ARCH_CLK_GATE_ENABLE +*/ +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG (0x1208U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__SACG_AND_NOT_CXTP_INFRA__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU7_SHIFT (14U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU7_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU7_ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU7_AUTO (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU6_SHIFT (12U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU6_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU6_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU6_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU5_SHIFT (10U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU5_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU5_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU5_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU5_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU4_SHIFT (8U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU4_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU4_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU4_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU3_SHIFT (6U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU3_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU3_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU3_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU2_SHIFT (4U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU2_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU2_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU2_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU1_SHIFT (2U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU1_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU1_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU0_SHIFT (0U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU0_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU0_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__CXTP_INFRA_AND_SACG__SPU0_AUTO (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__SACG_AND_NOT_CXTP_INFRA__CLUSTER_SHIFT (0U) +#define RGX_CR_ARCH_CLK_GATE_ENABLE__SACG_AND_NOT_CXTP_INFRA__CLUSTER_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_PM_PARTIAL_RENDER_ENABLE */ -#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U) -#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U) -#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U) +#define 
RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U) /* Register RGX_CR_CDM_CONTEXT_STORE_STATUS */ -#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5 (0x04A0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_GT4__MASKFULL (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__NEED_RESUME_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__NEED_RESUME_EN (0x00000002U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_GT4__NEED_RESUME_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_GT4__NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_GT4__NEED_RESUME_EN (0x00000002U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__COMPLETE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5__COMPLETE_EN (0x00000001U) /* Register RGX_CR_CDM_CONTEXT_PDS0 */ -#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) -#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) +#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) /* Register RGX_CR_CDM_CONTEXT_PDS1 */ -#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) -#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (32U) -#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (31U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) 
-#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (30U) -#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (23U) -#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) -#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_SHIFT (22U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_SHIFT (11U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (16U) -#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (6U) -#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) -#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) -#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSIZE (2U) -#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (0U) -#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) -#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSHIFT (2U) -#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSIZE (4U) +#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) +#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) 
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSIZE (4U) /* Register RGX_CR_CDM_CONTEXT_LOAD_PDS0 */ -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) /* Register RGX_CR_CDM_CONTEXT_LOAD_PDS1 */ -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (32U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (31U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (23U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_SHIFT (22U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_SHIFT (11U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (6U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSIZE (2U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (0U) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) -#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSHIFT (2U) -#define 
RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSIZE (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSIZE (4U) /* Register RGX_CR_CDM_TERMINATE_PDS */ -#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) -#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) -#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) +#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) /* Register RGX_CR_CDM_TERMINATE_PDS1 */ -#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) -#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) 
-#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (32U) -#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (31U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (23U) -#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) -#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_SHIFT (22U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_SHIFT (11U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) -#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) -#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (6U) -#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) -#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) -#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSIZE (2U) -#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (0U) -#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) -#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSHIFT (2U) -#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSIZE (4U) +#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) +#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (0U) +#define 
RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSIZE (4U) /* Register group: RGX_CR_SCRATCH, with 16 repeats */ -#define RGX_CR_SCRATCH_REPEATCOUNT (16U) +#define RGX_CR_SCRATCH_REPEATCOUNT (16U) /* Register RGX_CR_SCRATCH0 */ -#define RGX_CR_SCRATCH0 (0x0800U) -#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH0_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH0 (0x0800U) +#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH0_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH1 */ -#define RGX_CR_SCRATCH1 (0x0808U) -#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH1_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH1 (0x0808U) +#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH1_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH2 */ -#define RGX_CR_SCRATCH2 (0x0810U) -#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH2_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH2 (0x0810U) +#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH2_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH3 */ -#define RGX_CR_SCRATCH3 (0x0818U) -#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH3_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH3 (0x0818U) +#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH3_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH4 */ -#define RGX_CR_SCRATCH4 (0x0820U) -#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH4_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH4 (0x0820U) +#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH4_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH5 */ -#define RGX_CR_SCRATCH5 (0x0828U) -#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH5_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH5 (0x0828U) +#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH5_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH6 */ -#define RGX_CR_SCRATCH6 (0x0830U) -#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH6_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH6 (0x0830U) +#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) +#define 
RGX_CR_SCRATCH6_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH7 */ -#define RGX_CR_SCRATCH7 (0x0838U) -#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH7_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH7 (0x0838U) +#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH7_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH8 */ -#define RGX_CR_SCRATCH8 (0x0840U) -#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH8_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH8 (0x0840U) +#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH8_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH9 */ -#define RGX_CR_SCRATCH9 (0x0848U) -#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH9_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH9 (0x0848U) +#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH9_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH10 */ -#define RGX_CR_SCRATCH10 (0x0850U) -#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH10_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH10 (0x0850U) +#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH10_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH11 */ -#define RGX_CR_SCRATCH11 (0x0858U) -#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH11_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH11 (0x0858U) +#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH11_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH12 */ -#define RGX_CR_SCRATCH12 (0x0860U) -#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH12_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH12 (0x0860U) +#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH12_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH13 */ -#define RGX_CR_SCRATCH13 (0x0868U) -#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH13_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH13 (0x0868U) +#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH13_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH14 */ -#define RGX_CR_SCRATCH14 (0x0870U) -#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) -#define 
RGX_CR_SCRATCH14_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH14 (0x0870U) +#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH14_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_SCRATCH15 */ -#define RGX_CR_SCRATCH15 (0x0878U) -#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) -#define RGX_CR_SCRATCH15_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_SCRATCH15 (0x0878U) +#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH15_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register group: RGX_CR_OS0_SCRATCH, with 4 repeats */ -#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS0_SCRATCH0 */ -#define RGX_CR_OS0_SCRATCH0 (0x0880U) -#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS0_SCRATCH0 (0x0880U) +#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS0_SCRATCH1 */ -#define RGX_CR_OS0_SCRATCH1 (0x0888U) -#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS0_SCRATCH1 (0x0888U) +#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS0_SCRATCH2 */ -#define RGX_CR_OS0_SCRATCH2 (0x0890U) -#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS0_SCRATCH2 (0x0890U) +#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS0_SCRATCH3 */ -#define RGX_CR_OS0_SCRATCH3 (0x0898U) -#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS0_SCRATCH3 (0x0898U) +#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS1_SCRATCH, with 4 repeats */ -#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS1_SCRATCH0 */ -#define RGX_CR_OS1_SCRATCH0 (0x10880U) -#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS1_SCRATCH0 (0x10880U) +#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS1_SCRATCH1 */ -#define RGX_CR_OS1_SCRATCH1 (0x10888U) -#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) -#define 
RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS1_SCRATCH1 (0x10888U) +#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS1_SCRATCH2 */ -#define RGX_CR_OS1_SCRATCH2 (0x10890U) -#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS1_SCRATCH2 (0x10890U) +#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS1_SCRATCH3 */ -#define RGX_CR_OS1_SCRATCH3 (0x10898U) -#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS1_SCRATCH3 (0x10898U) +#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS2_SCRATCH, with 4 repeats */ -#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS2_SCRATCH0 */ -#define RGX_CR_OS2_SCRATCH0 (0x20880U) -#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS2_SCRATCH0 (0x20880U) +#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS2_SCRATCH1 */ -#define RGX_CR_OS2_SCRATCH1 (0x20888U) -#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS2_SCRATCH1 (0x20888U) +#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS2_SCRATCH2 */ -#define RGX_CR_OS2_SCRATCH2 (0x20890U) -#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS2_SCRATCH2 (0x20890U) +#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS2_SCRATCH3 */ -#define RGX_CR_OS2_SCRATCH3 (0x20898U) -#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS2_SCRATCH3 (0x20898U) +#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS3_SCRATCH, with 4 repeats */ -#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS3_SCRATCH0 */ -#define RGX_CR_OS3_SCRATCH0 (0x30880U) -#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK 
(0x00000000U) +#define RGX_CR_OS3_SCRATCH0 (0x30880U) +#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS3_SCRATCH1 */ -#define RGX_CR_OS3_SCRATCH1 (0x30888U) -#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS3_SCRATCH1 (0x30888U) +#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS3_SCRATCH2 */ -#define RGX_CR_OS3_SCRATCH2 (0x30890U) -#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS3_SCRATCH2 (0x30890U) +#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS3_SCRATCH3 */ -#define RGX_CR_OS3_SCRATCH3 (0x30898U) -#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS3_SCRATCH3 (0x30898U) +#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS4_SCRATCH, with 4 repeats */ -#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS4_SCRATCH0 */ -#define RGX_CR_OS4_SCRATCH0 (0x40880U) -#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS4_SCRATCH0 (0x40880U) +#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS4_SCRATCH1 */ -#define RGX_CR_OS4_SCRATCH1 (0x40888U) -#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS4_SCRATCH1 (0x40888U) +#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS4_SCRATCH2 */ -#define RGX_CR_OS4_SCRATCH2 (0x40890U) -#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS4_SCRATCH2 (0x40890U) +#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS4_SCRATCH3 */ -#define RGX_CR_OS4_SCRATCH3 (0x40898U) -#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS4_SCRATCH3 (0x40898U) +#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) +#define 
RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS5_SCRATCH, with 4 repeats */ -#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS5_SCRATCH0 */ -#define RGX_CR_OS5_SCRATCH0 (0x50880U) -#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS5_SCRATCH0 (0x50880U) +#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS5_SCRATCH1 */ -#define RGX_CR_OS5_SCRATCH1 (0x50888U) -#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS5_SCRATCH1 (0x50888U) +#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS5_SCRATCH2 */ -#define RGX_CR_OS5_SCRATCH2 (0x50890U) -#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS5_SCRATCH2 (0x50890U) +#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS5_SCRATCH3 */ -#define RGX_CR_OS5_SCRATCH3 (0x50898U) -#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS5_SCRATCH3 (0x50898U) +#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS6_SCRATCH, with 4 repeats */ -#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS6_SCRATCH0 */ -#define RGX_CR_OS6_SCRATCH0 (0x60880U) -#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS6_SCRATCH0 (0x60880U) +#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS6_SCRATCH1 */ -#define RGX_CR_OS6_SCRATCH1 (0x60888U) -#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS6_SCRATCH1 (0x60888U) +#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS6_SCRATCH2 */ -#define RGX_CR_OS6_SCRATCH2 (0x60890U) -#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS6_SCRATCH2 (0x60890U) +#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK 
(0x00000000U) /* Register RGX_CR_OS6_SCRATCH3 */ -#define RGX_CR_OS6_SCRATCH3 (0x60898U) -#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS6_SCRATCH3 (0x60898U) +#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register group: RGX_CR_OS7_SCRATCH, with 4 repeats */ -#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (4U) +#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (4U) /* Register RGX_CR_OS7_SCRATCH0 */ -#define RGX_CR_OS7_SCRATCH0 (0x70880U) -#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS7_SCRATCH0 (0x70880U) +#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS7_SCRATCH1 */ -#define RGX_CR_OS7_SCRATCH1 (0x70888U) -#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS7_SCRATCH1 (0x70888U) +#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS7_SCRATCH2 */ -#define RGX_CR_OS7_SCRATCH2 (0x70890U) -#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS7_SCRATCH2 (0x70890U) +#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_OS7_SCRATCH3 */ -#define RGX_CR_OS7_SCRATCH3 (0x70898U) -#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) -#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0x00000000U) +#define RGX_CR_OS7_SCRATCH3 (0x70898U) +#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAX */ -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3000U) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAX_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAX_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA (0xF3000U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA__MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA__MSLVDATAX_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAX */ -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A00U) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define 
RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAX_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAX_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A00U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVDATAX_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAX */ -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3000U) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAX_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAX_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA (0x3000U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA__MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA__MSLVDATAX_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAX */ -#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) -#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) +#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAT */ -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3040U) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAT_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA (0xF3040U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA__MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA__MSLVDATAT_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAT */ -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A08U) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAT_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A08U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVDATAT_CLRMSK (0x00000000U) /* Register 
RGX_CR_META_SP_MSLVDATAT */ -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3040U) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAT_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA (0x3040U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA__MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA__MSLVDATAT_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVDATAT */ -#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) -#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) +#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) +#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) /* Register RGX_CR_META_SP_MSLVCTRL0 */ -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3080U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ADDR_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ADDR_CLRMSK (0x00000003U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__AUTOINCR_SHIFT (1U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__AUTOINCR_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__AUTOINCR_EN (0x00000002U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA (0xF3080U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVCTRL0 */ -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A10U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ADDR_SHIFT (2U) -#define 
RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ADDR_CLRMSK (0x00000003U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__AUTOINCR_SHIFT (1U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__AUTOINCR_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__AUTOINCR_EN (0x00000002U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__RD_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__RD_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__RD_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A10U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_MRUA__RD_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVCTRL0 */ -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3080U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ADDR_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ADDR_CLRMSK (0x00000003U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__AUTOINCR_SHIFT (1U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__AUTOINCR_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__AUTOINCR_EN (0x00000002U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA (0x3080U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVCTRL0 */ -#define RGX_CR_META_SP_MSLVCTRL0 
(0x0A10U) -#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) -#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) -#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) -#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) +#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVCTRL1 */ -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF30C0U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRTHREAD_SHIFT (30U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_SHIFT (29U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_EN (0x20000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_SHIFT (28U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_EN (0x10000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_SHIFT (26U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN (0x04000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__COREMEM_IDLE_SHIFT (25U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__COREMEM_IDLE_EN (0x02000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_SHIFT (24U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN (0x01000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRID_SHIFT (21U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRID_CLRMSK (0xFF1FFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERR_SHIFT (20U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERR_CLRMSK (0xFFEFFFFFU) -#define 
RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERR_EN (0x00100000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__WR_ACTIVE_SHIFT (18U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__WR_ACTIVE_EN (0x00040000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__THREAD_SHIFT (4U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__THREAD_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRANS_SIZE_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__BYTE_ROUND_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA (0xF30C0U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__THREAD_SHIFT (4U) +#define 
RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVCTRL1 */ -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A18U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRTHREAD_SHIFT (30U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__LOCK2_INTERLOCK_SHIFT (29U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__LOCK2_INTERLOCK_EN (0x20000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ATOMIC_INTERLOCK_SHIFT (28U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ATOMIC_INTERLOCK_EN (0x10000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__GBLPORT_IDLE_SHIFT (26U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__GBLPORT_IDLE_EN (0x04000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__COREMEM_IDLE_SHIFT (25U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__COREMEM_IDLE_EN (0x02000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__READY_SHIFT (24U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__READY_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__READY_EN (0x01000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRID_SHIFT (21U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRID_CLRMSK (0xFF1FFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERR_SHIFT (20U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERR_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERR_EN (0x00100000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__WR_ACTIVE_SHIFT (18U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__WR_ACTIVE_EN (0x00040000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__THREAD_SHIFT (4U) -#define 
RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__THREAD_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRANS_SIZE_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__BYTE_ROUND_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A18U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__BYTE_ROUND_SHIFT (0U) 
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_MRUA__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVCTRL1 */ -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x30C0U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRTHREAD_SHIFT (30U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_SHIFT (29U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_EN (0x20000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_SHIFT (28U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_EN (0x10000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_SHIFT (26U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN (0x04000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__COREMEM_IDLE_SHIFT (25U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__COREMEM_IDLE_EN (0x02000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_SHIFT (24U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN (0x01000000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRID_SHIFT (21U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRID_CLRMSK (0xFF1FFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERR_SHIFT (20U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERR_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERR_EN (0x00100000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__WR_ACTIVE_SHIFT (18U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__WR_ACTIVE_EN (0x00040000U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__THREAD_SHIFT (4U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__THREAD_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRANS_SIZE_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__BYTE_ROUND_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) +#define 
RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA (0x30C0U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVCTRL1 */ -#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) -#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) -#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) -#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) -#define 
RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) -#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) -#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) -#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) -#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) -#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) -#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) -#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) -#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) -#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) -#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) -#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) -#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) +#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) /* 
Register RGX_CR_META_SP_MSLVHANDSHKE */ -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3280U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__INPUT_SHIFT (2U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__INPUT_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__OUTPUT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__OUTPUT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_MRUA (0xF3280U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_MRUA__INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_MRUA__INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_MRUA__OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_MRUA__OUTPUT_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVHANDSHKE */ -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A50U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__INPUT_SHIFT (2U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__INPUT_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__OUTPUT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__OUTPUT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A50U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_MRUA__INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_MRUA__INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_MRUA__OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_MRUA__OUTPUT_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVHANDSHKE */ -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3280U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__INPUT_SHIFT (2U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__INPUT_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__OUTPUT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__OUTPUT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_EQ1_AND_MRUA (0x3280U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_EQ1_AND_MRUA__INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_EQ1_AND_MRUA__INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_EQ1_AND_MRUA__OUTPUT_SHIFT (0U) +#define 
RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_EQ1_AND_MRUA__OUTPUT_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVHANDSHKE */ -#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) -#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) -#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) -#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) +#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_META_SP_MSLVT0KICK */ -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3400U) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_MRUA (0xF3400U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT0KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICK */ -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A80U) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A80U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT0KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICK */ -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3400U) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_EQ1_AND_MRUA (0x3400U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT0KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICK */ -#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) -#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT 
(0U) -#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) +#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICKI */ -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3440U) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_MRUA (0xF3440U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT0KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICKI */ -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A88U) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A88U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT0KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICKI */ -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3440U) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_EQ1_AND_MRUA (0x3440U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT0KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT0KICKI */ -#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) -#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) +#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICK */ -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3480U) -#define 
RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_MRUA (0xF3480U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT1KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICK */ -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A90U) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A90U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT1KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICK */ -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3480U) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_EQ1_AND_MRUA (0x3480U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT1KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICK */ -#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) -#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) +#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICKI */ -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF34C0U) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_MRUA (0xF34C0U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define 
RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT1KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICKI */ -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A98U) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0A98U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT1KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICKI */ -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x34C0U) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_EQ1_AND_MRUA (0x34C0U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT1KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT1KICKI */ -#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) -#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) +#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICK */ -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3500U) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_MRUA (0xF3500U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT2KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICK */ -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AA0U) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define 
RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AA0U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT2KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICK */ -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3500U) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_EQ1_AND_MRUA (0x3500U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT2KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICK */ -#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) -#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) +#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICKI */ -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3540U) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_MRUA (0xF3540U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT2KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICKI */ -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AA8U) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AA8U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT2KICKI_SHIFT (0U) +#define 
RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT2KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICKI */ -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3540U) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_EQ1_AND_MRUA (0x3540U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT2KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT2KICKI */ -#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) -#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) +#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICK */ -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3580U) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_MRUA (0xF3580U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_MRUA__MSLVT3KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICK */ -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AB0U) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AB0U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT3KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICK */ -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3580U) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICK_SHIFT (0U) -#define 
RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_EQ1_AND_MRUA (0x3580U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_EQ1_AND_MRUA__MSLVT3KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICK */ -#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) -#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) +#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICKI */ -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF35C0U) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_MRUA (0xF35C0U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_MRUA__MSLVT3KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICKI */ -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AB8U) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AB8U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_MRUA__MSLVT3KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICKI */ -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x35C0U) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_EQ1_AND_MRUA (0x35C0U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_EQ1_AND_MRUA__MSLVT3KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVT3KICKI 
*/ -#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) -#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) -#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) +#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) +#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) /* Register RGX_CR_META_SP_MSLVRST */ -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3600U) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__SOFTRESET_SHIFT (0U) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__SOFTRESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__SOFTRESET_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_MRUA (0xF3600U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_MRUA__SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_MRUA__SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_MRUA__SOFTRESET_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVRST */ -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AC0U) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__SOFTRESET_SHIFT (0U) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__SOFTRESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__SOFTRESET_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AC0U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_MRUA__SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_MRUA__SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_MRUA__SOFTRESET_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVRST */ -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3600U) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__SOFTRESET_SHIFT (0U) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__SOFTRESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__SOFTRESET_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_EQ1_AND_MRUA (0x3600U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_EQ1_AND_MRUA__SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_EQ1_AND_MRUA__SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_EQ1_AND_MRUA__SOFTRESET_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVRST */ -#define RGX_CR_META_SP_MSLVRST (0x0AC0U) -#define 
RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) -#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVRST (0x0AC0U) +#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVIRQSTATUS */ -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3640U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT3_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT3_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT2_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT2_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA (0xF3640U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA__TRIGVECT2_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQSTATUS */ -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AC8U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT3_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT3_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT2_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT2_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AC8U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define 
RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_MRUA__TRIGVECT2_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQSTATUS */ -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3640U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT3_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT3_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT2_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT2_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA (0x3640U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA__TRIGVECT2_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQSTATUS */ -#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQENABLE */ -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3680U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT1_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT1_CLRMSK (0xFFFFFFF7U) -#define 
RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT1_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT0_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT0_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA (0xF3680U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA__EVENT0_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQENABLE */ -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AD0U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT1_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT1_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT1_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT0_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT0_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AD0U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_MRUA__EVENT0_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQENABLE */ -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3680U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT1_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT1_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT1_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT0_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT0_CLRMSK (0xFFFFFFFBU) 
-#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT0_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA (0x3680U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA__EVENT0_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQENABLE */ -#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) -#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) +#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) +#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) /* Register RGX_CR_META_SP_MSLVIRQLEVEL */ -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF36C0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MODE_SHIFT (0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MODE_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA (0xF36C0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA__MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA__MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA__MODE_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVIRQLEVEL */ -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AD8U) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MODE_SHIFT (0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MODE_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_MRUA (0xF0AD8U) +#define 
RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_MRUA__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_MRUA__MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_MRUA__MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_MRUA__MODE_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVIRQLEVEL */ -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x36C0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MODE_SHIFT (0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MODE_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA (0x36C0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA__MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA__MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA__MODE_EN (0x00000001U) /* Register RGX_CR_META_SP_MSLVIRQLEVEL */ -#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) +#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED00__HOST_SECURITY_GEQ4, with 32 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED00__HOST_SECURITY_GEQ4_REPEATCOUNT (32U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED00 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED00__HOST_SECURITY_GEQ4 (0xF8FC0U) +#define RGX_CR_FWCORE_DMI_RESERVED00__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 32 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (32U) +#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (32U) /* Register RGX_CR_FWCORE_DMI_RESERVED00 */ -#define RGX_CR_FWCORE_DMI_RESERVED00 (0x8FC0U) -#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED00 (0x8FC0U) +#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED01 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED01__HOST_SECURITY_GEQ4 (0xF8FC8U) +#define RGX_CR_FWCORE_DMI_RESERVED01__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED01 */ -#define RGX_CR_FWCORE_DMI_RESERVED01 (0x8FC8U) -#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED01 (0x8FC8U) +#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED02 */ -#define RGX_CR_FWCORE_DMI_RESERVED02 (0x8FD0U) -#define 
RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED02__HOST_SECURITY_GEQ4 (0xF8FD0U) +#define RGX_CR_FWCORE_DMI_RESERVED02__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED02 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED02 (0x8FD0U) +#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED03 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED03__HOST_SECURITY_GEQ4 (0xF8FD8U) +#define RGX_CR_FWCORE_DMI_RESERVED03__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED03 */ -#define RGX_CR_FWCORE_DMI_RESERVED03 (0x8FD8U) -#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED03 (0x8FD8U) +#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_DATA0 */ -#define RGX_CR_FWCORE_DMI_DATA0 (0x90C0U) -#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_DATA0__HOST_SECURITY_GEQ4 (0xF90C0U) +#define RGX_CR_FWCORE_DMI_DATA0__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA0__HOST_SECURITY_GEQ4__VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA0__HOST_SECURITY_GEQ4__VAL_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FWCORE_DMI_DATA0 +*/ +#define RGX_CR_FWCORE_DMI_DATA0 (0x90C0U) +#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED10__HOST_SECURITY_GEQ4, with 7 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED10__HOST_SECURITY_GEQ4_REPEATCOUNT (7U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED10 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED10__HOST_SECURITY_GEQ4 (0xF90C8U) +#define RGX_CR_FWCORE_DMI_RESERVED10__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 7 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (7U) +#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (7U) /* Register RGX_CR_FWCORE_DMI_RESERVED10 */ -#define RGX_CR_FWCORE_DMI_RESERVED10 (0x90C8U) -#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED10 (0x90C8U) +#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED11 */ -#define RGX_CR_FWCORE_DMI_RESERVED11 (0x90D0U) -#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED11__HOST_SECURITY_GEQ4 (0xF90D0U) +#define RGX_CR_FWCORE_DMI_RESERVED11__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED11 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED11 (0x90D0U) +#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED12 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED12__HOST_SECURITY_GEQ4 (0xF90D8U) +#define RGX_CR_FWCORE_DMI_RESERVED12__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED12 */ -#define RGX_CR_FWCORE_DMI_RESERVED12 (0x90D8U) -#define 
RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED12 (0x90D8U) +#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED13 */ -#define RGX_CR_FWCORE_DMI_RESERVED13 (0x90E0U) -#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED13__HOST_SECURITY_GEQ4 (0xF90E0U) +#define RGX_CR_FWCORE_DMI_RESERVED13__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED13 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED13 (0x90E0U) +#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED14 */ -#define RGX_CR_FWCORE_DMI_RESERVED14 (0x90E8U) -#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED14__HOST_SECURITY_GEQ4 (0xF90E8U) +#define RGX_CR_FWCORE_DMI_RESERVED14__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED14 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED14 (0x90E8U) +#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED15 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED15__HOST_SECURITY_GEQ4 (0xF90F0U) +#define RGX_CR_FWCORE_DMI_RESERVED15__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED15 */ -#define RGX_CR_FWCORE_DMI_RESERVED15 (0x90F0U) -#define RGX_CR_FWCORE_DMI_RESERVED15_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED15 (0x90F0U) +#define RGX_CR_FWCORE_DMI_RESERVED15_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED16 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED16__HOST_SECURITY_GEQ4 (0xF90F8U) +#define RGX_CR_FWCORE_DMI_RESERVED16__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED16 */ -#define RGX_CR_FWCORE_DMI_RESERVED16 (0x90F8U) -#define RGX_CR_FWCORE_DMI_RESERVED16_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED16 (0x90F8U) +#define RGX_CR_FWCORE_DMI_RESERVED16_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_DATA1 */ -#define RGX_CR_FWCORE_DMI_DATA1 (0x9100U) -#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_DATA1__HOST_SECURITY_GEQ4 (0xF9100U) +#define RGX_CR_FWCORE_DMI_DATA1__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA1__HOST_SECURITY_GEQ4__VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA1__HOST_SECURITY_GEQ4__VAL_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FWCORE_DMI_DATA1 +*/ +#define RGX_CR_FWCORE_DMI_DATA1 (0x9100U) +#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED20__HOST_SECURITY_GEQ4, with 7 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED20__HOST_SECURITY_GEQ4_REPEATCOUNT (7U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED20 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED20__HOST_SECURITY_GEQ4 (0xF9108U) +#define RGX_CR_FWCORE_DMI_RESERVED20__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) 
/* Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 7 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (7U) +#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (7U) /* Register RGX_CR_FWCORE_DMI_RESERVED20 */ -#define RGX_CR_FWCORE_DMI_RESERVED20 (0x9108U) -#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED20 (0x9108U) +#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED21 */ -#define RGX_CR_FWCORE_DMI_RESERVED21 (0x9110U) -#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED21__HOST_SECURITY_GEQ4 (0xF9110U) +#define RGX_CR_FWCORE_DMI_RESERVED21__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED21 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED21 (0x9110U) +#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED22 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED22__HOST_SECURITY_GEQ4 (0xF9118U) +#define RGX_CR_FWCORE_DMI_RESERVED22__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED22 */ -#define RGX_CR_FWCORE_DMI_RESERVED22 (0x9118U) -#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED22 (0x9118U) +#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED23 */ -#define RGX_CR_FWCORE_DMI_RESERVED23 (0x9120U) -#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED23__HOST_SECURITY_GEQ4 (0xF9120U) +#define RGX_CR_FWCORE_DMI_RESERVED23__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED23 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED23 (0x9120U) +#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED30__HOST_SECURITY_GEQ4, with 80 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED30__HOST_SECURITY_GEQ4_REPEATCOUNT (80U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED30 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED30__HOST_SECURITY_GEQ4 (0xF9140U) +#define RGX_CR_FWCORE_DMI_RESERVED30__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 80 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (80U) +#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (80U) /* Register RGX_CR_FWCORE_DMI_RESERVED30 */ -#define RGX_CR_FWCORE_DMI_RESERVED30 (0x9140U) -#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED30 (0x9140U) +#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED31 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED31__HOST_SECURITY_GEQ4 (0xF9148U) +#define RGX_CR_FWCORE_DMI_RESERVED31__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED31 */ -#define RGX_CR_FWCORE_DMI_RESERVED31 (0x9148U) -#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED31 (0x9148U) +#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED32 */ -#define RGX_CR_FWCORE_DMI_RESERVED32 (0x9150U) 
-#define RGX_CR_FWCORE_DMI_RESERVED32_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED32__HOST_SECURITY_GEQ4 (0xF9150U) +#define RGX_CR_FWCORE_DMI_RESERVED32__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED32 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED32 (0x9150U) +#define RGX_CR_FWCORE_DMI_RESERVED32_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED33 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED33__HOST_SECURITY_GEQ4 (0xF9158U) +#define RGX_CR_FWCORE_DMI_RESERVED33__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED33 */ -#define RGX_CR_FWCORE_DMI_RESERVED33 (0x9158U) -#define RGX_CR_FWCORE_DMI_RESERVED33_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED33 (0x9158U) +#define RGX_CR_FWCORE_DMI_RESERVED33_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED34 */ -#define RGX_CR_FWCORE_DMI_RESERVED34 (0x9160U) -#define RGX_CR_FWCORE_DMI_RESERVED34_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED34__HOST_SECURITY_GEQ4 (0xF9160U) +#define RGX_CR_FWCORE_DMI_RESERVED34__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED34 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED34 (0x9160U) +#define RGX_CR_FWCORE_DMI_RESERVED34_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED35 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED35__HOST_SECURITY_GEQ4 (0xF9168U) +#define RGX_CR_FWCORE_DMI_RESERVED35__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED35 */ -#define RGX_CR_FWCORE_DMI_RESERVED35 (0x9168U) -#define RGX_CR_FWCORE_DMI_RESERVED35_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED35 (0x9168U) +#define RGX_CR_FWCORE_DMI_RESERVED35_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED36 */ -#define RGX_CR_FWCORE_DMI_RESERVED36 (0x9170U) -#define RGX_CR_FWCORE_DMI_RESERVED36_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED36__HOST_SECURITY_GEQ4 (0xF9170U) +#define RGX_CR_FWCORE_DMI_RESERVED36__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED36 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED36 (0x9170U) +#define RGX_CR_FWCORE_DMI_RESERVED36_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED37 */ -#define RGX_CR_FWCORE_DMI_RESERVED37 (0x9178U) -#define RGX_CR_FWCORE_DMI_RESERVED37_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED37__HOST_SECURITY_GEQ4 (0xF9178U) +#define RGX_CR_FWCORE_DMI_RESERVED37__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED37 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED37 (0x9178U) +#define RGX_CR_FWCORE_DMI_RESERVED37_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED38 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED38__HOST_SECURITY_GEQ4 (0xF9180U) +#define RGX_CR_FWCORE_DMI_RESERVED38__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED38 */ -#define RGX_CR_FWCORE_DMI_RESERVED38 (0x9180U) -#define RGX_CR_FWCORE_DMI_RESERVED38_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED38 (0x9180U) +#define 
RGX_CR_FWCORE_DMI_RESERVED38_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED39 */ -#define RGX_CR_FWCORE_DMI_RESERVED39 (0x9188U) -#define RGX_CR_FWCORE_DMI_RESERVED39_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED39__HOST_SECURITY_GEQ4 (0xF9188U) +#define RGX_CR_FWCORE_DMI_RESERVED39__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED39 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED39 (0x9188U) +#define RGX_CR_FWCORE_DMI_RESERVED39_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED310 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED310__HOST_SECURITY_GEQ4 (0xF9190U) +#define RGX_CR_FWCORE_DMI_RESERVED310__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED310 */ -#define RGX_CR_FWCORE_DMI_RESERVED310 (0x9190U) -#define RGX_CR_FWCORE_DMI_RESERVED310_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED310 (0x9190U) +#define RGX_CR_FWCORE_DMI_RESERVED310_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED311 */ -#define RGX_CR_FWCORE_DMI_RESERVED311 (0x9198U) -#define RGX_CR_FWCORE_DMI_RESERVED311_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED311__HOST_SECURITY_GEQ4 (0xF9198U) +#define RGX_CR_FWCORE_DMI_RESERVED311__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED311 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED311 (0x9198U) +#define RGX_CR_FWCORE_DMI_RESERVED311_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED312 */ -#define RGX_CR_FWCORE_DMI_RESERVED312 (0x91A0U) -#define RGX_CR_FWCORE_DMI_RESERVED312_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED312__HOST_SECURITY_GEQ4 (0xF91A0U) +#define RGX_CR_FWCORE_DMI_RESERVED312__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED312 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED312 (0x91A0U) +#define RGX_CR_FWCORE_DMI_RESERVED312_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED313 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED313__HOST_SECURITY_GEQ4 (0xF91A8U) +#define RGX_CR_FWCORE_DMI_RESERVED313__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED313 */ -#define RGX_CR_FWCORE_DMI_RESERVED313 (0x91A8U) -#define RGX_CR_FWCORE_DMI_RESERVED313_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED313 (0x91A8U) +#define RGX_CR_FWCORE_DMI_RESERVED313_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED314 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED314__HOST_SECURITY_GEQ4 (0xF91B0U) +#define RGX_CR_FWCORE_DMI_RESERVED314__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED314 */ -#define RGX_CR_FWCORE_DMI_RESERVED314 (0x91B0U) -#define RGX_CR_FWCORE_DMI_RESERVED314_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED314 (0x91B0U) +#define RGX_CR_FWCORE_DMI_RESERVED314_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED315 */ -#define RGX_CR_FWCORE_DMI_RESERVED315 (0x91B8U) -#define RGX_CR_FWCORE_DMI_RESERVED315_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED315__HOST_SECURITY_GEQ4 (0xF91B8U) +#define 
RGX_CR_FWCORE_DMI_RESERVED315__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED315 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED315 (0x91B8U) +#define RGX_CR_FWCORE_DMI_RESERVED315_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED316 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED316__HOST_SECURITY_GEQ4 (0xF91C0U) +#define RGX_CR_FWCORE_DMI_RESERVED316__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED316 */ -#define RGX_CR_FWCORE_DMI_RESERVED316 (0x91C0U) -#define RGX_CR_FWCORE_DMI_RESERVED316_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED316 (0x91C0U) +#define RGX_CR_FWCORE_DMI_RESERVED316_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED317 */ -#define RGX_CR_FWCORE_DMI_RESERVED317 (0x91C8U) -#define RGX_CR_FWCORE_DMI_RESERVED317_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED317__HOST_SECURITY_GEQ4 (0xF91C8U) +#define RGX_CR_FWCORE_DMI_RESERVED317__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED317 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED317 (0x91C8U) +#define RGX_CR_FWCORE_DMI_RESERVED317_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED318 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED318__HOST_SECURITY_GEQ4 (0xF91D0U) +#define RGX_CR_FWCORE_DMI_RESERVED318__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED318 */ -#define RGX_CR_FWCORE_DMI_RESERVED318 (0x91D0U) -#define RGX_CR_FWCORE_DMI_RESERVED318_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED318 (0x91D0U) +#define RGX_CR_FWCORE_DMI_RESERVED318_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED319 */ -#define RGX_CR_FWCORE_DMI_RESERVED319 (0x91D8U) -#define RGX_CR_FWCORE_DMI_RESERVED319_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED319__HOST_SECURITY_GEQ4 (0xF91D8U) +#define RGX_CR_FWCORE_DMI_RESERVED319__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED319 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED319 (0x91D8U) +#define RGX_CR_FWCORE_DMI_RESERVED319_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED320 */ -#define RGX_CR_FWCORE_DMI_RESERVED320 (0x91E0U) -#define RGX_CR_FWCORE_DMI_RESERVED320_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED320__HOST_SECURITY_GEQ4 (0xF91E0U) +#define RGX_CR_FWCORE_DMI_RESERVED320__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED320 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED320 (0x91E0U) +#define RGX_CR_FWCORE_DMI_RESERVED320_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED321 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED321__HOST_SECURITY_GEQ4 (0xF91E8U) +#define RGX_CR_FWCORE_DMI_RESERVED321__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED321 */ -#define RGX_CR_FWCORE_DMI_RESERVED321 (0x91E8U) -#define RGX_CR_FWCORE_DMI_RESERVED321_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED321 (0x91E8U) +#define RGX_CR_FWCORE_DMI_RESERVED321_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED322 */ -#define 
RGX_CR_FWCORE_DMI_RESERVED322 (0x91F0U) -#define RGX_CR_FWCORE_DMI_RESERVED322_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED322__HOST_SECURITY_GEQ4 (0xF91F0U) +#define RGX_CR_FWCORE_DMI_RESERVED322__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED322 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED322 (0x91F0U) +#define RGX_CR_FWCORE_DMI_RESERVED322_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED323 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED323__HOST_SECURITY_GEQ4 (0xF91F8U) +#define RGX_CR_FWCORE_DMI_RESERVED323__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED323 */ -#define RGX_CR_FWCORE_DMI_RESERVED323 (0x91F8U) -#define RGX_CR_FWCORE_DMI_RESERVED323_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED323 (0x91F8U) +#define RGX_CR_FWCORE_DMI_RESERVED323_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED324 */ -#define RGX_CR_FWCORE_DMI_RESERVED324 (0x9200U) -#define RGX_CR_FWCORE_DMI_RESERVED324_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED324__HOST_SECURITY_GEQ4 (0xF9200U) +#define RGX_CR_FWCORE_DMI_RESERVED324__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED324 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED324 (0x9200U) +#define RGX_CR_FWCORE_DMI_RESERVED324_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED325 */ -#define RGX_CR_FWCORE_DMI_RESERVED325 (0x9208U) -#define RGX_CR_FWCORE_DMI_RESERVED325_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED325__HOST_SECURITY_GEQ4 (0xF9208U) +#define RGX_CR_FWCORE_DMI_RESERVED325__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED325 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED325 (0x9208U) +#define RGX_CR_FWCORE_DMI_RESERVED325_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED326 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED326__HOST_SECURITY_GEQ4 (0xF9210U) +#define RGX_CR_FWCORE_DMI_RESERVED326__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED326 */ -#define RGX_CR_FWCORE_DMI_RESERVED326 (0x9210U) -#define RGX_CR_FWCORE_DMI_RESERVED326_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED326 (0x9210U) +#define RGX_CR_FWCORE_DMI_RESERVED326_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED327 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED327__HOST_SECURITY_GEQ4 (0xF9218U) +#define RGX_CR_FWCORE_DMI_RESERVED327__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED327 */ -#define RGX_CR_FWCORE_DMI_RESERVED327 (0x9218U) -#define RGX_CR_FWCORE_DMI_RESERVED327_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED327 (0x9218U) +#define RGX_CR_FWCORE_DMI_RESERVED327_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED328 */ -#define RGX_CR_FWCORE_DMI_RESERVED328 (0x9220U) -#define RGX_CR_FWCORE_DMI_RESERVED328_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED328__HOST_SECURITY_GEQ4 (0xF9220U) +#define RGX_CR_FWCORE_DMI_RESERVED328__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register 
RGX_CR_FWCORE_DMI_RESERVED328 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED328 (0x9220U) +#define RGX_CR_FWCORE_DMI_RESERVED328_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED329 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED329__HOST_SECURITY_GEQ4 (0xF9228U) +#define RGX_CR_FWCORE_DMI_RESERVED329__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED329 */ -#define RGX_CR_FWCORE_DMI_RESERVED329 (0x9228U) -#define RGX_CR_FWCORE_DMI_RESERVED329_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED329 (0x9228U) +#define RGX_CR_FWCORE_DMI_RESERVED329_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED330 */ -#define RGX_CR_FWCORE_DMI_RESERVED330 (0x9230U) -#define RGX_CR_FWCORE_DMI_RESERVED330_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED330__HOST_SECURITY_GEQ4 (0xF9230U) +#define RGX_CR_FWCORE_DMI_RESERVED330__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED330 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED330 (0x9230U) +#define RGX_CR_FWCORE_DMI_RESERVED330_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED331 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED331__HOST_SECURITY_GEQ4 (0xF9238U) +#define RGX_CR_FWCORE_DMI_RESERVED331__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED331 */ -#define RGX_CR_FWCORE_DMI_RESERVED331 (0x9238U) -#define RGX_CR_FWCORE_DMI_RESERVED331_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED331 (0x9238U) +#define RGX_CR_FWCORE_DMI_RESERVED331_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_DMCONTROL */ -#define RGX_CR_FWCORE_DMI_DMCONTROL (0x93C0U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003)) -#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4 (0xF93C0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000D0000003)) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__HALTREQ_SHIFT (31U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__HALTREQ_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__HALTREQ_EN (0x80000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__RESUMEREQ_SHIFT (30U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__RESUMEREQ_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__RESUMEREQ_EN 
(0x40000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__ACKHAVERESET_SHIFT (28U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__ACKHAVERESET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__ACKHAVERESET_EN (0x10000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__NDMRESET_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__NDMRESET_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__NDMRESET_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__DMACTIVE_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__DMACTIVE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__DMACTIVE_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_DMI_DMCONTROL +*/ +#define RGX_CR_FWCORE_DMI_DMCONTROL (0x93C0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003)) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED40__HOST_SECURITY_GEQ4, with 7 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED40__HOST_SECURITY_GEQ4_REPEATCOUNT (7U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED40 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED40__HOST_SECURITY_GEQ4 (0xF93C8U) +#define RGX_CR_FWCORE_DMI_RESERVED40__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register group: RGX_CR_FWCORE_DMI_RESERVED4, with 7 repeats */ -#define RGX_CR_FWCORE_DMI_RESERVED4_REPEATCOUNT (7U) +#define RGX_CR_FWCORE_DMI_RESERVED4_REPEATCOUNT (7U) /* Register RGX_CR_FWCORE_DMI_RESERVED40 */ -#define RGX_CR_FWCORE_DMI_RESERVED40 (0x93C8U) -#define RGX_CR_FWCORE_DMI_RESERVED40_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED40 (0x93C8U) +#define RGX_CR_FWCORE_DMI_RESERVED40_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_RESERVED41 */ -#define RGX_CR_FWCORE_DMI_RESERVED41 (0x93D0U) -#define RGX_CR_FWCORE_DMI_RESERVED41_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_RESERVED41__HOST_SECURITY_GEQ4 (0xF93D0U) +#define RGX_CR_FWCORE_DMI_RESERVED41__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED41 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED41 (0x93D0U) +#define RGX_CR_FWCORE_DMI_RESERVED41_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_DMSTATUS */ -#define RGX_CR_FWCORE_DMI_DMSTATUS (0x9400U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) -#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U) -#define 
RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4 (0xF9400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) +#define 
RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__IMPEBREAK_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__IMPEBREAK_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__IMPEBREAK_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLHAVERESET_SHIFT (19U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLHAVERESET_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLHAVERESET_EN (0x00080000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYHAVERESET_SHIFT (18U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYHAVERESET_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYHAVERESET_EN (0x00040000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLRESUMEACK_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLRESUMEACK_EN (0x00020000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYRESUMEACK_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYRESUMEACK_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLNONEXISTENT_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLNONEXISTENT_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYNONEXISTENT_SHIFT (14U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYNONEXISTENT_EN (0x00004000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLUNAVAIL_SHIFT (13U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLUNAVAIL_EN (0x00002000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYUNAVAIL_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYUNAVAIL_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLRUNNING_SHIFT (11U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLRUNNING_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLRUNNING_EN (0x00000800U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYRUNNING_SHIFT (10U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYRUNNING_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYRUNNING_EN (0x00000400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLHALTED_SHIFT (9U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLHALTED_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ALLHALTED_EN (0x00000200U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYHALTED_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYHALTED_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__ANYHALTED_EN (0x00000100U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__AUTHENTICATED_SHIFT (7U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__AUTHENTICATED_CLRMSK (0xFFFFFF7FU) +#define 
RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__AUTHENTICATED_EN (0x00000080U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__AUTHBUSY_SHIFT (6U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__AUTHBUSY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__AUTHBUSY_EN (0x00000040U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__HASRESETHALTREQ_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__HASRESETHALTREQ_EN (0x00000020U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__CONFSTRPTRVALID_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__CONFSTRPTRVALID_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__VERSION_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMSTATUS__HOST_SECURITY_GEQ4__VERSION_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_FWCORE_DMI_DMSTATUS +*/ +#define RGX_CR_FWCORE_DMI_DMSTATUS (0x9400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) +#define 
RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_FWCORE_DMI_ABSTRACTCS +*/ +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4 (0xF9540U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x000000001F00170F)) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__PROGBUFSIZE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__BUSY_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__BUSY_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__BUSY_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__CMDERR_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__CMDERR_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__DATACOUNT_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS__HOST_SECURITY_GEQ4__DATACOUNT_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_FWCORE_DMI_ABSTRACTCS */ -#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x9540U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x9540U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_FWCORE_DMI_COMMAND +*/ +#define 
RGX_CR_FWCORE_DMI_COMMAND__HOST_SECURITY_GEQ4 (0xF9580U) +#define RGX_CR_FWCORE_DMI_COMMAND__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_COMMAND__HOST_SECURITY_GEQ4__CMDTYPE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_COMMAND__HOST_SECURITY_GEQ4__CMDTYPE_CLRMSK (0x00FFFFFFU) +#define RGX_CR_FWCORE_DMI_COMMAND__HOST_SECURITY_GEQ4__CONTROL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_COMMAND__HOST_SECURITY_GEQ4__CONTROL_CLRMSK (0xFF000000U) /* Register RGX_CR_FWCORE_DMI_COMMAND */ -#define RGX_CR_FWCORE_DMI_COMMAND (0x9580U) -#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) -#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) -#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) +#define RGX_CR_FWCORE_DMI_COMMAND (0x9580U) +#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) +#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) /* Register RGX_CR_FWCORE_DMI_SBCS */ -#define RGX_CR_FWCORE_DMI_SBCS (0x9DC0U) -#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) -#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) -#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) -#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) -#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) -#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) -#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) -#define 
RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4 (0xF9DC0U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBVERSION_SHIFT (29U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBVERSION_CLRMSK (0x1FFFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBBUSYERROR_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBBUSYERROR_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBBUSYERROR_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBBUSY_SHIFT (21U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBBUSY_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBBUSY_EN (0x00200000U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBREADONADDR_SHIFT (20U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBREADONADDR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBREADONADDR_EN (0x00100000U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS_CLRMSK (0xFFF1FFFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBAUTOINCREMENT_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBAUTOINCREMENT_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBREADONDATA_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBREADONDATA_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBREADONDATA_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBERROR_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBERROR_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBASIZE_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBASIZE_CLRMSK (0xFFFFF01FU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS128_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS128_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS128_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS64_SHIFT (3U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS64_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS64_EN (0x00000008U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS32_SHIFT (2U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS32_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS32_EN (0x00000004U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS16_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS16_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS16_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS8_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS8_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_SBCS__HOST_SECURITY_GEQ4__SBACCESS8_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_DMI_SBCS +*/ +#define RGX_CR_FWCORE_DMI_SBCS (0x9DC0U) +#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) +#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) 
+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_DMI_SBADDRESS0 +*/ +#define RGX_CR_FWCORE_DMI_SBADDRESS0__HOST_SECURITY_GEQ4 (0xF9E00U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBADDRESS0__HOST_SECURITY_GEQ4__ADDRESS_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0__HOST_SECURITY_GEQ4__ADDRESS_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_SBADDRESS0 */ -#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x9E00U) -#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x9E00U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_SBDATA0 */ -#define RGX_CR_FWCORE_DMI_SBDATA0 (0x9EC0U) -#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_SBDATA0__HOST_SECURITY_GEQ4 (0xF9EC0U) +#define RGX_CR_FWCORE_DMI_SBDATA0__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA0 +*/ +#define 
RGX_CR_FWCORE_DMI_SBDATA0 (0x9EC0U) +#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA1 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA1__HOST_SECURITY_GEQ4 (0xF9F00U) +#define RGX_CR_FWCORE_DMI_SBDATA1__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_SBDATA1 */ -#define RGX_CR_FWCORE_DMI_SBDATA1 (0x9F00U) -#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_SBDATA1 (0x9F00U) +#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_SBDATA2 */ -#define RGX_CR_FWCORE_DMI_SBDATA2 (0x9F40U) -#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_SBDATA2__HOST_SECURITY_GEQ4 (0xF9F40U) +#define RGX_CR_FWCORE_DMI_SBDATA2__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA2 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA2 (0x9F40U) +#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_FWCORE_DMI_SBDATA3 */ -#define RGX_CR_FWCORE_DMI_SBDATA3 (0x9F80U) -#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_FWCORE_DMI_SBDATA3__HOST_SECURITY_GEQ4 (0xF9F80U) +#define RGX_CR_FWCORE_DMI_SBDATA3__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA3 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA3 (0x9F80U) +#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_HALTSUM0 +*/ +#define RGX_CR_FWCORE_DMI_HALTSUM0__HOST_SECURITY_GEQ4 (0xF9FC0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_HALTSUM0__HOST_SECURITY_GEQ4__VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0__HOST_SECURITY_GEQ4__VAL_CLRMSK (0x00000000U) /* Register RGX_CR_FWCORE_DMI_HALTSUM0 */ -#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x9FC0U) -#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) -#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x9FC0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_SCHEDULE */ -#define RGX_CR_MTS_SCHEDULE (0x0B00U) -#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) -#define 
RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE (0x0B00U) +#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE1 */ -#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) -#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) 
-#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) +#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE2 */ -#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) -#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) -#define 
RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) +#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE3 */ -#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) -#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) -#define 
RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) +#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE4 */ -#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) -#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) +#define 
RGX_CR_MTS_SCHEDULE4 (0x40B00U) +#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE5 */ -#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) -#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) +#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) +#define 
RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE6 */ -#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) -#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) +#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) 
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_SCHEDULE7 */ -#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) -#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) -#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) -#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) -#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) -#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) -#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) -#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) +#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) +#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 
(0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) /* Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC */ -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC */ -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC */ -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC */ -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) -#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define 
RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) /* Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG */ -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S8_CPR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (1U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (0x00000002U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (0x00000000U) -#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (0x00000001U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S8CPR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (1U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (0x00000002U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (0x00000000U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (0x00000001U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL__S8CPR__META (0x00000000U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL__S8CPR__MTS (0x00000001U) /* Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register 
RGX_CR_MTS_DM3_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM6_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE (0xD138U) -#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE (0xD138U) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_DM7_INTERRUPT_ENABLE */ -#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE (0xD140U) -#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE (0xD140U) +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) /* Register RGX_CR_MTS_INTCTX */ -#define RGX_CR_MTS_INTCTX (0x0B98U) -#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FC0FFFF)) -#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) -#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) -#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) -#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) -#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) -#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MTS_INTCTX (0x0B98U) +#define RGX_CR_MTS_INTCTX_MASKFULL 
(IMG_UINT64_C(0x000000003FC0FFFF)) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MTS_BGCTX */ -#define RGX_CR_MTS_BGCTX (0x0BA0U) -#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MTS_BGCTX (0x0BA0U) +#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE */ -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) -#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) +#define 
RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_MTS_GPU_INT_STATUS */ -#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) -#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) -#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) +#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) +#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) /* Register RGX_CR_IRQ_OS0_EVENT_STATUS */ -#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD0U) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS0_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE0U) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS1_EVENT_STATUS */ -#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD0U) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS1_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE0U) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS2_EVENT_STATUS */ -#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD0U) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS2_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE0U) -#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define 
RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS3_EVENT_STATUS */ -#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD0U) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS3_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE0U) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS4_EVENT_STATUS */ -#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD0U) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS4_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE0U) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS5_EVENT_STATUS */ -#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD0U) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS5_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE0U) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register 
RGX_CR_IRQ_OS6_EVENT_STATUS */ -#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD0U) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS6_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE0U) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS7_EVENT_STATUS */ -#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD0U) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_IRQ_OS7_EVENT_CLEAR */ -#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE0U) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) -#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) /* Register RGX_CR_MTS_SCHEDULE_ENABLE */ -#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BD8U) -#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) -#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BD8U) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_FWCORE_BOOT */ -#define RGX_CR_FWCORE_BOOT (0x70C0U) -#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_BOOT_MODE_SHIFT (0U) -#define RGX_CR_FWCORE_BOOT_MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_BOOT_MODE_EN (0x00000001U) +#define RGX_CR_FWCORE_BOOT (0x70C0U) +#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_BOOT_MODE_SHIFT (0U) +#define RGX_CR_FWCORE_BOOT_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_BOOT_MODE_EN (0x00000001U) /* Register RGX_CR_META_BOOT */ -#define RGX_CR_META_BOOT (0x0BF8U) -#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_META_BOOT_MODE_SHIFT (0U) -#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) +#define RGX_CR_META_BOOT (0x0BF8U) +#define 
RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_BOOT_MODE_SHIFT (0U) +#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) /* Register RGX_CR_GARTEN_SLC */ -#define RGX_CR_GARTEN_SLC (0x0BB8U) -#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) -#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) +#define RGX_CR_GARTEN_SLC (0x0BB8U) +#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) /* Register RGX_CR_ISP_RENDER */ -#define RGX_CR_ISP_RENDER (0x0F08U) -#define RGX_CR_ISP_RENDER__IFR_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x000000000003FFFF)) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x000000000007FF53)) -#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000003FFF0)) -#define RGX_CR_ISP_RENDER_TILES_PER_ISP_SHIFT (16U) -#define RGX_CR_ISP_RENDER_TILES_PER_ISP_CLRMSK (0xFFFCFFFFU) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__TILES_PER_ISP_SHIFT (16U) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__TILES_PER_ISP_CLRMSK (0xFFF8FFFFU) -#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_SHIFT (12U) -#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_SHIFT (8U) -#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_CLRMSK (0xFFFFF0FFU) -#define RGX_CR_ISP_RENDER_TILE_STARVATION_SHIFT (7U) -#define RGX_CR_ISP_RENDER_TILE_STARVATION_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_ISP_RENDER_TILE_STARVATION_EN (0x00000080U) -#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_SHIFT (6U) -#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_EN (0x00000040U) -#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) -#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFCFU) -#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_NONE (0x00000000U) -#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_TILE (0x00000010U) -#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_PBLK (0x00000030U) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_SHIFT (4U) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_CONTEXT_NONE (0x00000000U) -#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_CONTEXT_RESUME (0x00000010U) -#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) -#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) -#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) -#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) -#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) -#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (1U) -#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000002U) -#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) -#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) -#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) -#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) -#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) -#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (0U) -#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFFEU) 
-#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000001U) +#define RGX_CR_ISP_RENDER (0x0F08U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x000000000003FFFF)) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x000000000007FF53)) +#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000003FFF0)) +#define RGX_CR_ISP_RENDER_TILES_PER_ISP_SHIFT (16U) +#define RGX_CR_ISP_RENDER_TILES_PER_ISP_CLRMSK (0xFFFCFFFFU) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__TILES_PER_ISP_SHIFT (16U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__TILES_PER_ISP_CLRMSK (0xFFF8FFFFU) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_SHIFT (12U) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_SHIFT (8U) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_SHIFT (7U) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_EN (0x00000080U) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_SHIFT (6U) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_EN (0x00000040U) +#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_TILE (0x00000010U) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_PBLK (0x00000030U) +#define RGX_CR_ISP_RENDER_RESUME__CS3DL_EQ4_AND_PIPEDM_EQ0__CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER_RESUME__CS3DL_EQ4_AND_PIPEDM_EQ0__CONTEXT_RESUME (0x00000010U) +#define RGX_CR_ISP_RENDER_RESUME__IFR_AND_PIPEDM_EQ0__CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER_RESUME__IFR_AND_PIPEDM_EQ0__CONTEXT_TILE (0x00000010U) +#define RGX_CR_ISP_RENDER_RESUME__IFR_AND_PIPEDM_EQ0__CONTEXT_PBLK (0x00000030U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__RESUME_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__RESUME_CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__RESUME_CONTEXT_RESUME (0x00000010U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__DIR_SHIFT (2U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__DIR_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__DIR_TL2BR (0x00000000U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__DIR_TR2BL (0x00000004U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__DIR_BL2TR (0x00000008U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__DIR_BR2TL (0x0000000CU) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__PROCESS_PROTECTED_TILES_SHIFT (1U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__PROCESS_PROTECTED_TILES_EN (0x00000002U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__MODE_SHIFT (0U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__MODE_NORM (0x00000000U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__MODE_FAST_2D (0x00000002U) +#define RGX_CR_ISP_RENDER__IFR_AND_PIPEDM_EQ0__MODE_FAST_SCALE (0x00000003U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__PROCESS_UNPROTECTED_TILES_SHIFT (0U) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__PROCESS_UNPROTECTED_TILES_CLRMSK 
(0xFFFFFFFEU) +#define RGX_CR_ISP_RENDER__CS3DL_EQ4_AND_PIPEDM_EQ0__PROCESS_UNPROTECTED_TILES_EN (0x00000001U) /* Register RGX_CR_ISP_CTL */ -#define RGX_CR_ISP_CTL (0x0FB0U) -#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000007BF8FF)) -#define RGX_CR_ISP_CTL_DBUFFER_COUNT_SHIFT (20U) -#define RGX_CR_ISP_CTL_DBUFFER_COUNT_CLRMSK (0xFF8FFFFFU) -#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) -#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) -#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_SHIFT (17U) -#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_EN (0x00020000U) -#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_SHIFT (16U) -#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_EN (0x00010000U) -#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_SHIFT (15U) -#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_EN (0x00008000U) -#define RGX_CR_ISP_CTL_LINE_SAMPLE_SHIFT (14U) -#define RGX_CR_ISP_CTL_LINE_SAMPLE_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_ISP_CTL_LINE_SAMPLE_EN (0x00004000U) -#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (13U) -#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x00002000U) -#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_SHIFT (12U) -#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_EN (0x00001000U) -#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (11U) -#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00000800U) -#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) -#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFF00U) +#define RGX_CR_ISP_CTL (0x0FB0U) +#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000007BF8FF)) +#define RGX_CR_ISP_CTL_DBUFFER_COUNT_SHIFT (20U) +#define RGX_CR_ISP_CTL_DBUFFER_COUNT_CLRMSK (0xFF8FFFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_SHIFT (17U) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_EN (0x00020000U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_SHIFT (16U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_EN (0x00010000U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_SHIFT (15U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_EN (0x00008000U) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_SHIFT (14U) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_EN (0x00004000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (13U) +#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x00002000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_SHIFT (12U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_EN (0x00001000U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (11U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00000800U) +#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) +#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFF00U) /* Register group: 
RGX_CR_MEM_TILING_CFG, with 8 repeats */ -#define RGX_CR_MEM_TILING_CFG_REPEATCOUNT (8U) +#define RGX_CR_MEM_TILING_CFG_REPEATCOUNT (8U) /* Register RGX_CR_MEM_TILING_CFG0 */ -#define RGX_CR_MEM_TILING_CFG0 (0x12D8U) -#define RGX_CR_MEM_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG0_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG0 (0x12D8U) +#define RGX_CR_MEM_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG1 */ -#define RGX_CR_MEM_TILING_CFG1 (0x12E0U) -#define RGX_CR_MEM_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG1_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG1 (0x12E0U) +#define RGX_CR_MEM_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_CLRMSK 
(IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG2 */ -#define RGX_CR_MEM_TILING_CFG2 (0x12E8U) -#define RGX_CR_MEM_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG2_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG2 (0x12E8U) +#define RGX_CR_MEM_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG3 */ -#define RGX_CR_MEM_TILING_CFG3 (0x12F0U) -#define RGX_CR_MEM_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG3_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG3 (0x12F0U) +#define RGX_CR_MEM_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG4 */ -#define RGX_CR_MEM_TILING_CFG4 (0x12F8U) -#define RGX_CR_MEM_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG4_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG4 (0x12F8U) +#define RGX_CR_MEM_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG5 */ -#define RGX_CR_MEM_TILING_CFG5 (0x1300U) -#define RGX_CR_MEM_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG5_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG5_ENABLE_CLRMSK 
(IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG5 (0x1300U) +#define RGX_CR_MEM_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG6 */ -#define RGX_CR_MEM_TILING_CFG6 (0x1308U) -#define RGX_CR_MEM_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG6_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG6 (0x1308U) +#define RGX_CR_MEM_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define 
RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_MEM_TILING_CFG7 */ -#define RGX_CR_MEM_TILING_CFG7 (0x1310U) -#define RGX_CR_MEM_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_SHIFT (61U) -#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG7_ENABLE_SHIFT (60U) -#define RGX_CR_MEM_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) -#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) -#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) -#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) -#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG7 (0x1310U) +#define RGX_CR_MEM_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) /* Register RGX_CR_USC_TIMER */ -#define RGX_CR_USC_TIMER (0x46C8U) -#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) -#define RGX_CR_USC_TIMER_CNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_USC_TIMER (0x46C8U) +#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_CR_USC_TIMER_CNT */ -#define RGX_CR_USC_TIMER_CNT (0x46D0U) -#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) -#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) +#define RGX_CR_USC_TIMER_CNT (0x46D0U) +#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) + + +/* + Register RGX_CR_PAPSAG_FAULT_INJECT +*/ +#define RGX_CR_PAPSAG_FAULT_INJECT (0x4D00U) +#define RGX_CR_PAPSAG_FAULT_INJECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_PAPSAG_FAULT_INJECT_VALUE_SHIFT (0U) +#define RGX_CR_PAPSAG_FAULT_INJECT_VALUE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_PAPSAG_FAULT_INJECT_VALUE_DISABLE (0x00000000U) +#define 
RGX_CR_PAPSAG_FAULT_INJECT_VALUE_STUCK0 (0x00000001U) +#define RGX_CR_PAPSAG_FAULT_INJECT_VALUE_STUCK1 (0x00000002U) +#define RGX_CR_PAPSAG_FAULT_INJECT_VALUE_RESERVED (0x00000003U) + + +/* + Register RGX_CR_PAPSAG_FAULT_INJECT_SAFETY +*/ +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY (0x4D08U) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_VALUE_SHIFT (0U) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_VALUE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_VALUE_DISABLE (0x00000000U) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_VALUE_STUCK0 (0x00000001U) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_VALUE_STUCK1 (0x00000002U) +#define RGX_CR_PAPSAG_FAULT_INJECT_SAFETY_VALUE_RESERVED (0x00000003U) /* Register RGX_CR_TE_CHECKSUM */ -#define RGX_CR_TE_CHECKSUM (0x5110U) -#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TE_CHECKSUM (0x5110U) +#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_USC_UVB_CHECKSUM */ -#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) -#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) +#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TE_TMA_CHECKSUM */ -#define RGX_CR_TE_TMA_CHECKSUM (0x5128U) -#define RGX_CR_TE_TMA_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TE_TMA_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TE_TMA_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TE_TMA_CHECKSUM (0x5128U) +#define RGX_CR_TE_TMA_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_TMA_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_TMA_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_CDM_PDS_CHECKSUM */ -#define RGX_CR_CDM_PDS_CHECKSUM (0x5130U) -#define RGX_CR_CDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_CDM_PDS_CHECKSUM (0x5130U) +#define RGX_CR_CDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_VCE_CHECKSUM */ -#define RGX_CR_VCE_CHECKSUM (0x5030U) -#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_VCE_CHECKSUM (0x5030U) +#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_ISP_PDS_CHECKSUM */ -#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) -#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) +#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_ISP_TPF_CHECKSUM */ -#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) -#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) +#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TFPU_CHECKSUM */ -#define RGX_CR_TFPU_CHECKSUM (0x5048U) -#define RGX_CR_TFPU_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TFPU_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TFPU_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TFPU_CHECKSUM (0x5048U) +#define RGX_CR_TFPU_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_ZLS_CHECKSUM */ -#define RGX_CR_ZLS_CHECKSUM (0x5050U) -#define RGX_CR_ZLS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_ZLS_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_ZLS_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_ZLS_CHECKSUM (0x5050U) +#define RGX_CR_ZLS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ZLS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ZLS_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PBE_CHECKSUM_3D */ -#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D (0x5058U) -#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__VALUE_SHIFT (0U) -#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PBE_CHECKSUM_3D__PC2 (0x5058U) +#define RGX_CR_PBE_CHECKSUM_3D__PC2__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_3D__PC2__VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_3D__PC2__VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PBE_CHECKSUM */ -#define RGX_CR_PBE_CHECKSUM (0x5058U) -#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PBE_CHECKSUM (0x5058U) +#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PDS_DOUTM_STM_CHECKSUM */ -#define RGX_CR_PDS_DOUTM_STM_CHECKSUM (0x5060U) -#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM (0x5060U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_IFPU_ISP_CHECKSUM */ -#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) -#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) +#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PPP_CLIP_CHECKSUM */ -#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) -#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) +#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_VCE_PRIM_CHECKSUM */ -#define RGX_CR_VCE_PRIM_CHECKSUM (0x5140U) -#define RGX_CR_VCE_PRIM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_VCE_PRIM_CHECKSUM (0x5140U) +#define RGX_CR_VCE_PRIM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_TDM_PDS_CHECKSUM */ -#define RGX_CR_TDM_PDS_CHECKSUM (0x5148U) -#define RGX_CR_TDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_SHIFT (0U) -#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_TDM_PDS_CHECKSUM (0x5148U) +#define RGX_CR_TDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PBE_CHECKSUM_2D */ -#define RGX_CR_PBE_CHECKSUM_2D (0x5158U) -#define RGX_CR_PBE_CHECKSUM_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PBE_CHECKSUM_2D_VALUE_SHIFT (0U) -#define RGX_CR_PBE_CHECKSUM_2D_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_PBE_CHECKSUM_2D (0x5158U) +#define RGX_CR_PBE_CHECKSUM_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_2D_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_2D_VALUE_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_PHASE_GEOM */ -#define RGX_CR_PERF_PHASE_GEOM (0x6008U) -#define RGX_CR_PERF_PHASE_GEOM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_PHASE_GEOM_COUNT_SHIFT (0U) -#define RGX_CR_PERF_PHASE_GEOM_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_PHASE_GEOM (0x6008U) +#define RGX_CR_PERF_PHASE_GEOM__PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_GEOM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_GEOM_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_GEOM_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_PHASE_FRAG */ -#define RGX_CR_PERF_PHASE_FRAG (0x6010U) -#define RGX_CR_PERF_PHASE_FRAG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_PHASE_FRAG_COUNT_SHIFT (0U) -#define RGX_CR_PERF_PHASE_FRAG_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_PHASE_FRAG (0x6010U) +#define RGX_CR_PERF_PHASE_FRAG__PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_FRAG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_FRAG_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_FRAG_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_PHASE_COMP */ -#define RGX_CR_PERF_PHASE_COMP (0x6018U) -#define RGX_CR_PERF_PHASE_COMP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_PHASE_COMP_COUNT_SHIFT (0U) -#define 
RGX_CR_PERF_PHASE_COMP_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_PHASE_COMP (0x6018U) +#define RGX_CR_PERF_PHASE_COMP__PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_COMP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_COMP_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_COMP_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_CYCLE_GEOM_TOTAL */ -#define RGX_CR_PERF_CYCLE_GEOM_TOTAL (0x6020U) -#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL (0x6020U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_CYCLE_FRAG_TOTAL */ -#define RGX_CR_PERF_CYCLE_FRAG_TOTAL (0x6028U) -#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL (0x6028U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_CYCLE_COMP_TOTAL */ -#define RGX_CR_PERF_CYCLE_COMP_TOTAL (0x6030U) -#define RGX_CR_PERF_CYCLE_COMP_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL (0x6030U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL */ -#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL (0x6038U) -#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL (0x6038U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_PHASE_2D */ -#define RGX_CR_PERF_PHASE_2D (0x6050U) -#define RGX_CR_PERF_PHASE_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_PHASE_2D_COUNT_SHIFT (0U) -#define RGX_CR_PERF_PHASE_2D_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_PHASE_2D (0x6050U) +#define RGX_CR_PERF_PHASE_2D__PIPEDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_2D_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_2D_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_CYCLE_2D_TOTAL */ -#define RGX_CR_PERF_CYCLE_2D_TOTAL (0x6058U) -#define RGX_CR_PERF_CYCLE_2D_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL (0x6058U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC0_READ_STALL */ -#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) -#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) +#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC0_WRITE_STALL */ -#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) -#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC1_READ_STALL */ -#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) -#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) +#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC1_WRITE_STALL */ -#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) -#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC2_READ_STALL */ -#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) -#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) +#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC2_WRITE_STALL */ -#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) -#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC3_READ_STALL */ -#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) -#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) +#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_SLC3_WRITE_STALL */ -#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) -#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) /* Register RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL */ -#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL (0x6408U) -#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_SHIFT (0U) -#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL (0x6408U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4 (0xF4000U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x7020U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK 
(IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x7020U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x7028U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4 (0xF4008U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x7028U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4 (0xF4010U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG2__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x7030U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x7030U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x7038U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4 (0xF4018U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x7038U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x7040U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) 
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4 (0xF4020U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x7040U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4 (0xF4028U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
 /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 */
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x7048U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_SHIFT (0U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x7048U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4 (0xF4030U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
 /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 */
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x7050U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_SHIFT (0U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x7050U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
 /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 */
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x7058U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_SHIFT (0U)
-#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4 (0xF4038U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x7058U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4 (0xF4040U)
+#define
RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x7060U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x7060U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x7068U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4 (0xF4048U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x7068U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4 (0xF4050U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x7070U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x7070U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x7078U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4 (0xF4058U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x7078U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x7080U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4 (0xF4060U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x7080U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4 (0xF4068U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x7088U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_SHIFT (0U) -#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x7088U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x7090U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4 (0xF4070U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x7090U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4 (0xF4078U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15__HOST_SECURITY_GEQ4__MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 */ -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x7098U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK 
(IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_SHIFT (0U) -#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x7098U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FWCORE_WRAPPER_NMI_ENABLE */ -#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE (0x70A0U) -#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_SHIFT (0U) -#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_EN (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE (0x70A0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_EN (0x00000001U) /* Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT */ -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x70A8U) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x70A8U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U) /* Register RGX_CR_FWCORE_WRAPPER_RESET_VECTOR */ -#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR (0x70B0U) -#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) -#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_SHIFT (1U) -#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_CLRMSK (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR (0x70B0U) 
+#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_SHIFT (1U) +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_CLRMSK (0x00000001U) /* Register RGX_CR_FWCORE_WRAPPER_NMI_VECTOR */ -#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR (0x70B8U) -#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) -#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_SHIFT (1U) -#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_CLRMSK (0x00000001U) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR (0x70B8U) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_SHIFT (1U) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_CLRMSK (0x00000001U) /* Register RGX_CR_JONES_IDLE */ -#define RGX_CR_JONES_IDLE (0x8328U) -#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x000000000007FEFF)) -#define RGX_CR_JONES_IDLE_ASC_SHIFT (18U) -#define RGX_CR_JONES_IDLE_ASC_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_JONES_IDLE_ASC_EN (0x00040000U) -#define RGX_CR_JONES_IDLE_RCE_SHIFT (17U) -#define RGX_CR_JONES_IDLE_RCE_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_JONES_IDLE_RCE_EN (0x00020000U) -#define RGX_CR_JONES_IDLE_AXI2IMG_SHIFT (16U) -#define RGX_CR_JONES_IDLE_AXI2IMG_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_JONES_IDLE_AXI2IMG_EN (0x00010000U) -#define RGX_CR_JONES_IDLE_SLC_SHIFT (15U) -#define RGX_CR_JONES_IDLE_SLC_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_JONES_IDLE_SLC_EN (0x00008000U) -#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) -#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) -#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) -#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) -#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) -#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) -#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) -#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) -#define RGX_CR_JONES_IDLE_DFU_SHIFT (10U) -#define RGX_CR_JONES_IDLE_DFU_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_JONES_IDLE_DFU_EN (0x00000400U) -#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) -#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) -#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) -#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) -#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) -#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) -#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) -#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) -#define RGX_CR_JONES_IDLE_USC_GMUTEX_SHIFT (4U) -#define RGX_CR_JONES_IDLE_USC_GMUTEX_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_JONES_IDLE_USC_GMUTEX_EN (0x00000010U) -#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) -#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) -#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) -#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) -#define RGX_CR_JONES_IDLE_DCE_SHIFT (1U) -#define RGX_CR_JONES_IDLE_DCE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_JONES_IDLE_DCE_EN (0x00000002U) -#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) -#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) -#define 
RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) +#define RGX_CR_JONES_IDLE (0x8328U) +#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFEFF)) +#define RGX_CR_JONES_IDLE_SI_AXI2IMG_SHIFT (19U) +#define RGX_CR_JONES_IDLE_SI_AXI2IMG_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_JONES_IDLE_SI_AXI2IMG_EN (0x00080000U) +#define RGX_CR_JONES_IDLE_ASC_SHIFT (18U) +#define RGX_CR_JONES_IDLE_ASC_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_JONES_IDLE_ASC_EN (0x00040000U) +#define RGX_CR_JONES_IDLE_RCE_SHIFT (17U) +#define RGX_CR_JONES_IDLE_RCE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_JONES_IDLE_RCE_EN (0x00020000U) +#define RGX_CR_JONES_IDLE_AXI2IMG_SHIFT (16U) +#define RGX_CR_JONES_IDLE_AXI2IMG_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_JONES_IDLE_AXI2IMG_EN (0x00010000U) +#define RGX_CR_JONES_IDLE_SLC_SHIFT (15U) +#define RGX_CR_JONES_IDLE_SLC_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_JONES_IDLE_SLC_EN (0x00008000U) +#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) +#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) +#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) +#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) +#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) +#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) +#define RGX_CR_JONES_IDLE_DFU_SHIFT (10U) +#define RGX_CR_JONES_IDLE_DFU_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_JONES_IDLE_DFU_EN (0x00000400U) +#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) +#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) +#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) +#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) +#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) +#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) +#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) +#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_SHIFT (4U) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_EN (0x00000010U) +#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) +#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) +#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) +#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) +#define RGX_CR_JONES_IDLE_DCE_SHIFT (1U) +#define RGX_CR_JONES_IDLE_DCE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_IDLE_DCE_EN (0x00000002U) +#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) +#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) /* Register RGX_CR_SYS_BUS_SECURE */ -#define RGX_CR_SYS_BUS_SECURE (0xA100U) -#define RGX_CR_SYS_BUS_SECURE__SYS_BUS_SECURE_RESET__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) -#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) +#define RGX_CR_SYS_BUS_SECURE (0xA100U) +#define RGX_CR_SYS_BUS_SECURE__SBSR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_MASKFULL 
(IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) /* Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT */ -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6 (0xF0000U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6__MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6__ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6__ID_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_EQ6 (0xF0000U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_EQ6__MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_EQ6__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_EQ6__ID_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT */ -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4 (0xF0000U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_CLRMSK (0xFFFFFFE0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4 (0xF0000U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__ID_CLRMSK (0xFFFFFFE0U) /* Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT */ -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6 (0xF0000U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6__ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6__ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_GT6 (0xF0000U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_GT6__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_GT6__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_GT6__ID_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT */ -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_6__MASKFULL (IMG_UINT64_C(0x000000000000003F)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) -#define 
RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_CLRMSK (0xFFFFFFE0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_6__ID_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_6__ID_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_EQ1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_EQ1_AND_MHPW_EQ6__MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_EQ1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_EQ1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__ID_CLRMSK (0xFFFFFFE0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4__ID_CLRMSK (0xFFFFFFE0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_EQ1_AND_MHPW_EQ6__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_EQ1_AND_MHPW_EQ6__ID_CLRMSK (0xFFFFFFC0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_EQ6__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_EQ6__ID_CLRMSK (0xFFFFFFC0U) /* Register RGX_CR_MMU_CBASE_MAPPING */ -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1 (0xF0008U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_SHIFT (28U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_EN (0x10000000U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK (0xF0000000U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1 (0xF0008U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_MMU_CBASE_MAPPING */ -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU (0xF0008U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_SHIFT (28U) -#define 
RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_EN (0x10000000U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_CLRMSK (0xF0000000U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU (0xF0008U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_MMU_CBASE_MAPPING */ -#define RGX_CR_MMU_CBASE_MAPPING__VPU (0x1E010U) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_SHIFT (28U) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_EN (0x10000000U) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_CLRMSK (0xF0000000U) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU (0x1E010U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_MMU_CBASE_MAPPING */ -#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) -#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) -#define RGX_CR_MMU_CBASE_MAPPING_INVALID_SHIFT (28U) -#define RGX_CR_MMU_CBASE_MAPPING_INVALID_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MMU_CBASE_MAPPING_INVALID_EN (0x10000000U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) -#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) +#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) +#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) +#define 
RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) /* Register RGX_CR_MMU_FAULT_STATUS1 */ -#define RGX_CR_MMU_FAULT_STATUS1 (0xE150U) -#define RGX_CR_MMU_FAULT_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT (62U) -#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT (56U) -#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT (48U) -#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT (4U) -#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) -#define RGX_CR_MMU_FAULT_STATUS1_RNW_SHIFT (3U) -#define RGX_CR_MMU_FAULT_STATUS1_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_MMU_FAULT_STATUS1_RNW_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT (1U) -#define RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -#define RGX_CR_MMU_FAULT_STATUS1_FAULT_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS1_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MMU_FAULT_STATUS1_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MMU_FAULT_STATUS1 (0xE150U) +#define RGX_CR_MMU_FAULT_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT (62U) +#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT (56U) +#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT (48U) +#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MMU_FAULT_STATUS2 */ -#define RGX_CR_MMU_FAULT_STATUS2 (0xE158U) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__MASKFULL (IMG_UINT64_C(0x00000000003FFFFF)) -#define RGX_CR_MMU_FAULT_STATUS2_MASKFULL (IMG_UINT64_C(0x000000003FFF07FF)) -#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_SHIFT (29U) -#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN (0x20000000U) -#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT (28U) -#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_EN (0x10000000U) -#define RGX_CR_MMU_FAULT_STATUS2_BANK_SHIFT (24U) -#define RGX_CR_MMU_FAULT_STATUS2_BANK_CLRMSK (0xF0FFFFFFU) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_SHIFT (21U) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN (0x00200000U) -#define 
RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_SHIFT (20U) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN (0x00100000U) -#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT (16U) -#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK (0xFF00FFFFU) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT (12U) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK (0xFFF00FFFU) -#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT (10U) -#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN (0x00000400U) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_SHIFT (8U) -#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_CLRMSK (0xFFFFF0FFU) -#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK (0xFFFFFC00U) -#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_FAULT_STATUS2 (0xE158U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__MASKFULL (IMG_UINT64_C(0x00000000003FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS2_MASKFULL (IMG_UINT64_C(0x000000003FFF07FF)) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_SHIFT (29U) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN (0x20000000U) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_EN (0x10000000U) +#define RGX_CR_MMU_FAULT_STATUS2_BANK_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS2_BANK_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__FBM_FAULT_SHIFT (21U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__FBM_FAULT_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__FBM_FAULT_EN (0x00200000U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__WRITEBACK_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__WRITEBACK_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__WRITEBACK_EN (0x00100000U) +#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT (16U) +#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK (0xFF00FFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__BIF_ID_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__BIF_ID_CLRMSK (0xFFF00FFFU) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT (10U) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN (0x00000400U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__BANK_SHIFT (8U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__BANK_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK (0xFFFFFC00U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__ACTIVE_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__ACTIVE_ID_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MMU_FAULT_STATUS_META */ -#define RGX_CR_MMU_FAULT_STATUS_META (0xE160U) -#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (62U) -#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (56U) -#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (48U) -#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK 
(IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (4U) -#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) -#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) -#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) -#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MMU_FAULT_STATUS_META (0xE160U) +#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (62U) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (56U) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (48U) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MMU_FAULT_STATUS2_META */ -#define RGX_CR_MMU_FAULT_STATUS2_META (0xE198U) -#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) -#define RGX_CR_MMU_FAULT_STATUS2_META_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) -#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT (13U) -#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN (0x00002000U) -#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT (12U) -#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN (0x00001000U) -#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_SHIFT (12U) -#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_EN (0x00001000U) -#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT (8U) -#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK (0xFFFFF0FFU) -#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK (0xFFFFFF00U) -#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_FAULT_STATUS2_META (0xE198U) +#define RGX_CR_MMU_FAULT_STATUS2_META__AXT_INFRA__MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) +#define RGX_CR_MMU_FAULT_STATUS2_META_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define 
RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT (13U) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN (0x00002000U) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN (0x00001000U) +#define RGX_CR_MMU_FAULT_STATUS2_META__AXT_INFRA__WRITEBACK_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2_META__AXT_INFRA__WRITEBACK_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META__AXT_INFRA__WRITEBACK_EN (0x00001000U) +#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT (8U) +#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_FAULT_STATUS2_META__AXT_INFRA__ACTIVE_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_META__AXT_INFRA__ACTIVE_ID_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_MMU_FAULT_STATUS_PM */ -#define RGX_CR_MMU_FAULT_STATUS_PM (0xE130U) -#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_PM_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_PM_DM_SHIFT (24U) -#define RGX_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_SHIFT (24U) -#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0FFFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT (23U) -#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT (3U) -#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF800007)) -#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT (1U) -#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT (0U) -#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MMU_FAULT_STATUS_PM (0xE130U) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_DM_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT (23U) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF800007)) +#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_MMU_STATUS */ -#define 
RGX_CR_MMU_STATUS__VPU (0x10288U) -#define RGX_CR_MMU_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) -#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_SHIFT (40U) -#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_SHIFT (38U) -#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) -#define RGX_CR_MMU_STATUS__VPU__PM_READS_SHIFT (36U) -#define RGX_CR_MMU_STATUS__VPU__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) -#define RGX_CR_MMU_STATUS__VPU__PC_READS_SHIFT (24U) -#define RGX_CR_MMU_STATUS__VPU__PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) -#define RGX_CR_MMU_STATUS__VPU__PD_READS_SHIFT (12U) -#define RGX_CR_MMU_STATUS__VPU__PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) -#define RGX_CR_MMU_STATUS__VPU__PT_READS_SHIFT (0U) -#define RGX_CR_MMU_STATUS__VPU__PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) +#define RGX_CR_MMU_STATUS__VPU (0x10288U) +#define RGX_CR_MMU_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_SHIFT (40U) +#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_SHIFT (38U) +#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__PM_READS_SHIFT (36U) +#define RGX_CR_MMU_STATUS__VPU__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__PC_READS_SHIFT (24U) +#define RGX_CR_MMU_STATUS__VPU__PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__PD_READS_SHIFT (12U) +#define RGX_CR_MMU_STATUS__VPU__PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_MMU_STATUS__VPU__PT_READS_SHIFT (0U) +#define RGX_CR_MMU_STATUS__VPU__PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) /* Register RGX_CR_MMU_STATUS */ -#define RGX_CR_MMU_STATUS (0xE170U) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MASKFULL (IMG_UINT64_C(0x000003FFFFFFFFFF)) -#define RGX_CR_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_SHIFT (41U) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_MMU_STATUS_MMU_STALLED_SHIFT (40U) -#define RGX_CR_MMU_STATUS_MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_MMU_STATUS_MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_WRITES_SHIFT (39U) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFE7FFFFFFFFF)) -#define RGX_CR_MMU_STATUS_PM_WRITES_SHIFT (38U) -#define RGX_CR_MMU_STATUS_PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) -#define RGX_CR_MMU_STATUS_PM_READS_SHIFT (36U) -#define RGX_CR_MMU_STATUS_PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_READS_SHIFT (36U) -#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFF8FFFFFFFFF)) -#define RGX_CR_MMU_STATUS_PC_READS_SHIFT (24U) -#define RGX_CR_MMU_STATUS_PC_READS_CLRMSK 
(IMG_UINT64_C(0xFFFFFFF000FFFFFF)) -#define RGX_CR_MMU_STATUS_PD_READS_SHIFT (12U) -#define RGX_CR_MMU_STATUS_PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) -#define RGX_CR_MMU_STATUS_PT_READS_SHIFT (0U) -#define RGX_CR_MMU_STATUS_PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) +#define RGX_CR_MMU_STATUS (0xE170U) +#define RGX_CR_MMU_STATUS__CXT_INFRA__MASKFULL (IMG_UINT64_C(0x000003FFFFFFFFFF)) +#define RGX_CR_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__CXT_INFRA__MMU_STALLED_SHIFT (41U) +#define RGX_CR_MMU_STATUS__CXT_INFRA__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__CXT_INFRA__MMU_STALLED_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MMU_STATUS_MMU_STALLED_SHIFT (40U) +#define RGX_CR_MMU_STATUS_MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MMU_STATUS_MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MMU_STATUS__CXT_INFRA__PM_WRITES_SHIFT (39U) +#define RGX_CR_MMU_STATUS__CXT_INFRA__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFE7FFFFFFFFF)) +#define RGX_CR_MMU_STATUS_PM_WRITES_SHIFT (38U) +#define RGX_CR_MMU_STATUS_PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) +#define RGX_CR_MMU_STATUS_PM_READS_SHIFT (36U) +#define RGX_CR_MMU_STATUS_PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__CXT_INFRA__PM_READS_SHIFT (36U) +#define RGX_CR_MMU_STATUS__CXT_INFRA__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFF8FFFFFFFFF)) +#define RGX_CR_MMU_STATUS_PC_READS_SHIFT (24U) +#define RGX_CR_MMU_STATUS_PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_MMU_STATUS_PD_READS_SHIFT (12U) +#define RGX_CR_MMU_STATUS_PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_MMU_STATUS_PT_READS_SHIFT (0U) +#define RGX_CR_MMU_STATUS_PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) /* Register RGX_CR_MMU_ENTRY_STATUS */ -#define RGX_CR_MMU_ENTRY_STATUS__VPU (0x1E028U) -#define RGX_CR_MMU_ENTRY_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) -#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_SHIFT (15U) -#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) -#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_SHIFT (0U) -#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU (0x1E028U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_SHIFT (15U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_MMU_ENTRY_STATUS */ -#define RGX_CR_MMU_ENTRY_STATUS (0xE178U) -#define RGX_CR_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) -#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_SHIFT (15U) -#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) -#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT (0U) -#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_MMU_ENTRY_STATUS (0xE178U) +#define RGX_CR_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) +#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_SHIFT (15U) +#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) +#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT (0U) +#define 
RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_MMU_ENTRY */ -#define RGX_CR_MMU_ENTRY__VPU (0x1E030U) -#define RGX_CR_MMU_ENTRY__VPU__MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_MMU_ENTRY__VPU__ENABLE_SHIFT (1U) -#define RGX_CR_MMU_ENTRY__VPU__ENABLE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MMU_ENTRY__VPU__ENABLE_EN (0x00000002U) -#define RGX_CR_MMU_ENTRY__VPU__PENDING_SHIFT (0U) -#define RGX_CR_MMU_ENTRY__VPU__PENDING_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MMU_ENTRY__VPU__PENDING_EN (0x00000001U) +#define RGX_CR_MMU_ENTRY__VPU (0x1E030U) +#define RGX_CR_MMU_ENTRY__VPU__MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_SHIFT (1U) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_EN (0x00000002U) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_SHIFT (0U) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_EN (0x00000001U) /* Register RGX_CR_MMU_ENTRY */ -#define RGX_CR_MMU_ENTRY (0xE180U) -#define RGX_CR_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_CR_MMU_ENTRY_ENABLE_SHIFT (1U) -#define RGX_CR_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_MMU_ENTRY_ENABLE_EN (0x00000002U) -#define RGX_CR_MMU_ENTRY_PENDING_SHIFT (0U) -#define RGX_CR_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_MMU_ENTRY_PENDING_EN (0x00000001U) +#define RGX_CR_MMU_ENTRY (0xE180U) +#define RGX_CR_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MMU_ENTRY_ENABLE_SHIFT (1U) +#define RGX_CR_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MMU_ENTRY_ENABLE_EN (0x00000002U) +#define RGX_CR_MMU_ENTRY_PENDING_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MMU_ENTRY_PENDING_EN (0x00000001U) /* Register RGX_CR_MMU_PAGE_SIZE_RANGE_ONE */ -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE (0xE350U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT (38U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT (19U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE (0xE350U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define 
RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U) /* Register RGX_CR_MMU_PAGE_SIZE_RANGE_TWO */ -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO (0xE358U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT (38U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT (19U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO (0xE358U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U) /* Register RGX_CR_MMU_PAGE_SIZE_RANGE_THREE */ -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE (0xE360U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT (38U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT (19U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE (0xE360U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT (0U) +#define 
RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U) /* Register RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR */ -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR (0xE368U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT (38U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT (19U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT (0U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U) -#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR (0xE368U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U) /* Register RGX_CR_SLC_STATUS1 */ -#define RGX_CR_SLC_STATUS1 (0xE210U) -#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT (48U) -#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT (36U) -#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) -#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT (24U) -#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) -#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT (12U) -#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) -#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT (0U) -#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) +#define RGX_CR_SLC_STATUS1 (0xE210U) +#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT (48U) +#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT (36U) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT (24U) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define 
RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT (12U) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT (0U) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) /* Register RGX_CR_SLC_STATUS2 */ -#define RGX_CR_SLC_STATUS2 (0xE218U) -#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT (48U) -#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT (36U) -#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) -#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT (24U) -#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) -#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT (12U) -#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) -#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT (0U) -#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) +#define RGX_CR_SLC_STATUS2 (0xE218U) +#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT (48U) +#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT (36U) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT (24U) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT (12U) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT (0U) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) /* Register RGX_CR_SLC_IDLE */ -#define RGX_CR_SLC_IDLE (0xE230U) -#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x0000000001FFFFFF)) -#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) -#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_SHIFT (24U) -#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_CLRMSK (0xFEFFFFFFU) -#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_EN (0x01000000U) -#define RGX_CR_SLC_IDLE_FBCDC_ARB_SHIFT (20U) -#define RGX_CR_SLC_IDLE_FBCDC_ARB_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_SLC_IDLE_FBCDC_ARB_EN (0x00100000U) -#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__OWDB_SHIFT (20U) -#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__OWDB_CLRMSK (0xFF0FFFFFU) -#define RGX_CR_SLC_IDLE_OWDB_SHIFT (16U) -#define RGX_CR_SLC_IDLE_OWDB_CLRMSK (0xFFF0FFFFU) -#define RGX_CR_SLC_IDLE_ACE_CLBS_SHIFT (16U) -#define RGX_CR_SLC_IDLE_ACE_CLBS_CLRMSK (0xFFF0FFFFU) -#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT (12U) -#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK (0xFFFF0FFFU) -#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (4U) -#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) -#define RGX_CR_SLC_IDLE_MMU_SHIFT (3U) -#define RGX_CR_SLC_IDLE_MMU_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SLC_IDLE_MMU_EN (0x00000008U) -#define RGX_CR_SLC_IDLE_CCM_SHIFT (2U) -#define RGX_CR_SLC_IDLE_CCM_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SLC_IDLE_CCM_EN (0x00000004U) -#define 
RGX_CR_SLC_IDLE_RDI_SHIFT (1U) -#define RGX_CR_SLC_IDLE_RDI_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_SLC_IDLE_RDI_EN (0x00000002U) -#define RGX_CR_SLC_IDLE_XBAR_SHIFT (0U) -#define RGX_CR_SLC_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_SLC_IDLE_XBAR_EN (0x00000001U) +#define RGX_CR_SLC_IDLE (0xE230U) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__MASKFULL (IMG_UINT64_C(0x0000000001FFFFFF)) +#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__FBCDC_ARB_SHIFT (24U) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__FBCDC_ARB_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__FBCDC_ARB_EN (0x01000000U) +#define RGX_CR_SLC_IDLE_FBCDC_ARB_SHIFT (20U) +#define RGX_CR_SLC_IDLE_FBCDC_ARB_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_SLC_IDLE_FBCDC_ARB_EN (0x00100000U) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__OWDB_SHIFT (20U) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__OWDB_CLRMSK (0xFF0FFFFFU) +#define RGX_CR_SLC_IDLE_OWDB_SHIFT (16U) +#define RGX_CR_SLC_IDLE_OWDB_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__ACE_CLBS_SHIFT (16U) +#define RGX_CR_SLC_IDLE__GCC_AND_AXT_IF_GT0__ACE_CLBS_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT (12U) +#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (4U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) +#define RGX_CR_SLC_IDLE_MMU_SHIFT (3U) +#define RGX_CR_SLC_IDLE_MMU_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_IDLE_MMU_EN (0x00000008U) +#define RGX_CR_SLC_IDLE_CCM_SHIFT (2U) +#define RGX_CR_SLC_IDLE_CCM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_IDLE_CCM_EN (0x00000004U) +#define RGX_CR_SLC_IDLE_RDI_SHIFT (1U) +#define RGX_CR_SLC_IDLE_RDI_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_IDLE_RDI_EN (0x00000002U) +#define RGX_CR_SLC_IDLE_XBAR_SHIFT (0U) +#define RGX_CR_SLC_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_IDLE_XBAR_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_STATUS3 +*/ +#define RGX_CR_SLC_STATUS3 (0xE238U) +#define RGX_CR_SLC_STATUS3_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_SHIFT (50U) +#define RGX_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0xF003FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_SHIFT (40U) +#define RGX_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0xFFFC00FFFFFFFFFF)) +#define RGX_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_SHIFT (30U) +#define RGX_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) +#define RGX_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_SHIFT (20U) +#define RGX_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC00FFFFF)) +#define RGX_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_SHIFT (10U) +#define RGX_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF003FF)) +#define RGX_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_SHIFT (0U) +#define RGX_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFC00)) /* Register RGX_CR_SLC_FAULT_STOP_STATUS */ -#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU (0x1E240U) -#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) -#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_SHIFT (0U) -#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_CLRMSK (0xFFFE0000U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU (0x1E240U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__MASKFULL 
(IMG_UINT64_C(0x000000000001FFFF)) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_SHIFT (0U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_CLRMSK (0xFFFE0000U) /* Register RGX_CR_SLC_FAULT_STOP_STATUS */ -#define RGX_CR_SLC_FAULT_STOP_STATUS (0xE240U) -#define RGX_CR_SLC_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) -#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT (0U) -#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFE0000U) +#define RGX_CR_SLC_FAULT_STOP_STATUS (0xE240U) +#define RGX_CR_SLC_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) +#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT (0U) +#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFE0000U) /* Register RGX_CR_SLC_STATUS_DEBUG */ -#define RGX_CR_SLC_STATUS_DEBUG__VPU (0x1E260U) -#define RGX_CR_SLC_STATUS_DEBUG__VPU__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_SHIFT (16U) -#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_CLRMSK (0x0000FFFFU) -#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_SHIFT (0U) -#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU (0x1E260U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_SHIFT (16U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_CLRMSK (0x0000FFFFU) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_SHIFT (0U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) /* Register RGX_CR_SLC_STATUS_DEBUG */ -#define RGX_CR_SLC_STATUS_DEBUG (0xE260U) -#define RGX_CR_SLC_STATUS_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT (16U) -#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK (0x0000FFFFU) -#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT (0U) -#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) +#define RGX_CR_SLC_STATUS_DEBUG (0xE260U) +#define RGX_CR_SLC_STATUS_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT (16U) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK (0x0000FFFFU) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT (0U) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) /* Register RGX_CR_HMMU_OSID_PAGE_SIZE */ -#define RGX_CR_HMMU_OSID_PAGE_SIZE (0x80000U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_MASKFULL (IMG_UINT64_C(0x0000000077777777)) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_SHIFT (28U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_CLRMSK (0x8FFFFFFFU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_SHIFT (24U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_CLRMSK (0xF8FFFFFFU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_SHIFT (20U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_CLRMSK (0xFF8FFFFFU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_SHIFT (16U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_CLRMSK (0xFFF8FFFFU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_SHIFT (12U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_CLRMSK (0xFFFF8FFFU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_SHIFT (8U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_CLRMSK (0xFFFFF8FFU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_SHIFT (4U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_CLRMSK (0xFFFFFF8FU) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_SHIFT (0U) -#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_CLRMSK (0xFFFFFFF8U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE (0x80000U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_MASKFULL 
(IMG_UINT64_C(0x0000000077777777)) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_SHIFT (28U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_CLRMSK (0x8FFFFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_SHIFT (24U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_CLRMSK (0xF8FFFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_SHIFT (20U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_CLRMSK (0xFF8FFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_SHIFT (16U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_CLRMSK (0xFFF8FFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_SHIFT (12U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_SHIFT (8U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_SHIFT (4U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_SHIFT (0U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_CLRMSK (0xFFFFFFF8U) /* Register RGX_CR_HMMU_BYPASS */ -#define RGX_CR_HMMU_BYPASS (0x80008U) -#define RGX_CR_HMMU_BYPASS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_HMMU_BYPASS_EN_SHIFT (0U) -#define RGX_CR_HMMU_BYPASS_EN_CLRMSK (0xFFFFFF00U) +#define RGX_CR_HMMU_BYPASS (0x80008U) +#define RGX_CR_HMMU_BYPASS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_HMMU_BYPASS_EN_SHIFT (0U) +#define RGX_CR_HMMU_BYPASS_EN_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_HMMU_INVAL */ -#define RGX_CR_HMMU_INVAL (0x80010U) -#define RGX_CR_HMMU_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -#define RGX_CR_HMMU_INVAL_OS_ID_SHIFT (4U) -#define RGX_CR_HMMU_INVAL_OS_ID_CLRMSK (0xFFFFFF8FU) -#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_SHIFT (3U) -#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_EN (0x00000008U) -#define RGX_CR_HMMU_INVAL_HPC_SHIFT (2U) -#define RGX_CR_HMMU_INVAL_HPC_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_HMMU_INVAL_HPC_EN (0x00000004U) -#define RGX_CR_HMMU_INVAL_HPD_SHIFT (1U) -#define RGX_CR_HMMU_INVAL_HPD_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_HMMU_INVAL_HPD_EN (0x00000002U) -#define RGX_CR_HMMU_INVAL_HPT_SHIFT (0U) -#define RGX_CR_HMMU_INVAL_HPT_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_HMMU_INVAL_HPT_EN (0x00000001U) +#define RGX_CR_HMMU_INVAL (0x80010U) +#define RGX_CR_HMMU_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_HMMU_INVAL_OS_ID_SHIFT (4U) +#define RGX_CR_HMMU_INVAL_OS_ID_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_SHIFT (3U) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_EN (0x00000008U) +#define RGX_CR_HMMU_INVAL_HPC_SHIFT (2U) +#define RGX_CR_HMMU_INVAL_HPC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_HMMU_INVAL_HPC_EN (0x00000004U) +#define RGX_CR_HMMU_INVAL_HPD_SHIFT (1U) +#define RGX_CR_HMMU_INVAL_HPD_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_HMMU_INVAL_HPD_EN (0x00000002U) +#define RGX_CR_HMMU_INVAL_HPT_SHIFT (0U) +#define RGX_CR_HMMU_INVAL_HPT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HMMU_INVAL_HPT_EN (0x00000001U) /* Register RGX_CR_HMMU_HPC_BASE_MAPPING0 */ -#define RGX_CR_HMMU_HPC_BASE_MAPPING0 (0x80018U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_SHIFT (36U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_SHIFT (32U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define 
RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_SHIFT (4U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_SHIFT (0U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0 (0x80018U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_HMMU_HPC_BASE_MAPPING1 */ -#define RGX_CR_HMMU_HPC_BASE_MAPPING1 (0x80020U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_SHIFT (36U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_SHIFT (32U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_SHIFT (4U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_SHIFT (0U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1 (0x80020U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_HMMU_HPC_BASE_MAPPING2 */ -#define RGX_CR_HMMU_HPC_BASE_MAPPING2 (0x80028U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_SHIFT (36U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_SHIFT (32U) -#define 
RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_SHIFT (4U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_SHIFT (0U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2 (0x80028U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_HMMU_HPC_BASE_MAPPING3 */ -#define RGX_CR_HMMU_HPC_BASE_MAPPING3 (0x80030U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_SHIFT (36U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_SHIFT (32U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_SHIFT (4U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_SHIFT (0U) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3 (0x80030U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_EN (IMG_UINT64_C(0x0000000000000001)) /* Register group: RGX_CR_HMMU_PAGE_FAULT_INFO, with 8 repeats */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO_REPEATCOUNT (8U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO_REPEATCOUNT (8U) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO0 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO0 (0x80038U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO0_MASKFULL 
(IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0 (0x80038U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO1 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO1 (0x80040U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1 (0x80040U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO2 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO2 (0x80048U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2 (0x80048U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO3 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO3 (0x80050U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3 (0x80050U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO4 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO4 (0x80058U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_SHIFT (2U) 
-#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4 (0x80058U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO5 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO5 (0x80060U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5 (0x80060U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO6 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO6 (0x80068U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6 (0x80068U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PAGE_FAULT_INFO7 */ -#define RGX_CR_HMMU_PAGE_FAULT_INFO7 (0x80070U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7 (0x80070U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register group: RGX_CR_HMMU_PENDING_ENTRY_INFO, with 8 repeats */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO_REPEATCOUNT (8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO_REPEATCOUNT (8U) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO0 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO0 (0x800C0U) -#define 
RGX_CR_HMMU_PENDING_ENTRY_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0 (0x800C0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO1 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO1 (0x800C8U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1 (0x800C8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO2 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO2 (0x800D0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2 (0x800D0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO3 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO3 (0x800D8U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3 (0x800D8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register 
RGX_CR_HMMU_PENDING_ENTRY_INFO4 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO4 (0x800E0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4 (0x800E0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO5 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO5 (0x800E8U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5 (0x800E8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO6 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO6 (0x800F0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6 (0x800F0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_PENDING_ENTRY_INFO7 */ -#define RGX_CR_HMMU_PENDING_ENTRY_INFO7 (0x800F8U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_SHIFT (2U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7 (0x800F8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_SHIFT (0U) +#define 
RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) /* Register RGX_CR_HMMU_HOST_IRQ_ENABLE */ -#define RGX_CR_HMMU_HOST_IRQ_ENABLE (0x80100U) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_SHIFT (3U) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_SHIFT (2U) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_SHIFT (1U) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_SHIFT (0U) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE (0x80100U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_SHIFT (3U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_SHIFT (2U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_SHIFT (1U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_SHIFT (0U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_HMMU_PENDING_ENTRY */ -#define RGX_CR_HMMU_PENDING_ENTRY (0x80108U) -#define RGX_CR_HMMU_PENDING_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_SHIFT (0U) -#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_EN (0x00000001U) +#define RGX_CR_HMMU_PENDING_ENTRY (0x80108U) +#define RGX_CR_HMMU_PENDING_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_EN (0x00000001U) /* Register RGX_CR_HMMU_FAULT_STATUS */ -#define RGX_CR_HMMU_FAULT_STATUS (0x80120U) -#define RGX_CR_HMMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_SHIFT (31U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_SHIFT (30U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_SHIFT (29U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_SHIFT (28U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_SHIFT (27U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_SHIFT (26U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_SHIFT (25U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_SHIFT (24U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_SHIFT (23U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_SHIFT (22U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_SHIFT (21U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_SHIFT (20U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_SHIFT (19U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_SHIFT (18U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_SHIFT (17U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000020000)) -#define 
RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_SHIFT (16U) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_SHIFT (15U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_SHIFT (14U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_SHIFT (13U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_SHIFT (12U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_SHIFT (11U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_SHIFT (10U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_SHIFT (9U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_SHIFT (8U) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_SHIFT (7U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_SHIFT (6U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_SHIFT (5U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_SHIFT (4U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_SHIFT (3U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_SHIFT 
(2U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_SHIFT (1U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_SHIFT (0U) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_FAULT_STATUS (0x80120U) +#define RGX_CR_HMMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_SHIFT (31U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_SHIFT (30U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_SHIFT (29U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_SHIFT (28U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_SHIFT (27U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_SHIFT (26U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_SHIFT (25U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_SHIFT (24U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_SHIFT (23U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_SHIFT (22U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_SHIFT (21U) +#define 
RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_SHIFT (20U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_SHIFT (19U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_SHIFT (18U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_SHIFT (17U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_SHIFT (16U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_SHIFT (15U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_SHIFT (14U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_SHIFT (13U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_SHIFT (12U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_SHIFT (11U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_SHIFT (10U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_SHIFT (9U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_SHIFT (8U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_SHIFT 
(7U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_SHIFT (6U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_SHIFT (5U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_SHIFT (4U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_SHIFT (3U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_SHIFT (2U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_SHIFT (1U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_SHIFT (0U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000000001)) /* Register group: RGX_CR_HMMU_READONLY_FAULT_INFO, with 8 repeats */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO_REPEATCOUNT (8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO_REPEATCOUNT (8U) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO0 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO0 (0x80190U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0 (0x80190U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO1 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO1 (0x80198U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1 (0x80198U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO2 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO2 (0x801A0U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define 
RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2 (0x801A0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO3 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO3 (0x801A8U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3 (0x801A8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO4 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO4 (0x801B0U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4 (0x801B0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO5 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO5 (0x801B8U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5 (0x801B8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO6 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO6 (0x801C0U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6 (0x801C0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register RGX_CR_HMMU_READONLY_FAULT_INFO7 */ -#define RGX_CR_HMMU_READONLY_FAULT_INFO7 (0x801C8U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_SHIFT (4U) -#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7 (0x801C8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) /* Register group: 
RGX_CR_HMMU_READONLY_FAULT_PM_INFO, with 8 repeats */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO_REPEATCOUNT (8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO_REPEATCOUNT (8U) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 (0x801D0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 (0x801D0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 (0x801D8U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 (0x801D8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 (0x801E0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 (0x801E0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 (0x801E8U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 (0x801E8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 (0x801F0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 (0x801F0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 (0x801F8U) -#define 
RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 (0x801F8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 (0x80200U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 (0x80200U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 */ -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 (0x80208U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_SHIFT (0U) -#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 (0x80208U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) /* Register RGX_CR_ACE_CTRL */ -#define RGX_CR_ACE_CTRL__VPU (0x1E320U) -#define RGX_CR_ACE_CTRL__VPU__MASKFULL (IMG_UINT64_C(0x00000000007FCFFF)) -#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_SHIFT (19U) -#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_CLRMSK (0xFF87FFFFU) -#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_SHIFT (15U) -#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) -#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) -#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_SHIFT (8U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) -#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_SHIFT (4U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE 
(0x00000020U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) -#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) -#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_SHIFT (2U) -#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_SHIFT (1U) -#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_INNER_SHAREABLE (0x00000000U) -#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) -#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SHIFT (0U) -#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) -#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SYSTEM (0x00000001U) +#define RGX_CR_ACE_CTRL__VPU (0x1E320U) +#define RGX_CR_ACE_CTRL__VPU__MASKFULL (IMG_UINT64_C(0x00000000007FCFFF)) +#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_SHIFT (19U) +#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_CLRMSK (0xFF87FFFFU) +#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_SHIFT (15U) +#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_SHIFT (8U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_SHIFT (4U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_SHIFT (2U) +#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_SHIFT (1U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) 
+#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SHIFT (0U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SYSTEM (0x00000001U) /* Register RGX_CR_ACE_CTRL */ -#define RGX_CR_ACE_CTRL (0xE320U) -#define RGX_CR_ACE_CTRL_MASKFULL (IMG_UINT64_C(0x0000000000FFCFFF)) -#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_SHIFT (23U) -#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_CLRMSK (0xFF7FFFFFU) -#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_EN (0x00800000U) -#define RGX_CR_ACE_CTRL_CLB_AXQOS_SHIFT (19U) -#define RGX_CR_ACE_CTRL_CLB_AXQOS_CLRMSK (0xFF87FFFFU) -#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT (15U) -#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) -#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) -#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_SHIFT (8U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) -#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_SHIFT (4U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) -#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) -#define RGX_CR_ACE_CTRL_MMU_DOMAIN_SHIFT (2U) -#define RGX_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) -#define RGX_CR_ACE_CTRL_COH_DOMAIN_SHIFT (1U) -#define RGX_CR_ACE_CTRL_COH_DOMAIN_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE (0x00000000U) -#define RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) -#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT (0U) -#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) -#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM (0x00000001U) +#define RGX_CR_ACE_CTRL (0xE320U) +#define RGX_CR_ACE_CTRL_MASKFULL (IMG_UINT64_C(0x0000000000FFCFFF)) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_SHIFT (23U) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_EN (0x00800000U) +#define RGX_CR_ACE_CTRL_CLB_AXQOS_SHIFT (19U) +#define RGX_CR_ACE_CTRL_CLB_AXQOS_CLRMSK (0xFF87FFFFU) +#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT (15U) +#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) 
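
For context on how these generated definitions are consumed: each register field is described by a *_SHIFT / *_CLRMSK pair, with pre-shifted *_EN or enumerated values for single-bit and encoded fields, and callers program a field by masking with CLRMSK and OR-ing the new value in at SHIFT. A minimal sketch of that read-modify-write pattern, using RGX_CR_ACE_CTRL values taken from this header; the helper and function names below are illustrative only and are not part of the driver:

    #include <stdint.h>

    /* Illustrative helpers only (not part of the DDK): they show how the
     * *_SHIFT / *_CLRMSK / *_EN macros are meant to be combined when
     * programming or inspecting an RGX control register value. */

    /* Clear a field with its CLRMSK, then insert a new value at its SHIFT. */
    static inline uint64_t rgx_field_set(uint64_t reg, uint64_t clrmsk,
                                         unsigned int shift, uint64_t val)
    {
        return (reg & clrmsk) | (val << shift);
    }

    /* Example: select the outer-shareable coherency domain in RGX_CR_ACE_CTRL.
     * The enumerated value is already pre-shifted, so it is OR'ed in as-is. */
    static uint64_t ace_ctrl_outer_shareable(uint64_t ace_ctrl)
    {
        ace_ctrl &= 0xFFFFFFFDU;  /* RGX_CR_ACE_CTRL_COH_DOMAIN_CLRMSK          */
        ace_ctrl |= 0x00000002U;  /* RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE */
        return ace_ctrl;
    }

    /* Example: test a single-bit flag via its pre-shifted *_EN value. */
    static int ace_ctrl_nonsecure_prot_match_enabled(uint64_t ace_ctrl)
    {
        return (ace_ctrl & 0x00004000U) != 0;  /* ..._ENABLE_NONSECURE_PROT_MATCH_EN */
    }
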
+#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_SHIFT (8U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE__VPU__WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_SHIFT (4U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE__VPU__WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL_MMU_DOMAIN_SHIFT (2U) +#define RGX_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_SHIFT (1U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN__VPU__INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN__VPU__OUTER_SHAREABLE (0x00000002U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT (0U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE 
(0x00000000U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM (0x00000001U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN__VPU__NON_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN__VPU__SYSTEM (0x00000001U) /* Register RGX_CR_SOC_AXI */ -#define RGX_CR_SOC_AXI (0xE338U) -#define RGX_CR_SOC_AXI_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U) -#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000008U) -#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U) -#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000004U) -#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT (0U) -#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK (0xFFFFFFFCU) -#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY (0x00000000U) -#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0x00000001U) -#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0x00000002U) +#define RGX_CR_SOC_AXI (0xE338U) +#define RGX_CR_SOC_AXI_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000008U) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000004U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT (0U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY (0x00000000U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0x00000001U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0x00000002U) /* Register RGX_CR_CONTEXT_MAPPING0 */ -#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) -#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) -#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_CONTEXT_MAPPING0_GEOM_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING0_GEOM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) +#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING0_GEOM_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING0_GEOM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_CONTEXT_MAPPING2 */ -#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) -#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) -#define 
RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) -#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) +#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_CONTEXT_MAPPING3 */ -#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) -#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) -#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) -#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) +#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) +#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) /* Register RGX_CR_BIF_OUTSTANDING_READ */ -#define RGX_CR_BIF_OUTSTANDING_READ (0xF098U) -#define RGX_CR_BIF_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT (0U) -#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_OUTSTANDING_READ (0xF098U) +#define RGX_CR_BIF_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIF_TEXAS1_OUTSTANDING_READ */ -#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ (0xF0A0U) -#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_SHIFT (0U) -#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ (0xF0A0U) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIF_TEXAS0_OUTSTANDING_READ */ -#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ (0xF0A8U) -#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_SHIFT (0U) -#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ (0xF0A8U) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) /* Register RGX_CR_BIF_PFS */ -#define RGX_CR_BIF_PFS (0xF0B0U) -#define RGX_CR_BIF_PFS_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define 
RGX_CR_BIF_PFS_SLC_STALLING_SHIFT (8U) -#define RGX_CR_BIF_PFS_SLC_STALLING_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_BIF_PFS_SLC_STALLING_EN (0x00000100U) -#define RGX_CR_BIF_PFS_TILING_IDLE_SHIFT (7U) -#define RGX_CR_BIF_PFS_TILING_IDLE_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_BIF_PFS_TILING_IDLE_EN (0x00000080U) -#define RGX_CR_BIF_PFS_ARB_IDLE_SHIFT (6U) -#define RGX_CR_BIF_PFS_ARB_IDLE_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BIF_PFS_ARB_IDLE_EN (0x00000040U) -#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_SHIFT (5U) -#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_EN (0x00000020U) -#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_SHIFT (4U) -#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_EN (0x00000010U) -#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_SHIFT (3U) -#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_EN (0x00000008U) -#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_SHIFT (2U) -#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_EN (0x00000004U) -#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_SHIFT (1U) -#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_EN (0x00000002U) -#define RGX_CR_BIF_PFS_STALL_COMPLETE_SHIFT (0U) -#define RGX_CR_BIF_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_PFS_STALL_COMPLETE_EN (0x00000001U) +#define RGX_CR_BIF_PFS (0xF0B0U) +#define RGX_CR_BIF_PFS_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_BIF_PFS_SLC_STALLING_SHIFT (8U) +#define RGX_CR_BIF_PFS_SLC_STALLING_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_BIF_PFS_SLC_STALLING_EN (0x00000100U) +#define RGX_CR_BIF_PFS_TILING_IDLE_SHIFT (7U) +#define RGX_CR_BIF_PFS_TILING_IDLE_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_BIF_PFS_TILING_IDLE_EN (0x00000080U) +#define RGX_CR_BIF_PFS_ARB_IDLE_SHIFT (6U) +#define RGX_CR_BIF_PFS_ARB_IDLE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_PFS_ARB_IDLE_EN (0x00000040U) +#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_SHIFT (5U) +#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_EN (0x00000020U) +#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_SHIFT (4U) +#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_EN (0x00000010U) +#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_SHIFT (3U) +#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_EN (0x00000008U) +#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_SHIFT (2U) +#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_EN (0x00000004U) +#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_SHIFT (1U) +#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_EN (0x00000002U) +#define RGX_CR_BIF_PFS_STALL_COMPLETE_SHIFT (0U) +#define RGX_CR_BIF_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_PFS_STALL_COMPLETE_EN (0x00000001U) /* Register RGX_CR_BIF_TEXAS0_PFS */ -#define RGX_CR_BIF_TEXAS0_PFS (0xF0B8U) -#define RGX_CR_BIF_TEXAS0_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_SHIFT (6U) -#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_EN (0x00000040U) -#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_SHIFT (5U) -#define 
RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_EN (0x00000020U) -#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_SHIFT (4U) -#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_EN (0x00000010U) -#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_SHIFT (3U) -#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_EN (0x00000008U) -#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_SHIFT (2U) -#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_EN (0x00000004U) -#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_SHIFT (1U) -#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U) -#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_SHIFT (0U) -#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_EN (0x00000001U) +#define RGX_CR_BIF_TEXAS0_PFS (0xF0B8U) +#define RGX_CR_BIF_TEXAS0_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_SHIFT (6U) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_EN (0x00000040U) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_SHIFT (5U) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_EN (0x00000020U) +#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_SHIFT (4U) +#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_EN (0x00000010U) +#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_SHIFT (3U) +#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_EN (0x00000008U) +#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_SHIFT (2U) +#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_EN (0x00000004U) +#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_SHIFT (1U) +#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U) +#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_SHIFT (0U) +#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_EN (0x00000001U) /* Register RGX_CR_BIF_TEXAS1_PFS */ -#define RGX_CR_BIF_TEXAS1_PFS (0xF0C8U) -#define RGX_CR_BIF_TEXAS1_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_SHIFT (6U) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_EN (0x00000040U) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_SHIFT (5U) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_EN (0x00000020U) -#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_SHIFT (4U) -#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_EN (0x00000010U) -#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_SHIFT (3U) -#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_EN (0x00000008U) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_SHIFT (2U) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_EN (0x00000004U) -#define 
RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_SHIFT (1U) -#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U) -#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_SHIFT (0U) -#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_EN (0x00000001U) +#define RGX_CR_BIF_TEXAS1_PFS (0xF0C8U) +#define RGX_CR_BIF_TEXAS1_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_SHIFT (6U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_EN (0x00000040U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_SHIFT (5U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_EN (0x00000020U) +#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_SHIFT (4U) +#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_EN (0x00000010U) +#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_SHIFT (3U) +#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_EN (0x00000008U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_SHIFT (2U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_EN (0x00000004U) +#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_SHIFT (1U) +#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U) +#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_SHIFT (0U) +#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_EN (0x00000001U) /* Register RGX_CR_JONES_FIX */ -#define RGX_CR_JONES_FIX__ROGUE3 (0xF0C0U) -#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_SHIFT (0U) -#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK (0xFFFF0000U) +#define RGX_CR_JONES_FIX__ROGUE3 (0xF0C0U) +#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_SHIFT (0U) +#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK (0xFFFF0000U) /* Register RGX_CR_FBCDC_STATUS */ -#define RGX_CR_FBCDC_STATUS (0xF600U) -#define RGX_CR_FBCDC_STATUS_MASKFULL (IMG_UINT64_C(0x000000000F0F0F0F)) -#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_SHIFT (24U) -#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_CLRMSK (0xF0FFFFFFU) -#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_SHIFT (16U) -#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_CLRMSK (0xFFF0FFFFU) -#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_SHIFT (8U) -#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_CLRMSK (0xFFFFF0FFU) -#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_SHIFT (0U) -#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_FBCDC_STATUS (0xF600U) +#define RGX_CR_FBCDC_STATUS_MASKFULL (IMG_UINT64_C(0x000000000F0F0F0F)) +#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_SHIFT (24U) +#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_SHIFT (16U) +#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_SHIFT (8U) +#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_SHIFT (0U) 
+#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_FBCDC_SIGNATURE_STATUS */ -#define RGX_CR_FBCDC_SIGNATURE_STATUS (0xF618U) -#define RGX_CR_FBCDC_SIGNATURE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBM_ERROR_SHIFT (4U) -#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBM_ERROR_CLRMSK (0xFFFFFF0FU) -#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBDC_ERROR_SHIFT (0U) -#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBDC_ERROR_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_FBCDC_SIGNATURE_STATUS (0xF618U) +#define RGX_CR_FBCDC_SIGNATURE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBM_ERROR_SHIFT (4U) +#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBM_ERROR_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBDC_ERROR_SHIFT (0U) +#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBDC_ERROR_CLRMSK (0xFFFFFFF0U) /* Register RGX_CR_CONTEXT_MAPPING4 */ -#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) -#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) -#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) -#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) -#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) -#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) -#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) -#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) +#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_CR_FBCDC_IDLE */ -#define RGX_CR_FBCDC_IDLE (0xF218U) -#define RGX_CR_FBCDC_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_SHIFT (13U) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_EN (0x00002000U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_SHIFT (12U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_EN (0x00001000U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_SHIFT (11U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_EN (0x00000800U) -#define 
RGX_CR_FBCDC_IDLE_FBC_MEMIF2_SHIFT (10U) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_EN (0x00000400U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_SHIFT (9U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_EN (0x00000200U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_SHIFT (8U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_EN (0x00000100U) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_SHIFT (7U) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_EN (0x00000080U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_SHIFT (6U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_EN (0x00000040U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_SHIFT (5U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_EN (0x00000020U) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_SHIFT (4U) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_EN (0x00000010U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_SHIFT (3U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_EN (0x00000008U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_SHIFT (2U) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_EN (0x00000004U) -#define RGX_CR_FBCDC_IDLE_FBHC_SHIFT (1U) -#define RGX_CR_FBCDC_IDLE_FBHC_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_FBCDC_IDLE_FBHC_EN (0x00000002U) -#define RGX_CR_FBCDC_IDLE_FBSC_SHIFT (0U) -#define RGX_CR_FBCDC_IDLE_FBSC_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FBCDC_IDLE_FBSC_EN (0x00000001U) +#define RGX_CR_FBCDC_IDLE (0xF218U) +#define RGX_CR_FBCDC_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_SHIFT (13U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_EN (0x00002000U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_SHIFT (12U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_EN (0x00001000U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_SHIFT (11U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_EN (0x00000800U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_SHIFT (10U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_EN (0x00000400U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_SHIFT (9U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_EN (0x00000200U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_SHIFT (8U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_EN (0x00000100U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_SHIFT (7U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_EN (0x00000080U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_SHIFT (6U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_EN (0x00000040U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_SHIFT (5U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_EN (0x00000020U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_SHIFT (4U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_EN (0x00000010U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_SHIFT 
(3U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_EN (0x00000008U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_SHIFT (2U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_EN (0x00000004U) +#define RGX_CR_FBCDC_IDLE_FBHC_SHIFT (1U) +#define RGX_CR_FBCDC_IDLE_FBHC_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FBCDC_IDLE_FBHC_EN (0x00000002U) +#define RGX_CR_FBCDC_IDLE_FBSC_SHIFT (0U) +#define RGX_CR_FBCDC_IDLE_FBSC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FBCDC_IDLE_FBSC_EN (0x00000001U) /* Register RGX_CR_MERCER_SOFT_RESET */ -#define RGX_CR_MERCER_SOFT_RESET (0x0630U) -#define RGX_CR_MERCER_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_SHIFT (62U) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_SHIFT (61U) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_SHIFT (60U) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_SHIFT (59U) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN (IMG_UINT64_C(0x0800000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_SHIFT (58U) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_SHIFT (57U) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_SHIFT (56U) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_SHIFT (55U) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_SHIFT (54U) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_SHIFT (53U) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_SHIFT (52U) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_SHIFT (51U) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN 
(IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_SHIFT (50U) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_SHIFT (49U) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_SHIFT (48U) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_SHIFT (47U) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN (IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_SHIFT (46U) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_SHIFT (45U) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_SHIFT (44U) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_SHIFT (43U) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_SHIFT (42U) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_SHIFT (41U) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_SHIFT (40U) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_SHIFT (39U) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN (IMG_UINT64_C(0x0000008000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_SHIFT (38U) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_SHIFT (37U) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_SHIFT (36U) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN 
(IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_SHIFT (35U) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_SHIFT (34U) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_SHIFT (33U) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_SHIFT (32U) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_SHIFT (31U) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_SHIFT (30U) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_SHIFT (29U) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_SHIFT (28U) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_SHIFT (27U) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_SHIFT (26U) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_SHIFT (25U) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_SHIFT (24U) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_SHIFT (23U) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_SHIFT (22U) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_SHIFT (21U) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN (IMG_UINT64_C(0x0000000000200000)) 
-#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_SHIFT (20U) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_SHIFT (19U) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_SHIFT (18U) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_SHIFT (17U) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_SHIFT (16U) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_SHIFT (15U) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_SHIFT (14U) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_SHIFT (13U) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_SHIFT (12U) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_SHIFT (11U) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_SHIFT (10U) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_SHIFT (9U) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_SHIFT (8U) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_SHIFT (7U) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_SHIFT (6U) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_SHIFT (5U) 
-#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_SHIFT (4U) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_SHIFT (3U) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_SHIFT (2U) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_SHIFT (1U) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_SHIFT (0U) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MERCER_SOFT_RESET (0x0630U) +#define RGX_CR_MERCER_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_SHIFT (62U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_SHIFT (61U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_SHIFT (60U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_SHIFT (59U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_SHIFT (58U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_SHIFT (57U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_SHIFT (56U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_SHIFT (55U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_SHIFT (54U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN 
(IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_SHIFT (53U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_SHIFT (52U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_SHIFT (51U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_SHIFT (50U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_SHIFT (49U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_SHIFT (48U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_SHIFT (47U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_SHIFT (46U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_SHIFT (45U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_SHIFT (44U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_SHIFT (43U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_SHIFT (42U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_SHIFT (41U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_SHIFT (40U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_SHIFT (39U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN 
(IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_SHIFT (38U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_SHIFT (37U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_SHIFT (36U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_SHIFT (35U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_SHIFT (34U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_SHIFT (33U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_SHIFT (32U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_SHIFT (31U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_SHIFT (30U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_SHIFT (29U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_SHIFT (28U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_SHIFT (27U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_SHIFT (26U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_SHIFT (25U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_SHIFT (24U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN 
(IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_SHIFT (23U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_SHIFT (22U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_SHIFT (21U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_SHIFT (20U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_SHIFT (19U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_SHIFT (18U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_SHIFT (17U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_SHIFT (16U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_SHIFT (15U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_SHIFT (14U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_SHIFT (13U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_SHIFT (12U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_SHIFT (11U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_SHIFT (10U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_SHIFT (9U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN (IMG_UINT64_C(0x0000000000000200)) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_SHIFT (8U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_SHIFT (7U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_SHIFT (6U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_SHIFT (5U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_SHIFT (4U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_SHIFT (3U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_SHIFT (2U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_SHIFT (1U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_SHIFT (0U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_TEXAS_SOFT_RESET */ -#define RGX_CR_TEXAS_SOFT_RESET (0x0640U) -#define RGX_CR_TEXAS_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_TEXAS_SOFT_RESET_SPU31_SHIFT (31U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU31_CLRMSK (0x7FFFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU31_EN (0x80000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU30_SHIFT (30U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU30_CLRMSK (0xBFFFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU30_EN (0x40000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU29_SHIFT (29U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU29_CLRMSK (0xDFFFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU29_EN (0x20000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU28_SHIFT (28U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU28_CLRMSK (0xEFFFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU28_EN (0x10000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU27_SHIFT (27U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU27_CLRMSK (0xF7FFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU27_EN (0x08000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU26_SHIFT (26U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU26_CLRMSK (0xFBFFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU26_EN (0x04000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU25_SHIFT (25U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU25_CLRMSK (0xFDFFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU25_EN (0x02000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU24_SHIFT (24U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU24_CLRMSK (0xFEFFFFFFU) -#define 
RGX_CR_TEXAS_SOFT_RESET_SPU24_EN (0x01000000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU23_SHIFT (23U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU23_CLRMSK (0xFF7FFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU23_EN (0x00800000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU22_SHIFT (22U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU22_CLRMSK (0xFFBFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU22_EN (0x00400000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU21_SHIFT (21U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU21_CLRMSK (0xFFDFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU21_EN (0x00200000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU20_SHIFT (20U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU20_CLRMSK (0xFFEFFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU20_EN (0x00100000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU19_SHIFT (19U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU19_CLRMSK (0xFFF7FFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU19_EN (0x00080000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU18_SHIFT (18U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU18_CLRMSK (0xFFFBFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU18_EN (0x00040000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU17_SHIFT (17U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU17_CLRMSK (0xFFFDFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU17_EN (0x00020000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU16_SHIFT (16U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU16_CLRMSK (0xFFFEFFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU16_EN (0x00010000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU15_SHIFT (15U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU15_CLRMSK (0xFFFF7FFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU15_EN (0x00008000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU14_SHIFT (14U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU14_CLRMSK (0xFFFFBFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU14_EN (0x00004000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU13_SHIFT (13U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU13_CLRMSK (0xFFFFDFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU13_EN (0x00002000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU12_SHIFT (12U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU12_CLRMSK (0xFFFFEFFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU12_EN (0x00001000U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU11_SHIFT (11U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU11_CLRMSK (0xFFFFF7FFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU11_EN (0x00000800U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU10_SHIFT (10U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU10_CLRMSK (0xFFFFFBFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU10_EN (0x00000400U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU9_SHIFT (9U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU9_CLRMSK (0xFFFFFDFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU9_EN (0x00000200U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU8_SHIFT (8U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU8_CLRMSK (0xFFFFFEFFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU8_EN (0x00000100U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU7_SHIFT (7U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU7_CLRMSK (0xFFFFFF7FU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU7_EN (0x00000080U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU6_SHIFT (6U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU6_CLRMSK (0xFFFFFFBFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU6_EN (0x00000040U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU5_SHIFT (5U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU5_CLRMSK (0xFFFFFFDFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU5_EN (0x00000020U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU4_SHIFT (4U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU4_CLRMSK (0xFFFFFFEFU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU4_EN (0x00000010U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU3_SHIFT (3U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU3_CLRMSK (0xFFFFFFF7U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU3_EN (0x00000008U) -#define 
RGX_CR_TEXAS_SOFT_RESET_SPU2_SHIFT (2U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU2_CLRMSK (0xFFFFFFFBU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU2_EN (0x00000004U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU1_SHIFT (1U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU1_CLRMSK (0xFFFFFFFDU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU1_EN (0x00000002U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU0_SHIFT (0U) -#define RGX_CR_TEXAS_SOFT_RESET_SPU0_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_TEXAS_SOFT_RESET_SPU0_EN (0x00000001U) +#define RGX_CR_TEXAS_SOFT_RESET (0x0640U) +#define RGX_CR_TEXAS_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_EN (0x80000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_EN (0x40000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_EN (0x20000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_EN (0x10000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_EN (0x08000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_EN (0x04000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_EN (0x02000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_EN (0x01000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_EN (0x00800000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_EN (0x00400000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_EN (0x00200000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_EN (0x00100000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_EN (0x00080000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_SHIFT (18U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_EN (0x00040000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_EN (0x00020000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_EN (0x00010000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_EN (0x00008000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_SHIFT (14U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_CLRMSK (0xFFFFBFFFU) 
+#define RGX_CR_TEXAS_SOFT_RESET_SPU14_EN (0x00004000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_EN (0x00002000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_EN (0x00001000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_EN (0x00000800U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_EN (0x00000400U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_EN (0x00000200U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_EN (0x00000100U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_EN (0x00000080U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_SHIFT (6U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_EN (0x00000040U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_SHIFT (5U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_EN (0x00000020U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_EN (0x00000010U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_EN (0x00000008U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_EN (0x00000004U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_EN (0x00000002U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_EN (0x00000001U) /* Register RGX_CR_SWIFT_SOFT_RESET */ -#define RGX_CR_SWIFT_SOFT_RESET (0x0650U) -#define RGX_CR_SWIFT_SOFT_RESET__ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_SHIFT (62U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_SHIFT (61U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_SHIFT (60U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_SHIFT (59U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_EN (IMG_UINT64_C(0x0800000000000000)) -#define 
RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_SHIFT (58U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_EN (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_SHIFT (57U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_EN (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_SHIFT (56U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_SHIFT (55U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_EN (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_SHIFT (54U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_EN (IMG_UINT64_C(0x0040000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_SHIFT (53U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_EN (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_SHIFT (52U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_EN (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_SHIFT (51U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_EN (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_SHIFT (50U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_SHIFT (49U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_EN (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_SHIFT (48U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_EN (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_SHIFT (47U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_EN (IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_SHIFT (46U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_EN (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_SHIFT (45U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_SHIFT (44U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_EN (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_SHIFT (43U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_CLRMSK 
(IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_EN (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_SHIFT (42U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_SHIFT (41U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_SHIFT (40U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_SHIFT (39U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_EN (IMG_UINT64_C(0x0000008000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_SHIFT (38U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_SHIFT (37U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_SHIFT (36U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_EN (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_SHIFT (35U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_EN (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_SHIFT (34U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_SHIFT (33U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_SHIFT (32U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU31_SHIFT (31U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU31_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU31_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_SHIFT (31U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU30_SHIFT (30U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU30_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU30_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_SHIFT (30U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_EN (IMG_UINT64_C(0x0000000040000000)) -#define 
RGX_CR_SWIFT_SOFT_RESET_SPU29_SHIFT (29U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU29_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU29_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_SHIFT (29U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU28_SHIFT (28U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU28_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU28_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_SHIFT (28U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU27_SHIFT (27U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU27_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU27_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_SHIFT (27U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU26_SHIFT (26U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU26_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU26_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_SHIFT (26U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU25_SHIFT (25U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU25_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU25_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_SHIFT (25U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU24_SHIFT (24U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU24_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU24_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_SHIFT (24U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU23_SHIFT (23U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU23_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU23_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_SHIFT (23U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU22_SHIFT (22U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU22_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU22_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_SHIFT (22U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU21_SHIFT (21U) -#define 
RGX_CR_SWIFT_SOFT_RESET_SPU21_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU21_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_SHIFT (21U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SHIFT (20U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU20_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_SHIFT (20U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SHIFT (19U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU19_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_SHIFT (19U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SHIFT (18U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU18_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_SHIFT (18U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SHIFT (17U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU17_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_SHIFT (17U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SHIFT (16U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU16_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_SHIFT (16U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SHIFT (15U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU15_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_SHIFT (15U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SHIFT (14U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU14_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_SHIFT (14U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SHIFT (13U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU13_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_SHIFT (13U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SHIFT (12U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU12_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_SHIFT (12U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SHIFT (11U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU11_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_SHIFT (11U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SHIFT (10U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU10_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_SHIFT (10U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SHIFT (9U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU9_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_SHIFT (9U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SHIFT (8U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU8_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_SHIFT (8U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SHIFT (7U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU7_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_SHIFT (7U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SHIFT (6U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU6_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_SHIFT (6U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SHIFT (5U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU5_EN 
(IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_SHIFT (5U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SHIFT (4U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU4_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_SHIFT (4U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SHIFT (3U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU3_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_SHIFT (3U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SHIFT (2U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU2_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_SHIFT (2U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SHIFT (1U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU1_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_SHIFT (1U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SHIFT (0U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_EN (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_SHIFT (0U) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SWIFT_SOFT_RESET (0x0650U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT2_SHIFT (62U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT1_SHIFT (61U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT0_SHIFT (60U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU20_SWIFT0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT2_SHIFT (59U) +#define 
RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT1_SHIFT (58U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT0_SHIFT (57U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU19_SWIFT0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT2_SHIFT (56U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT1_SHIFT (55U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT0_SHIFT (54U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU18_SWIFT0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT2_SHIFT (53U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT1_SHIFT (52U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT0_SHIFT (51U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU17_SWIFT0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT2_SHIFT (50U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT1_SHIFT (49U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT0_SHIFT (48U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU16_SWIFT0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT2_SHIFT (47U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT1_SHIFT (46U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT1_CLRMSK 
(IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT0_SHIFT (45U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU15_SWIFT0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT2_SHIFT (44U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT1_SHIFT (43U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT0_SHIFT (42U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU14_SWIFT0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT2_SHIFT (41U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT1_SHIFT (40U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT0_SHIFT (39U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU13_SWIFT0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT2_SHIFT (38U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT1_SHIFT (37U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT0_SHIFT (36U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU12_SWIFT0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT2_SHIFT (35U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT1_SHIFT (34U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT0_SHIFT (33U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU11_SWIFT0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT2_SHIFT (32U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT1_SHIFT (31U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT0_SHIFT (30U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU10_SWIFT0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT2_SHIFT (29U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT1_SHIFT (28U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT0_SHIFT (27U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU9_SWIFT0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT2_SHIFT (26U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT1_SHIFT (25U) +#define 
RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT0_SHIFT (24U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU8_SWIFT0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT2_SHIFT (23U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT1_SHIFT (22U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT0_SHIFT (21U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU7_SWIFT0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT2_SHIFT (20U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT1_SHIFT (19U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SHIFT (18U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT0_SHIFT (18U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU6_SWIFT0_EN (IMG_UINT64_C(0x0000000000040000)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT2_SHIFT (17U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT1_SHIFT (16U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT0_SHIFT (15U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU5_SWIFT0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SHIFT (14U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT2_SHIFT (14U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT1_SHIFT (13U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT0_SHIFT (12U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU4_SWIFT0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT2_SHIFT (11U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_EN (IMG_UINT64_C(0x0000000000000400)) +#define 
RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT1_SHIFT (10U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT0_SHIFT (9U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU3_SWIFT0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT2_SHIFT (8U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT1_SHIFT (7U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SHIFT (6U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT0_SHIFT (6U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU2_SWIFT0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SHIFT (5U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT2_SHIFT (5U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT1_SHIFT (4U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT0_SHIFT (3U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU1_SWIFT0_EN (IMG_UINT64_C(0x0000000000000008)) 
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT2_SHIFT (2U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT1_SHIFT (1U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT0_SHIFT (0U) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SWIFT_SOFT_RESET__AXT_IF_GT0__SPU0_SWIFT0_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_RAC_SOFT_RESET */ -#define RGX_CR_RAC_SOFT_RESET (0x0660U) -#define RGX_CR_RAC_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_SHIFT (62U) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_SHIFT (61U) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_EN (IMG_UINT64_C(0x2000000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_SHIFT (60U) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_EN (IMG_UINT64_C(0x1000000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_SHIFT (59U) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_EN (IMG_UINT64_C(0x0800000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_SHIFT (58U) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_EN (IMG_UINT64_C(0x0400000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_SHIFT (57U) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_EN (IMG_UINT64_C(0x0200000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_SHIFT (56U) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_SHIFT (55U) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_EN (IMG_UINT64_C(0x0080000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_SHIFT (54U) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_EN (IMG_UINT64_C(0x0040000000000000)) -#define 
RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_SHIFT (53U) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_EN (IMG_UINT64_C(0x0020000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_SHIFT (52U) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_EN (IMG_UINT64_C(0x0010000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_SHIFT (51U) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_EN (IMG_UINT64_C(0x0008000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_SHIFT (50U) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_EN (IMG_UINT64_C(0x0004000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_SHIFT (49U) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_EN (IMG_UINT64_C(0x0002000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_SHIFT (48U) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_EN (IMG_UINT64_C(0x0001000000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_SHIFT (47U) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_EN (IMG_UINT64_C(0x0000800000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_SHIFT (46U) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_EN (IMG_UINT64_C(0x0000400000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_SHIFT (45U) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_EN (IMG_UINT64_C(0x0000200000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_SHIFT (44U) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_EN (IMG_UINT64_C(0x0000100000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_SHIFT (43U) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_EN (IMG_UINT64_C(0x0000080000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_SHIFT (42U) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_EN (IMG_UINT64_C(0x0000040000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_SHIFT (41U) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_EN (IMG_UINT64_C(0x0000020000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_SHIFT (40U) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_SHIFT (39U) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_EN (IMG_UINT64_C(0x0000008000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_SHIFT (38U) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_EN (IMG_UINT64_C(0x0000004000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_SHIFT (37U) -#define 
RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_EN (IMG_UINT64_C(0x0000002000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_SHIFT (36U) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_EN (IMG_UINT64_C(0x0000001000000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_SHIFT (35U) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_EN (IMG_UINT64_C(0x0000000800000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_SHIFT (34U) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_EN (IMG_UINT64_C(0x0000000400000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_SHIFT (33U) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_EN (IMG_UINT64_C(0x0000000200000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_SHIFT (32U) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_EN (IMG_UINT64_C(0x0000000100000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_SHIFT (31U) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_EN (IMG_UINT64_C(0x0000000080000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_SHIFT (30U) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_EN (IMG_UINT64_C(0x0000000040000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_SHIFT (29U) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_SHIFT (28U) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_SHIFT (27U) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_EN (IMG_UINT64_C(0x0000000008000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_SHIFT (26U) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_EN (IMG_UINT64_C(0x0000000004000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_SHIFT (25U) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_SHIFT (24U) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_SHIFT (23U) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_EN (IMG_UINT64_C(0x0000000000800000)) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_SHIFT (22U) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_EN (IMG_UINT64_C(0x0000000000400000)) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_SHIFT (21U) -#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -#define 
RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_EN (IMG_UINT64_C(0x0000000000200000)) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_SHIFT (20U) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_EN (IMG_UINT64_C(0x0000000000100000)) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_SHIFT (19U) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_EN (IMG_UINT64_C(0x0000000000080000)) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_SHIFT (18U) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_SHIFT (17U) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_SHIFT (16U) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_SHIFT (15U) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_EN (IMG_UINT64_C(0x0000000000008000)) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_SHIFT (14U) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_SHIFT (13U) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_SHIFT (12U) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_SHIFT (11U) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_EN (IMG_UINT64_C(0x0000000000000800)) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_SHIFT (10U) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_SHIFT (9U) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_SHIFT (8U) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_SHIFT (7U) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_EN (IMG_UINT64_C(0x0000000000000080)) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_SHIFT (6U) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_EN (IMG_UINT64_C(0x0000000000000040)) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_SHIFT (5U) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_SHIFT 
(4U) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_SHIFT (3U) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_EN (IMG_UINT64_C(0x0000000000000008)) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_SHIFT (2U) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_SHIFT (1U) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_SHIFT (0U) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_RAC_SOFT_RESET (0x0660U) +#define RGX_CR_RAC_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_SHIFT (62U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_SHIFT (61U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_SHIFT (60U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_SHIFT (59U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_SHIFT (58U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_SHIFT (57U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_SHIFT (56U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_SHIFT (55U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_SHIFT (54U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_SHIFT (53U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_SHIFT (52U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_EN (IMG_UINT64_C(0x0010000000000000)) +#define 
RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_SHIFT (51U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_SHIFT (50U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_SHIFT (49U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_SHIFT (48U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_SHIFT (47U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_SHIFT (46U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_SHIFT (45U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_SHIFT (44U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_SHIFT (43U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_SHIFT (42U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_SHIFT (41U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_SHIFT (40U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_SHIFT (39U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_SHIFT (38U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_SHIFT (37U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_SHIFT (36U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_SHIFT (35U) +#define 
RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_SHIFT (34U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_SHIFT (33U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_SHIFT (32U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_SHIFT (31U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_SHIFT (30U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_SHIFT (29U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_SHIFT (28U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_SHIFT (27U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_SHIFT (26U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_SHIFT (25U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_SHIFT (24U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_SHIFT (23U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_SHIFT (22U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_SHIFT (21U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_SHIFT (20U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_SHIFT (19U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define 
RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_SHIFT (18U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_SHIFT (17U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_SHIFT (16U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_SHIFT (15U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_SHIFT (14U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_SHIFT (13U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_SHIFT (12U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_SHIFT (11U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_SHIFT (10U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_SHIFT (9U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_SHIFT (8U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_SHIFT (7U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_SHIFT (6U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_SHIFT (5U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_SHIFT (4U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_SHIFT (3U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_SHIFT (2U) 
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_SHIFT (1U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_SHIFT (0U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_CR_FWCORE_WDT_RESET */ -#define RGX_CR_FWCORE_WDT_RESET (0x4500U) -#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U) -#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U) +#define RGX_CR_FWCORE_WDT_RESET (0x4500U) +#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U) /* Register RGX_CR_FWCORE_WDT_CTRL */ -#define RGX_CR_FWCORE_WDT_CTRL (0x4508U) -#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01)) -#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U) -#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU) -#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U) -#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU) -#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U) -#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U) +#define RGX_CR_FWCORE_WDT_CTRL (0x4508U) +#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01)) +#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U) +#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU) +#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U) +#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_CR_FWCORE_WDT_COUNT */ -#define RGX_CR_FWCORE_WDT_COUNT (0x4510U) -#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U) -#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U) +#define RGX_CR_FWCORE_WDT_COUNT (0x4510U) +#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U) #endif /* RGX_CR_DEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxdefs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxdefs_km.h index 8df02e567cd1..3342fceb3a12 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxdefs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxdefs_km.h @@ -48,7 +48,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif #define IMG_EXPLICIT_INCLUDE_HWDEFS -#if defined(__KERNEL__) +#if defined(__KERNEL__) || defined(TEE_DDK) #include "rgx_cr_defs_km.h" #endif #undef IMG_EXPLICIT_INCLUDE_HWDEFS @@ -119,15 +119,20 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) /* The default number of OSID is 1, higher number implies VZ enabled firmware */ -#if !defined(RGXFW_NATIVE) && defined(PVRSRV_VZ_NUM_OSID) && (PVRSRV_VZ_NUM_OSID + 1U > 1U) -#define RGXFW_NUM_OS PVRSRV_VZ_NUM_OSID +#if !defined(RGXFW_NATIVE) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED + 1U > 1U) +#define RGXFW_NUM_OS RGX_NUM_DRIVERS_SUPPORTED #else #define RGXFW_NUM_OS 1U #endif -#define RGXFW_MAX_NUM_OS (8U) -#define RGXFW_HOST_OS (0U) -#define RGXFW_GUEST_OSID_START (1U) +#if defined(RGX_FEATURE_NUM_OSIDS) +#define RGXFW_MAX_NUM_OSIDS (RGX_FEATURE_NUM_OSIDS) +#else +#define RGXFW_MAX_NUM_OSIDS (8U) +#endif + +#define RGXFW_HOST_DRIVER_ID (0U) +#define RGXFW_GUEST_DRIVER_ID_START (RGXFW_HOST_DRIVER_ID + 1U) #define RGXFW_THREAD_0 (0U) #define RGXFW_THREAD_1 (1U) @@ -146,7 +151,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_META_COREMEM_128K (128*1024) #define RGX_META_COREMEM_256K (256*1024) -#if !defined(__KERNEL__) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(RGX_FEATURE_META_DMA) #undef SUPPORT_META_COREMEM #undef RGX_FEATURE_META_COREMEM_SIZE @@ -190,6 +195,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_CR_CLK_CTRL2_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL) #define RGX_CR_CLK_CTRL2_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL) +#if defined(__KERNEL__) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL0__PIPEDM_GT0__MASKFULL) +#define RGX_CR_CLK_CTRL0__PIPEDM_GT0__ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL0__PIPEDM_GT0__MASKFULL) +#endif + #define RGX_CR_MERCER0_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN | \ RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN | \ RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN | \ @@ -276,6 +286,20 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. RGX_CR_SOFT_RESET_BIF_JONES_EN | \ RGX_CR_SOFT_RESET_SLC_EN)) +#define RGX_MLIST_ENTRY_STRIDE (4U) /* 4 bytes */ +#define RGX_NUM_PM_ADDR_SPACES (2U) /* VCE & TE share virtual space and Alist */ +#define RGX_PM_MAX_PB_VIRT_ADDR_SPACE (IMG_UINT64_C(0x400000000)) /* PM Maximum addressable limit */ + + +#define RGX_PM_MAX_RSTATE_SIZE_DWORDS (46U) +#define RGX_PM_MLIST_BASE_ADDR_MAX_ALIGNSIZE (32U) +#define RGX_PM_VHEAP_BASE_ADDR_MAX_ALIGNSIZE (32U) +#define _RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_)[2] = ((_x_) & (IMG_UINT64_C(0x00000000fffffff0)))); \ + ((_ft_)[3] = (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32)); } +#define _RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_)[6] = ((_x_) & (IMG_UINT64_C(0x00000000fffffff0)))); \ + ((_ft_)[7] = (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32)); } + +#define RGX_PM_RENDERSTATE_BASE_ADDR_ALIGNSIZE (32U) #define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) #define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1U << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) @@ -292,7 +316,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3U) / 4U) #define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3U) / 4U) -#if !defined(__KERNEL__) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) # define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) #endif @@ -303,23 +327,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * META second thread feature depending on META variants and * available CoreMem */ -#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && (RGX_FEATURE_META_COREMEM_SIZE >= 96) +#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && (RGX_FEATURE_META_COREMEM_SIZE >= 96) && (RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION == 0) #define RGXFW_META_SUPPORT_2ND_THREAD #endif - -/* - * FW MMU contexts - */ -#if defined(SUPPORT_TRUSTED_DEVICE) -#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) /* FW code/private data */ -#define MMU_CONTEXT_MAPPING_FWIF (0x7U) /* Host/FW data */ -#else -#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) -#define MMU_CONTEXT_MAPPING_FWIF (0x0U) -#endif - - /* * FWCORE wrapper register defines */ @@ -344,12 +355,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) +/* + * Renaming MTS sideband bitfields to emphasize that the Register Bank number + * of the MTS register used identifies a specific Driver/VM rather than the OSID tag + * emitted on bus memory transactions. + */ +#define RGX_MTS_SBDATA_DRIVERID_CLRMSK RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_CLRMSK +#define RGX_MTS_SBDATA_DRIVERID_SHIFT RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_SHIFT + /* Register Bank containing registers secured against host access */ #define RGX_HOST_SECURE_REGBANK_OFFSET (0xF0000U) #define RGX_HOST_SECURE_REGBANK_SIZE (0x10000U) /* GPU CR timer tick in GPU cycles */ -#define RGX_CRTIME_TICK_IN_CYCLES (256U) +#define RGX_CRTIME_TICK_IN_CYCLES (256U) +#define RGX_CRTIME_TICK_IN_CYCLES_SHIFT (8U) #if defined(FIX_HW_BRN_71840) #define ROGUE_RENDERSIZE_MAXX (16384U) @@ -403,4 +423,18 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_BRN71422_WORKAROUND_READ_SIZE (32U) #endif +#define SUPPORT_VOLCANIC_TB +#define RGX_FEATURE_SECURITY_VOLCANIC + +/* Typically the PCI bus returns this value on error */ +#define RGX_PCI_ERROR_VALUE_BYTE (0xFFU) +#define RGX_PCI_ERROR_VALUE_DWORD (0xFFFFFFFFU) + + +/* + Maximum number of render targets in array. + This derives from the maximum value for RGX_FEATURE_RTA_INDICES across all cores. +*/ +#define RGX_MAX_RTA_INDICES (2048U) + #endif /* RGXDEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxmmudefs_km.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxmmudefs_km.h index 3d4523d66c75..4dedb33467e6 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxmmudefs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxmmudefs_km.h @@ -57,216 +57,219 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGXMMUDEFS_KM_REVISION 0 -#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_NUM_ENTRIES_VALUE (0x00000010U) +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_NUM_ENTRIES_VALUE (0x00000010U) -#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_SHIFT_VALUE (0x00000004U) +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_SHIFT_VALUE (0x00000004U) -#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_MASK_VALUE (0x00000007U) +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_MASK_VALUE (0x00000007U) -#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) -#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) -#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) -#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) -#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) -#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) -#define RGX_BIF_DM_ENCODING_META (0x00000007U) -#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) -#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) +#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) +#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) +#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) +#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) +#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) +#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) +#define RGX_BIF_DM_ENCODING_META (0x00000007U) +#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) +#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) -#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) -#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) -#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) -#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) -#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) -#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) +#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) +#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) +#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) +#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) +#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) -#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) +#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) -#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) +#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) -#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) +#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) -#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) +#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) -#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) +#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) -#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) +#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) -#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) -#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) -#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) -#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) -#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) -#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) -#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) +#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) +#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) +#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) +#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) +#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) +#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) 
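For context, the page-table walk indices encoded by the RGX_MMUCTRL_VADDR_* fields above can be decoded with the same _CLRMSK/_SHIFT convention (the mask macros hold the inverted field mask). A minimal sketch, assuming the IMG_UINT32/IMG_UINT64 typedefs from img_types.h; the helper names are hypothetical and not part of this patch:

static inline IMG_UINT32 GetPCIndex(IMG_UINT64 ui64DevVAddr)
{
	/* Bits [39:30] select one of the 1024 page catalogue entries. */
	return (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >> RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT);
}

static inline IMG_UINT32 GetPDIndex(IMG_UINT64 ui64DevVAddr)
{
	/* Bits [29:21] select one of the 512 page directory entries. */
	return (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK) >> RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT);
}

static inline IMG_UINT32 GetPTIndex(IMG_UINT64 ui64DevVAddr)
{
	/* Bits [20:12] select one of the 512 page table entries at 4KB granularity. */
	return (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK) >> RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT);
}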
+#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) -#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) -#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) -#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) -#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) -#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) -#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) -#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) -#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) -#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) -#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) -#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) -#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) -#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) -#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) -#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) -#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) -#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) -#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) -#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) -#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) -#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_MMUCTRL_AXCACHE_MASK (0x0000000FU) +#define RGX_MMUCTRL_AXCACHE_MASK (0x0000000FU) /* Device Non-bufferable */ -#define RGX_MMUCTRL_AXCACHE_DEVNONBUFF (0x00000000U) +#define RGX_MMUCTRL_AXCACHE_DEVNONBUFF (0x00000000U) /* Device Bufferable */ -#define RGX_MMUCTRL_AXCACHE_DEVBUFF (0x00000001U) +#define RGX_MMUCTRL_AXCACHE_DEVBUFF (0x00000001U) /* Normal Non-cacheable 
Non-bufferable */ -#define RGX_MMUCTRL_AXCACHE_NORMNONBUFF (0x00000002U) +#define RGX_MMUCTRL_AXCACHE_NORMNONBUFF (0x00000002U) /* Normal Non-cacheable Bufferable */ -#define RGX_MMUCTRL_AXCACHE_NORMBUFF (0x00000003U) +#define RGX_MMUCTRL_AXCACHE_NORMBUFF (0x00000003U) /* Write-through No-allocate*/ -#define RGX_MMUCTRL_AXCACHE_WTNOALLOC (0x00000006U) +#define RGX_MMUCTRL_AXCACHE_WTNOALLOC (0x00000006U) /* Write-back No-allocate*/ -#define RGX_MMUCTRL_AXCACHE_WBNOALLOC (0x00000007U) +#define RGX_MMUCTRL_AXCACHE_WBNOALLOC (0x00000007U) /* Write-through Read-Allocate */ -#define RGX_MMUCTRL_AXCACHE_WTRALLOC (0x00000008U) +#define RGX_MMUCTRL_AXCACHE_WTRALLOC (0x00000008U) /* Write-back Read-Allocate */ -#define RGX_MMUCTRL_AXCACHE_WBRALLOC (0x00000009U) +#define RGX_MMUCTRL_AXCACHE_WBRALLOC (0x00000009U) /* Write-through Write-Allocate */ -#define RGX_MMUCTRL_AXCACHE_WTWALLOC (0x0000000aU) +#define RGX_MMUCTRL_AXCACHE_WTWALLOC (0x0000000aU) /* Write-back Write-Allocate */ -#define RGX_MMUCTRL_AXCACHE_WBWALLOC (0x0000000bU) +#define RGX_MMUCTRL_AXCACHE_WBWALLOC (0x0000000bU) /* Write-through Read/Write-Allocate */ -#define RGX_MMUCTRL_AXCACHE_WTRWALLOC (0x0000000eU) +#define RGX_MMUCTRL_AXCACHE_WTRWALLOC (0x0000000eU) /* Write-back Read/Write-Allocate */ -#define RGX_MMUCTRL_AXCACHE_WBRWALLOC (0x0000000fU) - - -#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) -#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_SHIFT (58U) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK (IMG_UINT64_C(0xC3FFFFFFFFFFFFFF)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVNONBUFF (IMG_UINT64_C(0x0000000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVBUFF (IMG_UINT64_C(0x0400000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMNONBUFF (IMG_UINT64_C(0x0800000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMBUFF (IMG_UINT64_C(0x0c00000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTNOALLOC (IMG_UINT64_C(0x1800000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBNOALLOC (IMG_UINT64_C(0x1c00000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRALLOC (IMG_UINT64_C(0x2000000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRALLOC (IMG_UINT64_C(0x2400000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTWALLOC (IMG_UINT64_C(0x2800000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBWALLOC (IMG_UINT64_C(0x2c00000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRWALLOC (IMG_UINT64_C(0x3800000000000000)) -#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC (IMG_UINT64_C(0x3c00000000000000)) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xFC0000FFFFFFFFFF)) -#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) -#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) -#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) -#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) -#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) -#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) -#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) -#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) - - -#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) -#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) -#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) -#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) -#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) -#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) -#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) - - -#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) -#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) -#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) -#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) -#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) -#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) -#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) -#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) +#define RGX_MMUCTRL_AXCACHE_WBRWALLOC (0x0000000fU) + + +#define RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT (63U) +#define RGX_MMUCTRL_PT_DATA_PT_PARITY_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PT_PARITY_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_SHIFT (58U) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK (IMG_UINT64_C(0xC3FFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVNONBUFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVBUFF (IMG_UINT64_C(0x0400000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMNONBUFF (IMG_UINT64_C(0x0800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMBUFF (IMG_UINT64_C(0x0c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTNOALLOC (IMG_UINT64_C(0x1800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBNOALLOC (IMG_UINT64_C(0x1c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRALLOC (IMG_UINT64_C(0x2000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRALLOC (IMG_UINT64_C(0x2400000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTWALLOC 
(IMG_UINT64_C(0x2800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBWALLOC (IMG_UINT64_C(0x2c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRWALLOC (IMG_UINT64_C(0x3800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC (IMG_UINT64_C(0x3c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xFC0000FFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) +#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) +#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) +#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) +#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) #endif /* RGXMMUDEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxtbdefs_km.h 
b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxtbdefs_km.h index b2289ee11c15..c265ef0059b2 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxtbdefs_km.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/km/rgxtbdefs_km.h @@ -59,771 +59,1838 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Register RGX_TB_SOFT_RESET */ -#define RGX_TB_SOFT_RESET (0x0000U) -#define RGX_TB_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFF0107)) -#define RGX_TB_SOFT_RESET_SPU_SHIFT (16U) -#define RGX_TB_SOFT_RESET_SPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -#define RGX_TB_SOFT_RESET_JONES_SHIFT (8U) -#define RGX_TB_SOFT_RESET_JONES_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_TB_SOFT_RESET_JONES_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_TB_SOFT_RESET_SYS_SHIFT (2U) -#define RGX_TB_SOFT_RESET_SYS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_TB_SOFT_RESET_SYS_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_TB_SOFT_RESET_MEM_SHIFT (1U) -#define RGX_TB_SOFT_RESET_MEM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_TB_SOFT_RESET_MEM_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_TB_SOFT_RESET_CORE_SHIFT (0U) -#define RGX_TB_SOFT_RESET_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_SOFT_RESET_CORE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_SOFT_RESET (0x0000U) +#define RGX_TB_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFF0107)) +#define RGX_TB_SOFT_RESET_SPU_SHIFT (16U) +#define RGX_TB_SOFT_RESET_SPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_TB_SOFT_RESET_JONES_SHIFT (8U) +#define RGX_TB_SOFT_RESET_JONES_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_TB_SOFT_RESET_JONES_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_TB_SOFT_RESET_SYS_SHIFT (2U) +#define RGX_TB_SOFT_RESET_SYS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_TB_SOFT_RESET_SYS_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_TB_SOFT_RESET_MEM_SHIFT (1U) +#define RGX_TB_SOFT_RESET_MEM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_TB_SOFT_RESET_MEM_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_TB_SOFT_RESET_CORE_SHIFT (0U) +#define RGX_TB_SOFT_RESET_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_SOFT_RESET_CORE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_PCI_MASTER */ -#define RGX_TB_PCI_MASTER (0x0008U) -#define RGX_TB_PCI_MASTER_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_TB_PCI_MASTER_MODE_SHIFT (0U) -#define RGX_TB_PCI_MASTER_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_PCI_MASTER_MODE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_PCI_MASTER (0x0008U) +#define RGX_TB_PCI_MASTER_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_PCI_MASTER_MODE_SHIFT (0U) +#define RGX_TB_PCI_MASTER_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_PCI_MASTER_MODE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_MEM_ARBITER */ -#define RGX_TB_MEM_ARBITER (0x0088U) -#define RGX_TB_MEM_ARBITER_MASKFULL (IMG_UINT64_C(0x0000000000010F11)) -#define RGX_TB_MEM_ARBITER_LIMIT_BW_SHIFT (16U) -#define RGX_TB_MEM_ARBITER_LIMIT_BW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_TB_MEM_ARBITER_LIMIT_BW_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_TB_MEM_ARBITER_PRI_SKEW_SHIFT (8U) -#define RGX_TB_MEM_ARBITER_PRI_SKEW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF0FF)) -#define RGX_TB_MEM_ARBITER_PRI_RNW_SHIFT (4U) -#define RGX_TB_MEM_ARBITER_PRI_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define 
RGX_TB_MEM_ARBITER_PRI_RNW_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_TB_MEM_ARBITER_ENABLE_SHIFT (0U) -#define RGX_TB_MEM_ARBITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_MEM_ARBITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_MEM_ARBITER (0x0088U) +#define RGX_TB_MEM_ARBITER_MASKFULL (IMG_UINT64_C(0x0000000000010F11)) +#define RGX_TB_MEM_ARBITER_LIMIT_BW_SHIFT (16U) +#define RGX_TB_MEM_ARBITER_LIMIT_BW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_TB_MEM_ARBITER_LIMIT_BW_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_TB_MEM_ARBITER_PRI_SKEW_SHIFT (8U) +#define RGX_TB_MEM_ARBITER_PRI_SKEW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF0FF)) +#define RGX_TB_MEM_ARBITER_PRI_RNW_SHIFT (4U) +#define RGX_TB_MEM_ARBITER_PRI_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_TB_MEM_ARBITER_PRI_RNW_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_TB_MEM_ARBITER_ENABLE_SHIFT (0U) +#define RGX_TB_MEM_ARBITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_MEM_ARBITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_QOS_RD_LATENCY */ -#define RGX_TB_QOS_RD_LATENCY (0x0090U) -#define RGX_TB_QOS_RD_LATENCY_MASKFULL (IMG_UINT64_C(0xFFFF3FFF3FFF3FFF)) -#define RGX_TB_QOS_RD_LATENCY_DIST_SHIFT (62U) -#define RGX_TB_QOS_RD_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -#define RGX_TB_QOS_RD_LATENCY_MAX_15_SHIFT (48U) -#define RGX_TB_QOS_RD_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_TB_QOS_RD_LATENCY_MIN_15_SHIFT (32U) -#define RGX_TB_QOS_RD_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_TB_QOS_RD_LATENCY_MAX_0_SHIFT (16U) -#define RGX_TB_QOS_RD_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF)) -#define RGX_TB_QOS_RD_LATENCY_MIN_0_SHIFT (0U) -#define RGX_TB_QOS_RD_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000)) +#define RGX_TB_QOS_RD_LATENCY (0x0090U) +#define RGX_TB_QOS_RD_LATENCY_MASKFULL (IMG_UINT64_C(0xFFFF3FFF3FFF3FFF)) +#define RGX_TB_QOS_RD_LATENCY_DIST_SHIFT (62U) +#define RGX_TB_QOS_RD_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_TB_QOS_RD_LATENCY_MAX_15_SHIFT (48U) +#define RGX_TB_QOS_RD_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_TB_QOS_RD_LATENCY_MIN_15_SHIFT (32U) +#define RGX_TB_QOS_RD_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_TB_QOS_RD_LATENCY_MAX_0_SHIFT (16U) +#define RGX_TB_QOS_RD_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF)) +#define RGX_TB_QOS_RD_LATENCY_MIN_0_SHIFT (0U) +#define RGX_TB_QOS_RD_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000)) /* Register RGX_TB_QOS_WR_LATENCY */ -#define RGX_TB_QOS_WR_LATENCY (0x0098U) -#define RGX_TB_QOS_WR_LATENCY_MASKFULL (IMG_UINT64_C(0xFFFF3FFF3FFF3FFF)) -#define RGX_TB_QOS_WR_LATENCY_DIST_SHIFT (62U) -#define RGX_TB_QOS_WR_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -#define RGX_TB_QOS_WR_LATENCY_MAX_15_SHIFT (48U) -#define RGX_TB_QOS_WR_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -#define RGX_TB_QOS_WR_LATENCY_MIN_15_SHIFT (32U) -#define RGX_TB_QOS_WR_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -#define RGX_TB_QOS_WR_LATENCY_MAX_0_SHIFT (16U) -#define RGX_TB_QOS_WR_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF)) -#define RGX_TB_QOS_WR_LATENCY_MIN_0_SHIFT (0U) -#define RGX_TB_QOS_WR_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000)) +#define RGX_TB_QOS_WR_LATENCY (0x0098U) +#define RGX_TB_QOS_WR_LATENCY_MASKFULL 
(IMG_UINT64_C(0xFFFF3FFF3FFF3FFF)) +#define RGX_TB_QOS_WR_LATENCY_DIST_SHIFT (62U) +#define RGX_TB_QOS_WR_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_TB_QOS_WR_LATENCY_MAX_15_SHIFT (48U) +#define RGX_TB_QOS_WR_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_TB_QOS_WR_LATENCY_MIN_15_SHIFT (32U) +#define RGX_TB_QOS_WR_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_TB_QOS_WR_LATENCY_MAX_0_SHIFT (16U) +#define RGX_TB_QOS_WR_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF)) +#define RGX_TB_QOS_WR_LATENCY_MIN_0_SHIFT (0U) +#define RGX_TB_QOS_WR_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000)) /* Register RGX_TB_MAX_ID_OUTSTANDING */ -#define RGX_TB_MAX_ID_OUTSTANDING (0x00B0U) -#define RGX_TB_MAX_ID_OUTSTANDING_MASKFULL (IMG_UINT64_C(0x000003FF03FF03FF)) -#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_SHIFT (32U) -#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) -#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_SHIFT (16U) -#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) -#define RGX_TB_MAX_ID_OUTSTANDING_READ_SHIFT (0U) -#define RGX_TB_MAX_ID_OUTSTANDING_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFC00)) +#define RGX_TB_MAX_ID_OUTSTANDING (0x00B0U) +#define RGX_TB_MAX_ID_OUTSTANDING_MASKFULL (IMG_UINT64_C(0x000003FF03FF03FF)) +#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_SHIFT (32U) +#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) +#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_SHIFT (16U) +#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) +#define RGX_TB_MAX_ID_OUTSTANDING_READ_SHIFT (0U) +#define RGX_TB_MAX_ID_OUTSTANDING_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFC00)) /* Register RGX_TB_COHERENT_MEM_REGION */ -#define RGX_TB_COHERENT_MEM_REGION (0x00C0U) -#define RGX_TB_COHERENT_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_SHIFT (12U) -#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_TB_COHERENT_MEM_REGION (0x00C0U) +#define RGX_TB_COHERENT_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_SHIFT (12U) +#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) /* Register RGX_TB_LMA_MEM_REGION */ -#define RGX_TB_LMA_MEM_REGION (0x00C8U) -#define RGX_TB_LMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_LMA_MEM_REGION_SIZE_SHIFT (0U) -#define RGX_TB_LMA_MEM_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_LMA_MEM_REGION (0x00C8U) +#define RGX_TB_LMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_LMA_MEM_REGION_SIZE_SHIFT (0U) +#define RGX_TB_LMA_MEM_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_UMA_MEM_REGION */ -#define RGX_TB_UMA_MEM_REGION (0x00D0U) -#define RGX_TB_UMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -#define RGX_TB_UMA_MEM_REGION_START_ADDR_SHIFT (12U) -#define RGX_TB_UMA_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_TB_UMA_MEM_REGION (0x00D0U) +#define RGX_TB_UMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_TB_UMA_MEM_REGION_START_ADDR_SHIFT (12U) +#define RGX_TB_UMA_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) /* Register RGX_TB_SYSTEM_STATUS */ -#define RGX_TB_SYSTEM_STATUS (0x00E0U) -#define RGX_TB_SYSTEM_STATUS_MASKFULL 
(IMG_UINT64_C(0xFFFFFFFF33F700FF)) -#define RGX_TB_SYSTEM_STATUS_SPU_ISON_SHIFT (48U) -#define RGX_TB_SYSTEM_STATUS_SPU_ISON_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -#define RGX_TB_SYSTEM_STATUS_SPU_POWER_SHIFT (32U) -#define RGX_TB_SYSTEM_STATUS_SPU_POWER_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -#define RGX_TB_SYSTEM_STATUS_JONES_ISON_SHIFT (29U) -#define RGX_TB_SYSTEM_STATUS_JONES_ISON_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -#define RGX_TB_SYSTEM_STATUS_JONES_ISON_EN (IMG_UINT64_C(0x0000000020000000)) -#define RGX_TB_SYSTEM_STATUS_JONES_POWER_SHIFT (28U) -#define RGX_TB_SYSTEM_STATUS_JONES_POWER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -#define RGX_TB_SYSTEM_STATUS_JONES_POWER_EN (IMG_UINT64_C(0x0000000010000000)) -#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_SHIFT (25U) -#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN (IMG_UINT64_C(0x0000000002000000)) -#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_SHIFT (24U) -#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_EN (IMG_UINT64_C(0x0000000001000000)) -#define RGX_TB_SYSTEM_STATUS_GPU_STATE_SHIFT (20U) -#define RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF0FFFFF)) -#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_SHIFT (18U) -#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_EN (IMG_UINT64_C(0x0000000000040000)) -#define RGX_TB_SYSTEM_STATUS_TRIGGER_SHIFT (17U) -#define RGX_TB_SYSTEM_STATUS_TRIGGER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -#define RGX_TB_SYSTEM_STATUS_TRIGGER_EN (IMG_UINT64_C(0x0000000000020000)) -#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_SHIFT (16U) -#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_EN (IMG_UINT64_C(0x0000000000010000)) -#define RGX_TB_SYSTEM_STATUS_IRQ_SHIFT (0U) -#define RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) +#define RGX_TB_SYSTEM_STATUS (0x00E0U) +#define RGX_TB_SYSTEM_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFF3FF700FF)) +#define RGX_TB_SYSTEM_STATUS_SPU_ISON_SHIFT (48U) +#define RGX_TB_SYSTEM_STATUS_SPU_ISON_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_SPU_POWER_SHIFT (32U) +#define RGX_TB_SYSTEM_STATUS_SPU_POWER_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_DAVY_ISON_SHIFT (29U) +#define RGX_TB_SYSTEM_STATUS_DAVY_ISON_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_DAVY_ISON_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_TB_SYSTEM_STATUS_DAVY_POWER_SHIFT (28U) +#define RGX_TB_SYSTEM_STATUS_DAVY_POWER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_DAVY_POWER_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_TB_SYSTEM_STATUS_CHEST_ENOLA_ISON_SHIFT (27U) +#define RGX_TB_SYSTEM_STATUS_CHEST_ENOLA_ISON_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_TB_SYSTEM_STATUS_CHEST_ENOLA_ISON_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_TB_SYSTEM_STATUS_CHEST_ENOLA_POWER_SHIFT (26U) +#define RGX_TB_SYSTEM_STATUS_CHEST_ENOLA_POWER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_CHEST_ENOLA_POWER_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_SHIFT (25U) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_SHIFT (24U) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_TB_SYSTEM_STATUS_GPU_STATE_SHIFT (20U) +#define RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF0FFFFF)) +#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_SHIFT (18U) +#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_TB_SYSTEM_STATUS_TRIGGER_SHIFT (17U) +#define RGX_TB_SYSTEM_STATUS_TRIGGER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_TB_SYSTEM_STATUS_TRIGGER_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_SHIFT (16U) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_TB_SYSTEM_STATUS_IRQ_SHIFT (0U) +#define RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) /* Register RGX_TB_SYSTEM_CONFIG */ -#define RGX_TB_SYSTEM_CONFIG (0x00F0U) -#define RGX_TB_SYSTEM_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000007007737)) -#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_SHIFT (24U) -#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) -#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_SHIFT (14U) -#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_EN (IMG_UINT64_C(0x0000000000004000)) -#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_SHIFT (13U) -#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_EN (IMG_UINT64_C(0x0000000000002000)) -#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_SHIFT (12U) -#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_SHIFT (10U) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_EN (IMG_UINT64_C(0x0000000000000400)) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_SHIFT (9U) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_EN (IMG_UINT64_C(0x0000000000000200)) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_SHIFT (8U) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_EN (IMG_UINT64_C(0x0000000000000100)) -#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_SHIFT (5U) -#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_EN (IMG_UINT64_C(0x0000000000000020)) -#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_SHIFT (4U) -#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_EN (IMG_UINT64_C(0x0000000000000010)) -#define RGX_TB_SYSTEM_CONFIG_NS_NOC_SHIFT (2U) -#define RGX_TB_SYSTEM_CONFIG_NS_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -#define RGX_TB_SYSTEM_CONFIG_NS_NOC_EN (IMG_UINT64_C(0x0000000000000004)) -#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_SHIFT (1U) -#define 
RGX_TB_SYSTEM_CONFIG_IMG_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_EN (IMG_UINT64_C(0x0000000000000002)) -#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_SHIFT (0U) -#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_SYSTEM_CONFIG (0x00F0U) +#define RGX_TB_SYSTEM_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000007007737)) +#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_SHIFT (24U) +#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_SHIFT (14U) +#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_SHIFT (13U) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_SHIFT (12U) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_SHIFT (10U) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_SHIFT (9U) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_SHIFT (8U) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_SHIFT (5U) +#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_SHIFT (4U) +#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_TB_SYSTEM_CONFIG_NS_NOC_SHIFT (2U) +#define RGX_TB_SYSTEM_CONFIG_NS_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_TB_SYSTEM_CONFIG_NS_NOC_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_SHIFT (1U) +#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_SHIFT (0U) +#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_EN (IMG_UINT64_C(0x0000000000000001)) + + +#define RGX_TB_DOMAINSPLIT_TYPE_RESERVED_SHIFT (9U) +#define RGX_TB_DOMAINSPLIT_TYPE_RESERVED_CLRMSK (0xFFFF01FFU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU7_SHIFT (8U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU7_CLRMSK (0xFFFFFEFFU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU7_EN (0x00000100U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU6_SHIFT (7U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU6_CLRMSK (0xFFFFFF7FU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU6_EN (0x00000080U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU5_SHIFT (6U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU5_CLRMSK (0xFFFFFFBFU) 
+#define RGX_TB_DOMAINSPLIT_TYPE_SPU5_EN (0x00000040U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU4_SHIFT (5U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU4_CLRMSK (0xFFFFFFDFU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU4_EN (0x00000020U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU3_SHIFT (4U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU3_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU3_EN (0x00000010U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU2_SHIFT (3U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU2_CLRMSK (0xFFFFFFF7U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU2_EN (0x00000008U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU1_SHIFT (2U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU1_CLRMSK (0xFFFFFFFBU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU1_EN (0x00000004U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU0_SHIFT (1U) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU0_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_DOMAINSPLIT_TYPE_SPU0_EN (0x00000002U) +#define RGX_TB_DOMAINSPLIT_TYPE_JONES_SHIFT (0U) +#define RGX_TB_DOMAINSPLIT_TYPE_JONES_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_DOMAINSPLIT_TYPE_JONES_EN (0x00000001U) + + +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_MASK (0xFFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER31_SHIFT (31U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER31_CLRMSK (0x7FFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER31_EN (0x80000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER30_SHIFT (30U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER30_CLRMSK (0xBFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER30_EN (0x40000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER29_SHIFT (29U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER29_CLRMSK (0xDFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER29_EN (0x20000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER28_SHIFT (28U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER28_CLRMSK (0xEFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER28_EN (0x10000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER27_SHIFT (27U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER27_CLRMSK (0xF7FFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER27_EN (0x08000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER26_SHIFT (26U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER26_CLRMSK (0xFBFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER26_EN (0x04000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER25_SHIFT (25U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER25_CLRMSK (0xFDFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER25_EN (0x02000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER24_SHIFT (24U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER24_CLRMSK (0xFEFFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER24_EN (0x01000000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER23_SHIFT (23U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER23_CLRMSK (0xFF7FFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER23_EN (0x00800000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER22_SHIFT (22U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER22_CLRMSK (0xFFBFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER22_EN (0x00400000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER21_SHIFT (21U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER21_CLRMSK (0xFFDFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER21_EN (0x00200000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER20_SHIFT (20U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER20_CLRMSK (0xFFEFFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER20_EN (0x00100000U) +#define 
RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER19_SHIFT (19U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER19_CLRMSK (0xFFF7FFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER19_EN (0x00080000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER18_SHIFT (18U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER18_CLRMSK (0xFFFBFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER18_EN (0x00040000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER17_SHIFT (17U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER17_CLRMSK (0xFFFDFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER17_EN (0x00020000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER16_SHIFT (16U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER16_CLRMSK (0xFFFEFFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER16_EN (0x00010000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER15_SHIFT (15U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER15_CLRMSK (0xFFFF7FFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER15_EN (0x00008000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER14_SHIFT (14U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER14_CLRMSK (0xFFFFBFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER14_EN (0x00004000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER13_SHIFT (13U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER13_CLRMSK (0xFFFFDFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER13_EN (0x00002000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER12_SHIFT (12U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER12_CLRMSK (0xFFFFEFFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER12_EN (0x00001000U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER11_SHIFT (11U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER11_CLRMSK (0xFFFFF7FFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER11_EN (0x00000800U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER10_SHIFT (10U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER10_CLRMSK (0xFFFFFBFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER10_EN (0x00000400U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER9_SHIFT (9U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER9_CLRMSK (0xFFFFFDFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER9_EN (0x00000200U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER8_SHIFT (8U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER8_CLRMSK (0xFFFFFEFFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER8_EN (0x00000100U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER7_SHIFT (7U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER7_CLRMSK (0xFFFFFF7FU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER7_EN (0x00000080U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER6_SHIFT (6U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER6_CLRMSK (0xFFFFFFBFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER6_EN (0x00000040U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER5_SHIFT (5U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER5_CLRMSK (0xFFFFFFDFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER5_EN (0x00000020U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER4_SHIFT (4U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER4_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER4_EN (0x00000010U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER3_SHIFT (3U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER3_CLRMSK (0xFFFFFFF7U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER3_EN (0x00000008U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER2_SHIFT (2U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER2_CLRMSK (0xFFFFFFFBU) +#define 
RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER2_EN (0x00000004U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER1_SHIFT (1U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER1_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER1_EN (0x00000002U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER0_SHIFT (0U) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER0_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_DOMAINSPLIT_CLUSTER_TYPE_CLUSTER0_EN (0x00000001U) + + +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC31_SHIFT (31U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC31_CLRMSK (0x7FFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC31_EN (0x80000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC30_SHIFT (30U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC30_CLRMSK (0xBFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC30_EN (0x40000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC29_SHIFT (29U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC29_CLRMSK (0xDFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC29_EN (0x20000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC28_SHIFT (28U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC28_CLRMSK (0xEFFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC28_EN (0x10000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC27_SHIFT (27U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC27_CLRMSK (0xF7FFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC27_EN (0x08000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC26_SHIFT (26U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC26_CLRMSK (0xFBFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC26_EN (0x04000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC25_SHIFT (25U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC25_CLRMSK (0xFDFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC25_EN (0x02000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC24_SHIFT (24U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC24_CLRMSK (0xFEFFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC24_EN (0x01000000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC23_SHIFT (23U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC23_CLRMSK (0xFF7FFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC23_EN (0x00800000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC22_SHIFT (22U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC22_CLRMSK (0xFFBFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC22_EN (0x00400000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC21_SHIFT (21U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC21_CLRMSK (0xFFDFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC21_EN (0x00200000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC20_SHIFT (20U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC20_CLRMSK (0xFFEFFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC20_EN (0x00100000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC19_SHIFT (19U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC19_CLRMSK (0xFFF7FFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC19_EN (0x00080000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC18_SHIFT (18U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC18_CLRMSK (0xFFFBFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC18_EN (0x00040000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC17_SHIFT (17U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC17_CLRMSK (0xFFFDFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC17_EN (0x00020000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC16_SHIFT (16U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC16_CLRMSK (0xFFFEFFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC16_EN (0x00010000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC15_SHIFT (15U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC15_CLRMSK (0xFFFF7FFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC15_EN (0x00008000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC14_SHIFT 
(14U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC14_CLRMSK (0xFFFFBFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC14_EN (0x00004000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC13_SHIFT (13U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC13_CLRMSK (0xFFFFDFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC13_EN (0x00002000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC12_SHIFT (12U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC12_CLRMSK (0xFFFFEFFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC12_EN (0x00001000U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC11_SHIFT (11U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC11_CLRMSK (0xFFFFF7FFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC11_EN (0x00000800U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC10_SHIFT (10U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC10_CLRMSK (0xFFFFFBFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC10_EN (0x00000400U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC9_SHIFT (9U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC9_CLRMSK (0xFFFFFDFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC9_EN (0x00000200U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC8_SHIFT (8U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC8_CLRMSK (0xFFFFFEFFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC8_EN (0x00000100U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC7_SHIFT (7U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC7_CLRMSK (0xFFFFFF7FU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC7_EN (0x00000080U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC6_SHIFT (6U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC6_CLRMSK (0xFFFFFFBFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC6_EN (0x00000040U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC5_SHIFT (5U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC5_CLRMSK (0xFFFFFFDFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC5_EN (0x00000020U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC4_SHIFT (4U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC4_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC4_EN (0x00000010U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC3_SHIFT (3U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC3_CLRMSK (0xFFFFFFF7U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC3_EN (0x00000008U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC2_SHIFT (2U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC2_CLRMSK (0xFFFFFFFBU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC2_EN (0x00000004U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC1_SHIFT (1U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC1_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC1_EN (0x00000002U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC0_SHIFT (0U) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC0_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_DOMAINSPLIT_RAC_TYPE_RAC0_EN (0x00000001U) + + +/* + Register RGX_TB_HOST_POWER_EVENT +*/ +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3 (0x0100U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__MASKFULL (IMG_UINT64_C(0xFFFFFFFF0000FF03)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_SHIFT (32U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER31_SHIFT (63U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER31_CLRMSK (IMG_UINT64_C(0x7fffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER30_SHIFT (62U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER30_CLRMSK (IMG_UINT64_C(0xbfffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER29_SHIFT (61U) +#define 
RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER29_CLRMSK (IMG_UINT64_C(0xdfffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER28_SHIFT (60U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER28_CLRMSK (IMG_UINT64_C(0xefffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER27_SHIFT (59U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER27_CLRMSK (IMG_UINT64_C(0xf7ffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER26_SHIFT (58U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER26_CLRMSK (IMG_UINT64_C(0xfbffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER25_SHIFT (57U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER25_CLRMSK (IMG_UINT64_C(0xfdffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER24_SHIFT (56U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER24_CLRMSK (IMG_UINT64_C(0xfeffffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER23_SHIFT (55U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER23_CLRMSK (IMG_UINT64_C(0xff7fffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER22_SHIFT (54U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER22_CLRMSK (IMG_UINT64_C(0xffbfffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER21_SHIFT (53U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER21_CLRMSK (IMG_UINT64_C(0xffdfffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER20_SHIFT (52U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER20_CLRMSK (IMG_UINT64_C(0xffefffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER19_SHIFT (51U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER19_CLRMSK (IMG_UINT64_C(0xfff7ffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER18_SHIFT (50U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER18_CLRMSK (IMG_UINT64_C(0xfffbffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER17_SHIFT (49U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER17_CLRMSK (IMG_UINT64_C(0xfffdffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER16_SHIFT (48U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER16_CLRMSK (IMG_UINT64_C(0xfffeffffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER15_SHIFT (47U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER15_CLRMSK (IMG_UINT64_C(0xffff7fffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER14_SHIFT (46U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER14_CLRMSK (IMG_UINT64_C(0xffffbfffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER13_SHIFT (45U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER13_CLRMSK (IMG_UINT64_C(0xffffdfffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER12_SHIFT 
(44U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER12_CLRMSK (IMG_UINT64_C(0xffffefffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER11_SHIFT (43U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER11_CLRMSK (IMG_UINT64_C(0xfffff7ffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER10_SHIFT (42U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER10_CLRMSK (IMG_UINT64_C(0xfffffbffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER9_SHIFT (41U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER9_CLRMSK (IMG_UINT64_C(0xfffffdffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER8_SHIFT (40U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER8_CLRMSK (IMG_UINT64_C(0xfffffeffffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER7_SHIFT (39U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER7_CLRMSK (IMG_UINT64_C(0xffffff7fffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER6_SHIFT (38U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER6_CLRMSK (IMG_UINT64_C(0xffffffbfffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER5_SHIFT (37U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER5_CLRMSK (IMG_UINT64_C(0xffffffdfffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER4_SHIFT (36U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER4_CLRMSK (IMG_UINT64_C(0xffffffefffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER3_SHIFT (35U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER3_CLRMSK (IMG_UINT64_C(0xfffffff7ffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER2_SHIFT (34U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER2_CLRMSK (IMG_UINT64_C(0xfffffffbffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER1_SHIFT (33U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER1_CLRMSK (IMG_UINT64_C(0xfffffffdffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER0_SHIFT (32U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__DOMAIN_CLUSTER_CLUSTER0_CLRMSK (IMG_UINT64_C(0xfffffffeffffffff)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__GPU_MASK_SHIFT (8U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__GPU_MASK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__REQ_SHIFT (1U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__REQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__REQ_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__TYPE_SHIFT (0U) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__TYPE_POWER_DOWN (IMG_UINT64_C(0x0000000000000000)) +#define RGX_TB_HOST_POWER_EVENT__POWER_ISLAND_GEQ3__TYPE_POWER_UP (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_GPU_CONTROL +*/ +#define RGX_TB_GPU_CONTROL (0x0110U) +#define 
RGX_TB_GPU_CONTROL_MASKFULL (IMG_UINT64_C(0x0000000000000007)) +#define RGX_TB_GPU_CONTROL_FW_LOG_DISABLE_SHIFT (2U) +#define RGX_TB_GPU_CONTROL_FW_LOG_DISABLE_CLRMSK (0xFFFFFFFBU) +#define RGX_TB_GPU_CONTROL_FW_LOG_DISABLE_EN (0x00000004U) +#define RGX_TB_GPU_CONTROL_DXT_BC_ENABLE_SHIFT (1U) +#define RGX_TB_GPU_CONTROL_DXT_BC_ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_CONTROL_DXT_BC_ENABLE_EN (0x00000002U) +#define RGX_TB_GPU_CONTROL_ASTC_ENABLE_SHIFT (0U) +#define RGX_TB_GPU_CONTROL_ASTC_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_CONTROL_ASTC_ENABLE_EN (0x00000001U) + + +/* + Register RGX_TB_HOST_POWER_EVENT_JONES +*/ +#define RGX_TB_HOST_POWER_EVENT_JONES__POWER_ISLAND_GEQ2_AND_POWER_ISLAND_LEQ5 (0x0118U) +#define RGX_TB_HOST_POWER_EVENT_JONES__POWER_ISLAND_GEQ2_AND_POWER_ISLAND_LEQ5__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_HOST_POWER_EVENT_JONES__POWER_ISLAND_GEQ2_AND_POWER_ISLAND_LEQ5__DOMAIN_SHIFT (0U) +#define RGX_TB_HOST_POWER_EVENT_JONES__POWER_ISLAND_GEQ2_AND_POWER_ISLAND_LEQ5__DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_HOST_POWER_EVENT_JONES__POWER_ISLAND_GEQ2_AND_POWER_ISLAND_LEQ5__DOMAIN_EN (0x00000001U) + + +/* + Register RGX_TB_HOST_POWER_EVENT_TEXAS +*/ +#define RGX_TB_HOST_POWER_EVENT_TEXAS__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5 (0x0168U) +#define RGX_TB_HOST_POWER_EVENT_TEXAS__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_TB_HOST_POWER_EVENT_TEXAS__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__DOMAIN_SHIFT (0U) +#define RGX_TB_HOST_POWER_EVENT_TEXAS__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__DOMAIN_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_TB_HOST_POWER_EVENT_CHEST_ENOLA +*/ +#define RGX_TB_HOST_POWER_EVENT_CHEST_ENOLA__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5 (0x0170U) +#define RGX_TB_HOST_POWER_EVENT_CHEST_ENOLA__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_HOST_POWER_EVENT_CHEST_ENOLA__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__DOMAIN_SHIFT (0U) +#define RGX_TB_HOST_POWER_EVENT_CHEST_ENOLA__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_HOST_POWER_EVENT_CHEST_ENOLA__POWER_ISLAND_GEQ3_AND_POWER_ISLAND_LEQ5__DOMAIN_EN (0x00000001U) + + +/* + Register RGX_TB_TRIGGER_SHORT +*/ +#define RGX_TB_TRIGGER_SHORT (0x1500U) +#define RGX_TB_TRIGGER_SHORT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_TRIGGER_SHORT_TIMEOUT_SHIFT (0U) +#define RGX_TB_TRIGGER_SHORT_TIMEOUT_CLRMSK (0x00000000U) + + +/* + Register RGX_TB_TRIGGER_MEDIUM +*/ +#define RGX_TB_TRIGGER_MEDIUM (0x1508U) +#define RGX_TB_TRIGGER_MEDIUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_TRIGGER_MEDIUM_TIMEOUT_SHIFT (0U) +#define RGX_TB_TRIGGER_MEDIUM_TIMEOUT_CLRMSK (0x00000000U) + + +/* + Register RGX_TB_TRIGGER_HIGH +*/ +#define RGX_TB_TRIGGER_HIGH (0x1510U) +#define RGX_TB_TRIGGER_HIGH_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_TRIGGER_HIGH_TIMEOUT_SHIFT (0U) +#define RGX_TB_TRIGGER_HIGH_TIMEOUT_CLRMSK (0x00000000U) + + +/* + Register RGX_TB_TRIGGER_MAX +*/ +#define RGX_TB_TRIGGER_MAX (0x1518U) +#define RGX_TB_TRIGGER_MAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_TRIGGER_MAX_TIMEOUT_SHIFT (0U) +#define RGX_TB_TRIGGER_MAX_TIMEOUT_CLRMSK (0x00000000U) /* Register RGX_TB_RDATA_CORRUPT_ENABLE */ -#define RGX_TB_RDATA_CORRUPT_ENABLE (0x1560U) -#define RGX_TB_RDATA_CORRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_SHIFT (0U) -#define 
RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_EN (0x00000001U) +#define RGX_TB_RDATA_CORRUPT_ENABLE (0x1560U) +#define RGX_TB_RDATA_CORRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_SHIFT (0U) +#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_EN (0x00000001U) /* Register RGX_TB_RDATA_CORRUPT_MASK */ -#define RGX_TB_RDATA_CORRUPT_MASK (0x1568U) -#define RGX_TB_RDATA_CORRUPT_MASK_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_TB_RDATA_CORRUPT_MASK_MMU_SHIFT (31U) -#define RGX_TB_RDATA_CORRUPT_MASK_MMU_CLRMSK (0x7FFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_MMU_EN (0x80000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_UPS_SHIFT (30U) -#define RGX_TB_RDATA_CORRUPT_MASK_UPS_CLRMSK (0xBFFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_UPS_EN (0x40000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_FBM_SHIFT (29U) -#define RGX_TB_RDATA_CORRUPT_MASK_FBM_CLRMSK (0xDFFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_FBM_EN (0x20000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_TUL_SHIFT (28U) -#define RGX_TB_RDATA_CORRUPT_MASK_TUL_CLRMSK (0xEFFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_TUL_EN (0x10000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_SHR_SHIFT (27U) -#define RGX_TB_RDATA_CORRUPT_MASK_SHR_CLRMSK (0xF7FFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_SHR_EN (0x08000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_FBA_SHIFT (26U) -#define RGX_TB_RDATA_CORRUPT_MASK_FBA_CLRMSK (0xFBFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_FBA_EN (0x04000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_VDM_SHIFT (25U) -#define RGX_TB_RDATA_CORRUPT_MASK_VDM_CLRMSK (0xFDFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_VDM_EN (0x02000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_SHIFT (24U) -#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_CLRMSK (0xFEFFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_EN (0x01000000U) -#define RGX_TB_RDATA_CORRUPT_MASK_PDS_SHIFT (23U) -#define RGX_TB_RDATA_CORRUPT_MASK_PDS_CLRMSK (0xFF7FFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_PDS_EN (0x00800000U) -#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_SHIFT (22U) -#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_CLRMSK (0xFFBFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_EN (0x00400000U) -#define RGX_TB_RDATA_CORRUPT_MASK_TPF_SHIFT (21U) -#define RGX_TB_RDATA_CORRUPT_MASK_TPF_CLRMSK (0xFFDFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_TPF_EN (0x00200000U) -#define RGX_TB_RDATA_CORRUPT_MASK_SHF_SHIFT (20U) -#define RGX_TB_RDATA_CORRUPT_MASK_SHF_CLRMSK (0xFFEFFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_SHF_EN (0x00100000U) -#define RGX_TB_RDATA_CORRUPT_MASK_AMC_SHIFT (19U) -#define RGX_TB_RDATA_CORRUPT_MASK_AMC_CLRMSK (0xFFF7FFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_AMC_EN (0x00080000U) -#define RGX_TB_RDATA_CORRUPT_MASK_RAC_SHIFT (18U) -#define RGX_TB_RDATA_CORRUPT_MASK_RAC_CLRMSK (0xFFFBFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_RAC_EN (0x00040000U) -#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_SHIFT (17U) -#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_CLRMSK (0xFFFDFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_EN (0x00020000U) -#define RGX_TB_RDATA_CORRUPT_MASK_ISP_SHIFT (16U) -#define RGX_TB_RDATA_CORRUPT_MASK_ISP_CLRMSK (0xFFFEFFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_ISP_EN (0x00010000U) -#define RGX_TB_RDATA_CORRUPT_MASK_PPP_SHIFT (15U) -#define RGX_TB_RDATA_CORRUPT_MASK_PPP_CLRMSK (0xFFFF7FFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_PPP_EN (0x00008000U) -#define RGX_TB_RDATA_CORRUPT_MASK_IPF_SHIFT (14U) -#define RGX_TB_RDATA_CORRUPT_MASK_IPF_CLRMSK 
(0xFFFFBFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_IPF_EN (0x00004000U) -#define RGX_TB_RDATA_CORRUPT_MASK_VCE_SHIFT (13U) -#define RGX_TB_RDATA_CORRUPT_MASK_VCE_CLRMSK (0xFFFFDFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_VCE_EN (0x00002000U) -#define RGX_TB_RDATA_CORRUPT_MASK_PBE_SHIFT (12U) -#define RGX_TB_RDATA_CORRUPT_MASK_PBE_CLRMSK (0xFFFFEFFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_PBE_EN (0x00001000U) -#define RGX_TB_RDATA_CORRUPT_MASK_TCU_SHIFT (11U) -#define RGX_TB_RDATA_CORRUPT_MASK_TCU_CLRMSK (0xFFFFF7FFU) -#define RGX_TB_RDATA_CORRUPT_MASK_TCU_EN (0x00000800U) -#define RGX_TB_RDATA_CORRUPT_MASK_MCU_SHIFT (10U) -#define RGX_TB_RDATA_CORRUPT_MASK_MCU_CLRMSK (0xFFFFFBFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_MCU_EN (0x00000400U) -#define RGX_TB_RDATA_CORRUPT_MASK_RPM_SHIFT (9U) -#define RGX_TB_RDATA_CORRUPT_MASK_RPM_CLRMSK (0xFFFFFDFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_RPM_EN (0x00000200U) -#define RGX_TB_RDATA_CORRUPT_MASK_RTU_SHIFT (8U) -#define RGX_TB_RDATA_CORRUPT_MASK_RTU_CLRMSK (0xFFFFFEFFU) -#define RGX_TB_RDATA_CORRUPT_MASK_RTU_EN (0x00000100U) -#define RGX_TB_RDATA_CORRUPT_MASK_TILING_SHIFT (7U) -#define RGX_TB_RDATA_CORRUPT_MASK_TILING_CLRMSK (0xFFFFFF7FU) -#define RGX_TB_RDATA_CORRUPT_MASK_TILING_EN (0x00000080U) -#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_SHIFT (6U) -#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_CLRMSK (0xFFFFFFBFU) -#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_EN (0x00000040U) -#define RGX_TB_RDATA_CORRUPT_MASK_META_SHIFT (5U) -#define RGX_TB_RDATA_CORRUPT_MASK_META_CLRMSK (0xFFFFFFDFU) -#define RGX_TB_RDATA_CORRUPT_MASK_META_EN (0x00000020U) -#define RGX_TB_RDATA_CORRUPT_MASK_CDM_SHIFT (4U) -#define RGX_TB_RDATA_CORRUPT_MASK_CDM_CLRMSK (0xFFFFFFEFU) -#define RGX_TB_RDATA_CORRUPT_MASK_CDM_EN (0x00000010U) -#define RGX_TB_RDATA_CORRUPT_MASK_PM_SHIFT (3U) -#define RGX_TB_RDATA_CORRUPT_MASK_PM_CLRMSK (0xFFFFFFF7U) -#define RGX_TB_RDATA_CORRUPT_MASK_PM_EN (0x00000008U) -#define RGX_TB_RDATA_CORRUPT_MASK_TDM_SHIFT (2U) -#define RGX_TB_RDATA_CORRUPT_MASK_TDM_CLRMSK (0xFFFFFFFBU) -#define RGX_TB_RDATA_CORRUPT_MASK_TDM_EN (0x00000004U) -#define RGX_TB_RDATA_CORRUPT_MASK_DCE_SHIFT (1U) -#define RGX_TB_RDATA_CORRUPT_MASK_DCE_CLRMSK (0xFFFFFFFDU) -#define RGX_TB_RDATA_CORRUPT_MASK_DCE_EN (0x00000002U) -#define RGX_TB_RDATA_CORRUPT_MASK_IPP_SHIFT (0U) -#define RGX_TB_RDATA_CORRUPT_MASK_IPP_CLRMSK (0xFFFFFFFEU) -#define RGX_TB_RDATA_CORRUPT_MASK_IPP_EN (0x00000001U) +#define RGX_TB_RDATA_CORRUPT_MASK (0x1568U) +#define RGX_TB_RDATA_CORRUPT_MASK_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_RDATA_CORRUPT_MASK_MMU_SHIFT (31U) +#define RGX_TB_RDATA_CORRUPT_MASK_MMU_CLRMSK (0x7FFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_MMU_EN (0x80000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_UPS_SHIFT (30U) +#define RGX_TB_RDATA_CORRUPT_MASK_UPS_CLRMSK (0xBFFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_UPS_EN (0x40000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBM_SHIFT (29U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBM_CLRMSK (0xDFFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_FBM_EN (0x20000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_TUL_SHIFT (28U) +#define RGX_TB_RDATA_CORRUPT_MASK_TUL_CLRMSK (0xEFFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_TUL_EN (0x10000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHR_SHIFT (27U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHR_CLRMSK (0xF7FFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_SHR_EN (0x08000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBA_SHIFT (26U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBA_CLRMSK (0xFBFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_FBA_EN (0x04000000U) 
+#define RGX_TB_RDATA_CORRUPT_MASK_VDM_SHIFT (25U) +#define RGX_TB_RDATA_CORRUPT_MASK_VDM_CLRMSK (0xFDFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_VDM_EN (0x02000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_SHIFT (24U) +#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_CLRMSK (0xFEFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_EN (0x01000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDS_SHIFT (23U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDS_CLRMSK (0xFF7FFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PDS_EN (0x00800000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_SHIFT (22U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_CLRMSK (0xFFBFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_EN (0x00400000U) +#define RGX_TB_RDATA_CORRUPT_MASK_TPF_SHIFT (21U) +#define RGX_TB_RDATA_CORRUPT_MASK_TPF_CLRMSK (0xFFDFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_TPF_EN (0x00200000U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHF_SHIFT (20U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHF_CLRMSK (0xFFEFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_SHF_EN (0x00100000U) +#define RGX_TB_RDATA_CORRUPT_MASK_AMC_SHIFT (19U) +#define RGX_TB_RDATA_CORRUPT_MASK_AMC_CLRMSK (0xFFF7FFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_AMC_EN (0x00080000U) +#define RGX_TB_RDATA_CORRUPT_MASK_RAC_SHIFT (18U) +#define RGX_TB_RDATA_CORRUPT_MASK_RAC_CLRMSK (0xFFFBFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_RAC_EN (0x00040000U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_SHIFT (17U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_CLRMSK (0xFFFDFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_EN (0x00020000U) +#define RGX_TB_RDATA_CORRUPT_MASK_ISP_SHIFT (16U) +#define RGX_TB_RDATA_CORRUPT_MASK_ISP_CLRMSK (0xFFFEFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_ISP_EN (0x00010000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PPP_SHIFT (15U) +#define RGX_TB_RDATA_CORRUPT_MASK_PPP_CLRMSK (0xFFFF7FFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PPP_EN (0x00008000U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPF_SHIFT (14U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPF_CLRMSK (0xFFFFBFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_IPF_EN (0x00004000U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_SHIFT (13U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_CLRMSK (0xFFFFDFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_EN (0x00002000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PBE_SHIFT (12U) +#define RGX_TB_RDATA_CORRUPT_MASK_PBE_CLRMSK (0xFFFFEFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PBE_EN (0x00001000U) +#define RGX_TB_RDATA_CORRUPT_MASK_TCU_SHIFT (11U) +#define RGX_TB_RDATA_CORRUPT_MASK_TCU_CLRMSK (0xFFFFF7FFU) +#define RGX_TB_RDATA_CORRUPT_MASK_TCU_EN (0x00000800U) +#define RGX_TB_RDATA_CORRUPT_MASK_MCU_SHIFT (10U) +#define RGX_TB_RDATA_CORRUPT_MASK_MCU_CLRMSK (0xFFFFFBFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_MCU_EN (0x00000400U) +#define RGX_TB_RDATA_CORRUPT_MASK_RPM_SHIFT (9U) +#define RGX_TB_RDATA_CORRUPT_MASK_RPM_CLRMSK (0xFFFFFDFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_RPM_EN (0x00000200U) +#define RGX_TB_RDATA_CORRUPT_MASK_RTU_SHIFT (8U) +#define RGX_TB_RDATA_CORRUPT_MASK_RTU_CLRMSK (0xFFFFFEFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_RTU_EN (0x00000100U) +#define RGX_TB_RDATA_CORRUPT_MASK_TILING_SHIFT (7U) +#define RGX_TB_RDATA_CORRUPT_MASK_TILING_CLRMSK (0xFFFFFF7FU) +#define RGX_TB_RDATA_CORRUPT_MASK_TILING_EN (0x00000080U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_SHIFT (6U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_CLRMSK (0xFFFFFFBFU) +#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_EN (0x00000040U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_SHIFT (5U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_CLRMSK (0xFFFFFFDFU) +#define RGX_TB_RDATA_CORRUPT_MASK_META_EN 
(0x00000020U) +#define RGX_TB_RDATA_CORRUPT_MASK_CDM_SHIFT (4U) +#define RGX_TB_RDATA_CORRUPT_MASK_CDM_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_RDATA_CORRUPT_MASK_CDM_EN (0x00000010U) +#define RGX_TB_RDATA_CORRUPT_MASK_PM_SHIFT (3U) +#define RGX_TB_RDATA_CORRUPT_MASK_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_TB_RDATA_CORRUPT_MASK_PM_EN (0x00000008U) +#define RGX_TB_RDATA_CORRUPT_MASK_TDM_SHIFT (2U) +#define RGX_TB_RDATA_CORRUPT_MASK_TDM_CLRMSK (0xFFFFFFFBU) +#define RGX_TB_RDATA_CORRUPT_MASK_TDM_EN (0x00000004U) +#define RGX_TB_RDATA_CORRUPT_MASK_DCE_SHIFT (1U) +#define RGX_TB_RDATA_CORRUPT_MASK_DCE_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_RDATA_CORRUPT_MASK_DCE_EN (0x00000002U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPP_SHIFT (0U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPP_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_RDATA_CORRUPT_MASK_IPP_EN (0x00000001U) /* Register RGX_TB_RDATA_CORRUPT_FREQ */ -#define RGX_TB_RDATA_CORRUPT_FREQ (0x1570U) -#define RGX_TB_RDATA_CORRUPT_FREQ_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_SHIFT (0U) -#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_CLRMSK (0x00000000U) +#define RGX_TB_RDATA_CORRUPT_FREQ (0x1570U) +#define RGX_TB_RDATA_CORRUPT_FREQ_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_SHIFT (0U) +#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_CLRMSK (0x00000000U) /* Register RGX_TB_TRUSTED_DEVICE */ -#define RGX_TB_TRUSTED_DEVICE (0x2000U) -#define RGX_TB_TRUSTED_DEVICE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_SHIFT (4U) -#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_CLRMSK (0xFFFFFFEFU) -#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_EN (0x00000010U) -#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_SHIFT (2U) -#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_CLRMSK (0xFFFFFFF3U) -#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_SHIFT (1U) -#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_CLRMSK (0xFFFFFFFDU) -#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_EN (0x00000002U) -#define RGX_TB_TRUSTED_DEVICE_ENABLE_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_TB_TRUSTED_DEVICE_ENABLE_EN (0x00000001U) +#define RGX_TB_TRUSTED_DEVICE (0x2000U) +#define RGX_TB_TRUSTED_DEVICE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_SHIFT (4U) +#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_EN (0x00000010U) +#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_SHIFT (2U) +#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_CLRMSK (0xFFFFFFF3U) +#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_SHIFT (1U) +#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_EN (0x00000002U) +#define RGX_TB_TRUSTED_DEVICE_ENABLE_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_TRUSTED_DEVICE_ENABLE_EN (0x00000001U) /* Register RGX_TB_TRUSTED_DEVICE_FAULT_STATUS */ -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS (0x2008U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x01FFFFFFFFFF1771)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_SHIFT (56U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_EN (IMG_UINT64_C(0x0100000000000000)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_ADDR_SHIFT (16U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_ADDR_CLRMSK (IMG_UINT64_C(0xFF0000000000FFFF)) -#define 
RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_SHIFT (12U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000001000)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_OS_ID_SHIFT (8U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_BANK_SHIFT (4U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF8F)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS (0x2008U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x01FFFFFFFFFF1771)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_SHIFT (56U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_ADDR_SHIFT (16U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_ADDR_CLRMSK (IMG_UINT64_C(0xFF0000000000FFFF)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_SHIFT (12U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_OS_ID_SHIFT (8U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_BANK_SHIFT (4U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF8F)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR */ -#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR (0x2010U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_CLRMSK (0xFFFFFFFEU) -#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_EN (0x00000001U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR (0x2010U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_EN (0x00000001U) /* Register group: RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS, with 8 repeats */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS_REPEATCOUNT (8U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS_REPEATCOUNT (8U) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0 (0x2018U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0 (0x2018U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define 
RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1 (0x2020U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1 (0x2020U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2 (0x2028U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2 (0x2028U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3 (0x2030U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3 (0x2030U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4 (0x2038U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4 (0x2038U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5 (0x2040U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5 (0x2040U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6 */ -#define 
RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6 (0x2048U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6 (0x2048U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7 (0x2050U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7 (0x2050U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register group: RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS, with 8 repeats */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS_REPEATCOUNT (8U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS_REPEATCOUNT (8U) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0 (0x2058U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0 (0x2058U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1 (0x2060U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1 (0x2060U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2 (0x2068U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2 (0x2068U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3 
(0x2070U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3 (0x2070U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4 (0x2078U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4 (0x2078U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5 (0x2080U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5 (0x2080U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6 (0x2088U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6 (0x2088U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7 */ -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7 (0x2090U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7 (0x2090U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register group: RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS, with 8 repeats */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS_REPEATCOUNT (8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS_REPEATCOUNT (8U) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0 (0x2098U) -#define 
RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0 (0x2098U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1 (0x20A0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1 (0x20A0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2 (0x20A8U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2 (0x20A8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3 (0x20B0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3 (0x20B0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4 (0x20B8U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4 (0x20B8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5 (0x20C0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) 
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5 (0x20C0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6 (0x20C8U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6 (0x20C8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7 (0x20D0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7 (0x20D0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register group: RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS, with 8 repeats */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS_REPEATCOUNT (8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS_REPEATCOUNT (8U) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0 (0x20D8U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0 (0x20D8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1 (0x20E0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1 (0x20E0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2 (0x20E8U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define 
RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2 (0x20E8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3 (0x20F0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3 (0x20F0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4 (0x20F8U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4 (0x20F8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5 (0x2100U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5 (0x2100U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6 (0x2108U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6 (0x2108U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7 */ -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7 (0x2110U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_ADDR_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7 (0x2110U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_ADDR_SHIFT (0U) +#define 
RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) /* Register RGX_TB_BW_LIMITER */ -#define RGX_TB_BW_LIMITER (0x2118U) -#define RGX_TB_BW_LIMITER_MASKFULL (IMG_UINT64_C(0x00000000007707FF)) -#define RGX_TB_BW_LIMITER_DROPN_EXT_SHIFT (20U) -#define RGX_TB_BW_LIMITER_DROPN_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF8FFFFF)) -#define RGX_TB_BW_LIMITER_PERIOD_EXT_SHIFT (16U) -#define RGX_TB_BW_LIMITER_PERIOD_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) -#define RGX_TB_BW_LIMITER_DROPN_SHIFT (6U) -#define RGX_TB_BW_LIMITER_DROPN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) -#define RGX_TB_BW_LIMITER_PERIOD_SHIFT (1U) -#define RGX_TB_BW_LIMITER_PERIOD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) -#define RGX_TB_BW_LIMITER_ENABLE_SHIFT (0U) -#define RGX_TB_BW_LIMITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_BW_LIMITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_BW_LIMITER (0x2118U) +#define RGX_TB_BW_LIMITER_MASKFULL (IMG_UINT64_C(0x00000000007707FF)) +#define RGX_TB_BW_LIMITER_DROPN_EXT_SHIFT (20U) +#define RGX_TB_BW_LIMITER_DROPN_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF8FFFFF)) +#define RGX_TB_BW_LIMITER_PERIOD_EXT_SHIFT (16U) +#define RGX_TB_BW_LIMITER_PERIOD_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) +#define RGX_TB_BW_LIMITER_DROPN_SHIFT (6U) +#define RGX_TB_BW_LIMITER_DROPN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_TB_BW_LIMITER_PERIOD_SHIFT (1U) +#define RGX_TB_BW_LIMITER_PERIOD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) +#define RGX_TB_BW_LIMITER_ENABLE_SHIFT (0U) +#define RGX_TB_BW_LIMITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_BW_LIMITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_TRUSTED_DEVICE_ACECONFIG */ -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG (0x2120U) -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_OSID_SECURITY_SHIFT (1U) -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE01)) -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_SHIFT (0U) -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG (0x2120U) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_OSID_SECURITY_SHIFT (1U) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE01)) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_DRAM_CROSSBAR */ -#define RGX_TB_DRAM_CROSSBAR (0x2128U) -#define RGX_TB_DRAM_CROSSBAR_MASKFULL (IMG_UINT64_C(0x0000000000003301)) -#define RGX_TB_DRAM_CROSSBAR_CHANNELS_SHIFT (12U) -#define RGX_TB_DRAM_CROSSBAR_CHANNELS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_SHIFT (8U) -#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -#define RGX_TB_DRAM_CROSSBAR_ENABLE_SHIFT (0U) -#define RGX_TB_DRAM_CROSSBAR_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -#define RGX_TB_DRAM_CROSSBAR_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_DRAM_CROSSBAR (0x2128U) +#define RGX_TB_DRAM_CROSSBAR_MASKFULL 
(IMG_UINT64_C(0x0000000000003301)) +#define RGX_TB_DRAM_CROSSBAR_CHANNELS_SHIFT (12U) +#define RGX_TB_DRAM_CROSSBAR_CHANNELS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_SHIFT (8U) +#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_TB_DRAM_CROSSBAR_ENABLE_SHIFT (0U) +#define RGX_TB_DRAM_CROSSBAR_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_DRAM_CROSSBAR_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_LOCKUP +*/ +#define RGX_TB_LOCKUP (0x2130U) +#define RGX_TB_LOCKUP_MASKFULL (IMG_UINT64_C(0xFFFFFFFF00000001)) +#define RGX_TB_LOCKUP_CYCLES_BEFORE_SHIFT (32U) +#define RGX_TB_LOCKUP_CYCLES_BEFORE_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_LOCKUP_ENABLE_DETECT_SHIFT (0U) +#define RGX_TB_LOCKUP_ENABLE_DETECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_LOCKUP_ENABLE_DETECT_EN (IMG_UINT64_C(0x0000000000000001)) /* Register RGX_TB_SOC_TIMER */ -#define RGX_TB_SOC_TIMER (0x2140U) -#define RGX_TB_SOC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -#define RGX_TB_SOC_TIMER_COUNT_SHIFT (0U) -#define RGX_TB_SOC_TIMER_COUNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) +#define RGX_TB_SOC_TIMER (0x2140U) +#define RGX_TB_SOC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_TB_SOC_TIMER_COUNT_SHIFT (0U) +#define RGX_TB_SOC_TIMER_COUNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) /* Register RGX_TB_PROGRAMABLE_CLK_DIV */ -#define RGX_TB_PROGRAMABLE_CLK_DIV (0x2150U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) -#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_SHIFT (11U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_CLRMSK (0xFFFFF7FFU) -#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_EN (0x00000800U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_SHIFT (9U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_CLRMSK (0xFFFFF9FFU) -#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_SHIFT (5U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_CLRMSK (0xFFFFFE1FU) -#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_SHIFT (1U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_CLRMSK (0xFFFFFFE1U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_SHIFT (0U) -#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_CLRMSK (0xFFFFFFFEU) -#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_EN (0x00000001U) +#define RGX_TB_PROGRAMABLE_CLK_DIV (0x2150U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_SHIFT (11U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_CLRMSK (0xFFFFF7FFU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_EN (0x00000800U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_SHIFT (9U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_CLRMSK (0xFFFFF9FFU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_SHIFT (5U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_CLRMSK (0xFFFFFE1FU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_SHIFT (1U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_CLRMSK (0xFFFFFFE1U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_SHIFT (0U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_EN (0x00000001U) /* Register RGX_TB_GPIO_FREQ_CTRL */ -#define RGX_TB_GPIO_FREQ_CTRL (0x2160U) -#define RGX_TB_GPIO_FREQ_CTRL_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -#define RGX_TB_GPIO_FREQ_CTRL_COUNT_SHIFT (1U) -#define RGX_TB_GPIO_FREQ_CTRL_COUNT_CLRMSK (0xFFFFFFE1U) -#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_SHIFT (0U) -#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_EN 
(0x00000001U) +#define RGX_TB_GPIO_FREQ_CTRL (0x2160U) +#define RGX_TB_GPIO_FREQ_CTRL_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_TB_GPIO_FREQ_CTRL_COUNT_SHIFT (1U) +#define RGX_TB_GPIO_FREQ_CTRL_COUNT_CLRMSK (0xFFFFFFE1U) +#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_SHIFT (0U) +#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_EN (0x00000001U) /* Register RGX_TB_GPIO_MODE */ -#define RGX_TB_GPIO_MODE (0x2170U) -#define RGX_TB_GPIO_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -#define RGX_TB_GPIO_MODE_PROTOCOL_SHIFT (0U) -#define RGX_TB_GPIO_MODE_PROTOCOL_CLRMSK (0xFFFFFFFCU) +#define RGX_TB_GPIO_MODE (0x2170U) +#define RGX_TB_GPIO_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_GPIO_MODE_PROTOCOL_SHIFT (0U) +#define RGX_TB_GPIO_MODE_PROTOCOL_CLRMSK (0xFFFFFFFCU) + + +/* + Register group: RGX_TB_SAFETY_STATUS, with 8 repeats +*/ +#define RGX_TB_SAFETY_STATUS_REPEATCOUNT (8U) +/* + Register RGX_TB_SAFETY_STATUS0 +*/ +#define RGX_TB_SAFETY_STATUS0 (0x6000U) +#define RGX_TB_SAFETY_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS0_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS0_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS1 +*/ +#define RGX_TB_SAFETY_STATUS1 (0x6008U) +#define RGX_TB_SAFETY_STATUS1_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS1_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS1_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS2 +*/ +#define RGX_TB_SAFETY_STATUS2 (0x6010U) +#define RGX_TB_SAFETY_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS2_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS2_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS3 +*/ +#define RGX_TB_SAFETY_STATUS3 (0x6018U) +#define RGX_TB_SAFETY_STATUS3_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS3_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS3_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS4 +*/ +#define RGX_TB_SAFETY_STATUS4 (0x6020U) +#define RGX_TB_SAFETY_STATUS4_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS4_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS4_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS5 +*/ +#define RGX_TB_SAFETY_STATUS5 (0x6028U) +#define RGX_TB_SAFETY_STATUS5_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS5_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS5_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS6 +*/ +#define RGX_TB_SAFETY_STATUS6 (0x6030U) +#define RGX_TB_SAFETY_STATUS6_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS6_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS6_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_STATUS7 +*/ +#define RGX_TB_SAFETY_STATUS7 (0x6038U) +#define RGX_TB_SAFETY_STATUS7_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_STATUS7_SAFETY_IRQ_SHIFT (0U) +#define RGX_TB_SAFETY_STATUS7_SAFETY_IRQ_CLRMSK (0xFFFFFFFCU) + + +/* + Register group: RGX_TB_SAFETY_IRQ_ENABLE, with 8 repeats +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE_REPEATCOUNT (8U) +/* + Register RGX_TB_SAFETY_IRQ_ENABLE0 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE0 (0x6040U) +#define RGX_TB_SAFETY_IRQ_ENABLE0_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE0_SAFETY_IRQ_EN_SHIFT (0U) +#define 
RGX_TB_SAFETY_IRQ_ENABLE0_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE1 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE1 (0x6048U) +#define RGX_TB_SAFETY_IRQ_ENABLE1_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE1_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE1_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE2 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE2 (0x6050U) +#define RGX_TB_SAFETY_IRQ_ENABLE2_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE2_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE2_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE3 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE3 (0x6058U) +#define RGX_TB_SAFETY_IRQ_ENABLE3_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE3_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE3_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE4 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE4 (0x6060U) +#define RGX_TB_SAFETY_IRQ_ENABLE4_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE4_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE4_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE5 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE5 (0x6068U) +#define RGX_TB_SAFETY_IRQ_ENABLE5_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE5_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE5_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE6 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE6 (0x6070U) +#define RGX_TB_SAFETY_IRQ_ENABLE6_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE6_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE6_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_SAFETY_IRQ_ENABLE7 +*/ +#define RGX_TB_SAFETY_IRQ_ENABLE7 (0x6078U) +#define RGX_TB_SAFETY_IRQ_ENABLE7_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_SAFETY_IRQ_ENABLE7_SAFETY_IRQ_EN_SHIFT (0U) +#define RGX_TB_SAFETY_IRQ_ENABLE7_SAFETY_IRQ_EN_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_TB_FAULT_MASK +*/ +#define RGX_TB_FAULT_MASK (0x6080U) +#define RGX_TB_FAULT_MASK_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_FAULT_MASK_MASK_FAULT_EVENT_SHIFT (0U) +#define RGX_TB_FAULT_MASK_MASK_FAULT_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_FAULT_MASK_MASK_FAULT_EVENT_EN (0x00000001U) + + +/* + Register group: RGX_TB_GPU_PARITY_ERROR_CTRL, with 8 repeats +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL_REPEATCOUNT (8U) +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL0 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL0 (0x60A0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL0_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL1 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL1 (0x60A8U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_MASKFULL 
(IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL1_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL2 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL2 (0x60B0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL2_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL3 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL3 (0x60B8U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL3_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL4 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL4 (0x60C0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL4_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL5 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL5 (0x60C8U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_INSERT_SHIFT (0U) +#define 
RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL5_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL6 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL6 (0x60D0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL6_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_GPU_PARITY_ERROR_CTRL7 +*/ +#define RGX_TB_GPU_PARITY_ERROR_CTRL7 (0x60D8U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPU_PARITY_ERROR_CTRL7_GPU_ERROR_INSERT_EN (0x00000001U) + + +/* + Register group: RGX_TB_SFTY_PARITY_ERROR_CTRL, with 8 repeats +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL_REPEATCOUNT (8U) +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL0 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0 (0x60E0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL0_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL1 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1 (0x60E8U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL1_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL2 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2 (0x60F0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_COUNT_SHIFT (16U) +#define 
RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL2_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL3 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3 (0x60F8U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL3_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL4 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4 (0x6100U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL4_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL5 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5 (0x6108U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL5_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL6 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6 (0x6110U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL6_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_SFTY_PARITY_ERROR_CTRL7 +*/ +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7 (0x6118U) +#define 
RGX_TB_SFTY_PARITY_ERROR_CTRL7_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_SFTY_PARITY_ERROR_CTRL7_ERROR_INSERT_EN (0x00000001U) + + +/* + Register group: RGX_TB_XPU_PARITY_ERROR_CTRL, with 8 repeats +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL_REPEATCOUNT (8U) +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL0 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL0 (0x6120U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL0_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL1 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL1 (0x6128U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL1_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL2 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL2 (0x6130U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL2_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL3 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL3 (0x6138U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_INSERT_SHIFT (0U) +#define 
RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL3_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL4 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL4 (0x6140U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL4_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL5 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL5 (0x6148U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL5_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL6 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL6 (0x6150U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL6_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_XPU_PARITY_ERROR_CTRL7 +*/ +#define RGX_TB_XPU_PARITY_ERROR_CTRL7 (0x6158U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_XPU_PARITY_ERROR_CTRL7_ERROR_INSERT_EN (0x00000001U) + + +/* + Register group: RGX_TB_MEM_PARITY_ERROR_CTRL, with 8 repeats +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL_REPEATCOUNT (8U) +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL0 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL0 (0x6160U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_CHECK_SHIFT (1U) 
+#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL0_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL1 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL1 (0x6168U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL1_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL2 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL2 (0x6170U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL2_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL3 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL3 (0x6178U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL3_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL4 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL4 (0x6180U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL4_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL5 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL5 (0x6188U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_COUNT_SHIFT (16U) +#define 
RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL5_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL6 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL6 (0x6190U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL6_ERROR_INSERT_EN (0x00000001U) + + +/* + Register RGX_TB_MEM_PARITY_ERROR_CTRL7 +*/ +#define RGX_TB_MEM_PARITY_ERROR_CTRL7 (0x6198U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_MASKFULL (IMG_UINT64_C(0x0000000000FF0003)) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_COUNT_SHIFT (16U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_COUNT_CLRMSK (0xFF00FFFFU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_CHECK_SHIFT (1U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_CHECK_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_CHECK_EN (0x00000002U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_INSERT_SHIFT (0U) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_INSERT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_MEM_PARITY_ERROR_CTRL7_ERROR_INSERT_EN (0x00000001U) #endif /* RGXTBDEFS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/rgxpmdefs.h b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/rgxpmdefs.h index 0ae6a2c05683..5db7c2585356 100644 --- a/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/rgxpmdefs.h +++ b/drivers/gpu/drm/img/img-volcanic/hwdefs/volcanic/rgxpmdefs.h @@ -42,7 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* **** Autogenerated C -- do not edit **** */ /* - * rogue_pm.def: #13 + * rogue_pm.def: #15 */ @@ -53,7 +53,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_defs.h" -#define RGXPMDEFS_REVISION 13 +#define RGXPMDEFS_REVISION 15 /* The mini PB size on a per-RT basis @@ -3640,7 +3640,7 @@ Base address of the free stack - points to the bottom of the stack. #define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } #define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (5)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (27))) -#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ +#endif /* defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ #if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) @@ -3731,7 +3731,7 @@ Maximum addressable range supported by hardware is 1 TB. 
#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U) #define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U) #define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL) -#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ +#endif /* defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ #if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) @@ -3772,7 +3772,7 @@ Maximum range supported by hardware is 23 bits. #define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_UPPER (16777215U) -#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE) +#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE) /* The PM Render Context Buffer Layout */ @@ -3915,10 +3915,10 @@ The number of entries on the ALIST. Must be initialised to zero, meaning no pag #define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } #define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) -#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&PM_BYTE_ALIGNED_BASE_ADDRESSES&&!SINGLE_TE_VSPACE */ +#endif /* defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE) */ -#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE) +#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE) /* The PM Render Context Buffer Layout */ @@ -4027,7 +4027,7 @@ The number of entries on the ALIST. Must be initialised to zero, meaning no pag #define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } #define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) -#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&!PM_BYTE_ALIGNED_BASE_ADDRESSES&&!SINGLE_TE_VSPACE */ +#endif /* defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE) */ #if defined(RGX_FEATURE_SINGLE_TE_VSPACE) @@ -4402,10 +4402,10 @@ The number of entries on the ALIST. 
#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); } #define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32))) -#endif /* RGX_FEATURE_SINGLE_TE_VSPACE */ +#endif /* defined(RGX_FEATURE_SINGLE_TE_VSPACE) */ -#if !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) +#if !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) /* The PM Render Context Buffer Layout */ @@ -4897,7 +4897,7 @@ The number of entries on the ALIST. #define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); } #define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32))) -#endif /* !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) */ +#endif /* !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) */ /* @@ -4918,7 +4918,7 @@ Maximum addressable range supported by hardware is 1 TB. #define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U) #define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U) #define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL) -#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ +#endif /* defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ #if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) @@ -4953,7 +4953,7 @@ Maximum addressable range supported by hardware is 1 TB. #define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U) #define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U) #define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL) -#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ +#endif /* defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ #if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) @@ -4996,7 +4996,7 @@ Maximum addressable range supported by hardware is 1 TB. 
#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U) #define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U) #define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL) -#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ +#endif /* defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ #if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) diff --git a/drivers/gpu/drm/img/img-volcanic/include/devicemem_typedefs.h b/drivers/gpu/drm/img/img-volcanic/include/devicemem_typedefs.h index ca3929fc88dc..d51c099eed7a 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/devicemem_typedefs.h +++ b/drivers/gpu/drm/img/img-volcanic/include/devicemem_typedefs.h @@ -112,23 +112,6 @@ typedef IMG_UINT32 SPARSE_MEM_RESIZE_FLAGS; #define SPARSE_RESIZE_BOTH (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE) - /* This should be set to silently swap underlying physical memory - * without disturbing its device or cpu virtual maps. - * This flag is not supported in the case of PDUMP and could lead to - * PDUMP panic when used. - */ -#define SPARSE_REMAP_MEM 4U - - /* Should be set to get the sparse changes appear in cpu virtual map */ -#define SPARSE_MAP_CPU_ADDR 8U - - -/* To be used with all the sparse allocations that gets mapped to CPU Virtual - * space. The sparse allocation CPU mapping is torn down and re-mapped every - * time the sparse allocation layout changes. - */ -#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1 - /* To use with DevmemSubAllocate() as the default factor if no over-allocation * is desired. */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/dllist.h b/drivers/gpu/drm/img/img-volcanic/include/dllist.h index 5cc699871d71..f59c26356a87 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/dllist.h +++ b/drivers/gpu/drm/img/img-volcanic/include/dllist.h @@ -54,7 +54,23 @@ typedef struct DLLIST_NODE_TAG *PDLLIST_NODE; /*! - Node in a linked list + Node in a linked list. + A list is comprised of a single list head and 0 to n nodes. + The head and nodes are all represented by `DLLIST_NODE`'s. + The head node is a special sentinel and should not be associated with + any elements in the list. + + For example, the following list of fruits has 3 elements: + `... <--> "banana" <--> "apple" <--> head <--> "orange" <--> ...` + Therefore, using dllist_foreach_*(head) will iterate just the 3 fruits. + + This is important because a `dllist_is_empty` is when the number of + elements == 0 is equivalent to when the head points to itself. + Proper use of the dllist_* functions requires a head. + + For example, the following list is improper: + `... <--> "banana" <--> "apple" <--> "orange" <--> ...` + as one element must be treated as the head, and therefore is ignored. */ /* * Note: the following structure's size is architecture-dependent and clients @@ -149,10 +165,9 @@ void dllist_init(PDLLIST_NODE psListHead) */ /*****************************************************************************/ static INLINE -bool dllist_is_empty(PDLLIST_NODE psListHead) +bool dllist_is_empty(const DLLIST_NODE *const psListHead) { - return ((psListHead->psPrevNode == psListHead) - && (psListHead->psNextNode == psListHead)); + return (psListHead->psPrevNode == psListHead); } /*************************************************************************/ /*! 
@@ -214,7 +229,7 @@ void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) */ /*****************************************************************************/ static INLINE -bool dllist_node_is_in_list(PDLLIST_NODE psNode) +bool dllist_node_is_in_list(const DLLIST_NODE *const psNode) { return (psNode->psNextNode != NULL); } diff --git a/drivers/gpu/drm/img/img-volcanic/include/drm/pvr_drm.h b/drivers/gpu/drm/img/img-volcanic/include/drm/pvr_drm.h index c0d00c98d8c0..bc76ddf9b597 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/drm/pvr_drm.h +++ b/drivers/gpu/drm/img/img-volcanic/include/drm/pvr_drm.h @@ -85,12 +85,18 @@ struct pvr_sw_sync_create_fence_data { __u64 sync_pt_idx; }; +typedef struct pvr_sw_sync_create_fence_data pvr_exp_sync_create_fence_data_t; + struct pvr_sw_timeline_advance_data { __u64 sync_pt_idx; }; #define PVR_SRVKM_SERVICES_INIT 1 #define PVR_SRVKM_SYNC_INIT 2 +#define PVR_SRVKM_SYNC_EXP_FENCE_INIT 3 +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#define PVR_SRVKM_SERVICES_PAGE_MIGRATE_INIT 4 +#endif struct drm_pvr_srvkm_init_data { __u32 init_module; }; @@ -119,6 +125,10 @@ struct drm_pvr_srvkm_init_data { /* PVR Services Render Device Init command */ #define DRM_PVR_SRVKM_INIT 5 +/* PVR Services Export Fence Sync commands */ +#define DRM_PVR_EXP_FENCE_SYNC_FORCE_CMD 6 +#define DRM_PVR_SYNC_CREATE_EXPORT_FENCE_CMD 7 + /* These defines must be prefixed with "DRM_IOCTL_". */ #define DRM_IOCTL_PVR_SRVKM_CMD \ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \ @@ -143,4 +153,12 @@ struct drm_pvr_srvkm_init_data { DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SRVKM_INIT, \ struct drm_pvr_srvkm_init_data) +#define DRM_IOCTL_PVR_EXP_FENCE_SYNC_FORCE_CMD \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_EXP_FENCE_SYNC_FORCE_CMD, \ + pvr_exp_sync_create_fence_data_t) + +#define DRM_IOCTL_PVR_SYNC_CREATE_EXPORT_FENCE_CMD \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SYNC_CREATE_EXPORT_FENCE_CMD, \ + pvr_exp_sync_create_fence_data_t) + #endif /* defined(__PVR_DRM_H__) */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/img_3dtypes.h b/drivers/gpu/drm/img/img-volcanic/include/img_3dtypes.h index 916e3a1eedc4..102c6eeda959 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/img_3dtypes.h +++ b/drivers/gpu/drm/img/img-volcanic/include/img_3dtypes.h @@ -188,6 +188,7 @@ typedef enum _IMG_FILTER_ */ typedef enum _IMG_ADDRESSMODE_ { + IMG_ADDRESSMODE_DONTCARE, IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */ IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */ IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */ @@ -195,7 +196,6 @@ typedef enum _IMG_ADDRESSMODE_ IMG_ADDRESSMODE_CLAMPBORDER, IMG_ADDRESSMODE_OGL_CLAMP, IMG_ADDRESSMODE_OVG_TILEFILL, - IMG_ADDRESSMODE_DONTCARE, } IMG_ADDRESSMODE; /** diff --git a/drivers/gpu/drm/img/img-volcanic/include/img_defs.h b/drivers/gpu/drm/img/img-volcanic/include/img_defs.h index ed269e36b859..4cb065edb788 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/img_defs.h +++ b/drivers/gpu/drm/img/img-volcanic/include/img_defs.h @@ -51,7 +51,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #endif #if !(defined(__linux__) && defined(__KERNEL__)) +#if defined(__riscv) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wundef" +#endif #include +#if defined(__riscv) +#pragma GCC diagnostic pop +#endif #endif #include "img_types.h" @@ -88,6 +95,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define GCC_VERSION_AT_LEAST(major, minor) 0 #endif +#if defined(__clang__) +#define CLANG_VERSION_AT_LEAST(major) \ + (__clang_major__ >= (major)) +#else +#define CLANG_VERSION_AT_LEAST(major) 0 +#endif + /* Use Clang's __has_extension and __has_builtin macros if available. */ #if defined(__has_extension) #define has_clang_extension(e) __has_extension(e) @@ -196,11 +210,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /*! Macro to calculate the n-byte aligned value from that supplied rounding up. * n must be a power of two. - * - * Both arguments should be of a type with the same size otherwise the macro may - * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n. */ -#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1U)) & ~((_n)-1U)) +#define PVR_ALIGN(_x, _n) ((((_x)+((_n)-1U))|((_n)-1U))^((_n)-1U)) #if defined(_WIN32) @@ -268,7 +279,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define IMG_INTERNAL #define IMG_EXPORT #define IMG_CALLCONV - #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) + #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) || defined(__APPLE__) || defined(TEE_DDK) #define IMG_CALLCONV #define C_CALLCONV @@ -320,7 +331,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #if !defined(__fallthrough) - #if GCC_VERSION_AT_LEAST(7, 0) + #if (GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10)) && !defined(__CHECKER__) #define __fallthrough __attribute__((__fallthrough__)) #else #define __fallthrough @@ -346,14 +357,18 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* That one compiler that supports attributes but doesn't support * the printf attribute... */ #if defined(__GNUC__) - #define __printf(fmt, va) __attribute__((format(printf, (fmt), (va)))) + #if defined(__MINGW32__) + #define __printf(fmt, va) __attribute__((format(gnu_printf, (fmt), (va)))) + #else + #define __printf(fmt, va) __attribute__((format(printf, (fmt), (va)))) + #endif #else #define __printf(fmt, va) #endif /* defined(__GNUC__) */ #if defined(__cplusplus) && (__cplusplus >= 201703L) #define __fallthrough [[fallthrough]] - #elif GCC_VERSION_AT_LEAST(7, 0) + #elif GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10) #define __fallthrough __attribute__((__fallthrough__)) #else #define __fallthrough @@ -422,6 +437,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define unlikely(x) (x) #endif +#if !defined(BITS_PER_BYTE) +#define BITS_PER_BYTE (8) +#endif /* BITS_PER_BYTE */ + /* These two macros are also provided by the kernel */ #ifndef BIT #define BIT(b) (1UL << (b)) @@ -456,7 +475,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y); - #if defined(__linux__) && defined(__KERNEL__) #include #include @@ -529,6 +547,32 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define IMG_STRINGIFY_IMPL(x) # x #define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x) +#define IMG_CONCATENATE_IMPL(x,y) x ## y +#define IMG_CONCATENATE(x,y) IMG_CONCATENATE_IMPL(x,y) + +#if defined(DEBUG) && !defined(INTEGRITY_OS) +#define IMG_PAGESLOG2BYTES(_tcast, _npages, _log2) \ + ({ \ + PVR_ASSERT( ((IMG_UINT64)(1ULL) << (sizeof(_tcast)*8UL)) >= ((IMG_UINT64)(_npages) << (_log2)) ); \ + (_tcast)(_npages) << (_log2); \ + }) +#else +#define IMG_PAGESLOG2BYTES(_tcast, _npages, _log2) ((_npages) << (_log2)) +#endif + +#define IMG_PAGE2BYTES32(logsize) IMG_PAGESLOG2BYTES(IMG_UINT32,IMG_UINT32_C(1),logsize) +#define IMG_PAGE2BYTES64(logsize) ((IMG_UINT64)IMG_UINT64_C(1) << (logsize)) + +#define IMG_PAGES2BYTES32(pages,logsize) IMG_PAGESLOG2BYTES(IMG_UINT32,pages,logsize) +#define IMG_PAGES2BYTES64(pages,logsize) ((IMG_UINT64)(pages) << (logsize)) + +#define IMG_PAGE_SHIFT_4KB 12U +#define IMG_PAGE_SHIFT_16KB 14U +#define IMG_PAGE_SHIFT_64KB 16U +#define IMG_PAGE_SHIFT_256KB 18U +#define IMG_PAGE_SHIFT_1MB 20U +#define IMG_PAGE_SHIFT_2MB 21U + #if defined(INTEGRITY_OS) /* Definitions not present in INTEGRITY. */ #define PATH_MAX 200 @@ -567,6 +611,18 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define NOLDSTOPT_VOID #endif +#define PVR_PRE_DPF (void) printf + +/* C STD >= C99 */ +#if !defined(INTEGRITY_OS) && !defined(__CHECKER__) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +#define IMG_FLEX_ARRAY_MEMBER +#define IMG_FLEX_ARRAY_SIZE(size, count) ((size) * (count)) +#else +/* In C STD prior to C99 flexible array members are an extension feature and syntax requires alternative approach */ +#define IMG_FLEX_ARRAY_MEMBER (1) +#define IMG_FLEX_ARRAY_SIZE(size, count) ((size) * ((count) - 1)) +#endif + #endif /* IMG_DEFS_H */ /***************************************************************************** End of file (img_defs.h) diff --git a/drivers/gpu/drm/img/img-volcanic/include/img_drm_fourcc_internal.h b/drivers/gpu/drm/img/img-volcanic/include/img_drm_fourcc_internal.h index ee88e90cde1e..ffde181fcb48 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/img_drm_fourcc_internal.h +++ b/drivers/gpu/drm/img/img-volcanic/include/img_drm_fourcc_internal.h @@ -57,7 +57,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * - identifier for our driver: PVR * - category: FBCDC * - compression tile dimension: 8x8, 16x4, 32x2 - * - FBDC version: V0, V1, V2, V3, V7, V8, V10, V12 + * - FBDC version: V0, V1, V2, V3, V7, V8, V10, V12, V13, V14 */ #define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0 fourcc_mod_code(PVR, 1) #define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0_FIX fourcc_mod_code(PVR, 2) /* Fix for HW_BRN_37464 */ @@ -69,9 +69,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 - moved to the public header */ /* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 - moved to the public header */ /* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 - moved to the public header */ -/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY25_V13 - moved to the public header */ -/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY50_V13 - moved to the public header */ -/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY75_V13 - moved to the public header */ +/* DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13 - moved to the public header */ +/* DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13 - moved to the public header */ +/* DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13 - moved to the public header */ #define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0 fourcc_mod_code(PVR, 7) #define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0_FIX fourcc_mod_code(PVR, 8) /* Fix for HW_BRN_37464 */ /* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 - moved to the public header */ @@ -82,9 +82,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 - moved to the public header */ /* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 - moved to the public header */ /* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 - moved to the public header */ -/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY25_V13 - moved to the public header */ -/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY50_V13 - moved to the public header */ -/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY75_V13 - moved to the public header */ +/* DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13 - moved to the public header */ +/* DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 - moved to the public header */ +/* DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 - moved to the public header */ #define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V1 fourcc_mod_code(PVR, 13) #define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V3 fourcc_mod_code(PVR, 14) #define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V8 fourcc_mod_code(PVR, 20) diff --git a/drivers/gpu/drm/img/img-volcanic/include/img_types.h b/drivers/gpu/drm/img/img-volcanic/include/img_types.h index 4ffe08d6d582..c18c5da47da5 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/img_types.h +++ b/drivers/gpu/drm/img/img-volcanic/include/img_types.h @@ -69,11 +69,20 @@ extern "C" { #include #include "kernel_types.h" #elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \ - defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv) + defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv) || \ + defined(__APPLE__) || defined(TEE_DDK) #include /* NULL */ #include +#if defined(__riscv) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wundef" +#endif #include /* intX_t/uintX_t, format specifiers */ #include /* INT_MIN, etc */ + #include /* ssize_t */ +#if defined(__riscv) +#pragma GCC diagnostic pop +#endif #include /* bool */ #elif defined(__mips) #include /* NULL */ @@ -142,10 +151,14 @@ typedef int64_t IMG_INT64; #define IMG_UINT64_FMTSPECo PRIo64 #define IMG_INT64_FMTSPECd PRId64 +#define IMG_UINT8_MAX UINT8_MAX #define IMG_UINT16_MAX UINT16_MAX #define IMG_UINT32_MAX UINT32_MAX #define IMG_UINT64_MAX UINT64_MAX +#define IMG_INT_MIN INT_MIN +#define IMG_INT_MAX INT_MAX +#define IMG_INT8_MAX INT8_MAX #define IMG_INT16_MAX INT16_MAX #define IMG_INT32_MAX INT32_MAX #define IMG_INT64_MAX INT64_MAX @@ -164,15 +177,13 @@ typedef int IMG_SECURE_TYPE; typedef bool IMG_BOOL; typedef bool* IMG_PBOOL; -#define IMG_FALSE false -#define IMG_TRUE true +#define IMG_FALSE ((bool) 0) +#define IMG_TRUE ((bool) 1) -#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) typedef IMG_CHAR const* IMG_PCCHAR; -#endif /* 
Format specifiers for 'size_t' type */ -#if defined(_MSC_VER) || defined(__MINGW32__) +#if defined(_MSC_VER) #define IMG_SIZE_FMTSPEC "%Iu" #define IMG_SIZE_FMTSPECX "%Ix" #else diff --git a/drivers/gpu/drm/img/img-volcanic/include/lock_types.h b/drivers/gpu/drm/img/img-volcanic/include/lock_types.h index 370ffc025d05..bf7167b554f6 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/lock_types.h +++ b/drivers/gpu/drm/img/img-volcanic/include/lock_types.h @@ -66,17 +66,17 @@ typedef struct OS_LOCK_TAG *POS_LOCK; typedef struct OSWR_LOCK_TAG *POSWR_LOCK; #else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ typedef struct OSWR_LOCK_TAG { - IMG_UINT32 ui32Dummy; + IMG_UINT32 ui32Unused; } *POSWR_LOCK; #endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ -#if defined(__linux__) +#if defined(__linux__) || defined(__APPLE__) typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; #elif defined(__QNXNTO__) typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; #elif defined(_WIN32) /* - * Dummy definition. WDDM doesn't use Services, but some headers + * Placeholder definition. WDDM doesn't use Services, but some headers * still have to be shared. This is one such case. */ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; diff --git a/drivers/gpu/drm/img/img-volcanic/include/log2.h b/drivers/gpu/drm/img/img-volcanic/include/log2.h index 30f801e17a32..795660400850 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/log2.h +++ b/drivers/gpu/drm/img/img-volcanic/include/log2.h @@ -270,7 +270,7 @@ static INLINE uint32_t __const_function FloorLog2(uint32_t n) { uint32_t ui32log2 = 0; - while ((n >>= 1) != 0U) + for (n >>= 1; n != 0U; n >>= 1) { ui32log2++; } @@ -287,7 +287,7 @@ static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) { uint32_t ui32log2 = 0; - while ((n >>= 1) != 0U) + for (n >>= 1; n != 0U; n >>= 1) { ui32log2++; } diff --git a/drivers/gpu/drm/img/img-volcanic/include/multicore_defs.h b/drivers/gpu/drm/img/img-volcanic/include/multicore_defs.h index 2ca4e064d886..b410f57075f4 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/multicore_defs.h +++ b/drivers/gpu/drm/img/img-volcanic/include/multicore_defs.h @@ -50,4 +50,4 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_MULTICORE_CAPABILITY_PRIMARY_EN (0x00000008U) #define RGX_MULTICORE_ID_CLRMSK (0xFFFFFFF8U) -#endif /* RGX_MULTICORE_DEFS_H */ +#endif /* RGX_MULTICORE_DEFS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/osfunc_common.h b/drivers/gpu/drm/img/img-volcanic/include/osfunc_common.h index 6c0a28698ff0..d93cb6d61aba 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/osfunc_common.h +++ b/drivers/gpu/drm/img/img-volcanic/include/osfunc_common.h @@ -110,19 +110,28 @@ void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size); /**************************************************************************/ /*! -@Function StringLCopy -@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. - If no null byte ('\0') is contained within the first uDataSize-1 - characters of the source string, the destination string will be - truncated. If the length of the source string is less than uDataSize - an additional NUL byte will be copied to the destination string - to ensure that the string is NUL-terminated. 
-@Input pszDest char pointer to the destination string -@Input pszSrc const char pointer to the source string -@Input uDataSize the maximum number of bytes to be copied -@Return Size of the source string +@Function OSStringSafeCopy +@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. pszDest + is always null-terminated. If no null byte ('\0') is contained + within the first uDataSize-1 characters of the source string, + the destination will contain the truncated source string, + otherwise pszDest will contain the entire source string. +@Input pszDest char pointer to the destination string +@Input pszSrc const char pointer to the source string +@Input uDataSize the maximum number of bytes to be copied; this + should normally be the size of the destination + buffer +@Return Number of bytes copied if uDataSize > strlen(pszSrc), negative + value otherwise */ /**************************************************************************/ -size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); +#if defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG) +static inline ssize_t OSStringSafeCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +{ + return strscpy(pszDest, pszSrc, uDataSize); +} +#else +ssize_t OSStringSafeCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); +#endif #if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) #if defined(__GNUC__) @@ -156,7 +165,7 @@ size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); */ /**************************************************************************/ #define OSDeviceMemSet(a,b,c) \ do { \ - if ((c) != 0) \ + if ((c) != 0U) \ { \ (void) memset((a), (b), (c)); \ (void) *(volatile IMG_UINT32*)((void*)(a)); \ @@ -176,7 +185,7 @@ size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); */ /**************************************************************************/ #define OSDeviceMemCopy(a,b,c) \ do { \ - if ((c) != 0) \ + if ((c) != 0U) \ { \ memcpy((a), (b), (c)); \ (void) *(volatile IMG_UINT32*)((void*)(a)); \ @@ -226,7 +235,7 @@ size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); */ /**************************************************************************/ #define OSCachedMemSetWMB(a,b,c) \ do { \ - if ((c) != 0) \ + if ((c) != 0U) \ { \ (void) memset((a), (b), (c)); \ OSWriteMemoryBarrier(a); \ @@ -246,7 +255,7 @@ size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); */ /**************************************************************************/ #define OSCachedMemCopyWMB(a,b,c) \ do { \ - if ((c) != 0) \ + if ((c) != 0U) \ { \ (void) memcpy((a), (b), (c)); \ OSWriteMemoryBarrier(a); \ @@ -254,25 +263,6 @@ size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); } while (false) #endif /* defined(__KERNEL__) */ -/**************************************************************************/ /*! -@Function OSStringLCopy -@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. - If no null byte ('\0') is contained within the first uDataSize-1 - characters of the source string, the destination string will be - truncated. If the length of the source string is less than uDataSize - an additional NUL byte will be copied to the destination string - to ensure that the string is NUL-terminated. 
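The OSStringSafeCopy() replacement above also changes the return convention: on Linux kernel non-DEBUG builds it maps directly onto strscpy(), which returns the number of bytes copied (excluding the terminator) or a negative value when the source had to be truncated, whereas the removed strlcpy-based OSStringLCopy() returned the full source length. A minimal portable sketch with the same documented contract (an illustration only, not the DDK's actual non-Linux implementation):

    #include <stddef.h>
    #include <string.h>
    #include <sys/types.h>   /* ssize_t */

    /* Copy at most uDataSize-1 bytes and always NUL-terminate pszDest.
     * Returns the number of bytes copied, or -1 on truncation or when
     * uDataSize is zero. */
    static ssize_t StringSafeCopySketch(char *pszDest, const char *pszSrc,
                                        size_t uDataSize)
    {
        size_t uSrcLen = strlen(pszSrc);

        if (uDataSize == 0)
        {
            return -1;                        /* nowhere to put the terminator */
        }

        if (uSrcLen >= uDataSize)
        {
            memcpy(pszDest, pszSrc, uDataSize - 1);
            pszDest[uDataSize - 1] = '\0';
            return -1;                        /* truncated */
        }

        memcpy(pszDest, pszSrc, uSrcLen + 1); /* includes the terminator */
        return (ssize_t)uSrcLen;
    }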
-@Input a char pointer to the destination string -@Input b const char pointer to the source string -@Input c the maximum number of bytes to be copied -@Return Size of the source string - */ /**************************************************************************/ -#if defined(__QNXNTO__) || (defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG)) -#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c)) -#else /* defined(__QNXNTO__) ... */ -#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c)) -#endif /* defined(__QNXNTO__) ... */ - #ifdef __cplusplus } #endif diff --git a/drivers/gpu/drm/img/img-volcanic/include/pdumpdefs.h b/drivers/gpu/drm/img/img-volcanic/include/pdumpdefs.h index 3f8cccabc824..7cea362a4152 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pdumpdefs.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pdumpdefs.h @@ -113,6 +113,10 @@ typedef enum _PDUMP_PIXEL_FORMAT_ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64, PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65, PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR10_PACK16 = 67, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB10_PACK16 = 68, + PVRSRV_PDUMP_PIXEL_FORMAT_BGRA10_PACK16 = 69, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA10_PACK16 = 70, PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff @@ -141,6 +145,9 @@ typedef enum _PDUMP_FBC_SWIZZLE_ #define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8 #define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT 11 +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_MASK 0x00000800 + #define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12 #define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000 @@ -150,6 +157,9 @@ typedef enum _PDUMP_FBC_SWIZZLE_ #define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24 #define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT 25 +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT 26 +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_MASK 0x0C000000 + #define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28 #define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000 @@ -165,6 +175,9 @@ typedef enum _PDUMP_FBC_SWIZZLE_ #define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) #define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_25_50_75 (0U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_25_37_50 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT) + #define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) #define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) #define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) @@ -174,6 +187,12 @@ typedef enum _PDUMP_FBC_SWIZZLE_ #define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) #define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_75 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_37 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_50 (2U << 
PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_25 (3U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) + #define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT) #define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT) @@ -206,7 +225,6 @@ typedef enum _PDUMP_POLL_OPERATOR /*! PDump MMU type - (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13) */ typedef enum { @@ -230,6 +248,7 @@ typedef enum #define PDUMP_STATE_CONNECTED (2U) /*!< Flag represents the PDump Client App being connected on not */ #define PDUMP_STATE_SUSPENDED (4U) /*!< Flag represents the PDump being suspended or not */ #define PDUMP_STATE_CAPTURE_IN_INTERVAL (8U) /*!< Flag represents the PDump being in a capture range interval */ +#define PDUMP_STATE_APP_TERMINATED (16U) /*!< Flag represents the PDump captured app has been terminated */ /*! PDump Capture modes diff --git a/drivers/gpu/drm/img/img-volcanic/include/pdumpdesc.h b/drivers/gpu/drm/img/img-volcanic/include/pdumpdesc.h index d159bf4ee334..273cde910e7f 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pdumpdesc.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pdumpdesc.h @@ -67,12 +67,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* - * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2 + * Header type (IMGBv3) - 'IMGB' in hex + VERSION 3 * Header size - 64 bytes */ #define IMAGE_HEADER_TYPE (0x42474D49) #define IMAGE_HEADER_SIZE (64) -#define IMAGE_HEADER_VERSION (2) +#define IMAGE_HEADER_VERSION (3) /* * Image type-specific fields @@ -188,6 +188,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_CLRMSK (0x00FF0000U) #define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN (1 << IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT) /* Treat YUV10 optimal formats as 8 bits */ +#define IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_SHIFT (24) +#define IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_CLRMSK (0xFF000000U) +#define IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_EN (1 << IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_SHIFT) /* Override lossy min channel setting */ + /* IMAGE_HEADER_WORD15_RESERVED2 */ /* diff --git a/drivers/gpu/drm/img/img-volcanic/include/public/powervr/img_drm_fourcc.h b/drivers/gpu/drm/img/img-volcanic/include/public/powervr/img_drm_fourcc.h index 5fd79a6c413e..d4717db1cccb 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/public/powervr/img_drm_fourcc.h +++ b/drivers/gpu/drm/img/img-volcanic/include/public/powervr/img_drm_fourcc.h @@ -137,4 +137,25 @@ THE SOFTWARE. 
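In the new pdumpdefs.h fields above, the 2-bit TFBC lossy mode field at bits 26..27 reuses encoding 1 for both the 75% and 37% modes, presumably disambiguated by the group-control bit at bit 11 (25_50_75 vs 25_37_50); that reading is an inference from the defines, not stated in the patch. A small sketch of packing and extracting the two fields with the same shifts and masks (names shortened):

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions copied from the pdumpdefs.h additions above. */
    #define TFBC_GROUP_SHIFT 11u
    #define TFBC_GROUP_MASK  0x00000800u
    #define TFBC_MODE_SHIFT  26u
    #define TFBC_MODE_MASK   0x0C000000u

    int main(void)
    {
        /* Address-mode word with group control 1 (25_37_50) and mode encoding 1. */
        uint32_t ui32AddrMode = (1u << TFBC_GROUP_SHIFT) | (1u << TFBC_MODE_SHIFT);

        uint32_t ui32Group = (ui32AddrMode & TFBC_GROUP_MASK) >> TFBC_GROUP_SHIFT;
        uint32_t ui32Mode  = (ui32AddrMode & TFBC_MODE_MASK)  >> TFBC_MODE_SHIFT;

        printf("group=%u mode=%u\n", ui32Group, ui32Mode);  /* group=1 mode=1 */
        return 0;
    }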
#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 fourcc_mod_code(PVR, 30) #define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 fourcc_mod_code(PVR, 31) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_8x8_V13 fourcc_mod_code(PVR, 32) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_16x4_V13 fourcc_mod_code(PVR, 33) + +#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V14 fourcc_mod_code(PVR, 34) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V14 fourcc_mod_code(PVR, 35) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_8x8_V14 fourcc_mod_code(PVR, 36) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V14 fourcc_mod_code(PVR, 37) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V14 fourcc_mod_code(PVR, 38) + +#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V14 fourcc_mod_code(PVR, 39) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V14 fourcc_mod_code(PVR, 40) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_16x4_V14 fourcc_mod_code(PVR, 41) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V14 fourcc_mod_code(PVR, 42) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V14 fourcc_mod_code(PVR, 43) + +#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V14 fourcc_mod_code(PVR, 44) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_32x2_V14 fourcc_mod_code(PVR, 45) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_32x2_V14 fourcc_mod_code(PVR, 46) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_32x2_V14 fourcc_mod_code(PVR, 47) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_32x2_V14 fourcc_mod_code(PVR, 48) + #endif /* IMG_DRM_FOURCC_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/public/powervr/pvrsrv_sync_ext.h b/drivers/gpu/drm/img/img-volcanic/include/public/powervr/pvrsrv_sync_ext.h index 30f7972444cd..933212e03c17 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/public/powervr/pvrsrv_sync_ext.h +++ b/drivers/gpu/drm/img/img-volcanic/include/public/powervr/pvrsrv_sync_ext.h @@ -53,6 +53,7 @@ extern "C" { */ typedef int32_t PVRSRV_FENCE; typedef int32_t PVRSRV_TIMELINE; +typedef int32_t PVRSRV_EXP_FENCE_CTX; /*! Maximum length for an annotation name string for fence sync model objects. */ @@ -62,6 +63,7 @@ typedef int32_t PVRSRV_TIMELINE; */ #define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1) #define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1) +#define PVRSRV_NO_EXP_FENCE_CTX ((PVRSRV_EXP_FENCE_CTX) -1) #define PVRSRV_NO_FENCE_PTR NULL #define PVRSRV_NO_TIMELINE_PTR NULL diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvr_debug.h b/drivers/gpu/drm/img/img-volcanic/include/pvr_debug.h index d7fc0686f9eb..46033a0da47e 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvr_debug.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvr_debug.h @@ -48,6 +48,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "pvrsrv_error.h" +/* If the kernel pre defined macro is present, we will use the definition of HTBLOGK + * in htbserver.h with the format strings specified in ht_buffer_sf.h. Otherwise, + * HTBLOGK needs to be defined as empty since it is not used by user-mode code. + */ +#if defined(__KERNEL__) +# include "htbserver.h" +# include "htbuffer_sf.h" +#else +# define HTBLOGK(...) ((void) 0) +#endif + /*! @cond Doxygen_Suppress */ #if defined(_MSC_VER) # define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) @@ -56,6 +67,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /*! 
@endcond */ +#if defined(__linux__) && defined(__KERNEL__) + #include + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + #include + #else + #include + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ +#else + #include +#endif /* __linux__ && __KERNEL__*/ + #if defined(__cplusplus) extern "C" { #endif @@ -214,7 +236,7 @@ PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, * macros in a special way when the code is analysed by Klocwork avoids * them. */ - #if defined(__KLOCWORK__) && !defined(SERVICES_SC) + #if defined(__KLOCWORK__) #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {klocwork_abort();} } while (false) #else #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */ @@ -286,155 +308,284 @@ PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, /* Get rid of the double bracketing */ #define PVR_DPF(x) PVR_DPF_EX x - #define PVR_LOG_ERROR(_rc, _call) \ - PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)) + #define PVR_LOG_ERROR(_rc, _call) do \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } while (false) #define PVR_LOG_IF_ERROR(_rc, _call) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ - } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_WARN_IF_ERROR(_rc, _call) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ - } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_WARNING, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \ - { if (unlikely(_expr == NULL)) { \ - PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ - return PVRSRV_ERROR_OUT_OF_MEMORY; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_expr == NULL)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_OUT_OF_MEMORY, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \ - { if (unlikely(_expr == NULL)) { \ - PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ - _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ - goto _go; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_expr == NULL)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_OUT_OF_MEMORY, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ + _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ + goto _go; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ - return _rc; } \ - 
MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + return _rc; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ - return; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + return; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ - goto _go; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + goto _go; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \ - { PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ _err = _rc; \ goto _go; \ - MSC_SUPPRESS_4127\ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_IF_FALSE(_expr, _msg) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ - } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ - return _rc; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return _rc; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ - return; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ - goto _go; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + goto _go; \ + } \ + MSC_SUPPRESS_4127 \ + } while (false) + + #define PVR_LOG_RETURN_IF_TRUE(_expr, _msg, _rc) do \ + { \ + if 
(unlikely((_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_T, PVRSRV_ERROR_UNEXPECTED_TRUE_EXPR, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return _rc; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ - return PVRSRV_ERROR_INVALID_PARAMS; } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_INVALID_PARAMS, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ + return PVRSRV_ERROR_INVALID_PARAMS; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ { if (unlikely(!(_expr))) { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_INVALID_PARAMS, HTB_FILE_NAME, __LINE__); \ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \ _err = PVRSRV_ERROR_INVALID_PARAMS; \ goto _go; } \ - MSC_SUPPRESS_4127\ + MSC_SUPPRESS_4127 \ } while (false) - #define PVR_LOG_MSG(_lvl, _msg) \ - PVR_DPF((_lvl, ("In %s() "_msg), __func__)) + #define PVR_LOG_MSG(_lvl, _msg) do \ + { \ + if (_lvl == PVR_DBG_ERROR) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_MSGLVL_ERROR, HTB_FILE_NAME, __LINE__); \ + } \ + else if (_lvl == PVR_DBG_WARNING) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_MSGLVL_WARN, HTB_FILE_NAME, __LINE__); \ + } \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__)); \ + } while (false) - #define PVR_LOG_VA(_lvl, _msg, ...) \ - PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)) + #define PVR_LOG_VA(_lvl, _msg, ...) do \ + { \ + if (_lvl == PVR_DBG_ERROR) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_MSGLVL_ERROR, HTB_FILE_NAME, __LINE__); \ + } \ + else if (_lvl == PVR_DBG_WARNING) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_MSGLVL_WARN, HTB_FILE_NAME, __LINE__); \ + } \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + } while (false) #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ - } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + if (_lvl == PVR_DBG_ERROR) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + } \ + else if (_lvl == PVR_DBG_WARNING) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_WARNING, _rc, HTB_FILE_NAME, __LINE__); \ + } \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ - } \ - MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + if (_lvl == PVR_DBG_ERROR) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_MSGLVL_ERROR, HTB_FILE_NAME, __LINE__); \ + } \ + else if (_lvl == PVR_DBG_WARNING) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_MSGLVL_WARN, HTB_FILE_NAME, __LINE__); \ + } \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) 
do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ - return _rc; \ - } MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + return _rc; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do \ - { if (unlikely(_rc != PVRSRV_OK)) { \ - PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ - goto _go; \ - } MSC_SUPPRESS_4127\ + { \ + if (unlikely(_rc != PVRSRV_OK)) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + goto _go; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ - return _rc; \ - } MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ + return _rc; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do \ - { if (unlikely(!(_expr))) { \ - PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ - goto _go; \ - } MSC_SUPPRESS_4127\ + { \ + if (unlikely(!(_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + goto _go; \ + } \ + MSC_SUPPRESS_4127 \ + } while (false) + + #define PVR_LOG_RETURN_IF_TRUE_VA(_expr, _rc, _msg, ...) do \ + { \ + if (unlikely((_expr))) \ + { \ + HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, HTB_FILE_NAME, __LINE__); \ + PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ + return _rc; \ + } \ + MSC_SUPPRESS_4127 \ } while (false) #else /* defined(PVRSRV_NEED_PVR_DPF) */ @@ -465,6 +616,9 @@ PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) + #define PVR_LOG_RETURN_IF_TRUE(_expr, _msg, _rc) do { if (unlikely((_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) + #define PVR_LOG_RETURN_IF_TRUE_VA(_expr, _rc, _msg, ...) do { if (unlikely((_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) + #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (false) #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) @@ -596,7 +750,7 @@ IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); #define PVR_GOTO_IF_ERROR(_rc, _go) do \ { if (unlikely(_rc != PVRSRV_OK)) { \ goto _go; } \ - MSC_SUPPRESS_4127\ + MSC_SUPPRESS_4127 \ } while (false) /* Note: Use only when a log message due to the error absolutely should not @@ -648,7 +802,7 @@ IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); #endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ /*! 
@endcond */ -#if (defined(__KERNEL__) || defined(SUPPORT_SERVICES_SC_UNITTESTS_SERVER))|| defined(DOXYGEN) || defined(__QNXNTO__) +#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__) /*Use PVR_DPF() unless message is necessary in release build */ #define PVR_LOG(X) PVRSRVReleasePrintf X @@ -663,6 +817,19 @@ IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); @Return None */ /**************************************************************************/ void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2); + + /*************************************************************************/ /*! + @Function PVRSRVReleasePrintfVArgs + @Description Output an important message, using an OS-specific method, + to the Server log or console which will always be output in + both release and debug builds. + Calls to va_start and va_end should wrap this function when + passing in va_list args. + @Input pszFormat The message format string + @Input vaArgs va_list arguments to print using pszFormat. + @Return None + */ /**************************************************************************/ +void IMG_CALLCONV PVRSRVReleasePrintfVArgs(const IMG_CHAR *pszFormat, va_list vaArgs); #endif /* PVR_TRACE() handling */ @@ -817,6 +984,9 @@ IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) @def PVR_LOG_GOTO_IF_FALSE @brief Prints error message if expression is false and jumps to label. + @def PVR_LOG_RETURN_IF_TRUE + @brief Prints error message if expression is true and returns given error. + @def PVR_LOG_RETURN_IF_INVALID_PARAM @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS. @@ -878,6 +1048,9 @@ IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) @def PVR_LOG_GOTO_IF_FALSE_VA @brief Logs the error message with var-args and goes to a label if the expression is false. + @def PVR_LOG_RETURN_IF_TRUE_VA + @brief Logs the error message with var-args if the expression is true and returns the error code. + @def PVR_TRACE_EMPTY_LINE @brief Prints empty line to a log (PVRSRV_NEED_PVR_LOG must be defined). diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_device_types.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_device_types.h index 5736124688a2..20e0d5ce3ed5 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_device_types.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_device_types.h @@ -45,7 +45,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" -#define PVRSRV_MAX_DEVICES 16U /*!< Largest supported number of devices on the system */ +#if !defined(PVRSRV_MAX_DEVICES) +#error "PVRSRV_MAX_DEVICES must be defined in the DDK build environment" +#endif /* !defined(PVRSRV_NUM_DEVICES) */ #define PVRSRV_HOST_DEVICE_ID 255U /*!< Device ID used for host (non-GPU) device. */ static_assert(PVRSRV_MAX_DEVICES < PVRSRV_HOST_DEVICE_ID, "Invalid host device ID."); diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_error.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_error.h index 47e615596dd1..120c42d87155 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_error.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_error.h @@ -51,9 +51,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
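The PVRSRVReleasePrintfVArgs() declaration added to pvr_debug.h above notes that callers should bracket it with va_start and va_end when passing a va_list. A minimal sketch of that calling pattern, using a hypothetical vprintf-backed sink in place of the real Services implementation:

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-in for a va_list-taking log sink such as PVRSRVReleasePrintfVArgs(). */
    static void ReleasePrintfVArgsSketch(const char *pszFormat, va_list vaArgs)
    {
        (void) vprintf(pszFormat, vaArgs);
    }

    /* Variadic front end: va_start/va_end wrap the va_list call, as the new
     * documentation asks of callers. */
    static void ReleasePrintfSketch(const char *pszFormat, ...)
    {
        va_list vaArgs;

        va_start(vaArgs, pszFormat);
        ReleasePrintfVArgsSketch(pszFormat, vaArgs);
        va_end(vaArgs);
    }

    int main(void)
    {
        ReleasePrintfSketch("DDK %u.%u loaded\n", 24u, 2u);
        return 0;
    }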
typedef enum PVRSRV_ERROR_TAG { PVRSRV_OK, + #define PVRE(x) x, #include "pvrsrv_errors.h" #undef PVRE + PVRSRV_ERROR_FORCE_I32 = 0x7fffffff } PVRSRV_ERROR; @@ -67,7 +69,7 @@ typedef enum PVRSRV_ERROR_TAG * to retry. */ #define PVRSRVIsRetryError(eError) \ - ((eError == PVRSRV_ERROR_RETRY || eError == PVRSRV_ERROR_KERNEL_CCB_FULL) ? \ + (((eError == PVRSRV_ERROR_RETRY) || (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)) ? \ IMG_TRUE : IMG_FALSE) #endif /* !defined(PVRSRV_ERROR_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_errors.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_errors.h index 9a65228351a6..4c88e675d0d4 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_errors.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_errors.h @@ -115,6 +115,8 @@ PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED) PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED) PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED) PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY) +PVRE(PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK) +PVRE(PVRSRV_ERROR_PMR_TOO_LARGE) PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP) PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE) PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION) @@ -164,6 +166,7 @@ PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL) PVRE(PVRSRV_ERROR_INVALID_CONTEXT) PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT) PVRE(PVRSRV_ERROR_INVALID_HEAP) +PVRE(PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE) PVRE(PVRSRV_ERROR_INVALID_KERNELINFO) PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE) PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE) @@ -371,6 +374,7 @@ PVRE(PVRSRV_ERROR_RA_OUT_OF_RESOURCE) PVRE(PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS) PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED) PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED) +PVRE(PVRSRV_ERROR_RA_FREE_INVALID_CHUNK) PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED) PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED) PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS) @@ -411,6 +415,15 @@ PVRE(PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) PVRE(PVRSRV_ERROR_OUT_OF_RANGE) PVRE(PVRSRV_ERROR_OUT_OF_APP_POOL_MEMORY) PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW) -PVRE(PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK) -PVRE(PVRSRV_ERROR_RA_FREE_INVALID_CHUNK) -PVRE(PVRSRV_ERROR_PMR_TOO_LARGE) +PVRE(PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) +PVRE(PVRSRV_ERROR_UNEXPECTED_TRUE_EXPR) +PVRE(PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR) +PVRE(PVRSRV_ERROR_KERNEL_CCB_OFFSET) +PVRE(PVRSRV_ERROR_PMB_NOT_PAGE_MULTIPLE) +PVRE(PVRSRV_ERROR_ATOMIC_OVERFLOW) +PVRE(PVRSRV_ERROR_ATOMIC_UNDERFLOW) +PVRE(PVRSRV_ERROR_DMABUF_LOCK) +PVRE(PVRSRV_ERROR_DMABUF_ATTACH) +PVRE(PVRSRV_ERROR_DMABUF_ATTACHMENT_MAPPING) +PVRE(PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED) + diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memalloc_physheap.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memalloc_physheap.h index 8b1423e702fa..3c17d8b4495d 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memalloc_physheap.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memalloc_physheap.h @@ -56,111 +56,157 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * NOTE: Enum order important, table in physheap.c must change if order changed. 
*/ -typedef IMG_UINT32 PVRSRV_PHYS_HEAP; -/* Services client accessible heaps */ -#define PVRSRV_PHYS_HEAP_DEFAULT 0U /* default phys heap for device memory allocations */ -#define PVRSRV_PHYS_HEAP_GPU_LOCAL 1U /* used for buffers with more GPU access than CPU */ -#define PVRSRV_PHYS_HEAP_CPU_LOCAL 2U /* used for buffers with more CPU access than GPU */ -#define PVRSRV_PHYS_HEAP_GPU_PRIVATE 3U /* used for buffers that only required GPU read/write access, not visible to the CPU. */ - -#define HEAPSTR(x) #x -static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeapID) +#define PHYS_HEAP_LIST \ + X(DEFAULT) /* Client: default phys heap for device memory allocations */ \ + X(CPU_LOCAL) /* Client: used for buffers with more CPU access than GPU */ \ + X(GPU_LOCAL) /* Client: used for buffers with more GPU access than CPU */ \ + X(GPU_PRIVATE) /* Client: used for buffers that only required GPU read/write access, not visible to the CPU. */ \ + X(FW_MAIN) /* Internal: runtime data, e.g. CCBs, sync objects */ \ + X(EXTERNAL) /* Internal: used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ \ + X(GPU_COHERENT) /* Internal: used for a cache coherent region */ \ + X(GPU_SECURE) /* Internal: used by security validation */ \ + X(FW_CONFIG) /* Internal: subheap of FW_MAIN, configuration data for FW init */ \ + X(FW_CODE) /* Internal: used by security validation or dedicated fw */ \ + X(FW_PRIV_DATA) /* Internal: internal FW data (like the stack, FW control data structures, etc.) */ \ + X(FW_PREMAP_PT) /* Internal: page tables for premapped firmware memory */ \ + X(FW_PREMAP0) /* Internal: Host OS premap fw heap */ \ + X(FW_PREMAP1) /* Internal: Guest OS 1 premap fw heap */ \ + X(FW_PREMAP2) /* Internal: Guest OS 2 premap fw heap */ \ + X(FW_PREMAP3) /* Internal: Guest OS 3 premap fw heap */ \ + X(FW_PREMAP4) /* Internal: Guest OS 4 premap fw heap */ \ + X(FW_PREMAP5) /* Internal: Guest OS 5 premap fw heap */ \ + X(FW_PREMAP6) /* Internal: Guest OS 6 premap fw heap */ \ + X(FW_PREMAP7) /* Internal: Guest OS 7 premap fw heap */ \ + X(WRAP) /* External: Wrap memory */ \ + X(DISPLAY) /* External: Display memory */ \ + X(LAST) + +typedef enum _PVRSRV_PHYS_HEAP_ { - switch (ePhysHeapID) - { - case PVRSRV_PHYS_HEAP_DEFAULT: - return HEAPSTR(PVRSRV_PHYS_HEAP_DEFAULT); - case PVRSRV_PHYS_HEAP_GPU_LOCAL: - return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_LOCAL); - case PVRSRV_PHYS_HEAP_CPU_LOCAL: - return HEAPSTR(PVRSRV_PHYS_HEAP_CPU_LOCAL); - case PVRSRV_PHYS_HEAP_GPU_PRIVATE: - return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_PRIVATE); - default: - return "Unknown Heap"; - } -} +#define X(_name) PVRSRV_PHYS_HEAP_ ## _name, + PHYS_HEAP_LIST +#undef X -/* Services internal heaps */ -#define PVRSRV_PHYS_HEAP_FW_MAIN 4U /* runtime data, e.g. CCBs, sync objects */ -#define PVRSRV_PHYS_HEAP_EXTERNAL 5U /* used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ -#define PVRSRV_PHYS_HEAP_GPU_COHERENT 6U /* used for a cache coherent region */ -#define PVRSRV_PHYS_HEAP_GPU_SECURE 7U /* used by security validation */ -#define PVRSRV_PHYS_HEAP_FW_CONFIG 8U /* subheap of FW_MAIN, configuration data for FW init */ -#define PVRSRV_PHYS_HEAP_FW_CODE 9U /* used by security validation or dedicated fw */ -#define PVRSRV_PHYS_HEAP_FW_PRIV_DATA 10U /* internal FW data (like the stack, FW control data structures, etc.) 
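The PHYS_HEAP_LIST table above, the PHYS_HEAP_TYPE_LIST below, and the PVRE() include trick in pvrsrv_error.h all use the same X-macro pattern: one list is expanded once to build an enum and again to build a matching name table (e.g. _pszPhysHeapStrings under PHYSHEAP_STRINGS), so the enum values and strings cannot drift apart. A condensed, self-contained sketch of the technique with a hypothetical two-entry list (not the real heap list):

    #include <stdio.h>

    /* One list, expanded twice. */
    #define HEAP_LIST \
        X(GPU_LOCAL)  \
        X(CPU_LOCAL)  \
        X(LAST)

    typedef enum
    {
    #define X(_name) PHYS_HEAP_ ## _name,
        HEAP_LIST
    #undef X
    } PHYS_HEAP;

    static const char *const apszHeapNames[] = {
    #define X(_name) #_name,
        HEAP_LIST
    #undef X
    };

    int main(void)
    {
        /* Enum value and name come from the same expansion. */
        printf("%d -> %s\n", PHYS_HEAP_CPU_LOCAL, apszHeapNames[PHYS_HEAP_CPU_LOCAL]);
        return 0;  /* prints: 1 -> CPU_LOCAL */
    }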
*/ -#define PVRSRV_PHYS_HEAP_FW_PREMAP0 11U /* Host OS premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP1 12U /* Guest OS 1 premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP2 13U /* Guest OS 2 premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP3 14U /* Guest OS 3 premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP4 15U /* Guest OS 4 premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP5 16U /* Guest OS 5 premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP6 17U /* Guest OS 6 premap fw heap */ -#define PVRSRV_PHYS_HEAP_FW_PREMAP7 18U /* Guest OS 7 premap fw heap */ -#define PVRSRV_PHYS_HEAP_LAST 19U + PVRSRV_PHYS_HEAP_INVALID = 0x7FFFFFFF +} PVRSRV_PHYS_HEAP; +/* Defines the number of user mode physheaps. These physheaps are: DEFAULT, GPU_LOCAL, + * CPU_LOCAL, GPU_PRIVATE, GPU_SECURE. */ +#define MAX_USER_MODE_ALLOC_PHYS_HEAPS 5 static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1FU + 1U), "Ensure enum fits in memalloc flags bitfield."); /*! Type conveys the class of physical heap to instantiate within Services * for the physical pool of memory. */ +#define PHYS_HEAP_TYPE_LIST \ + X(UNKNOWN) /* Not a valid value for any config */ \ + X(UMA) /* Heap represents OS managed physical memory heap i.e. system RAM. Unified Memory Architecture physmem_osmem PMR factory */ \ + X(LMA) /* Heap represents physical memory pool managed by Services i.e. carve out from system RAM or local card memory. Local Memory Architecture physmem_lma PMR factory */ \ + X(DLM) /* Heap represents local card memory. Used in a DLM heap (Dedicated Local Memory) system. */ \ + X(IMA) /* Heap represents phys heap that imports PMBs from a DLM heap.*/ \ + X(DMA) /* Heap represents a physical memory pool managed by Services, alias of LMA and is only used on VZ non-native system configurations for a heap used for allocations tagged with PVRSRV_PHYS_HEAP_FW_MAIN or PVRSRV_PHYS_HEAP_FW_CONFIG */ \ + X(WRAP) /* Heap used to group UM buffers given to Services. Integrity OS port only. */ \ + X(LAST) \ + typedef enum _PHYS_HEAP_TYPE_ { - PHYS_HEAP_TYPE_UNKNOWN = 0, /*!< Not a valid value for any config */ - PHYS_HEAP_TYPE_UMA, /*!< Heap represents OS managed physical memory heap - i.e. system RAM. Unified Memory Architecture - physmem_osmem PMR factory */ - PHYS_HEAP_TYPE_LMA, /*!< Heap represents physical memory pool managed by - Services i.e. carve out from system RAM or local - card memory. Local Memory Architecture - physmem_lma PMR factory */ -#if defined(__KERNEL__) - PHYS_HEAP_TYPE_DMA, /*!< Heap represents a physical memory pool managed by - Services, alias of LMA and is only used on - VZ non-native system configurations for - a heap used for PHYS_HEAP_USAGE_FW_MAIN tagged - buffers */ -#if defined(SUPPORT_WRAP_EXTMEMOBJECT) - PHYS_HEAP_TYPE_WRAP, /*!< Heap used to group UM buffers given - to Services. Integrity OS port only. */ -#endif -#endif +#define X(_name) PHYS_HEAP_TYPE_ ## _name, + PHYS_HEAP_TYPE_LIST +#undef X + } PHYS_HEAP_TYPE; /* Defines used when interpreting the ui32PhysHeapFlags in PHYS_HEAP_MEM_STATS - 0x000000000000dttt + 0x000000000000000d d = is this the default heap? 
(1=yes, 0=no) - ttt = heap type (000 = PHYS_HEAP_TYPE_UNKNOWN, - 001 = PHYS_HEAP_TYPE_UMA, - 010 = PHYS_HEAP_TYPE_LMA, - 011 = PHYS_HEAP_TYPE_DMA) */ -#define PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK (0x7U << 0) -#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U << 7) +#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U) /* Force PHYS_HEAP_MEM_STATS size to be a multiple of 8 bytes * (as type is a parameter in bridge calls) */ -typedef struct PHYS_HEAP_MEM_STATS_TAG +typedef struct PHYS_HEAP_MEM_STATS_V1_TAG { - IMG_UINT64 ui64TotalSize; - IMG_UINT64 ui64FreeSize; - IMG_UINT32 ui32PhysHeapFlags; - IMG_UINT32 ui32Dummy; -}PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR; + IMG_UINT64 ui64TotalSize; /*!< Total number of bytes in the heap. */ + IMG_UINT64 ui64FreeSize; /*!< Remaining number of bytes free for allocation. */ + IMG_UINT32 ui32PhysHeapFlags; /*!< Flags associated within the heap. */ + PHYS_HEAP_TYPE ePhysHeapType; /*!< The type of physheap. */ +} PHYS_HEAP_MEM_STATS_V1; +typedef struct PHYS_HEAP_MEM_STATS_V2_TAG +{ + IMG_UINT64 ui64TotalSize; /*!< Total number of bytes in the heap. */ + IMG_UINT64 ui64FreeSize; /*!< Remaining number of bytes free for allocation. */ + IMG_UINT32 ui32PhysHeapFlags; /*!< Flags associated within the heap. */ + PHYS_HEAP_TYPE ePhysHeapType; /*!< The type of physheap. */ + IMG_UINT64 ui64DevicesInSPAS; /*!< A bitmap of devices that are linked to the heap via a SPAS. + Where a device is encoded as a bit using the following: + (1 << psDevNode->sDevId.ui32InternalID) */ +} PHYS_HEAP_MEM_STATS_V2; +typedef PHYS_HEAP_MEM_STATS_V2 PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR; + +#if defined(PHYSHEAP_STRINGS) + +static const char *const _pszPhysHeapStrings[] = { +#define X(_name) #_name, + PHYS_HEAP_LIST +#undef X +}; +/*************************************************************************/ /*! +@Function PVRSRVGetClientPhysHeapTypeName +@Description Returns the phys heap type as a string. + +@Input ePhysHeapType The physheap type. + +@Return const IMG_CHAR pointer. +*/ /**************************************************************************/ static inline const IMG_CHAR *PVRSRVGetClientPhysHeapTypeName(PHYS_HEAP_TYPE ePhysHeapType) { switch (ePhysHeapType) { - case PHYS_HEAP_TYPE_UMA: - return HEAPSTR(PHYS_HEAP_TYPE_UMA); - case PHYS_HEAP_TYPE_LMA: - return HEAPSTR(PHYS_HEAP_TYPE_LMA); +#define X(_name) case PHYS_HEAP_TYPE_ ## _name: return "PHYS_HEAP_TYPE_" # _name; + PHYS_HEAP_TYPE_LIST +#undef X default: return "Unknown Heap Type"; } } -#undef HEAPSTR + +/*************************************************************************/ /*! +@Function PVRSRVGetPhysHeapName +@Description Returns the name of a PhysHeap. + +@Input ePhysHeap The enum value of the physheap. + +@Return const IMG_CHAR pointer. +*/ /**************************************************************************/ +static inline const IMG_CHAR *PVRSRVGetPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeap) +{ + if (ePhysHeap < 0 || ePhysHeap >= PVRSRV_PHYS_HEAP_LAST) + { + return "Undefined"; + } + + return _pszPhysHeapStrings[ePhysHeap]; +} + +/*************************************************************************/ /*! +@Function PVRSRVGetClientPhysHeapName +@Description Returns the name of a client PhysHeap. + +@Input ePhysHeap The enum value of the physheap. + +@Return const IMG_CHAR pointer. 
+*/ /**************************************************************************/ +static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeap) +{ + if (ePhysHeap > PVRSRV_PHYS_HEAP_GPU_PRIVATE) + { + return "Unknown Heap"; + } + + return PVRSRVGetPhysHeapName(ePhysHeap); +} +#endif /* PHYSHEAP_STRINGS */ #endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memallocflags.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memallocflags.h index c5096df7ee3d..d0703131e62d 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memallocflags.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_memallocflags.h @@ -67,13 +67,17 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; * | 0-3 | 4-7 | 8-10 | 11-13 | 14 | * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable | * - * --- MISC FLAGS 15..23 (9-bits) --- - * | 15 | 17 | 18 | 19 | 20 | - * | Defer | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page | + * --- MISC FLAGS 15..20 (9-bits) --- + * | 15 | 16 | 17 | 18 | 19 | 20 | + * | Defer | Reserved | SVM | Scratch-Pg | CPU-Cache-Clean | Zero-Pg | + * + * --- RI FLAGS 21..23 (3-bits) --- + * | 21 | 22 | 23 | + * | Import | Suballoc | FW alloc | * * --- DEV CONTROL FLAGS 26..27 (2-bits) --- - * | 26-27 | - * | Device-Flags | + * | 24-25 | 26-27 | + * | ... | Device-Flags | * * --- MISC FLAGS 28..31 (4-bits) --- * | 28 | 29 | 30 | 31 | @@ -83,9 +87,17 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; * | 35 | * | Shared-buffer | * + * --- OS SPECIFIC FLAGS --- + * | 36 | 37 | 38 | + * | Linux Pref CMA | Linux Movable | Linux Deny Move | + * + * --- IPA Policy --- + * | 53-55 | + * | IPA Policy | + * * --- PHYS HEAP HINTS --- - * | 59-63 | - * | PhysHeap Hints | + * | 56 | 57-58 | 59-63 | + * | Mandate Heap | | PhysHeap Hints | * */ @@ -514,14 +526,33 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; * * ALLOC MEMORY FLAGS * * * * * ********************************************************** - * - * (Bits 15) - * */ -#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (IMG_UINT64_C(1)<<15) -#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0U) +/*! ----- Bit 15 + + Indicates when the allocation of physical memory pages backing the PMR + is carried out. When set, pages are not allocated at PMR creation but are + instead deferred until they are first needed, i.e. "on demand". + When unset, the pages may be allocated at the same time the PMR is created + or deferred (at the KM/Server's discretion). + */ +#define PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC (IMG_UINT64_C(1)<<15) /*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) != 0U) + +/*! ----- Bit 16 + + This flag is unused but kept for compatibility reasons. Once not a concern + the flag can be removed and but reused. + */ +#define PVRSRV_MEMALLOCFLAG_PHYS_RESERVED (IMG_UINT64_C(1)<<16) + +/*! ----- Bit 17 + Indicates that the allocation will be accessed by the CPU and GPU using the same virtual address, i.e. for all SVM allocs, IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR @@ -535,26 +566,29 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; */ #define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0U) -/*! +/*! 
----- Bit 18 + Indicates the particular memory that's being allocated is sparse and the - sparse regions should not be backed by dummy page + sparse regions should not be backed by scratch page */ -#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (IMG_UINT64_C(1) << 18) +#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING (IMG_UINT64_C(1) << 18) /*! - @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set. + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING flag is set. @Input uiFlags Allocation flags. @Return True if the flag is set, false otherwise */ -#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0U) +#define PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) == 0U) -/*! - Used to force Services to carry out at least one CPU cache invalidate on a - CPU cached buffer during allocation of the memory. Applicable to incoherent - systems, it must be used for buffers which are CPU cached and which will not - be 100% written to by the CPU before the GPU accesses it. For performance - reasons, avoid usage if the whole buffer that is allocated is written to by - the CPU anyway before the next GPU kick, or if the system is coherent. +/*! ----- Bit 19 + + Used to force Services to carry out at least one CPU cache flush and + invalidate on a CPU cached buffer during allocation of the memory. Applicable + to incoherent systems, it must be used for buffers which are CPU cached and + which will not be 100% written to by the CPU before the GPU accesses it. For + performance reasons, avoid usage if the whole buffer that is allocated is + written to by the CPU anyway before the next GPU kick, or if the system is + coherent. */ #define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (IMG_UINT64_C(1)<<19) @@ -565,34 +599,82 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; */ #define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0U) -/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING +/*! ----- Bit 20 - Indicates the particular memory that's being allocated is sparse and the - sparse regions should be backed by zero page. This is different with - zero on alloc flag such that only physically unbacked pages are backed - by zero page at the time of mapping. + Indicates the particular memory region should be backed by zero page. + This is different with zero on alloc flag such that only physically unbacked + pages are backed by zero page at the time of mapping. The zero backed page is always with read only attribute irrespective of its original attributes. 
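Note that the two backing-related checks in this hunk have opposite senses: PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED() is true when bit 18 is clear (scratch backing is the default and the flag opts out), while PVRSRV_IS_ZERO_BACKING_REQUIRED() is true when bit 20 is set (zero backing must be requested). A small sketch of that logic with local stand-in names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions copied from the flag definitions in this hunk. */
    #define FLAG_SPARSE_NO_SCRATCH_BACKING (UINT64_C(1) << 18)
    #define FLAG_ZERO_BACKING              (UINT64_C(1) << 20)

    int main(void)
    {
        uint64_t uiFlags = FLAG_ZERO_BACKING;   /* bit 18 clear, bit 20 set */

        bool bScratchRequired = (uiFlags & FLAG_SPARSE_NO_SCRATCH_BACKING) == 0;
        bool bZeroRequired    = (uiFlags & FLAG_ZERO_BACKING) != 0;

        printf("scratch=%d zero=%d\n", bScratchRequired, bZeroRequired); /* 1 1 */
        return 0;
    }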
*/ -#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (IMG_UINT64_C(1) << 20) -#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \ - PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) +#define PVRSRV_MEMALLOCFLAG_ZERO_BACKING (IMG_UINT64_C(1) << 20) +#define PVRSRV_IS_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \ + PVRSRV_MEMALLOCFLAG_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_ZERO_BACKING) +/* + ************************************************************ + * RI flags * + ************************************************************ + * The Flags are used exclusively by the RI (Reference Info) + * server for tracking KM allocations by various processes + * + * Import - Handle imported from other process + * Suballoc - Handle Suballocation of existing PMR + * FW Alloc - Used by FW during driver initialisation + * + * --- RI FLAGS 21..23 (3-bits) --- + * | 21 | 22 | 23 | + * | Import | Suballoc | FW alloc | + * + */ + +/*! ----- Bit 21 + Used by RI server to register Allocation as an import from other process + */ +#define PVRSRV_MEMALLOCFLAG_RI_IMPORT (IMG_UINT64_C(1)<<21) /*! - @Description Macro extracting the OS id from a variable containing memalloc flags - @Input uiFlags Allocation flags - @Return returns the value of the FW_ALLOC_OSID bitfield + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_RI_IMPORT flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_RI_IMPORT(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_RI_IMPORT) != 0U) + +/*! ----- Bit 22 + Used by RI server to register Allocation as a suballocation of existing PMR + */ +#define PVRSRV_MEMALLOCFLAG_RI_SUBALLOC (IMG_UINT64_C(1)<<22) +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_RI_SUBALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_RI_SUBALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_RI_SUBALLOC) != 0U) + +/*! ----- Bit 23 + Used by RI server to register Allocation as FW/System process + 'FW Alloc' also means the allocation is considered to belong to the SYS process + (ie that it will have a lifetime longer than the process which allocated it) + */ +#define PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC (IMG_UINT64_C(1)<<23) +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise */ -#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ - >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) +#define PVRSRV_CHECK_RI_FWKMD_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC) != 0U) /*! - @Description Macro converting an OS id value into a memalloc bitfield - @Input uiFlags OS id - @Return returns a shifted bitfield with the OS id value + @Description Macro passing the PVRSRV_MEMALLOCFLAG_RI_ Flags. + @Input uiFlags Allocation flags. + @Return Value of the RI server bit field + */ +#define PVRSRV_MEMALLOCFLAG_RI_MASK(uiFlags) ((uiFlags) & (IMG_UINT64_C(7)<<21)) + + +/*! ----- Bit 24 + * + Not used. */ -#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid) (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) \ - & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ /* * @@ -709,11 +791,11 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; /*! @Description Helper macro for setting device specific MMU flags. - @Input n Flag index. + @Input uiFlags Flag index. 
@Return Flag vector with the specified bit set. */ -#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \ - (((PVRSRV_MEMALLOCFLAGS_T)(n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \ +#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(uiFlags) \ + (((PVRSRV_MEMALLOCFLAGS_T)(uiFlags) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \ PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) /* @@ -735,6 +817,52 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; #define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (IMG_UINT64_C(1)<<35) #define PVRSRV_CHECK_SHARED_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0U) +/* + * + * ********************************************************** + * * * + * * OS Specific alloc flags * + * * * + * ********************************************************** + * + * (Bits 36 to 38) + * + */ +#define PVRSRV_MEMALLOCFLAG_OS_ALLOCFLAG_OFFSET 36 +#define PVRSRV_MEMALLOCFLAG_OS_ALLOCFLAG_MASK (IMG_UINT64_C(7) << PVRSRV_MEMALLOCFLAG_OS_ALLOCFLAG_OFFSET) + +#define PVRSRV_MEMALLOCFLAG_OS_LINUX_PREFER_CMA (IMG_UINT64_C(1)<<36) +#define PVRSRV_CHECK_OS_LINUX_PREFER_CMA(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_OS_LINUX_PREFER_CMA) != 0U) + +#define PVRSRV_MEMALLOCFLAG_OS_LINUX_MOVABLE (IMG_UINT64_C(1)<<37) +#define PVRSRV_CHECK_OS_LINUX_MOVABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_OS_LINUX_MOVABLE) != 0U) + +#define PVRSRV_MEMALLOCFLAG_OS_LINUX_DENY_MOVE (IMG_UINT64_C(1)<<38) +#define PVRSRV_CHECK_OS_LINUX_DENY_MOVE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_OS_LINUX_DENY_MOVE) != 0U) + +/* + * + * ********************************************************** + * * * + * * IPA Policy * + * * * + * ********************************************************** + * + * (Bits 53 to 55) + * + */ + +/*! + * Offset of Intermediate Physical Address (IPA) policy. + */ +#define PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET 53 + +/*! + * Mask for retrieving IPA policy. + */ +#define PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK (IMG_UINT64_C(7) << PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET) +#define PVRSRV_MEMALLOCFLAG_IPA_POLICY(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK) >> PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET) + /* * * ********************************************************** @@ -743,10 +871,16 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; * * * * ********************************************************** * - * (Bits 59 to 63) + * (Bits 56 to 63) * */ +/*! + * Ensures Physheap isn't reassigned when considered favourable by driver under a OOM condition. + */ +#define PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP (IMG_UINT64_C(1)<<56) +#define PVRSRV_CHECK_MANDATED_PHYSHEAP(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP) != 0U) + /*! * Value of enum PVRSRV_PHYS_HEAP stored in memalloc flags. If not set * i.e. PVRSRV_PHYS_HEAP_DEFAULT (value 0) used, the system layer defined default physical heap is used. @@ -834,13 +968,13 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ - PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) + PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) #else #define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ - PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) + PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) #endif /*! 
@@ -858,10 +992,13 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ - PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ - PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ - PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | \ + PVRSRV_MEMALLOCFLAG_ZERO_BACKING | \ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \ + PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ + PVRSRV_MEMALLOCFLAG_OS_ALLOCFLAG_MASK | \ + PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ PVRSRV_PHYS_HEAP_HINT_MASK) #else #define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ @@ -872,10 +1009,13 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ - PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ - PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ - PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | \ + PVRSRV_MEMALLOCFLAG_ZERO_BACKING | \ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \ + PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ + PVRSRV_MEMALLOCFLAG_OS_ALLOCFLAG_MASK | \ + PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ PVRSRV_PHYS_HEAP_HINT_MASK) #endif @@ -925,10 +1065,10 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; #define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ - PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ - PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ - PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) + PVRSRV_MEMALLOCFLAG_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) #if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0U) #error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK @@ -941,21 +1081,33 @@ typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; #if defined(DEBUG) #define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ + PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ + PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ + PVRSRV_MEMALLOCFLAG_OS_LINUX_PREFER_CMA | \ + PVRSRV_MEMALLOCFLAG_OS_LINUX_MOVABLE | \ PVRSRV_PHYS_HEAP_HINT_MASK) #else #define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ + PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ + PVRSRV_MEMALLOCFLAG_OS_LINUX_PREFER_CMA | \ + PVRSRV_MEMALLOCFLAG_OS_LINUX_MOVABLE | \ 
PVRSRV_PHYS_HEAP_HINT_MASK) #endif diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlcommon.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlcommon.h index 19715dac86ed..9bc65fe05f98 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlcommon.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlcommon.h @@ -62,7 +62,7 @@ typedef IMG_HANDLE PVRSRVTL_SD; /*! Packet lengths are always rounded up to a multiple of 8 bytes */ #define PVRSRVTL_PACKET_ALIGNMENT 8U -#define PVRSRVTL_ALIGN(x) (((x)+PVRSRVTL_PACKET_ALIGNMENT-1U) & ~(PVRSRVTL_PACKET_ALIGNMENT-1U)) +#define PVRSRVTL_ALIGN(x) PVR_ALIGN(x, PVRSRVTL_PACKET_ALIGNMENT) /*! A packet is made up of a header structure followed by the data bytes. diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlstreams.h b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlstreams.h index 9064075ad5c0..624b8eb1f47d 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlstreams.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrsrv_tlstreams.h @@ -45,11 +45,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PVRSRV_TLSTREAMS_H #define PVRSRV_TLSTREAMS_H -#define PVRSRV_TL_CTLR_STREAM "tlctrl" +#define PVRSRV_TL_CTRL_STREAM "tlctrl" #define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_" #define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_" +#define PVRSRV_TL_FTRACE_RGX_FW_STREAM "ftrace_fw_" + /* Host HWPerf client stream names are of the form 'hwperf_client_' */ #define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_" #define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u" diff --git a/drivers/gpu/drm/img/img-volcanic/include/pvrversion.h b/drivers/gpu/drm/img/img-volcanic/include/pvrversion.h index adbef2a038b4..4d17f6e55594 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/pvrversion.h +++ b/drivers/gpu/drm/img/img-volcanic/include/pvrversion.h @@ -44,22 +44,22 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PVRVERSION_H #define PVRVERSION_H -#define PVRVERSION_MAJ 1U -#define PVRVERSION_MIN 18U +#define PVRVERSION_MAJ 24U +#define PVRVERSION_MIN 2U #define PVRVERSION_FAMILY "rogueddk" -#define PVRVERSION_BRANCHNAME "1.18" -#define PVRVERSION_BUILD 6307965 +#define PVRVERSION_BRANCHNAME "24.2" +#define PVRVERSION_BUILD 6643903 #define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" -#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.18@6307965" -#define PVRVERSION_STRING_SHORT "1.18@6307965" +#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 24.2@6643903" +#define PVRVERSION_STRING_SHORT "24.2@6643903" #define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved." 
-#define PVRVERSION_BUILD_HI 630 -#define PVRVERSION_BUILD_LO 7965 -#define PVRVERSION_STRING_NUMERIC "1.18.630.7965" +#define PVRVERSION_BUILD_HI 664 +#define PVRVERSION_BUILD_LO 3903 +#define PVRVERSION_STRING_NUMERIC "24.2.664.3903" #define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) #define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_common.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_common.h index a851a4d3ab01..55b0ed450e1c 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_common.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_common.h @@ -54,12 +54,15 @@ extern "C" { #include "rgx_common_asserts.h" +typedef struct RGXFWIF_DEV_VIRTADDR_ +{ + IMG_UINT32 ui32Addr; +} RGXFWIF_DEV_VIRTADDR; /* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform. - * As such a driver can support either the vz-validation code or real virtualisation. - * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_OS_SUPPORTED is the internal symbol used in the DDK */ -#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) -#error "Invalid build configuration: Virtualisation support (PVRSRV_VZ_NUM_OSID > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive." + * As such a driver can support either the vz-validation code or real virtualisation. */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) +#error "Invalid build configuration: Virtualisation support (RGX_NUM_DRIVERS_SUPPORTED > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive." #endif /* The RGXFWIF_DM defines assume only one of RGX_FEATURE_TLA or @@ -69,6 +72,19 @@ extern "C" { #error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!" #endif +#define FW_OSID (0U) +#define MMU_CONTEXT_MAPPING_FWPRIV (0U) /* FW code/private data */ + +#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID) || defined(RGX_FEATURE_MIPS) +/* The Firmware accesses its private code & data and the interface + * memory it shares with the KM drivers using the same MMU context */ +#define MMU_CONTEXT_MAPPING_FWIF MMU_CONTEXT_MAPPING_FWPRIV +#else +/* The Firmware accesses the interface memory it shares + * with the KM drivers using a reserved MMU context */ +#define MMU_CONTEXT_MAPPING_FWIF (7U) +#endif + /*! The master definition for data masters known to the firmware of RGX. * When a new DM is added to this list, relevant entry should be added to * RGX_HWPERF_DM enum list. 
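As a quick check of the version-packing helpers carried in the pvrversion.h hunk above, a standalone sketch (not part of the patch; the macro bodies are copied verbatim, with uint32_t standing in for IMG_UINT32):

/* Sketch only: PVRVERSION_PACK stores major/minor in the high/low 16 bits of a 32-bit word. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t IMG_UINT32;

#define PVRVERSION_PACK(MAJOR,MINOR)   (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U))
#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)

int main(void)
{
    /* With the DDK 24.2 version numbers introduced by this patch: 24 -> 0x18 in the high half. */
    IMG_UINT32 v = PVRVERSION_PACK(24U, 2U);
    assert(v == 0x00180002U);
    assert(PVRVERSION_UNPACK_MAJ(v) == 24U);
    return 0;
}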
@@ -106,6 +122,8 @@ typedef IMG_UINT32 RGX_KICK_TYPE_DM; /* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RDM, GEOM2, GEOM3, GEOM4 */ #define RGXFWIF_DM_MAX (RGXFWIF_DM_LAST + 1U) +/* The set of DMs for gathering stats on GPU utilisation excludes GP */ +#define RGXFWIF_GPU_UTIL_DM_MAX (RGXFWIF_DM_MAX - 1U) /* * Data Master Tags to be appended to resources created on behalf of each RGX @@ -177,7 +195,13 @@ typedef IMG_UINT32 RGX_KICK_TYPE_DM; #define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) #define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) #define RGXFWIF_GPU_UTIL_STATE_NUM (3U) +/* the state below "combines" IDLE and BLOCKED + * and is used when we only care about GPU being in ACTIVE or not */ +#define RGXFWIF_GPU_UTIL_STATE_INACTIVE (0U) +/* when we combine IDLE and BLOCKED we end up with one state less */ +#define RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM (RGXFWIF_GPU_UTIL_STATE_NUM-1U) #define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) +#define RGXFWIF_GPU_UTIL_STATE_MASK32 IMG_UINT32_C(0x00000003) /* @@ -185,7 +209,7 @@ typedef IMG_UINT32 RGX_KICK_TYPE_DM; * programmer (FW or META DMA). This is not a HW limitation, it is only * a protection against malformed inputs to the register programmer. */ -#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (128U) +#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (256U) /* FW common context priority. */ /*! @@ -222,6 +246,11 @@ typedef IMG_UINT32 RGX_KICK_TYPE_DM; typedef IMG_UINT32 RGX_CONTEXT_PROPERTY; #define RGX_CONTEXT_PROPERTY_FLAGS 0U /*!< Context flags */ +/* MMU4 supported number of ranges */ +#define RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES (4U) +#define RGX_MMU_RANGE_NON4KHEAP (0U) +#define RGX_MMU_RANGE_GLOBAL (RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES - 1) + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_fwif_sf.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_fwif_sf.h index 74b8878f42d2..a190875b5e6d 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_fwif_sf.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_fwif_sf.h @@ -69,7 +69,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. X(RGXFW_GROUP_HWP,HWP) \ X(RGXFW_GROUP_RPM,RPM) \ X(RGXFW_GROUP_DMA,DMA) \ - X(RGXFW_GROUP_DBG,DBG) + X(RGXFW_GROUP_DBG,DBG) \ + X(RGXFW_GROUP_VZ,VZ) \ + X(RGXFW_GROUP_SAFETY,SAFETY) \ + X(RGXFW_GROUP_VERBOSE,VERBOSE) \ + X(RGXFW_GROUP_CUSTOMER,CUSTOMER) /*! * @InGroup SRVAndFWTracing @@ -107,35 +111,35 @@ typedef struct { /*id, gid, id name, string, # arguments */ \ X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \ \ -X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \ +X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %u, RTD 0x%08x. 
Partial render:%u, CSW resume:%u, prio:%d", 6) \ X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \ -X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \ +X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %u, CSW resume:%u, prio: %d", 4) \ X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \ -X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %u, prio: %d", 3) \ X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \ -X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \ +X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %u, RTD 0x%08x. First kick:%u, Last kick:%u, CSW resume:%u, prio:%d", 7) \ X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \ X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \ X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \ X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \ -X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ +X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %u, prio:%d", 3) \ X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \ -X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \ -X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \ +X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %u, DM = %u, FWCtx = 0x%08.8x", 3) \ +X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %u", 2) \ X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \ X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \ X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? 
requires 0x%08.8x", 4) \ -X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \ +X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %u", 2) \ X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ -X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \ -X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ -X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ +X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %u of:", 1) \ +X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%u, FWCtx: 0x%08.8x", 2) \ +X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%u, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \ X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \ X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \ -X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \ +X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %u failed: host = 0x%x, fw = 0x%x", 3) \ X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \ X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \ X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \ @@ -146,59 +150,59 @@ X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active rend X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \ X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \ X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \ -X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \ +X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %u failed: addresses = %u, sizes = %u", 3) \ X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \ -X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ -X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \ +X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %u, Units: 0x%08.8x", 2) \ +X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %u to %u", 2) \ X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \ -X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \ +X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %u (bPowRascalDust=%u)", 2) \ X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer 
used for partial render (store)", 0) \ X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \ -X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \ -X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \ -X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \ -X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \ -X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \ -X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \ +X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%u FWCtx: 0x%08.8x", 2) \ +X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%u checker: CatBase TE=0x%08x (%u Pages), VCE=0x%08x (%u Pages), ALIST=0x%08x, IsTA=%u", 7) \ +X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%u checker: MList[%u] = 0x%08x", 3) \ +X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%u OK", 1) \ +X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%u is empty", 1) \ +X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%u checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%u", 8) \ X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \ X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \ X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \ -X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %u, prio: %d", 3) \ X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \ -X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %u, prio: %d", 3) \ X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \ X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \ X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \ -X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \ -X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \ +X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%u start", 1) \ +X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%u complete", 1) \ X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \ -X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ +X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %u, prio: %d, Frame Context: %u", 4) \ X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \ X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \ 
-X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \ +X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %u cycles, write: %u cycles, iterations: %u", 3) \ X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \ -X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ +X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %u. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \ X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \ X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \ -X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ +X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE_DEPRECATED, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \ X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \ -X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \ +X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %u", 2) \ X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ -X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \ -X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! Context 0x%08x, SH requestor %d", 2) \ -X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ -X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ -X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ -X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ +X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %u", 2) \ +X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! 
Context 0x%08x, SH requestor %u", 2) \ +X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %u, prio: %d, Frame Context: %u", 4) \ +X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %u (deferred DMs = 0x%08x)", 4) \ +X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %u to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ +X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %u (deferred DMs = 0x%08x)", 4) \ X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ -X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ -X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \ -X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \ -X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \ +X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for Driver ID %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ +X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %u", 1) \ +X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %u", 1) \ +X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %u Hz", 1) \ X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \ X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \ @@ -209,9 +213,9 @@ X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resu X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \ X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \ X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \ -X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ +X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %u, prio:%d", 3) \ X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \ -X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)", 4) \ +X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%u]_PIPE[%u]: 0x%08x 0x%08x)", 4) \ X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \ X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \ X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \ @@ -220,75 +224,75 @@ X(111, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, " X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \ X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \ X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \ -X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \ +X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %u's priority from %u to %u", 3) \ X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \ -X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ -X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ -X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ -X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ -X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ -X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ -X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ -X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ -X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \ -X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \ -X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \ -X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \ +X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \ +X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED3, "Kick TDM: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \ +X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %u, RTD 0x%08x, First kick:%u, Last kick:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \ +X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %u, RTD 0x%08x, Partial render:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 10) \ +X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ +X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED2, "Kick Compute: FWCtx 0x%08.8x @ %u. 
(PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ +X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %u, Frame Context:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ +X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \ +X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %u.", 1) \ +X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %u.", 1) \ +X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %u.", 1) \ +X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%u failed", 1) \ X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ -X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ -X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \ -X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \ -X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ -X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d", 2) \ +X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %u (Roff = %u, Woff = %u)", 3) \ +X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED_DEPRECATED, "DM %u failed to Context Switch on time. 
Triggered HCS (see HWR logs).", 1) \ +X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %u ms", 1) \ +X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%u, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ +X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %u: USCTiles=%u", 2) \ X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \ -X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \ -X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \ -X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \ +X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %u", 1) \ +X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %u has come online", 1) \ +X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %u has gone offline", 1) \ X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \ X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \ X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \ X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \ -X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \ -X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \ +X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %u with config flags 0x%08x", 2) \ +X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %u/%u", 2) \ X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \ -X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \ +X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED2, "Initialised OS %u with config flags 0x%08x and extended config flags 0x%08x", 3) \ X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \ -X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \ -X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \ +X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %u [0x%08.8x] = 0x%08.8x", 4) \ +X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP_DEPRECATED, "UFO forced update NOP: FWCtx 0x%08.8x @ %u [0x%08.8x] = 0x%08.8x, reason %u", 5) \ X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \ -X(154, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ +X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "Driver ID %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \ X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \ X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \ -X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \ -X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \ +X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for Driver ID %u @ KCCB 0x%08x", 3) \ +X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT_DEPRECATED, "FW FAULT: At line %u in file 0x%08x%08x, additional data=0x%08x", 4) \ X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \ X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \ X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \ X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \ -X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ -X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ -X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \ -X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \ +X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for Driver ID %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ +X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED2, "OSid %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ +X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE_DEPRECATED, "Driver ID %u fw state transition request: from %u to %u (0-offline 1-ready 2-active 3-offloading 4-cooldown). 
Status %u (1-ok 0-fail)", 4) \ +X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "Driver ID %u has %u stale commands in its KCCB", 2) \ X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \ X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \ X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \ X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ -X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \ -X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \ -X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ +X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for Driver ID %u with WOff %u", 2) \ +X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for Driver ID %u, FWCtx 0x%08x", 2) \ +X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from Driver ID %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \ -X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \ -X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ -X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \ +X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %u", 1) \ +X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %u, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ +X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %u triggered a reset", 1) \ X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \ -X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \ +X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %u which is not yet supported on Series8.", 1) \ X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \ -X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \ -X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \ +X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, 
Size=0x%08x, Invalidate=%u", 5) \ +X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL_DEPRECATED, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \ X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \ X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \ X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \ @@ -298,13 +302,13 @@ X(191, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, % X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \ X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \ X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \ -X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \ -X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ +X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %u", 1) \ +X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER_DEPRECATED, "Driver ID %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \ -X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \ +X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%u, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \ X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \ X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \ -X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \ +X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%u, CoreMask=0x%02x, Raised=0x%02x", 3) \ X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \ X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ @@ -312,82 +316,130 @@ X(205, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled co X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \ X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \ X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ -X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = 
allowed, 0 = not allowed)", 2) \ +X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED_DEPRECATED, " Updated permission for Driver ID %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \ X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \ X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, " core %u, reg = %u, mask = 0x%X)", 3) \ X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \ X(213, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set to 0x%x clock cycles", 1) \ -X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event trigged by the safety watchdog.", 0) \ -X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \ +X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event triggered by the safety watchdog.", 0) \ +X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%u USC tasks range limit 0 - %u, stride %u", 3) \ X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \ X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \ -X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \ -X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\ +X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %u", 2) \ +X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7)\ X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \ -X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \ -X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \ -X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \ +X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %u (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \ +X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%u increasing to %u)", 2) \ +X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%u increasing to %u)", 2) \ X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \ -X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \ -X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %d", 2) \ -X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM", 2) \ -X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ -X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. 
(PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 12) \ -X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ -X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 7) \ -X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u ", 1) \ -X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u ", 1) \ -X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x", 3) \ -X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u ", 1) \ -X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ -X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8)\ -X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u ", 1) \ +X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED2, "Changing Driver ID %u's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \ +X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %u", 2) \ +X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %u ahead of time on other GEOM", 2) \ +X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ +X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %u, RTD 0x%08x, First kick:%u, Last kick:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 12) \ +X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %u, RTD 0x%08x, Partial render:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \ +X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE_DEPRECATED, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u", 1) \ +X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u", 1) \ +X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u, HWRTData0State=%x, HWRTData1State=%x", 3) \ +X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u", 1) \ +X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE_DEPRECATED, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u, Base 0x%08x%08x. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 10) \ +X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE_DEPRECATED, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %u. 
(PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8)\ +X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u", 1) \ X(239, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RT_UNITS_INIT, "GPU RT Units init (# mask: 0x%08x%08x)", 2) \ X(240, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_PENDING_PASS, "UFO Check: [0x%08.8x] is pending update to 0x%08.8x and therefore passes", 2) \ X(241, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK_PENDING_PASS, "UFO PR-Check: [0x%08.8x] is pending update to 0x%08.8x and therefore passes", 2) \ X(242, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DELAY_DM_TO_OVERLAP_PIPES, "Holding kick of DM %u pipe %u to encourage pipeline overlap", 2) \ X(243, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RELEASE_DM_PIPE, "Releasing kick for DM %u pipe %u", 2) \ +X(244, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED3, "Changing Driver ID %u's priority from %u to %u", 3) \ +X(245, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ISOLATION_GROUP_CHANGE_DEPRECATED, "Changing Driver ID %u's isolation group from %u to %u", 3) \ +X(246, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VK_TIMESTAMP, "VK Timestamp: addr=0x%08x%08x, avail=0x%08x%08x stamp=0x%08x%08x", 6) \ +X(247, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %u failed to Context Switch on time (Current time: 0x%08x%08x, deadline: 0x%08x%08x). Triggered HCS (see HWR logs).", 5) \ +X(248, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll, RGX_CR_EVENT_STATUS=0x%08x (see HWR logs)", 1) \ +X(249, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBCDC_FAILURE_DETECTED_DEPRECATED, "FBCDC signature failure detected so block scheduling more work", 0) \ +X(250, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBCDC_FAILURE_CLEARED_DEPRECATED, "FBCDC signature cleared which unlocks scheduling more work", 0) \ +X(251, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %u in file 0x%08x%08x, additional data=0x%08x%08x", 5) \ +X(252, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %u, reason %u", 3) \ +X(253, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_CONTEXT_STORED, "TDM FWCtx:0x%08.8x stored", 1) \ +X(254, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CDM_CONTEXT_STORED, "CDM FWCtx:0x%08.8x stored, resumeMask:0x%08x", 2) \ +X(255, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_CONTEXT_STORED, "GEOM FWCtx:0x%08.8x stored", 1) \ +X(256, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_CONTEXT_STORED, "3D FWCtx:0x%08.8x stored, resumeMask:0x%08x", 2) \ +X(257, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RDM_CONTEXT_STORED, "RAY FWCtx:0x%08.8x stored", 1) \ +X(258, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ +X(259, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(260, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 8) \ +X(261, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u, Base 0x%08x%08x, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \ +X(262, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 9)\ +X(263, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_DEPTH, "3D Disable Depth. 
ExtJobRef = 0x%08x", 1) \ +X(264, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_STENCIL, "3D Disable Stencil. ExtJobRef = 0x%08x", 1) \ +X(265, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_DS_IN_3D_RUNNING, "3D Disable DS in 3D running. RenderContext 0x%08.8x ExtJobRef 0x%08x", 2) \ +X(266, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_DS_IN_KICK_3D, "3D Disable DS in kick 3D. RenderContext 0x%08.8x ExtJobRef 0x%08x", 2) \ +X(267, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ADD_DISABLE_DS_QUEUE, "Add disable DS in queue. RenderContext 0x%08.8x DisableJobRef 0x%08x Uncheck %u", 3) \ +X(268, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOT_FIND_USABLE_DS_IN_QUEUE, "Not find usable DS in queue. RenderContext 0x%08.8x ExtJobRef 0x%08x DisableJobRef 0x%08x Uncheck %u index %u", 5) \ +X(269, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_LIST_FULL, "Unable to set breakpoint for MemCtx 0x%08x as the breakpoint list is full.", 1) \ +X(270, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NUM_LOG_PARAMS, "Invalid number of log parameters passed! (Group:%u ID:%u Params:%u Passed:%u)", 4) \ +X(271, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_CANCEL_PIPELINE, "TDM cancelled: Kick ID %u", 1) \ +X(272, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_CANCEL_PIPELINE, "TA cancelled: Kick ID %u", 1) \ +X(273, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_CANCEL_PIPELINE, "3D cancelled: Kick ID %u", 1) \ +X(274, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CDM_CANCEL_PIPELINE, "Compute cancelled: Kick ID %u", 1) \ +X(275, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_CANCEL_PIPELINE, "Ray cancelled: Kick ID %u", 1) \ +X(276, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIME_SLICE_MAX_DEPRECATED, "Time Slice: Update Failed, Max total allocations exceeded 100pc", 0)\ +X(277, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIME_SLICE_DYNAMIC_LOW_DEPRECATED, "Time Slice: Dynamic time slice low, allocating zero time slice to dynamic drivers", 0)\ +X(278, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIME_SLICE_UPDATE_SUCCESS_DEPRECATED, "Time Slice: Updated Successfully", 0)\ +X(279, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISCARD_FWCCB, "FWCCB for Driver ID %u is full, discarding command! (Roff = %u, Woff = %u)", 3) \ +X(280, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CANCEL_WORK_CMD_RECEIVED_DEPRECATED, "Cancelling jobs with intjobref<0x%08x, DM = %u, FWCtx = 0x%08.8x", 3) \ +X(281, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CANCEL_WORK_CMD_RECEIVED, "Cancelling jobs with intjobref from 0x%08x to 0x%08x, DM = %u, FWCtx = 0x%08.8x", 4) \ +X(282, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIME_SLICE_MAX_DEPRECATED2, "Time Slice: Update Failed, Max total allocations exceeded 100pc", 0)\ +X(283, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIME_SLICE_DYNAMIC_LOW_DEPRECATED2, "Time Slice: Dynamic time slice low, allocating zero time slice to dynamic drivers", 0)\ +X(284, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIME_SLICE_UPDATE_SUCCESS_DEPRECATED2, "Time Slice: Updated Successfully", 0)\ +X(285, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE_DEPRECATED2, "Driver ID %u fw state transition request: from %u to %u (0-offline 1-ready 2-active 3-graceful_offloading 4-forced_offloading 5-cooldown). 
Status %u (1-ok 0-fail)", 4) \ +X(286, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]", 1) \ +X(287, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CANCEL_WORK_CMD_DROPPED, "Dropped cancel request with intjobref from 0x%08x to 0x%08x, DM = %u, FWCtx = 0x%08.8x", 4) \ +X(288, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ZERO_LM, "Zeroing local memory after context storing FWCtx = 0x%08.8x", 1) \ +X(289, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FAILED_IRQ_CLEAR, "Host failed to clear IRQ DriverID: %u Reg: 0x%08x",2) \ +X(290, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_ABORTED, "Kill DM%u aborted", 1) \ +X(291, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SOC_CLOCK_SPEED_CHANGE, "SOC clock set to %u Hz", 1) \ \ -X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \ +X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %u", 2) \ X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \ -X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \ +X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %u, SBIrq = 0x%x", 3) \ X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \ X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \ -X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \ -X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \ +X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%u", 1) \ +X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %u", 2) \ X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \ X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \ -X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \ -X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \ -X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \ +X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug Driver ID = %u, DM = %u, item = 0x%x", 3) \ +X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %u, OSid = %u", 3) \ +X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %u, OSid = %u", 3) \ X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \ X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \ -X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \ +X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %u OS ID = %u PID = %u context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %u PID = %u.", 7) \ X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \ X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \ -X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \ -X( 19, RGXFW_GROUP_MTS, 
RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \ +X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task Driver ID = %u", 1) \ +X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, Driver ID=%u", 3) \ X( 20, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \ -X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u", 2) \ +X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with DriverID=%u from MTS, OSid for test=%u", 2) \ \ X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \ -X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \ -X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \ -X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \ +X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %u, WriteOffset = %u", 3) \ +X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%u, received cleanup request", 2) \ +X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %u", 3) \ X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \ X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \ X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \ X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \ -X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \ -X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \ -X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \ -X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \ -X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \ +X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %u, executed = %u", 3) \ +X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %u, executed = %u", 4) \ +X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%u, received cleanup request", 2) \ +X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %u", 3) \ +X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %u, executed = %u", 4) \ X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \ X( 15, 
RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \ X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \ -X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \ -X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \ +X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %u, executed = %u", 3) \ +X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %u", 3) \ \ X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \ X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ @@ -406,116 +458,136 @@ X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume fr X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \ X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \ X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \ -X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \ +X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_REQUESTS_DM_DEPRECATED, "Higher priority context requests DM %u, old prio:%d, new prio:%d", 3) \ X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \ -X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \ -X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \ +X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%u state: 0x%08.8x", 2) \ +X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%u state: 0x%08.8x", 2) \ X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \ X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \ X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \ X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \ -X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \ +X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %u", 1) \ X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. 
Skip resume.", 0) \ X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \ -X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \ +X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%u->%u)", 2) \ X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \ -X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \ +X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %u)", 1) \ X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \ -X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ +X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_REQUESTS_DM_DEPRECATED2, "Higher priority context requests DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \ X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \ X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \ -X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \ +X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_REQUESTS_DM, "Higher priority context requests DM %u. Prios (Driver ID, Driver ID Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \ X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \ X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \ -X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \ -X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \ -X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \ +X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %u (1=IPP_TILE, 2=ISP_TILE)", 1) \ +X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%u state: 0x%08.8x%08x", 3) \ +X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%u state: 0x%08.8x%08x", 3) \ X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \ X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! 
IPP_status: 0x%08x", 1) \ -X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \ +X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%u state: 0x%08.8x%08x", 3) \ X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \ -X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \ -X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \ -X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \ +X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%u active core mask 0x%04.4x", 2) \ +X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%u active core mask 0x%04.4x", 2) \ +X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %u, pipe%u state: 0x%08.8x%08x%08x", 5) \ X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \ X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \ X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \ X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \ \ -X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \ +X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%u secure=%u", 3) \ X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \ -X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %d", 1) \ -X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %d refcount now %d", 2) \ -X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %d refcount now %d", 2) \ -X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ -X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \ -X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \ -X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \ -X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x", 3) \ -X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ +X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %u", 1) \ +X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %u refcount now %u", 2) \ +X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB_DEPRECATED, "Ungrab reg set %u refcount now %u", 2) \ +X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%u BIFreq=%u, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ +X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%u, for BIFreq=%u", 2) \ +X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %u base 0x%08x%08x len 0x%08x%08x enable %u stride %u --> 0x%08x%08x", 9) \ +X( 9, RGXFW_GROUP_BIF, 
RGXFW_SF_BIF_OSID0, "Wrote the Value %u to OSID0, Cat Base %u, Register's contents are now 0x%08x 0x%08x", 4) \ +X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %u to OSID1, Context %u, Register's contents are now 0x%04x", 3) \ +X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx_DEPRECATED, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \ X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \ -X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \ -X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ +X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%u secure=%u", 3) \ +X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%u DM=%u, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \ -X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \ +X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%u, for DM=%u", 2) \ X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \ -X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d", 6) \ -X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %d as register range [%u - %u]", 3) \ +X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%u DM=%u, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%u", 6) \ +X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %u as register range [%u - %u]", 3) \ +X( 21, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32CoreID = %u, ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 8) \ +X( 22, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_SECURE, "Setup secure register=%u DM=%u, PC address=0x%08x%08x, OSid=%u, NewContext=%u", 6) \ +X( 23, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM_DEPRECATED, "Activate MemCtx=0x%08x DM=%u secure=%u CtxFlags=0x%08x", 4) \ +X( 24, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE_DEPRECATED, "Deactivate MemCtx=0x%08x CtxFlags=0x%08x", 2) \ +X( 25, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE_AND_UNGRAB_PCSET, "Deactivate MemCtx=0x%08x, ungrab reg set %u refcount now %u", 3) \ \ X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \ X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \ X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \ X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \ -X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \ -X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \ -X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \ +X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%u (0=OK, 1=Disabled)", 1) \ +X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%u byte(s))", 2) \ +X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, 
"GPIO_AP: Write address=0x%02x (%u byte(s))", 2) \ X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \ -X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \ +X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%u (0=OK, 1=Disabled)", 1) \ X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \ -X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \ -X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \ -X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \ -X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \ -X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \ -X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \ -X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \ -X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \ -X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \ -X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \ -X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \ -X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_DEPRECATED, "TRP state: %d", 1) \ -X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \ -X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \ -X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE_DEPRECATED, "SW TRP failure: %d", 1) \ -X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \ -X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ -X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 
0x%08x", 6) \ -X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ -X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ -X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ -X( 32, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_HWRTDATA, "TRP HWRTData: 0x%08x, state: %d", 2) \ -X( 33, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_CNTX, "TRP Context: 0x%08x, state: %d", 2) \ -X( 34, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE_CNTX, "TRP Context: 0x%08x, failure: %d", 2) \ +X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %u available returned %u", 2) \ +X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %u", 1) \ +X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %u (after %u ticks)", 2) \ +X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %u returned %u (0=No skip, 1=Skip frame)", 2) \ +X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %u in frame", 1) \ +X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %u is a new frame", 1) \ +X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %u ticks)", 1) \ +X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %u", 1) \ +X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %u)", 1) \ +X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %u)", 1) \ +X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %u)", 1) \ +X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_DEPRECATED2, "TRP state: %u", 1) \ +X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE_DEPRECATED, "TRP failure: %u", 1) \ +X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE_DEPRECATED, "SW TRP State: %u", 1) \ +X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE_DEPRECATED2, "SW TRP failure: %u", 1) \ +X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK_DEPRECATED, "HW kick event (%u)", 1) \ +X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS_DEPRECATED, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ +X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS_DEPRECATED, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 
0x%08x", 6) \ +X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG_DEPRECATED, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK_DEPRECATED, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ +X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK_DEPRECATED, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 32, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_HWRTDATA_DEPRECATED, "TRP HWRTData: 0x%08x, state: %u", 2) \ +X( 33, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_CNTX_DEPRECATED, "TRP Context: 0x%08x, state: %u", 2) \ +X( 34, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE_CNTX_DEPRECATED, "TRP Context: 0x%08x, failure: %u", 2) \ +X( 35, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DEPRECATED, "Memory dump: Addr=0x%02x%08x, Size=%d, ContextId=%u, DM=%u", 5) \ +X( 36, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS4_DEPRECATED, " 0x%02x%08x %08x %08x %08x %08x", 6) \ +X( 37, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS3_DEPRECATED, " 0x%02x%08x %08x %08x %08x", 5) \ +X( 38, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS2_DEPRECATED, " 0x%02x%08x %08x %08x", 4) \ +X( 39, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS1_DEPRECATED, " 0x%02x%08x %08x", 3) \ +X( 40, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DEPRECATED, "Memory dump: Addr=0x%08x, Size=%d", 2) \ +X( 41, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS4_DEPRECATED, " 0x%08x %08x %08x %08x %08x", 5) \ +X( 42, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS3_DEPRECATED, " 0x%08x %08x %08x %08x", 4) \ +X( 43, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS2_DEPRECATED, " 0x%08x %08x %08x", 3) \ +X( 44, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS1_DEPRECATED, " 0x%08x %08x", 2) \ +X( 45, RGXFW_GROUP_MISC, RGXFW_SF_MISC_FBCDC_FAILURE_STATUS_DEPRECATED, "FBCDC: Core=0x%08x, Status=0x%08x, Signature status=0x%08x", 3) \ +X( 46, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WORK_CYCLES_PIPEDM_EN, "FWCtx 0x%08.8x, PipeDM state %04x, (start) %08x, (end) %08x, elapsed %08x", 5) \ +X( 47, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WORK_CYCLES, "FWCtx 0x%08.8x, elapsed %08x", 2) \ +X( 48, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_UNEXPECTED_EVENT_DEPRECATED, "Unexpected TRP pass completion on DM: %u",1) \ +X( 49, RGXFW_GROUP_MISC, RGXFW_SF_MISC_FAILED_IRQ_CLEAR_DEPRECATED, "Host failed to clear IRQ, Reg: 0x%08x",1) \ \ -X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ -X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \ +X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%u SP = %u, MLIST%u SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ +X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %u, finished: %u on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%u, local:%u, mmu:%u", 8) \ X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \ X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \ -X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \ -X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\ +X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %u, operation(0-unpause, 1-pause): %u", 2) \ +X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %u", 2)\ X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \ -X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d", 7) \ +X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %u, finished: %u on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%u, local:%u", 7) \ X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ @@ -526,24 +598,25 @@ X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base addr X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \ X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ -X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \ +X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %u, finished: %u (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \ X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \ X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \ -X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %d)", 1) \ -X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %d)", 1) \ -X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %d)", 1) \ -X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %d)", 1) \ +X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %u)", 1) \ +X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %u)", 1) \ +X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %u)", 1) \ +X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %u)", 1) \ +X( 34, RGXFW_GROUP_PM, RGXFW_SF_PM_REVERT_CONFIG, "PM reverting to previous config (Core %u)", 1) \ \ X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \ X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. 
Aborting the current frame.", 0) \ X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \ -X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \ +X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %u, grow size %u", 3) \ X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \ X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \ X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \ -X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \ +X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %u) at 0x%08x from current size %u to new size %u, RPM restart: %u (1=Yes)", 5) \ X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \ X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \ X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \ @@ -559,39 +632,39 @@ X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) \ X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \ X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \ -X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \ +X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %u, global: %u, mmu: %u", 4) \ X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \ X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \ -X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ +X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: TotalPMPages = %u, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \ -X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) \ -X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \ -X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ -X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ -X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ -X( 13, 
RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ +X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%u: Load=No, Store=No", 2) \ +X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%u: Load=Yes, Store=No", 2) \ +X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%u: Load=Yes, Store=Yes", 3) \ +X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%u: Load=Yes, Store=Yes", 3) \ +X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %u (0:MidTA,1:3D) on context %u, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ +X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: TotalPMPages = %u, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \ X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \ -X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \ X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \ -X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \ +X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %u (0:local,1:global,2:mmu) on HW context %u", 3) \ X( 21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \ X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \ -X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \ -X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \ +X( 23, RGXFW_GROUP_RTD, 
RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%u", 3) \ +X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %u)", 3) \ X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \ X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \ X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \ X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \ -X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \ -X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \ +X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %u, global: %u", 3) \ +X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %u, (MemCtx 0x%08x)", 5) \ X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \ -X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \ -X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \ +X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %u, (MemCtx 0x%08x)", 4) \ +X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u)", 6) \ X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \ X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \ X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \ @@ -606,7 +679,7 @@ X( 4, RGXFW_GROUP_SPM, 
RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \ X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \ X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \ -X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ +X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%u, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \ X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \ X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \ @@ -638,49 +711,50 @@ X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \ X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \ X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \ -X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag", 0) \ +X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDetected flag", 0) \ X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \ X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \ X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \ X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \ X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \ -X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \ +X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%u) Buffer provided", 1) \ X( 47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \ X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \ X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \ X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \ X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. 
Previous request still pending (ID=0x%08x)", 1) \ -X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \ +X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %u type to be backed (ID=0x%08x)", 2) \ X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \ X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \ -X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \ +X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%u runlist head from Context 0x%08x to 0x%08x", 3) \ X( 69, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \ +X( 70, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_FAILED, "Failure to back PR Buffer", 0) \ \ -X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ +X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%u int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \ -X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \ -X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ +X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %u), DM%u, pow flags: 0x%x", 3) \ +X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %u %u %u %u", 4) \ +X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %u, Any RD-DM Active? %u", 2) \ X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ -X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \ -X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \ +X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %u, Units: 0x%08.8x", 2) \ +X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %u (Power flags=%u)", 2) \ +X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %u to %u", 2) \ X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \ X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \ X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \ X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \ -X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? 
%d", 2) \ -X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \ +X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %u, Any RD-DM Active? %u", 2) \ +X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %u, TLA-DM Active? %u", 2) \ X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \ X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \ X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \ X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \ X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz", 2) \ -X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \ +X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %ums. Core clock: %u Hz", 2) \ +X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %u dusts powered.", 2) \ X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \ X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \ X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \ @@ -699,11 +773,11 @@ X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proacti X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \ X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \ X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \ -X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \ -X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \ +X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %u, Ticks: %u", 2) \ +X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %u due to BRN59042.", 1) \ X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. 
Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \ -X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ +X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Custom/Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \ X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \ X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. Buffer too small.", 1) \ @@ -712,48 +786,48 @@ X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discardi X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \ -X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ +X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %u (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \ X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \ -X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \ +X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %u), pow flags: 0x%x", 2) \ X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \ X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \ X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \ X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \ X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! 
Changing to 1", 1) \ X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \ -X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \ +X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "Power counters in raw/validation mode.", 0) \ X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \ -X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \ -X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \ +X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%u last request so retrying.", 1) \ +X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %u, Units: 0x%08x%08x", 3) \ X( 70, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \ X( 71, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \ -X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %d, RAC Active? %d", 2) \ +X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %u, RAC Active? %u", 2) \ X( 73, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \ X( 74, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEFER_REACTIVE_UPDATE, "Proactive DVFS: Defer reactive update to meet next deadline 0x%08x%08x", 2) \ \ -X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ -X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ +X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%u, FWCtx: 0x%08.8x", 2) \ +X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%u, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \ X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \ -X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \ -X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ -X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \ -X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \ -X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ -X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ -X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \ -X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \ +X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%u FWCtx: 0x%08.8x", 2) \ +X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%u->%u), 
PER-DM(0x%08x->0x%08x)", 4) \ +X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%u->%u), PER-DM(0x%08x)", 3) \ +X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%u), PER-DM(0x%08x->0x%08x)", 3) \ +X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \ +X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \ +X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%u->%u), PER-DM(0x%08x)", 3) \ +X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%u->%u), PER-DM(0x%08x)", 4) \ X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \ X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \ -X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \ -X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \ -X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \ -X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \ -X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \ -X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \ -X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \ -X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \ +X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %u), FWCtx 0x%08x @ %u", 6) \ +X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %u, RD powered off: %u", 2) \ +X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %u, global (0x%08.8x): %u", 5) \ +X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %u - local (0x%08.8x): s%u?=c%u, global (0x%08.8x): s%u?=c%u", 8) \ +X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%u", 2) \ +X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %u, RTD 0x%08x.", 3) \ +X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%u, extmem: %u)", 2) \ +X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %u bytes)", 4) \ X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \ X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. 
New R-Flags=0x%08x", 5) \ X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \ @@ -766,13 +840,13 @@ X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up" X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \ X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \ X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \ -X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \ +X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT_DEPRECATED, "DM%u timed out", 1) \ X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \ X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \ X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \ X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \ X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \ -X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \ +X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%u, poll failures: 0x%08x)", 2) \ X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \ X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \ X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).", 1) \ @@ -780,55 +854,56 @@ X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detect X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \ X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \ X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \ -X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \ +X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%u", 3) \ X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \ X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \ -X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \ -X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \ -X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \ -X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \ +X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%u)", 2) \ +X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%u complete", 1) \ +X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%u) 
type: %u (0:local,1:global,2:mmu) on HW context %u", 4) \ +X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%u failed", 1) \ X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \ X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \ -X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ -X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \ -X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \ -X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \ +X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%u's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ +X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%u)", 1) \ +X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%u)", 1) \ +X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for Driver ID %u due to pending freelist reconstruction", 2) \ X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \ -X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \ +X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global) on HW context %u", 4) \ X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \ X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \ -X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \ -X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \ -X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \ +X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%u)", 2) \ +X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %u bytes)", 4) \ +X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global) phase: %u (0:TA, 1:3D) on HW context %u", 5) \ X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \ -X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \ -X( 73, 
RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \ +X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%u)", 1) \ +X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %u ticks and deadline is %u ticks", 3) \ X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \ -X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \ +X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%u) Driver ID: %u type: %u (0:local,1:global) phase: %u (0:TA, 1:3D) on HW context %u", 6) \ X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \ X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \ -X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \ +X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %u RGX_CR_EVENT_STATUS=0x%08x", 2) \ X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \ -X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \ -X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \ -X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \ -X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \ +X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %u MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \ +X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %u MMU Status: 0x%08x%08x)", 3) \ +X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %u MMU Status: 0x%08x%08x 0x%08x)", 4) \ +X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%u of %u, loop:%u, poll failures: 0x%08x)", 4) \ X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ -X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \ +X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%u", 4) \ X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \ X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \ -X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \ -X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %d with value 0x%08x", 2) \ -X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \ -X( 92, 
RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \ -X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \ -X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \ -X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \ +X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %u with value 0x%08x", 2) \ +X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %u with value 0x%08x", 2) \ +X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %u with value 0x%08x", 2) \ +X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %u with value 0x%08x", 2) \ +X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %u with value 0x%08x", 2) \ +X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %u with value 0x%08x", 2) \ +X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%u_PFS poll failed on core %u with value 0x%08x", 3) \ X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ X( 97, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \ +X( 98, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out (phase count=0x%08x)", 2) \ \ X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \ X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \ @@ -848,20 +923,20 @@ X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block fr X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \ X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \ X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \ -X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \ -X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded", 2) \ -X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \ +X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %u", 1) \ +X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %u is not allowed. Use only blocks lower than %u. 
The package will be discarded", 2) \ +X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %u counters IDs while the upper limit is %u", 2) \ X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \ X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \ -X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \ +X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%u)", 1) \ X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ -X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ -X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \ +X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %u (Roff = %u, Woff = %u)", 3) \ +X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %u", 1) \ X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \ X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \ X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \ X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \ -X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \ +X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %u", 1) \ X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \ X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \ X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \ @@ -875,7 +950,7 @@ X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \ X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \ X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \ -X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ +X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED_DEPRECATED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ X( 9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \ \ X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \ @@ -904,24 +979,74 @@ X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \ X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \ X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \ \ +X( 1, RGXFW_GROUP_VZ, RGXFW_SF_VZ_OS_STATE_CHANGE, "[Host time %09u%09u%09u] Driver ID %u fw state transition request: from %u to %u (0-offline 1-ready 2-active 3-graceful_offloading 4-forced_offloading 5-cooldown). 
Status %u (1-ok 0-fail)", 7) \ +X( 2, RGXFW_GROUP_VZ, RGXFW_SF_VZ_OS_MTS_PERMISSION_CHANGED, "[Host time %09u%09u%09u] Updated permission for Driver ID %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 5) \ +X( 3, RGXFW_GROUP_VZ, RGXFW_SF_VZ_OS_INIT_CCBS, "[Host time %09u%09u%09u] Driver ID %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 9) \ +X( 4, RGXFW_GROUP_VZ, RGXFW_SF_VZ_OS_INIT_CONFIG, "[Host time %09u%09u%09u] Initialised OS %u with config flags 0x%08x", 5) \ +X( 5, RGXFW_GROUP_VZ, RGXFW_SF_VZ_TIME_SLICE_MAX, "[Host time %09u%09u%09u] Time Slice: Update Failed, Max total allocations exceeded 100pc", 3)\ +X( 6, RGXFW_GROUP_VZ, RGXFW_SF_VZ_TIME_SLICE_DYNAMIC_LOW, "[Host time %09u%09u%09u] Time Slice: Dynamic time slice low, allocating zero time slice to dynamic drivers", 3)\ +X( 7, RGXFW_GROUP_VZ, RGXFW_SF_VZ_TIME_SLICE_UPDATE_SUCCESS, "[Host time %09u%09u%09u] Time Slice: Updated Successfully", 3)\ +X( 8, RGXFW_GROUP_VZ, RGXFW_SF_VZ_OS_ISOLATION_GROUP_CHANGE, "[Host time %09u%09u%09u] Changing Driver ID %u's isolation group from %u to %u", 6) \ +X( 9, RGXFW_GROUP_VZ, RGXFW_SF_VZ_OS_PRIORITY_CHANGE, "[Host time %09u%09u%09u] Changing Driver ID %u's priority from %u to %u", 6) \ +X( 10, RGXFW_GROUP_VZ, RGXFW_SF_VZ_WDG_TRIGGER, "[Host time %09u%09u%09u] Driver ID %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 5) \ +\ +\ +X( 1, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_FBCDC_FAILURE_STATUS, "FBCDC: Core=0x%08x, Status=0x%08x, Signature status=0x%08x", 3) \ +X( 2, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_FBCDC_FAILURE_DETECTED, "FBCDC signature failure detected so block scheduling more work", 0) \ +X( 3, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_FBCDC_FAILURE_CLEARED, "FBCDC signature cleared which unlocks scheduling more work", 0) \ +X( 4, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_TRP_FAILURE, "TRP failure: %u", 1) \ +X( 5, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_SW_TRP_STATE, "SW TRP State: %u", 1) \ +X( 6, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_WGP_HW_KICK, "HW kick event (%u)", 1) \ +X( 7, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ +X( 8, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 
0x%08x", 6) \ +X( 9, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 10, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ +X( 11, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 12, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_TRP_STATE_HWRTDATA_DEPRECATED, "TRP HWRTData: 0x%08x, state: %u", 2) \ +X( 13, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_TRP_STATE_CNTX_DEPRECATED, "TRP Context: 0x%08x, state: %u", 2) \ +X( 14, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_TRP_FAILURE_CNTX, "TRP Context: 0x%08x, failure: %u", 2) \ +X( 15, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_TRP_UNEXPECTED_EVENT_DEPRECATED, "Unexpected TRP pass completion on DM: %u",1) \ +X( 16, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_ICS_ENABLED, "ICS enabled for DM: %u",1) \ +X( 17, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_ICS_INTERVAL,"ICS: interval: %u cycles, threshold: %u cycles",2) \ +X( 18, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_ICS_FAULT,"ICS fault detected, GPU cores mask 0x%08x",1) \ +X( 19, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_ICS_MODE,"Set ICS mode to: %u ",1) \ +X( 20, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_ICS_TESTS_COMPLETION_FAILURE,"ICS tests not completed in mode %u",1) \ +X( 21, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_ICS_TESTS_START_FAILURE,"ICS tests failed to start",0) \ +X( 22, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_SELF_TEST_START,"Start Safety self-test",0) \ +X( 23, RGXFW_GROUP_SAFETY, RGXFW_SF_SAFETY_SELF_TEST_END,"Safety self-test completed",0) \ +\ +X( 1, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 2, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ +X( 3, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 4, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_GPU, "Memory dump: Addr=0x%02x%08x, Size=%d, ContextId=%u, DM=%u", 5) \ +X( 5, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_GPU_DWORDS4, " 0x%02x%08x %08x %08x %08x %08x", 6) \ +X( 6, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_GPU_DWORDS3, " 0x%02x%08x %08x %08x %08x", 5) \ +X( 7, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_GPU_DWORDS2, " 0x%02x%08x %08x %08x", 4) \ +X( 8, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_GPU_DWORDS1, " 0x%02x%08x %08x", 3) \ +X( 9, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_FW, "Memory dump: Addr=0x%08x, Size=%d", 2) \ +X( 0, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_FW_DWORDS4, " 0x%08x %08x %08x %08x %08x", 5) \ +X( 11, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_FW_DWORDS3, " 0x%08x %08x %08x %08x", 4) \ +X( 12, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_FW_DWORDS2, " 0x%08x %08x %08x", 3) \ +X( 13, RGXFW_GROUP_VERBOSE, RGXFW_SF_VERBOSE_HEXDUMP_FW_DWORDS1, " 0x%08x %08x", 2) \ +\ X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15) /* The symbolic names found in the table above are assigned an ui32 value of * the following format: - * 31 30 28 27 20 19 16 15 12 11 0 bits - * - --- ---- ---- ---- ---- ---- ---- ---- - * 0-11: id number - * 12-15: group id number - * 16-19: number of parameters - * 20-27: unused - * 28-30: active: 
identify SF packet, otherwise regular int32 - * 31: reserved for signed/unsigned compatibility + * 31 30 28 26 20 19 16 15 12 11 10 0 bits + * - --- --- ---- ---- ---- - --- ---- ---- + * 0-10: id number + * 12-15,11: group id number (bits 0-3 and bit 4) + * 16-19: number of parameters + * 20-26: unused + * 28-30: active: identify SF packet, otherwise regular int32 + * 31: reserved for signed/unsigned compatibility * * The following macro assigns those values to the enum generated SF ids list. */ #define RGXFW_LOG_IDMARKER (0x70000000U) -#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER +#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | (((IMG_UINT32)(b)&0x10U)<<7U) | (((IMG_UINT32)(b)&0xFU)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER #define RGXFW_LOG_IDMASK (0xFFF00000U) #define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER) @@ -932,8 +1057,10 @@ typedef enum { #undef X } RGXFW_LOG_SFids; -/* Return the group id that the given (enum generated) id belongs to */ -#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU) +/* Return the group id number that the given (enum generated) id belongs to */ +#define RGXFW_SF_GID(x) ((((IMG_UINT32)(x)>>12) & 0xfU)|(((IMG_UINT32)(x)>>7) & 0x10U)) +/* Return the id number that the given (enum generated) id belongs to */ +#define RGXFW_SF_ID(x) ((IMG_UINT32)(x) & 0x7ffU) /* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ #define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU) diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_heap_firmware.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_heap_firmware.h index f02deeb1ea92..2628cd82e38e 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_heap_firmware.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_heap_firmware.h @@ -114,13 +114,22 @@ static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */ #define RGX_FIRMWARE_MAX_PAGETABLE_SIZE (1 * 1024 * 1024) +#define RGX_FW_CONFIG_HEAP_SIZE (1 << RGX_FW_HEAP_SHIFT) +#define RGX_FW_MAX_HEAP_SIZE (1 << 28) /* - * The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and - * the minimum is 4MiB (1<<22); the default firmware heap size is set to - * maximum 32MiB. + * The maximum configurable size via RGX_FW_HEAP_SHIFT is 256MiB (1<<28) and + * the minimum is 4MiB (1<<22); the firmware heap size is dependent + * on the number of drivers supported. 
*/ -#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25) -#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]" +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + #if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || (RGX_FW_CONFIG_HEAP_SIZE > (RGX_FW_MAX_HEAP_SIZE / RGX_NUM_DRIVERS_SUPPORTED))) + #error "RGX_FW_HEAP_SHIFT is outside valid range" + #endif +#else + #if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 28) + #error "RGX_FW_HEAP_SHIFT is outside valid range [22,28]" + #endif #endif + #endif /* RGX_HEAP_FIRMWARE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_hwperf_common.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_hwperf_common.h index e421edcdd825..070bfede23d9 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_hwperf_common.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_hwperf_common.h @@ -59,8 +59,10 @@ extern "C" { #include "img_types.h" #include "img_defs.h" +#include "rgx_common.h" #include "rgx_common_asserts.h" #include "pvrsrv_tlcommon.h" +#include "pvrsrv_sync_km.h" /****************************************************************************** @@ -144,6 +146,7 @@ typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE; #define RGX_HWPERF_HW_NULLKICK 0x2AU /*!< NULL event */ #define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU +#define RGX_HWPERF_TRACE_EVENT_GPU_WORK_PERIOD 0x2BU /*! context switch types 0x30..0x31 */ #define RGX_HWPERF_CSW_START 0x30U /*!< HW context store started */ @@ -216,6 +219,29 @@ typedef enum { */ static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); +/*! Define containing bit position for 32bit feature flags used in hwperf and api */ +typedef IMG_UINT32 RGX_HWPERF_FEATURE_FLAGS; +#define RGX_HWPERF_FEATURE_PERFBUS_FLAG 0x00000001U +#define RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG 0x00000002U +#define RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG 0x00000004U +#define RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG 0x00000008U +#define RGX_HWPERF_FEATURE_ROGUEXE_FLAG 0x00000010U +#define RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG 0x00000020U +#define RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG 0x00000040U +#define RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION 0x00000080U +#define RGX_HWPERF_FEATURE_MULTICORE_FLAG 0x00000100U +#define RGX_HWPERF_FEATURE_RAYTRACING_FLAG 0x00000200U +#define RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG 0x00000400U +#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG 0x00000800U +#define RGX_HWPERF_FEATURE_ROGUE_FLAG 0x00001000U +#define RGX_HWPERF_FEATURE_RESERVED1_FLAG 0x00002000U +#define RGX_HWPERF_FEATURE_CXT_XTP_TOP_INFRASTRUCTURE_FLAG 0x00004000U +#define RGX_HWPERF_FEATURE_AX_TOP_INFRASTRUCTURE_FLAG 0x00008000U +#define RGX_HWPERF_FEATURE_BX_TOP_INFRASTRUCTURE_FLAG 0x00010000U +#define RGX_HWPERF_FEATURE_DX_TOP_INFRASTRUCTURE_FLAG 0x00020000U + +/* ! Define for RGX_HWPERF_DM type. The values are architecture specific */ +typedef IMG_UINT32 RGX_HWPERF_DM; /****************************************************************************** * Packet Header Format Version 2 Types @@ -297,7 +323,8 @@ RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); #define RGX_HWPERF_TYPEID_STREAM_MASK 0x00070000U #define RGX_HWPERF_TYPEID_META_DMA_MASK 0x00080000U #define RGX_HWPERF_TYPEID_M_CORE_MASK 0x00100000U -#define RGX_HWPERF_TYPEID_OSID_MASK 0x07000000U +#define RGX_HWPERF_TYPEID_PIPEDM_MASK 0x00200000U +#define RGX_HWPERF_TYPEID_OSID_MASK 0x1f000000U /*! 
Meta thread macros for encoding the ID into the type field of a packet */ #define RGX_HWPERF_META_THREAD_SHIFT 15U @@ -311,10 +338,12 @@ RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); #define RGX_HWPERF_META_DMA_SHIFT 19U /*! Bit-shift macro used for encoding multi-core data into the type field of a packet */ #define RGX_HWPERF_M_CORE_SHIFT 20U +/*! Bit-shift macro used for encoding Pipeline DM data into the type field of a packet */ +#define RGX_HWPERF_PIPEDM_SHIFT 21U /*! OSID bit-shift macro used for encoding OSID into type field of a packet */ #define RGX_HWPERF_OSID_SHIFT 24U -/*! Origin or source of the event */ +/*! HWPerf Stream ID type definition. Maximum of 32bits. */ typedef IMG_UINT32 RGX_HWPERF_STREAM_ID; /*! Events from the Firmware/GPU */ #define RGX_HWPERF_STREAM_ID0_FW 0U @@ -337,8 +366,14 @@ static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_ # else #define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ # endif +# if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION) && (RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION > 0) +#define RGX_HWPERF_PIPEDM_VALUE 1U /*!< 1 => Pipeline DM supported */ +# else +#define RGX_HWPERF_PIPEDM_VALUE 0U /*!< 0 => Pipeline DM not supported */ +# endif #else #define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ +#define RGX_HWPERF_PIPEDM_VALUE 0U /*!< 0 => Pipeline DM not supported */ #endif /*! Macros used to set the packet type and encode meta thread ID (0|1), @@ -349,7 +384,8 @@ static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_ (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)) | \ - (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT)))) + (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT)) | \ + (RGX_HWPERF_TYPEID_PIPEDM_MASK & ((IMG_UINT32)(RGX_HWPERF_PIPEDM_VALUE) << RGX_HWPERF_PIPEDM_SHIFT)))) /*! Obtains the event type that generated the packet */ #define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) @@ -360,6 +396,9 @@ static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_ /*! Determines if the packet generated contains multi-core data */ #define RGX_HWPERF_GET_M_CORE(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_M_CORE_MASK) >> RGX_HWPERF_M_CORE_SHIFT) +/*! Determines if the packet generated contains multi-core data */ +#define RGX_HWPERF_GET_PIPEDM(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_PIPEDM_MASK) >> RGX_HWPERF_PIPEDM_SHIFT) + /*! Obtains the guest OSID which resulted in packet generation */ #define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) @@ -384,17 +423,13 @@ static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_ * Other Common Defines *****************************************************************************/ -/*! This macro is not a real array size, but indicates the array has a variable - * length only known at run-time but always contains at least 1 element. The - * final size of the array is deduced from the size field of a packet header. - */ -#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U - -/*! 
This macro is not a real array size, but indicates the array is optional - * and if present has a variable length only known at run-time. The final - * size of the array is deduced from the size field of a packet header. */ -#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U - +/*! Used to align structs using a flexible array member as the definition of the member + * size can change between C standards */ +#if (IMG_FLEX_ARRAY_MEMBER + 0) +#define RGX_HWPERF_FLEX_ARRAY_ALIGN(_align_var) _align_var +#else +#define RGX_HWPERF_FLEX_ARRAY_ALIGN(...) +#endif /*! Masks for use with the IMG_UINT32 ui32BlkInfo field */ #define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U @@ -442,13 +477,20 @@ static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_ } /*! Masks for use with the IMG_UINT32 ui32KickInfo field */ -#define RGX_HWPERF_KICKINFO_KICKID_MASK 0x000000FFU +#define RGX_HWPERF_KICKINFO_STARTBE_MASK 0xFFFFFF00U +#define RGX_HWPERF_KICKINFO_KICKID_MASK 0x000000FFU /*! Shift for the Kick ID field in ui32KickInfo */ -#define RGX_HWPERF_KICKINFO_KICKID_SHIFT 0U +#define RGX_HWPERF_KICKINFO_STARTBE_SHIFT 8U +#define RGX_HWPERF_KICKINFO_KICKID_SHIFT 0U /*! Macro used to set the kick info field. */ -#define RGX_HWPERF_MAKE_KICKINFO(_kickid) ((IMG_UINT32) (RGX_HWPERF_KICKINFO_KICKID_MASK&((_kickid) << RGX_HWPERF_KICKINFO_KICKID_SHIFT))) +#define RGX_HWPERF_MAKE_KICKINFO(_startbe, _kickid) \ + ((IMG_UINT32) (RGX_HWPERF_KICKINFO_STARTBE_MASK&((_startbe) << RGX_HWPERF_KICKINFO_STARTBE_SHIFT))| \ + (RGX_HWPERF_KICKINFO_KICKID_MASK&((_kickid) << RGX_HWPERF_KICKINFO_KICKID_SHIFT))) + +/*! Macro used to obtain the lowest 24 bits of START_BE if present in the packet */ +#define RGX_HWPERF_GET_STARTBE(_kickinfo) (((_kickinfo) & RGX_HWPERF_KICKINFO_STARTBE_MASK) >> RGX_HWPERF_KICKINFO_STARTBE_SHIFT) /*! Macro used to obtain the Kick ID if present in the packet */ #define RGX_HWPERF_GET_KICKID(_kickinfo) (((_kickinfo) & RGX_HWPERF_KICKINFO_KICKID_MASK) >> RGX_HWPERF_KICKINFO_KICKID_SHIFT) @@ -475,6 +517,1123 @@ static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_ (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) +/*! This structure holds the data of a firmware packet. */ +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ + IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ + IMG_UINT32 ui32Padding; /*!< Reserved */ +} RGX_HWPERF_FW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); + + +/*! This structure holds the data of a gpu work period packet. */ +typedef struct +{ + IMG_UINT64 ui64GPUWorkPeriodStartTime; + IMG_UINT32 ui32UID; /*!< Process identifier */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the beginning of gpu work period */ + IMG_UINT32 ui32StartTimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ + IMG_UINT32 ui32Padding2; /*!< Reserved. To ensure correct alignment (not written in the packet) */ +} RGX_HWPERF_GPU_WORK_PERIOD_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_WORK_PERIOD_DATA); + +/*! This structure holds the data of a hardware packet, including counters. 
*/ +typedef struct +{ + union + { + IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ + IMG_UINT32 ui32KickStartTime; /*!< Front End start time for Pipeline DMs */ + }; + IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ + IMG_UINT32 ui32PID; /*!< Process identifier */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ + IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ + IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ + IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ + IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ + IMG_UINT32 ui32CtxPriority; /*!< Context priority */ + IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ + IMG_UINT32 ui32KickInfo; /*!< <31..8> Back End start time lowest 24 bits <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ + IMG_UINT32 ui32KickEndTime; /*!< Back End finish time for Pipeline DMs */ + IMG_UINT32 aui32CountBlksStream[IMG_FLEX_ARRAY_MEMBER]; /*!< Optional variable length Counter data */ + RGX_HWPERF_FLEX_ARRAY_ALIGN(IMG_UINT32 uiReserved); /*!< Reserved. To ensure correct alignment (not written in the packet) */ +} RGX_HWPERF_HW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); +RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream); + +/*! Mask for use with the aui32CountBlksStream field when decoding the + * counter block ID and mask word. */ +#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U +#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U + +/*! MAX value used in server handling of counter config arrays */ +#define RGX_CNTBLK_COUNTERS_MAX PVRSRV_HWPERF_COUNTERS_PERBLK + +/*! Obtains the counter block ID word from an aui32CountBlksStream field. + * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit + * within group (3-0) */ +#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) + +/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words of + * a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) + +/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ +#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) + +#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) + +/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words + * of a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) + + +/*! 
Context switch packet event */ +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ + IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ + IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ + IMG_UINT32 ui32Padding; /*!< Padding to 8 DWords */ +} RGX_HWPERF_CSW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); + +/*! Enumeration of clocks supporting this event */ +typedef enum +{ + RGX_HWPERF_CLKS_CHG_INVALID = 0, + + RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, + + RGX_HWPERF_CLKS_CHG_LAST, +} RGX_HWPERF_CLKS_CHG_NAME; + +/*! This structure holds the data of a clocks change packet. */ +typedef struct +{ + IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ + RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ + IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ + IMG_UINT64 ui64OSMonoTimeStamp; /*!< OSTimeStamp sampled by the host (monotonic) */ + IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and + correlated to OSTimeStamp */ + IMG_UINT64 ui64OSSecondaryTimeStamp; /*!< OSTimeStamp sampled by the host */ +} RGX_HWPERF_CLKS_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); + +/*! Enumeration of GPU utilisation states supported by this event */ +typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; + +/*! This structure holds the data of a GPU utilisation state change packet. */ +typedef struct +{ + RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ + IMG_UINT32 uiUnused1; /*!< Padding */ + IMG_UINT32 uiUnused2; /*!< Padding */ + IMG_UINT32 uiUnused3; /*!< Padding */ +} RGX_HWPERF_GPU_STATE_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); + + +/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ +#define HWPERF_PWR_EST_V1_SIG 0x48504531 + +/*! Macros to obtain a component field from a counter ID word */ +#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) +#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28) +/*!< Obtains the GPU ID from a counter ID word */ +#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) +#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) + +#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31) +#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28) +#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U) +#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24) +#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU) +#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU) + +/*! This macro constructs a counter ID for a power estimate data stream from + * the component parts of: high word flag, unit id, GPU id, counter number */ +#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \ + ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<= RGX_BVNC_STR_SIZE_MAX), + "Space inside HWPerf packet data for BVNC string insufficient"); + +#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U) + +/*! BVNC Features */ +typedef struct +{ + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! Number of counters in this block type */ + IMG_UINT16 ui16NumCounters; + + /*! Number of blocks of this type */ + IMG_UINT16 ui16NumBlocks; + + /*! 
Reserved for future use */ + IMG_UINT16 ui16Reserved; +} RGX_HWPERF_BVNC_BLOCK; + +/*! BVNC Features */ +typedef struct +{ + IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */ + IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */ + IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */ + IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */ + RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */ +} RGX_HWPERF_BVNC; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); + +/*! Performance Counter Configuration data element. */ +typedef struct +{ + IMG_UINT32 ui32BlockID; /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */ + IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ + IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ +} RGX_HWPERF_COUNTER_CFG_DATA_EL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); + +/*! Performance Counter Configuration data. */ +typedef struct +{ + IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ + RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ + IMG_UINT32 ui32Padding; /*!< reserved */ +} RGX_HWPERF_COUNTER_CFG; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); + +/*! Sub-event's data. */ +typedef union +{ + struct + { + RGX_HWPERF_DM eDM; /*!< Data Master ID. */ + RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ + IMG_UINT32 ui32DMContext; /*!< FW render context */ + } sHWR; /*!< HWR sub-event data. */ + + RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ + struct + { + IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ + IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ + } sEvMsk; /*!< HW Filter Mask */ + RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ + + struct + { + RGX_HWPERF_DM eDM; /*!< Data Master ID. */ + IMG_UINT32 ui32DMContext; /*!< FW context */ + IMG_UINT32 ui32GPUIdMask; /*!< Multicore mask. */ + IMG_UINT32 ui32KickID; /*!< Kick Id cancelled. */ + } sKickCancel; /*!< Kick cancel sub-event data. */ +} RGX_HWPERF_FWACT_DETAIL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); + +/*! This structure holds the data of a FW activity event packet */ +typedef struct +{ + RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ + RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ + IMG_UINT32 ui32Padding; /*!< Reserved. */ +} RGX_HWPERF_FWACT_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); + +typedef struct +{ + RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for + scheduling on GPU hardware. 
+ See RGX_HWPERF_KICK_TYPE */ + IMG_UINT32 ui32PID; /*!< Client process identifier */ + IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API + to track submitted work (for debugging / + trace purposes) */ + IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted + work (for debugging / trace purposes) */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32Padding; /*!< Unused, reserved */ + IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ + IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ + IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ + IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ + PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ + PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ + PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ + + /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_ENQ_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ + IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and + stream data offset in the payload */ +#ifdef __CHECKER__ + /* Since we're not conforming to the C99 standard by not using a flexible + * array member need to add a special case for Smatch static code analyser. */ + IMG_UINT32 aui32StreamData[]; +#else + IMG_UINT32 aui32StreamData[IMG_FLEX_ARRAY_MEMBER]; /*!< Series of tuples holding UFO objects data */ + RGX_HWPERF_FLEX_ARRAY_ALIGN(IMG_UINT32 uiReserved); /*!< Reserved, align structure size to 8 bytes */ +#endif +} RGX_HWPERF_HOST_UFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! + * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been + * Allocated, Freed or Modified. The values are used to determine which event + * data structure to use to decode the data from the event stream + */ +typedef enum +{ + RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ + RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, + /*!< Timeline resource packets are + now emitted in client hwperf buffer */ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ + + RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ +} RGX_HWPERF_HOST_RESOURCE_TYPE; + +typedef union +{ + /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer + * generated in the HOST stream. Timeline data is now provided in the + * CLIENT stream instead. 
+ */ + struct + { + IMG_UINT32 uiPid; /*!< Identifier of owning process */ + IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + } sTimelineAlloc; + + /*! Data for TYPE_FENCE_PVR */ + struct + { + IMG_PID uiPID; /*!< Identifier of owning process */ + PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ + IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point + backing this fence on the GPU */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sFenceAlloc; + + /*! Data for TYPE_SYNC_CP */ + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ + PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ + IMG_PID uiPID; /*!< Identifier of owning process */ + PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sSyncCheckPointAlloc; + + /*! Data for TYPE_FENCE_SW */ + struct + { + IMG_PID uiPID; /*!< Identifier of owning process */ + PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ + PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ + IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sSWFenceAlloc; + + /*! Data for TYPE_SYNC */ + struct + { + IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sSyncAlloc; +} RGX_HWPERF_HOST_ALLOC_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; + /*!< This describes the type of the resource + allocated in the driver. See + RGX_HWPERF_HOST_RESOURCE_TYPE */ + RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; + /*!< Union of structures providing further + data regarding the resource allocated. + Size of data varies with union member that + is present, check ``ui32AllocType`` value + to decode */ +} RGX_HWPERF_HOST_ALLOC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + /*! Data for TYPE_TIMELINE (*Deprecated*) */ + struct + { + IMG_UINT32 uiPid; /*!< Identifier of owning process */ + IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + } sTimelineDestroy; + + /*! Data for TYPE_FENCE_PVR */ + struct + { + IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. */ + } sFenceDestroy; + + /*! Data for TYPE_SYNC_CP */ + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ + } sSyncCheckPointFree; + + /*! 
Data for TYPE_SYNC */ + struct + { + IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ + } sSyncFree; +} RGX_HWPERF_HOST_FREE_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; + /*!< This describes the type of the resource + freed or released by the driver. See + RGX_HWPERF_HOST_RESOURCE_TYPE */ + RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; + /*!< Union of structures providing further data + regarding the resource freed. Size of data + varies with union member that is present, + check ``ui32FreeType`` value to decode */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_FREE_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of + the time domains correlation table */ + IMG_UINT64 ui64OSMonoTimestamp; /*!< OS timestamp (monotonic) in nanoseconds from the + latest entry of the time domains correlation table */ + IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of + the time domains correlation table */ + IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ + IMG_UINT64 ui64OSSecondaryTimeStamp; /*!< OS timestamp in nanoseconds from the latest entry + of the time domains correlation table */ +} RGX_HWPERF_HOST_CLK_SYNC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + /*! Data for TYPE_FENCE_PVR */ + struct + { + IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence + resource that has been created */ + IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ + IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing + the fence on the GPU */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + } sFenceMerge; +} RGX_HWPERF_HOST_MODIFY_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; + /*!< Describes the type of the resource + modified by the driver. See + RGX_HWPERF_HOST_RESOURCE_TYPE */ + + RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; + /*!< Union of structures providing further + data regarding the resource modified. + Size of data varies with union member that + is present. + Check ``uiModifyType`` value to decode */ +} RGX_HWPERF_HOST_MODIFY_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING,/*!< Device not responding to requests */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ + + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_PCI_ERROR, /*!< PCI error detected. */ + + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; + +/*! Data for device status event */ +typedef struct +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; + /*!< Device's health status */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; + /*!< Reason for device's health status */ +} RGX_HWPERF_HOST_DEVICE_HEALTH; + +/*! RGX_HWPERF_DEV_INFO_EV values */ +typedef enum +{ + RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ + RGX_HWPERF_DEV_INFO_EV_FEATURES, /*!< Features sub-event */ + + RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ +} RGX_HWPERF_DEV_INFO_EV; + +/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing + * further data regarding the device's status + */ +typedef union +{ + RGX_HWPERF_HOST_DEVICE_HEALTH sDeviceStatus; /*!< Device health status */ + RGX_HWPERF_BVNC sBVNC; /*!< Device features */ +} RGX_HWPERF_HOST_DEV_INFO_DETAIL; + +/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ +typedef struct +{ + IMG_UINT32 ui32Padding; + /*!< Reserved. Align structure size to 8 bytes */ + RGX_HWPERF_DEV_INFO_EV eEvType; + /*!< Type of the sub-event. See + RGX_HWPERF_DEV_INFO_EV */ + RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; + /*!< Union of structures providing further data + regarding the device's status. Size of data + varies with union member that is present, + check ``eEvType`` value to decode */ +} RGX_HWPERF_HOST_DEV_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! 
RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ +typedef enum +{ + RGX_HWPERF_INFO_EV_RESERVED_0, + RGX_HWPERF_INFO_EV_MEM64_USAGE, /*!< 64-bit Memory usage event */ + RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ +} RGX_HWPERF_INFO_EV; + +/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the + * RGX_HWPERF_HOST_INFO_DATA event. + */ +typedef union +{ + /*! Host Memory usage statistics */ + struct + { + IMG_UINT64 ui64TotalMemoryUsage; /*!< Total memory usage (bytes) */ + /*! Detailed memory usage */ + struct _RGX_HWPERF_HOST_INFO_PER_PROC_USAGE_ + { + IMG_UINT32 ui32Pid; /*!< Process ID */ + IMG_UINT32 ui32Padding; /*!< Padding */ + IMG_UINT64 ui64KernelMemUsage; /*!< Kernel memory usage (bytes) */ + IMG_UINT64 ui64GraphicsMemUsage; /*!< GPU memory usage (bytes) */ + } sPerProcessUsage[IMG_FLEX_ARRAY_MEMBER]; + } sMemUsageStats; +} RGX_HWPERF_HOST_INFO_DETAIL; + +/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device + * memory usage information. + */ +typedef struct +{ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ + RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; + /*!< Union of structures providing further data + regarding memory usage. Size varies with union + member that is present, check ``eEvType`` + value to decode */ +} RGX_HWPERF_HOST_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! FENCE_WAIT_TYPE definitions */ +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; + +/*! FENCE_WAIT_RESULT definitions */ +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; + +/*! FENCE_WAIT_DETAIL Event Payload */ +typedef union +{ +/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ + struct + { + IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ + } sBegin; + + /*! Data for SYNC_FENCE_WAIT_TYPE_END */ + struct + { + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ + } sEnd; +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; + +/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure + * is received whenever the host driver handles a wait for sync event request. + */ +typedef struct +{ + IMG_PID uiPID; /*!< Identifier of the owning process */ + PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; + /*!< Type of the subevent, see + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; + /*!< Union of structures providing further data + regarding device's status. 
Size of data varies with + union member that is present, check ``eType`` value + to decode */ + +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. + * Software Timeline Advanced Event Payload. This data structure is received + * whenever the host driver processes a Software Timeline Advanced event. + */ +typedef struct +{ + IMG_PID uiPID; /*!< Identifier of the owning process */ + PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ + IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the + timeline has advanced */ + +} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */ + RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */ + + RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */ +} RGX_HWPERF_HOST_CLIENT_INFO_TYPE; + +typedef struct +{ + IMG_PID uiClientPID; /*!< Client process identifier */ + IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */ + IMG_CHAR acName[IMG_FLEX_ARRAY_MEMBER]; /*!< Process name string, null terminated */ +} RGX_HWPERF_HOST_CLIENT_PROC_NAME; + +#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \ + ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen))) + +typedef union +{ + struct + { + IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */ + RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[IMG_FLEX_ARRAY_MEMBER]; + } sProcName; +} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL; + +typedef struct +{ + IMG_UINT32 uiReserved; /*!< Reserved. Align structure size to 8 bytes */ + RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType; + /*!< Type of the subevent, see + RGX_HWPERF_HOST_CLIENT_INFO_TYPE */ + RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail; + /*!< Union of structures. Size of data + varies with union member that is present, + check ``eType`` value to decode */ +} RGX_HWPERF_HOST_CLIENT_INFO_DATA; + +/*! 
This type is a union of packet payload data structures associated with + * various FW and Host events */ +typedef union +{ + RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, + events ``0x01-0x06`` */ + RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, + events ``0x07-0x19``, ``0x28-0x29`` + See RGX_HWPERF_HW_DATA */ + RGX_HWPERF_GPU_WORK_PERIOD_DATA sGPUWorkPeriod; + + RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet + data, events ``0x1A`` */ + RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state + change event packet data, + events ``0x1B`` */ + RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event + packet data, + events ``0x20-0x22`` */ + RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, + events ``0x23`` */ + RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, + events ``0x30-0x31`` */ + RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, + events ``0x32`` */ + RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ + RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event + packet data, + events ``0x39`` */ + /* */ + RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, + events ``0x01`` (Host) */ + RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, + events ``0x02`` (Host) */ + RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, + events ``0x03`` (Host) */ + RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, + events ``0x04`` (Host) */ + RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, + events ``0x05`` (Host) */ + RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, + events ``0x06`` (Host) */ + RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, + events ``0x07`` (Host) */ + RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, + events ``0x08`` (Host) */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, + events ``0x09`` (Host) */ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance + data, events ``0x0A`` (Host) */ + RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info, + events ``0x0B`` (Host) */ +} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); + +#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + +#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ + ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_meta.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_meta.h index bdff11ffbdc1..1e6214c9ba1c 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_meta.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_meta.h @@ -231,6 +231,16 @@ typedef struct /* All threads can access and writable */ #define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE) +/* Direct map region 8 optionally used for custom mappings - max 8MB */ +#define RGXFW_SEGMMU_DMAP_CUSTOM0_ID (8U) +#define RGXFW_SEGMMU_DMAP_CUSTOM0_ADDR_START (0x06000000U) +#define RGXFW_SEGMMU_DMAP_CUSTOM0_MAX_SIZE (0x00800000U) + +/* Direct map region 9 optionally used for custom mappings - max 8MB */ +#define RGXFW_SEGMMU_DMAP_CUSTOM1_ID (9U) +#define RGXFW_SEGMMU_DMAP_CUSTOM1_ADDR_START (0x06800000U) +#define RGXFW_SEGMMU_DMAP_CUSTOM1_MAX_SIZE (0x00800000U) + /* 
Direct map region 10 used for mapping GPU memory - max 8MB */ #define RGXFW_SEGMMU_DMAP_GPU_ID (10U) #define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U) @@ -261,7 +271,7 @@ typedef struct ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U)) #define RGXFW_SEGMMU_META_BIFDM_ID (0x7U) -#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) && defined(RGX_FEATURE_META) #if defined(RGX_FEATURE_SLC_VIVT) #define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED #define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED @@ -302,12 +312,6 @@ typedef struct #define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) #define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) - -#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META) -#error "SECURE_FW_CODE_OSID is not supported on META cores" -#endif - - /****************************************************************************** * RGX FW Bootloader defaults ******************************************************************************/ @@ -335,7 +339,7 @@ typedef struct #define RGX_META_COREMEM_DATA_ADDR (0x82000000U) #define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU) -#if defined(__KERNEL__) +#if defined(__KERNEL__) || defined(TEE_DDK) #define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B)))) #define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B)))) #endif @@ -354,7 +358,7 @@ typedef struct #define META_CR_CORE_ID_VER_SHIFT (16U) #define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU) -#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) +#if !(defined(__KERNEL__) || defined(TEE_DDK)) && defined(RGX_FEATURE_META) #if (RGX_FEATURE_META == MTP218) #define RGX_CR_META_CORE_ID_VALUE 0x19 diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_mips.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_mips.h index 93951cee6373..a80d2f66f0b7 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_mips.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_mips.h @@ -139,14 +139,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12) #define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \ RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT) - -#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2) -#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) -#elif defined(SECURE_FW_CODE_OSID) -#define MIPS_FW_CODE_OSID (1U) -#endif - - /* * Pages to trampoline problematic physical addresses: * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000 @@ -167,7 +159,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1UL << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1U) & a)) -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) #define RGXMIPSFW_C0_PAGEMASK_4K (0x00001800) #define RGXMIPSFW_C0_PAGEMASK_16K (0x00007800) #define RGXMIPSFW_C0_PAGEMASK_64K (0x0001F800) @@ -184,15 +175,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
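/* [Editor's illustrative aside - not part of the patch] A minimal compile-time
 * sketch of the direct-map layout implied by the RGXFW_SEGMMU_DMAP_* values in
 * the rgx_meta.h hunk above, assuming it is placed after those defines and that
 * static_assert is available as used elsewhere in these headers: the new custom
 * regions 8 and 9 and the existing GPU region 10 are each 8MB and tile
 * back-to-back from 0x06000000 up to the GPU region at 0x07000000.
 */
static_assert(RGXFW_SEGMMU_DMAP_CUSTOM0_ADDR_START + RGXFW_SEGMMU_DMAP_CUSTOM0_MAX_SIZE ==
              RGXFW_SEGMMU_DMAP_CUSTOM1_ADDR_START,
              "custom direct-map region 8 should end where custom region 9 begins");
static_assert(RGXFW_SEGMMU_DMAP_CUSTOM1_ADDR_START + RGXFW_SEGMMU_DMAP_CUSTOM1_MAX_SIZE ==
              RGXFW_SEGMMU_DMAP_GPU_ADDR_START,
              "custom direct-map region 9 should end where the GPU region (10) begins");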
#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_4MB) #define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_4MB) #define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB) -#elif (RGX_NUM_OS_SUPPORTED == 1) +#elif (RGX_NUM_DRIVERS_SUPPORTED == 1) #define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_64K) #define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_64K) #define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB) -#elif (RGX_NUM_OS_SUPPORTED <= 4) +#elif (RGX_NUM_DRIVERS_SUPPORTED <= 4) #define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_256K) #define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_256K) #define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB) -#elif (RGX_NUM_OS_SUPPORTED <= 8) +#elif (RGX_NUM_DRIVERS_SUPPORTED <= 8) #define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_1MB) #define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_1MB) #define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB) @@ -200,8 +191,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #error "MIPS TLB invalid params" #endif -#endif - #define RGXMIPSFW_DECODE_REMAP_CONFIG_REGION_SIZE(r) ((1U << (((r >> 7) + 1U) << 1U))*0x400) /* @@ -245,8 +234,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Offset inside the bootloader data page where the general_exception handler saves the error state. * The error value is then copied by the NMI handler to the MipsState struct in shared memory. - * This is done because it's difficult to obain the address of MipsState inside the general exception - * handler. */ + * This is done because it's difficult to obtain the address of MipsState inside the general + * exception handler. */ #define RGXMIPSFW_ERROR_STATE_BASE (0x100) /* @@ -256,19 +245,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
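/* [Editor's illustrative aside - not part of the patch] A worked decode of
 * RGXMIPSFW_DECODE_REMAP_CONFIG_REGION_SIZE() shown above, assuming it is
 * evaluated where that macro is visible. The size field starts at bit 7, and
 * an encoded value of n selects a 4^(n+1) KB region:
 *
 *   n = 0 ->   4KB    n = 2 ->  64KB    n = 4 -> 1MB
 *   n = 1 ->  16KB    n = 3 -> 256KB    n = 5 -> 4MB
 */
static_assert(RGXMIPSFW_DECODE_REMAP_CONFIG_REGION_SIZE(3U << 7) == 0x40000U,
              "an encoded size field of 3 should decode to a 256KB region");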
* within the bootloader/NMI data page */ #define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0U) - -/* - * NMI shared data - */ -/* Base address of the shared data within the bootloader/NMI data page */ -#define RGXMIPSFW_NMI_SHARED_DATA_BASE (0x100) -/* Size used by Debug dump data */ -#define RGXMIPSFW_NMI_SHARED_SIZE (0x2B0) -/* Offsets in the NMI shared area in 32-bit words */ -#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET (0x0) -#define RGXMIPSFW_NMI_STATE_OFFSET (0x1) -#define RGXMIPSFW_NMI_ERROR_STATE_SET (0x1) - /* * MIPS boot stage */ @@ -398,9 +374,7 @@ typedef struct { typedef struct { IMG_UINT32 ui32ErrorState; /* This must come first in the structure */ -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) IMG_UINT32 ui32Sync; -#endif IMG_UINT32 ui32ErrorEPC; IMG_UINT32 ui32StatusRegister; IMG_UINT32 ui32CauseRegister; @@ -412,17 +386,12 @@ typedef struct { IMG_UINT32 ui32BadInstr; IMG_UINT32 ui32UnmappedAddress; RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]; -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) IMG_UINT64 aui64Remap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; -#else - RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; -#endif } RGX_MIPS_STATE; static_assert(offsetof(RGX_MIPS_STATE, ui32ErrorState) == 0, "ui32ErrorState is not the first member of the RGX_MIPS_STATE struct"); -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) #if defined(SUPPORT_MIPS_64K_PAGE_SIZE) static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_64K, "Register page size must be greater or equal to MIPS page size"); @@ -430,7 +399,6 @@ static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_64K, static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_4K, "Register page size must be greater or equal to MIPS page size"); #endif -#endif #endif /* RGXMIPSFW_ASSEMBLY_CODE */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rgx_riscv.h b/drivers/gpu/drm/img/img-volcanic/include/rgx_riscv.h index 4c064384608d..f98b77658d8f 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rgx_riscv.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rgx_riscv.h @@ -49,18 +49,28 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
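/* [Editor's illustrative aside - not part of the patch] A hypothetical helper
 * sketching one way code may rely on the static_assert above that keeps
 * ui32ErrorState as the first member of RGX_MIPS_STATE: the error word can be
 * read through a plain pointer to the start of the shared state buffer,
 * independent of the configuration-dependent layout of the remaining members.
 * ExampleReadMipsErrorState() is not part of the driver; it only illustrates
 * the guarantee.
 */
static inline IMG_UINT32 ExampleReadMipsErrorState(const void *pvMipsState)
{
	/* Valid because offsetof(RGX_MIPS_STATE, ui32ErrorState) == 0. */
	return *(const IMG_UINT32 *)pvMipsState;
}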
/* Utility defines to convert regions to virtual addresses and remaps */ -#define RGXRISCVFW_GET_REGION_BASE(r) IMG_UINT32_C((r) << 28) -#define RGXRISCVFW_GET_REGION(a) IMG_UINT32_C((a) >> 28) -#define RGXRISCVFW_MAX_REGION_SIZE IMG_UINT32_C(1 << 28) +#define RGXRISCVFW_NUM_REGIONS_LOG2 IMG_UINT32_C(4) +#define RGXRISCVFW_REGION_SIZE_LOG2 IMG_UINT32_C(28) +#define RGXRISCVFW_NUM_REGIONS (IMG_UINT32_C(1) << RGXRISCVFW_NUM_REGIONS_LOG2) +#define RGXRISCVFW_GET_REGION_BASE(r) ((r) << RGXRISCVFW_REGION_SIZE_LOG2) +#define RGXRISCVFW_GET_REGION(a) ((a) >> RGXRISCVFW_REGION_SIZE_LOG2) +#define RGXRISCVFW_MAX_REGION_SIZE (IMG_UINT32_C(1) << RGXRISCVFW_REGION_SIZE_LOG2) #define RGXRISCVFW_GET_REMAP(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + ((r) * 8U)) +#define RGXRISCVFW_REGION_MASK (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_NUM_REGIONS - IMG_UINT32_C(1))) + +#if !defined(RGXRISCVFW_ASSEMBLY_CODE) +static_assert(RGXRISCVFW_NUM_REGIONS_LOG2 + RGXRISCVFW_REGION_SIZE_LOG2 == 32, + "Mismatch between number of remap regions and remap size"); +#endif + /* RISCV remap output is aligned to 4K */ #define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN (0x1000U) /* * FW bootloader defines */ -#define RGXRISCVFW_BOOTLDR_CODE_REGION IMG_UINT32_C(0xC) +#define RGXRISCVFW_BOOTLDR_CODE_REGION (IMG_UINT32_C(0xC0000000) >> RGXRISCVFW_REGION_SIZE_LOG2) #define RGXRISCVFW_BOOTLDR_DATA_REGION IMG_UINT32_C(0x5) #define RGXRISCVFW_BOOTLDR_CODE_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION)) #define RGXRISCVFW_BOOTLDR_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION)) @@ -82,8 +92,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* * Host-FW shared data defines */ -#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x6UL) -#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0xDUL) +#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x0UL) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0x1UL) #define RGXRISCVFW_SHARED_CACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) #define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) #define RGXRISCVFW_SHARED_CACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_alignchecks.h index 84a916a51888..a33b6d541c65 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_alignchecks.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_alignchecks.h @@ -159,7 +159,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ - offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ + offsetof(RGXFWIF_SYSINIT, aui32TPUTrilinearFracMask), \ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ offsetof(RGXFWIF_SYSINIT, sFwSysData), \ (IMG_UINT32)sizeof(RGXFWIF_OSINIT), \ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_hwperf.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_hwperf.h index 810eb39a8831..ec2d9bb071e7 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_hwperf.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_hwperf.h @@ -54,9 +54,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * and the IRQ context when applying a configuration request. 
*/ typedef struct { - /* Padded to IMG_UINT32 to workaround pdump alignment requirements */ - IMG_UINT32 bValid; - IMG_UINT32 bEnabled; + /* Few members could be booleans but padded to IMG_UINT32 + * to workaround pdump alignment requirements */ + IMG_UINT32 ui32Valid; + IMG_UINT32 ui32Enabled; IMG_UINT32 eBlockID; IMG_UINT32 uiCounterMask; IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_MUX_COUNTERS_MAX]; @@ -71,7 +72,7 @@ typedef struct /* Structure used to hold a Direct-Addressable block's parameters for passing * between the BG context and the IRQ context when applying a configuration - * request. RGX_FEATURE_HWPERF_OCEANIC use only. + * request. HWPERF_UNIFIED use only. */ typedef struct { @@ -236,8 +237,7 @@ static INLINE RGXFWIF_HWPERF_CTL_BLK *rgxfw_hwperf_get_block_ctl( return &psHWPerfInitData->sBlkCfg[ui32Idx]; } -/* Stub routine for rgxfw_hwperf_get_da_block_ctl() for non - * RGX_FEATURE_HWPERF_OCEANIC systems. Just return a NULL. +/* Stub routine for rgxfw_hwperf_get_da_block_ctl(). Just return a NULL. */ #ifdef INLINE_IS_PRAGMA #pragma inline(rgxfw_hwperf_get_da_block_ctl) diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_km.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_km.h index bae2304d3e88..22da7ec4cc9d 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_km.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_km.h @@ -49,84 +49,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdefs_km.h" #include "dllist.h" #include "rgx_hwperf.h" -#include "rgxheapconfig.h" #include "rgx_mips.h" - - -/*************************************************************************/ /*! - Logging type -*/ /**************************************************************************/ -#define RGXFWIF_LOG_TYPE_NONE 0x00000000U -#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U -#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U -#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U -#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U -#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U -#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U -#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U -#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U -#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U -#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U -#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U -#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U -#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U -#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U -#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U -#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U -#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU -#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU - -/* String used in pvrdebug -h output */ -#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" - -/* Table entry to map log group strings to log type value */ -typedef struct { - const IMG_CHAR* pszLogGroupName; - IMG_UINT32 ui32LogGroupType; -} RGXFWIF_LOG_GROUP_MAP_ENTRY; - -/* - Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup - table where needed. Keep log group names short, no more than 20 chars. 
-*/ -#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ - { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ - { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ - { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ - { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ - { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ - { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ - { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ - { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ - { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ - { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ - { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ - { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ - { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ - { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ - { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } - - -/* Used in print statements to display log group state, one %s per group defined */ -#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" - -/* Used in a print statement to display log group state, one per group */ -#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U) ?("main ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U) ?("mts ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U) ?("cleanup ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U) ?("csw ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U) ?("bif ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U) ?("pm ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U) ?("rtd ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U) ?("spm ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_POW) != 0U) ?("pow ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U) ?("hwr ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U) ?("hwp ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U) ?("rpm ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U) ?("dma ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) != 0U) ?("misc ") :("")), \ - ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U) ?("debug ") :("")) - +#include "rgxheapconfig.h" /************************************************************************ * RGX FW signature checks @@ -135,105 +59,8 @@ typedef struct { #define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) -/*! - ****************************************************************************** - * Trace Buffer - *****************************************************************************/ - -/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ -#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */ -#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */ -#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */ - -#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U -#if defined(RGXFW_META_SUPPORT_2ND_THREAD) -#define RGXFW_THREAD_NUM 2U -#else -#define RGXFW_THREAD_NUM 1U -#endif - #define RGXFW_POLL_TYPE_SET 0x80000000U -#define RGXFW_PROCESS_NAME_LEN (16) - -typedef struct -{ - IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; - IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; - IMG_UINT32 ui32LineNum; -} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; - -/*! - * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface - * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing - * @{ - */ - -/*! 
- * @Brief Firmware trace buffer details - */ -typedef struct -{ - IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */ - IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */ - -#if defined(RGX_FIRMWARE) - IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ -#else - RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/ -#endif - IMG_PUINT32 RGXFW_ALIGN pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ - - RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf; -} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; - -/*! @} End of Defgroup SRVAndFWTracing */ - -#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64CRTimer; - IMG_UINT64 RGXFW_ALIGN ui64OSTimer; - IMG_UINT32 RGXFW_ALIGN ui32Data; - IMG_UINT32 ui32Reserved; - RGXFWIF_FILE_INFO_BUF sFaultBuf; -} UNCACHED_ALIGN RGX_FWFAULTINFO; - - -#define RGXFWIF_POW_STATES \ - X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ - X(RGXFWIF_POW_ON) /* running HW commands */ \ - X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ - X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ - -typedef enum -{ -#define X(NAME) NAME, - RGXFWIF_POW_STATES -#undef X -} RGXFWIF_POW_STATE; - -/* Firmware HWR states */ -#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ -#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ -#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ -#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ -#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ -#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ -#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ - -#define RGXFWIF_PHR_STATE_SHIFT (8U) -#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ -#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ -#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) - -#define RGXFWIF_PHR_MODE_OFF (0UL) -#define RGXFWIF_PHR_MODE_RD_RESET (1UL) -#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) - -typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; - /* Firmware per-DM HWR states */ #define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ #define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ @@ -246,14 +73,22 @@ typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; #define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ #define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ #define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an uncorrected GPU 
ECC error */ +#define RGXFWIF_DM_STATE_GPU_PARITY_HWR (IMG_UINT32_C(0x1) << 11) /*!< DM was forced into HWR due to an uncorrected GPU PARITY error */ +#define RGXFWIF_DM_STATE_GPU_LATENT_HWR (IMG_UINT32_C(0x1) << 12) /*!< DM was forced into HWR due to an uncorrected GPU LATENT error */ + /* Firmware's connection state */ typedef IMG_UINT32 RGXFWIF_CONNECTION_FW_STATE; -#define RGXFW_CONNECTION_FW_OFFLINE 0U /*!< Firmware is offline */ -#define RGXFW_CONNECTION_FW_READY 1U /*!< Firmware is initialised */ -#define RGXFW_CONNECTION_FW_ACTIVE 2U /*!< Firmware connection is fully established */ -#define RGXFW_CONNECTION_FW_OFFLOADING 3U /*!< Firmware is clearing up connection data */ -#define RGXFW_CONNECTION_FW_STATE_COUNT 4U +#define RGXFW_CONNECTION_FW_OFFLINE 0U /*!< Firmware is offline */ +#define RGXFW_CONNECTION_FW_READY 1U /*!< Firmware is initialised */ +#define RGXFW_CONNECTION_FW_ACTIVE 2U /*!< Firmware connection is fully established */ +#define RGXFW_CONNECTION_FW_GRACEFUL_OFFLOAD 3U /*!< Firmware is clearing up connection data */ +#define RGXFW_CONNECTION_FW_FORCED_OFFLOAD 4U /*!< Firmware is clearing up connection data */ +#define RGXFW_CONNECTION_FW_COOLDOWN 5U /*!< Firmware connection is in cooldown period */ +#define RGXFW_CONNECTION_FW_STATE_COUNT 6U + +#define RGXFW_FORCED_OFFLOAD_HCS_DEADLINE_MS 2U /*!< Workloads of Guest being forcefully offloaded must be stopped quickly */ +#define RGXFW_GRACEFUL_OFFLOAD_HCS_DEADLINE_MS 1000U /*!< Workloads of Guest being gracefully offloaded are allowed more time to finish */ /* OS' connection state */ typedef enum @@ -264,298 +99,30 @@ typedef enum RGXFW_CONNECTION_OS_STATE_COUNT } RGXFWIF_CONNECTION_OS_STATE; -typedef struct -{ - IMG_UINT bfOsState : 3; - IMG_UINT bfFLOk : 1; - IMG_UINT bfFLGrowPending : 1; - IMG_UINT bfIsolatedOS : 1; - IMG_UINT bfReserved : 26; -} RGXFWIF_OS_RUNTIME_FLAGS; - -typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; - -#if defined(PVRSRV_STALLED_CCB_ACTION) -#define PVR_SLR_LOG_ENTRIES 10U -#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64Timestamp; - IMG_UINT32 ui32FWCtxAddr; - IMG_UINT32 ui32NumUFOs; - IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; -} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; -#endif - -/*! - * @InGroup SRVAndFWTracing - * @Brief Firmware trace control data - */ -typedef struct -{ - IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ - RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ - IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated - (in RGXTraceBufferInitOnDemandResources) */ - IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_TRACEBUF; - -/*! 
@Brief Firmware system data shared with the Host driver */ -typedef struct -{ - IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ - IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ - volatile RGXFWIF_POW_STATE ePowState; - volatile IMG_UINT32 ui32HWPerfRIdx; - volatile IMG_UINT32 ui32HWPerfWIdx; - volatile IMG_UINT32 ui32HWPerfWrapCount; - IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ - IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ - - /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with - * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ - IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ - IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ - IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ - RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ - RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ - IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ - IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ - IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ - IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ - IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; -#if defined(SUPPORT_POWMON_COMPONENT) -#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) - RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; - IMG_UINT32 ui32PowerMonBufSizeInDWords; -#endif -#endif - -#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) -#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) -#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) - IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; -#endif - RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ - RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ - IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ - IMG_UINT32 ui32McConfig; /*!< Identify whether MC config is P-P or P-S */ -} UNCACHED_ALIGN RGXFWIF_SYSDATA; - -/*! 
- * @InGroup ContextSwitching - * @Brief Firmware per-os data and configuration - */ -typedef struct -{ - IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ - IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ - IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ -#if defined(PVRSRV_STALLED_CCB_ACTION) - IMG_UINT32 ui32ForcedUpdatesRequested; - IMG_UINT8 ui8SLRLogWp; - RGXFWIF_SLR_ENTRY sSLRLogFirst; - RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; - IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; -#endif - volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ - IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ - RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ - IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_OSDATA; - -/* Firmware trace time-stamp field breakup */ - -/* RGX_CR_TIMER register read (48 bits) value*/ -#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) -#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) - -/* Extra debug-info (16 bits) */ -#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) -#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK - - -/* Debug-info sub-fields */ -/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ -#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) -#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) - -/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ -#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) -#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) - -/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ -#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) -#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) - -/* Bit 3-15: Unused bits */ - -#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 -#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " -#define RGXFWT_DEBUG_INFO_STR_APPEND ")" - -/* Table of debug info sub-field's masks and corresponding message strings - * to be appended to firmware trace - * - * Mask : 16 bit mask to be applied to debug-info field - * String : debug info message string - */ - -#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ -/*Mask, String*/ \ -X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ -X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ -X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") - -/*! - ****************************************************************************** - * HWR Data - *****************************************************************************/ -/*! - * @Defgroup HWRInfo FW HWR shared data interface - * @Brief Types grouping data structures and defines used in realising the HWR record. - * @{ - */ -/*! 
@Brief HWR Lockup types */ -typedef enum -{ - RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ - RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ - RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ - RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ - RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ - RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ - RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ - RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ - RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ - RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ - RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ -} RGX_HWRTYPE; - -#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) - -#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ - ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ - ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ - IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ - IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ - IMG_UINT64 RGXFW_ALIGN ui64Reserved; -} RGX_BIFINFO; - -typedef struct -{ - IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ -} RGX_ECCINFO; - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ - IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ - IMG_UINT64 RGXFW_ALIGN ui64Reserved; -} RGX_MMUINFO; - -typedef struct -{ - IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ - IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ - IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ - IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ - IMG_UINT64 RGXFW_ALIGN ui64Reserved; -} UNCACHED_ALIGN RGX_POLLINFO; - -typedef struct -{ - IMG_UINT32 ui32BadVAddr; /*!< VA address */ - IMG_UINT32 ui32EntryLo; -} RGX_TLBINFO; - -/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. 
*/ -typedef struct -{ - union - { - RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ - RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ - RGX_POLLINFO sPollInfo; /*!< Poll failure details */ - RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ - RGX_ECCINFO sECCInfo; /*!< ECC failure details */ - } uHWRData; - - IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ - IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ - IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ - IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ - IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ - IMG_UINT32 ui32HWRNumber; /*!< HWR number */ - IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ - IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ - RGX_HWRTYPE eHWRType; /*!< Type of lockup */ - RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ - IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ - IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ -} UNCACHED_ALIGN RGX_HWRINFO; - -#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ -#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ -#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ -#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ - -/*! @Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */ -typedef struct -{ - RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ - IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ - IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ - IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ - IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ - IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ - IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ - IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ - IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ -} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; - -/*! 
@} End of HWRInfo */ - #define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) #define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) #define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) #define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) -#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) -#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) - -#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) -#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) +#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) +#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) /*! ****************************************************************************** * RGX firmware Init Config Data *****************************************************************************/ /* Flag definitions affecting the firmware globally */ -#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) +#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */ #define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) #define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) -#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) +#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */ #define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) -/* 5 unused */ +#define RGXFWIF_INICFG_SPU_CLOCK_GATE (IMG_UINT32_C(0x1) << 5) #define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) #define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) #define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) -/* 9 unused */ -/* 10 unused */ +#define RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES_EN (IMG_UINT32_C(0x1) << 9) +#define RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN (IMG_UINT32_C(0x1) << 10) /* 11 unused */ #define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) #define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) @@ -569,16 +136,14 @@ typedef struct #define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) #define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) #define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) -#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) +#define RGXFWIF_INICFG_INJECT_ICS_FAULT (IMG_UINT32_C(0x1) << 21) #define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) #define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) #define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) #define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) #define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) -#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) -#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +/* 27 unused */ +/* 28 unused */ #define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) #define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) #define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) @@ -591,12 +156,13 @@ typedef struct /* Extended Flag definitions affecting the 
firmware globally */ #define RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0) -/* [7] YUV10 override +/* [8] Lossy min channel override + * [7] YUV10 override * [6:4] Quality * [3] Quality enable * [2:1] Compression scheme * [0] Lossy group */ -#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK (IMG_UINT32_C(0xFF)) /* RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL */ +#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK (IMG_UINT32_C(0x1FF)) /* RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL */ #define RGXFWIF_INICFG_EXT_ALL (RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) #define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ @@ -652,6 +218,7 @@ typedef IMG_UINT32 RGX_RD_POWER_ISLAND_CONF; /* Unused registers re-purposed for storing counters of the Firmware's * interrupts for each OS */ +#if (RGXFW_MAX_NUM_OSIDS == 8) #define IRQ_COUNTER_STORAGE_REGS \ 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ \ @@ -661,21 +228,13 @@ typedef IMG_UINT32 RGX_RD_POWER_ISLAND_CONF; 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ -#endif - -typedef struct -{ - IMG_UINT16 ui16RegNum; /*!< Register number */ - IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ - IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ - IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ -} RGXFW_REGISTER_LIST; - -#if defined(RGX_FIRMWARE) -typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; +#elif (RGXFW_MAX_NUM_OSIDS == 2) +#define IRQ_COUNTER_STORAGE_REGS \ + 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ + 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ #else -typedef struct {RGXFWIF_DEV_VIRTADDR p; - RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; +#error Unsupported number of IRQ_COUNTER_STORAGE_REGS registers! +#endif #endif typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; @@ -688,28 +247,32 @@ typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FW; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_MUX_CNTBLK; typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK; typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; + +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) +/*! 
+ * @Brief Buffer to store KM active client contexts + */ +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ +} RGXFWIF_ACTIVE_CONTEXT_BUF_DATA; +#endif /*! * This number is used to represent an invalid page catalogue physical address @@ -717,13 +280,18 @@ typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; #define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU /*! - * This number is used to represent unallocated page catalog base register + * This number is used to represent an unallocated set of page catalog base registers */ #define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU /*! - Firmware memory context. -*/ + * This number is used to represent an invalid OS ID for the purpose of tracking PC set ownership + */ +#define RGXFW_BIF_INVALID_OSID 0xFFFFFFFFU + +/*! + * Firmware memory context. + */ typedef struct { IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ @@ -733,13 +301,18 @@ typedef struct IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) IMG_UINT32 ui32OSid; IMG_BOOL bOSidAxiProt; #endif } UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_FWMEMCONTEXT) == 32, + "RGXFWIF_FWMEMCONTEXT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! * FW context state flags */ @@ -757,10 +330,16 @@ typedef struct /* FW-accessible TA state which must be written out to memory on context store */ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /*!< VDM control stream stack pointer, to store in mid-TA */ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /*!< Initial value of VDM control stream stack pointer (in case is 'lost' due to a lock-up) */ - IMG_UINT32 uTAReg_VBS_SO_PRIM[4]; IMG_UINT16 ui16TACurrentIdx; } UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +#define MAX_GEOM_CORE_SIZE 4 + +static_assert(RGX_NUM_GEOM_CORES <= MAX_GEOM_CORE_SIZE, + "RGX_NUM_GEOM_CORES is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! * @InGroup ContextSwitching * @Brief Firmware GEOM/TA context suspend states for all GEOM cores @@ -768,9 +347,18 @@ typedef struct typedef struct { /*! FW-accessible TA state which must be written out to memory on context store */ +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[MAX_GEOM_CORE_SIZE]; +#else RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES]; +#endif } UNCACHED_ALIGN RGXFWIF_TACTX_STATE; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_TACTX_STATE) == 160, + "RGXFWIF_TACTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! * @InGroup ContextSwitching * @Brief Firmware FRAG/3D context suspend state @@ -790,6 +378,11 @@ typedef struct static_assert(sizeof(RGXFWIF_3DCTX_STATE) <= 16U, "Size of structure RGXFWIF_3DCTX_STATE exceeds maximum expected size."); +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_3DCTX_STATE) == 16, + "RGXFWIF_3DCTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + #define RGXFWIF_CTX_USING_BUFFER_A (0) #define RGXFWIF_CTX_USING_BUFFER_B (1U) @@ -798,65 +391,33 @@ typedef struct IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ } RGXFWIF_COMPUTECTX_STATE; -/*! 
- * @InGroup WorkloadContexts - * @Brief Firmware Common Context (or FWCC) - */ -typedef struct RGXFWIF_FWCOMMONCONTEXT_ -{ - /* CCB details for this firmware context */ - PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ - PRGXFWIF_CCCB psCCB; /*!< CCB base */ - RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; - - /* Context suspend state */ - PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ - - /* Flags e.g. for context switching */ - IMG_UINT32 ui32FWComCtxFlags; - IMG_INT32 i32Priority; /*!< Priority level */ - IMG_UINT32 ui32PrioritySeqNum; - - /* Framework state */ - PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ - - /* Statistic updates waiting to be passed back to the host... */ - IMG_BOOL bStatsPending; /*!< True when some stats are pending */ - IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ - IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ - IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ - RGXFWIF_DM eDM; /*!< Data Master type */ - IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ - IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ - - IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; - IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ - bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ - - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ - RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ - - PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_COMPUTECTX_STATE) == 4, + "RGXFWIF_COMPUTECTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif - /* References to the host side originators */ - IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ - IMG_UINT32 ui32PID; /*!< associated process ID */ +#define RGXFWIF_CONTEXT_COMPAT_FLAGS_STATS_PENDING (1U << 0) +#define RGXFWIF_CONTEXT_COMPAT_FLAGS_HAS_DEFER_COUNT (1U << 1) - IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ - IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ +typedef struct +{ + IMG_UINT32 ui32ExtJobRefToDisableZSStore; + IMG_BOOL bDisableZStore; + IMG_BOOL bDisableSStore; +} RGXFWIF_DISABLE_ZSSTORE; -} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; +#define MAX_ZSSTORE_DISABLE 8 -static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U, - "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); +typedef struct +{ + bool bSaved; + IMG_UINT64 ui64CheckSum[4]; +}RGXFWIF_TRP_CHECKSUM_GEOM_ENTRY; typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_TQ[RGX_TRP_MAX_NUM_CORES][1]; typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; -typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; +typedef RGXFWIF_TRP_CHECKSUM_GEOM_ENTRY RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES]; /*! 
* @InGroup WorkloadContexts @@ -869,6 +430,11 @@ typedef struct RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; + RGXFWIF_DISABLE_ZSSTORE sDisableZSStoreQueue[MAX_ZSSTORE_DISABLE]; + + IMG_UINT32 ui32ZSStoreQueueCount; + IMG_UINT32 ui32WriteOffsetOfDisableZSStore; + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ @@ -876,6 +442,7 @@ typedef struct #if defined(SUPPORT_TRP) RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; /*!< Used by Firmware to store checksums during 3D WRR */ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; /*!< Used by Firmware to store checksums during TA WRR */ + RGXFWIF_DM eTRPGeomCoreAffinity; /* !< Represent the DM affinity for pending 2nd TRP pass of GEOM otherwise points RGXFWIF_DM_MAX. */ #endif } UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; @@ -892,7 +459,6 @@ typedef struct IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ - IMG_UINT32 ui32WGPState; IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES]; } UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; @@ -916,7 +482,6 @@ typedef struct RGXFWIF_FWCOMMONCONTEXT sTQContext; /*!< Firmware context for TQ3D */ #if defined(SUPPORT_TRP) - IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */ RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ;/*!< Used by Firmware to store checksums during TQ WRR */ #endif } UNCACHED_ALIGN RGXFWIF_FWTRANSFERCONTEXT; @@ -947,9 +512,13 @@ typedef struct volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ - IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ } UNCACHED_ALIGN RGXFWIF_CCB_CTL; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_CCB_CTL) == 16, + "RGXFWIF_CCB_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! 
* @Defgroup KCCBTypes Kernel CCB data interface * @Brief Types grouping data structures and defines used in realising the KCCB functionality @@ -960,27 +529,16 @@ typedef struct #define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ #define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ -#if !defined(__KERNEL) +#if !defined(__KERNEL__) -#if !defined(RGX_FEATURE_SLC_VIVT) #define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE < 2) #define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ -#else -#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB) -#endif #define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */ -#else /* RGX_FEATURE_SLC_VIVT */ -#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */ -#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */ -#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ -#endif - #else -#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ -#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */ -#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ #endif #define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ @@ -1002,14 +560,15 @@ typedef struct typedef struct { - PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ - IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ - IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ - IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ - IMG_UINT32 ui32BPDataFlags; - IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ - IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ - RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ + IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ + IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ + IMG_UINT32 ui32BPDataFlags; + IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ + IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ + IMG_UINT64 RGXFW_ALIGN ui64SpillAddr; + RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ } RGXFWIF_BPDATA; #define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ @@ -1047,6 +606,12 @@ typedef struct IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ } RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; + RGXFWIF_DISABLE_ZSSTORE sDisableZSStore; +} RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA; + /*! 
* @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command */ @@ -1101,7 +666,7 @@ typedef struct RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ union { - IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */ + IMG_UINT32 ui32PowUnits; /*!< Number of active Dusts */ IMG_BOOL bForced; /*!< If the operation is mandatory */ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ } uPowerReqData; @@ -1141,6 +706,9 @@ typedef struct PRGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_MUX_CNTBLK array */ } RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS type command + */ typedef struct { IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ @@ -1184,22 +752,6 @@ typedef struct IMG_BOOL bDone; /*!< action backing/unbacking succeeded */ } RGXFWIF_ZSBUFFER_BACKING_DATA; -#if defined(SUPPORT_VALIDATION) -typedef struct -{ - IMG_UINT32 ui32RegWidth; - IMG_BOOL bWriteOp; - IMG_UINT32 ui32RegAddr; - IMG_UINT64 RGXFW_ALIGN ui64RegVal; -} RGXFWIF_RGXREG_DATA; - -typedef struct -{ - IMG_UINT64 ui64BaseAddress; - PRGXFWIF_FWCOMMONCONTEXT psContext; - IMG_UINT32 ui32Size; -} RGXFWIF_GPUMAP_DATA; -#endif /*! * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command @@ -1236,22 +788,22 @@ typedef struct ****************************************************************************** * Proactive DVFS Structures *****************************************************************************/ -#define NUM_OPP_VALUES 16 +#define NUM_OPP_LEVELS 16 typedef struct { IMG_UINT32 ui32Volt; /* V */ IMG_UINT32 ui32Freq; /* Hz */ -} UNCACHED_ALIGN PDVFS_OPP; +} UNCACHED_ALIGN OPP_LEVEL; typedef struct { - PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; + OPP_LEVEL asOPPValues[NUM_OPP_LEVELS]; #if defined(DEBUG) IMG_UINT32 ui32MinOPPPoint; #endif IMG_UINT32 ui32MaxOPPPoint; -} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP; +} UNCACHED_ALIGN RGXFWIF_OPP_INFO; typedef struct { @@ -1278,18 +830,6 @@ typedef enum RGXFWIF_REGCFG_CMD_DISABLE = 104 } RGXFWIF_REGDATA_CMD_TYPE; -typedef enum -{ - RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ - RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ - RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ - RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ - RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ - RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */ - RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ - RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. 
Keep as last element */ -} RGXFWIF_REG_CFG_TYPE; - typedef struct { IMG_UINT64 ui64Addr; @@ -1326,10 +866,17 @@ typedef enum */ typedef struct { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; RGXFWIF_OS_STATE_CHANGE eNewOSState; } UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; + IMG_UINT32 ui32FirstIntJobRefToCancel; + IMG_UINT32 ui32LastIntJobRefToCancel; +} UNCACHED_ALIGN RGXFWIF_CANCEL_WORK_DATA; + typedef enum { RGXFWIF_PWR_COUNTER_DUMP_START = 1, @@ -1351,50 +898,44 @@ typedef enum RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, - RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ - RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ - RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ - RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ - RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ - RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ - RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ - /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */ - RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ - RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ - RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ - - RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ - RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. 
*/ - + RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ + RGXFWIF_KCCB_CMD_CLEANUP = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ + RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ + RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ + RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ + RGXFWIF_KCCB_CMD_HEALTH_CHECK = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ + RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */ + RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW to disable zs store of a running 3D or add it to queue of render context. */ + RGXFWIF_KCCB_CMD_CANCEL_WORK = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Cancel all work up to and including a given intjobref for a given context */ /* Commands only permitted to the native or host OS */ - RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, - RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ - /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS */ - RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ - RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ - /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT*/ - RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ - RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ - RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. 
It can only be serviced for the Host DDK */ - RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ - /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */ - /*RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */ - RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ - RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ -#endif - RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ - RGXFWIF_KCCB_CMD_COUNTER_DUMP = 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ - RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ - RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_KCCB_CMD_GPUMAP = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */ -#endif - RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */ + RGXFWIF_KCCB_CMD_POW = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ + RGXFWIF_KCCB_CMD_REGCONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ + RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ + RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the priority/group for a particular driver. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ + RGXFWIF_KCCB_CMD_PHR_CFG = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ + RGXFWIF_KCCB_CMD_WDG_CFG = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ + RGXFWIF_KCCB_CMD_COUNTER_DUMP = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ + RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the GPU time slice for a particular driver. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the GPU time slice interval for all drivers. 
It can only be serviced for the Host DDK */ + + /* HWPerf commands */ + RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 300U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 301U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */ + RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 302U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 303U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ + RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 304U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */ + } RGXFWIF_KCCB_CMD_TYPE; -#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) +#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_POW - 1) /*! @Brief Kernel CCB command packet */ typedef struct @@ -1431,16 +972,19 @@ typedef struct RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */ + RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA sDisableZSStoreData; /*!< Data for disabling zs store of a 3D workload */ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */ - RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */ -#endif + RGXFWIF_CANCEL_WORK_DATA sCancelWorkData; /*!< Data for cancelling work */ } UNCACHED_ALIGN uCmdData; } UNCACHED_ALIGN RGXFWIF_KCCB_CMD; RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_KCCB_CMD) == 64, + "RGXFWIF_KCCB_CMD is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! @} End of KCCBTypes */ /*! 
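
Illustrative note (not part of the patch): the hunk above adds the RGXFWIF_KCCB_CMD_CANCEL_WORK command and routes its payload through the new sCancelWorkData union member. A minimal sketch of filling that payload is given below; only the uCmdData.sCancelWorkData layout and the RGXFWIF_CANCEL_WORK_DATA field names are taken from the patch, while the helper name and the psKCCBCmd parameter are assumptions made purely for illustration, and the command-type header field is not shown in this hunk so it is left as a comment.

/* Sketch only, assuming rgx_fwif_km.h is in scope; helper name is hypothetical. */
static void SketchFillCancelWork(RGXFWIF_KCCB_CMD *psKCCBCmd,
                                 PRGXFWIF_FWCOMMONCONTEXT psContext,
                                 IMG_UINT32 ui32FirstIntJobRef,
                                 IMG_UINT32 ui32LastIntJobRef)
{
	/* Payload fields introduced by RGXFWIF_CANCEL_WORK_DATA in this patch */
	psKCCBCmd->uCmdData.sCancelWorkData.psContext                 = psContext;
	psKCCBCmd->uCmdData.sCancelWorkData.ui32FirstIntJobRefToCancel = ui32FirstIntJobRef;
	psKCCBCmd->uCmdData.sCancelWorkData.ui32LastIntJobRefToCancel  = ui32LastIntJobRef;

	/* The command type (RGXFWIF_KCCB_CMD_CANCEL_WORK) would be written to the
	 * command-type field of RGXFWIF_KCCB_CMD, which is not shown in this hunk. */
}
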
@@ -1495,7 +1039,7 @@ typedef struct RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ - IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ + IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ } RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; @@ -1534,12 +1078,6 @@ typedef enum RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart \n Command data: None */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, -#if defined(SUPPORT_SOC_TIMER) - RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, -#endif -#endif RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ } RGXFWIF_FWCCB_CMD_TYPE; @@ -1575,21 +1113,6 @@ typedef struct IMG_UINT32 ui32CoreClkRate; } UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA; -#if defined(SUPPORT_VALIDATION) -typedef struct -{ - IMG_UINT64 ui64RegValue; -} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA; - -#if defined(SUPPORT_SOC_TIMER) -typedef struct -{ - IMG_UINT64 ui64timerGray; - IMG_UINT64 ui64timerBinary; - IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; -} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA; -#endif -#endif /*! ****************************************************************************** @@ -1609,17 +1132,16 @@ typedef struct RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for context reset notification */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData; -#if defined(SUPPORT_SOC_TIMER) - RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers; -#endif -#endif } RGXFW_ALIGN uCmdData; } RGXFW_ALIGN RGXFWIF_FWCCB_CMD; RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD); +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_FWCCB_CMD) == 48, + "RGXFWIF_FWCCB_CMD is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! @} End of FWCCBTypes */ /*! @@ -1629,6 +1151,7 @@ RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD); typedef struct { IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */ + IMG_UINT16 ui16CyclesTakenHigh; /*!< Not used on Rogue GPU cores. */ IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */ } RGXFWIF_WORKEST_FWCCB_CMD; @@ -1645,7 +1168,7 @@ typedef struct #define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) #define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) -#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1U)) & ~(RGXFWIF_FWALLOC_ALIGN - 1U)) +#define RGX_CCB_FWALLOC_ALIGN(size) (PVR_ALIGN(size, RGXFWIF_FWALLOC_ALIGN)) typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; @@ -1676,16 +1199,21 @@ typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. 
*/ #define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) -#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ -#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ +/* UNFENCED type is not supported but keeping defines as it is for now */ -#if defined(SUPPORT_VALIDATION) -#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) -#endif #define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ +#define RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP (223U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Process a vulkan timestamp */ /*! @} End of Client CCB command types */ + +#define RGXFWIF_TRP_STATUS_UNKNOWN 0x000U +#define RGXFWIF_TRP_STATUS_CHECKSUMS_OK 0x001U +#define RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR 0x002U + +#define RGXFWIF_CR_TRP_SIGNATURE_STATUS (RGX_CR_SCRATCH10) + + typedef struct { /* Index for the KM Workload estimation return data array */ @@ -1707,11 +1235,16 @@ typedef struct IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ -#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#if defined(SUPPORT_WORKLOAD_ESTIMATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ #endif } RGXFWIF_CCB_CMD_HEADER; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_CCB_CMD_HEADER) == 16, + "RGXFWIF_CCB_CMD_HEADER is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /* ****************************************************************************** * Client CCB commands which are only required by the kernel @@ -1783,7 +1316,6 @@ typedef struct { RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ - IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ @@ -1791,26 +1323,44 @@ typedef struct IMG_BOOL bUpdated; /*!< Information is valid */ } UNCACHED_ALIGN RGXFWIF_COMPCHECKS; +typedef struct +{ + IMG_UINT32 ui32NumCores; + IMG_UINT64 RGXFW_ALIGN aui64MultiCoreCapabilities[RGX_MULTICORE_MAX_NOHW_CORES]; +} UNCACHED_ALIGN RGXFWIF_MULTICORE_INFO; + /*! 
@Brief Firmware Runtime configuration data \ref RGXFWIF_RUNTIME_CFG * allocated by services and used by the Firmware on boot **/ typedef struct { - IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ - IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */ - IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */ - IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */ - IMG_UINT32 ui32DefaultDustsNumInit; /*!< Last number of dusts change requested by the host */ - IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */ - IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */ - IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */ - IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */ - PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */ -#if defined(SUPPORT_VALIDATION) - IMG_BOOL bInjectFWFault; /*!< Injecting firmware fault to validate recovery through Host */ + IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */ + IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */ + IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */ +#if defined(SUPPORT_SOC_TIMER) + IMG_UINT32 ui32SOCClockSpeed; /*!< SOC clock speed, used for deadline scheduling */ #endif + IMG_UINT32 ui32PowUnitsState; /*!< Last number of dusts change requested by the host */ + IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */ + IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */ + IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */ + IMG_INT32 ai32DriverPriority[RGXFW_MAX_NUM_OSIDS]; /*!< Array of priorities per OS */ + IMG_UINT32 aui32DriverIsolationGroup[RGXFW_MAX_NUM_OSIDS]; /*!< Array of isolation groups per OS */ + IMG_UINT32 aui32TSPercentage[RGXFW_MAX_NUM_OSIDS]; /*!< Array of time slice per OS */ + IMG_UINT32 ui32TSIntervalMs; /*!< Time slice interval */ + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec; /*!< Vz Connection Cooldown period in secs */ + + PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */ + RGXFWIF_DMA_ADDR sHWPerfDMABuf; + RGXFWIF_DMA_ADDR sHWPerfCtlDMABuf; } RGXFWIF_RUNTIME_CFG; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_RUNTIME_CFG) == 68, + "RGXFWIF_RUNTIME_CFG is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! ***************************************************************************** * Control data for RGX @@ -1831,7 +1381,7 @@ typedef enum typedef struct { IMG_PID uiPID; - IMG_UINT32 ui32OSID; + IMG_UINT32 ui32DriverID; } RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; typedef struct @@ -1960,35 +1510,34 @@ typedef struct PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ + RGXFWIF_MULTICORE_INFO sRGXMulticoreInfo; /*! 
< Multicore capability info */ } UNCACHED_ALIGN RGXFWIF_OSINIT; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_OSINIT) == 104, + "RGXFWIF_OSINIT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + /*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT * allocated by services and used by the Firmware on boot **/ typedef struct { -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) RGX_MIPS_STATE sMIPSState; /*!< MIPS Debug Data; this must be the first member in the structure */ -#endif IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; - IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*! Event filter for Firmware events */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; - IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; +#if defined(PDUMP) RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */ - - RGXFWIF_PDVFS_OPP sPDVFSOPPInfo; +#endif RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ @@ -2008,7 +1557,7 @@ typedef struct PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ #endif - PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ + PRGXFWIF_GPU_UTIL_FW sGpuUtilFWCtl; /*!< GPU utilization buffer */ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ @@ -2016,6 +1565,10 @@ typedef struct IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ +#if defined(SUPPORT_SOC_TIMER) + IMG_UINT32 ui32InitialSOCClockSpeed; /*!< System/SOC clock speed at FW boot time */ +#endif + IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ @@ -2024,16 +1577,18 @@ typedef struct IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ - IMG_UINT32 ui32JonesDisableMask; - FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance counter config */ +#if defined(SUPPORT_FW_OPP_TABLE) + RGXFWIF_OPP_INFO RGXFW_ALIGN sOPPInfo; + /** * FW Pointer to memory containing core clock rate in Hz. * Firmware (PDVFS) updates the memory when running on non primary FW thread * to communicate to host driver. 
*/ - PRGXFWIF_CORE_CLK_RATE sCoreClockRate; + PRGXFWIF_CORE_CLK_RATE RGXFW_ALIGN sCoreClockRate; +#endif #if defined(PDUMP) RGXFWIF_PID_FILTER sPIDFilter; @@ -2049,7 +1604,11 @@ typedef struct RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; #endif -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + RGXFWIF_DEV_VIRTADDR sActiveContextBufBase; /*!< Active context buffer base */ +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) /* * Used when validation is enabled to allow the host to check * that MTS sent the correct sideband in response to a kick @@ -2069,15 +1628,16 @@ typedef struct #if defined(SUPPORT_AUTOVZ) IMG_UINT32 ui32VzWdgPeriod; #endif - +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + /* notify firmware power-up on host-side recovery */ + IMG_BOOL bFwHostRecoveryMode; +#endif } UNCACHED_ALIGN RGXFWIF_SYSINIT; -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) static_assert(offsetof(RGXFWIF_SYSINIT, sMIPSState) == 0, "sMIPSState is not the first member of the RGXFWIF_SYSINIT struct"); -#endif -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) #define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 #define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 #endif @@ -2143,9 +1703,12 @@ typedef struct /* See rgx_common.h for a list of GPU states */ #define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) +#define RGXFWIF_GPU_UTIL_TIME_MASK32 (IMG_UINT32_C(0xFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK32) -#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) -#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) +#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) +#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) +#define RGXFWIF_GPU_UTIL_GET_TIME32(word) ((IMG_UINT32)(word) & RGXFWIF_GPU_UTIL_TIME_MASK32) +#define RGXFWIF_GPU_UTIL_GET_STATE32(word) ((IMG_UINT32)(word) & RGXFWIF_GPU_UTIL_STATE_MASK32) /* The OS timestamps computed by the FW are approximations of the real time, * which means they could be slightly behind or ahead the real timer on the Host. 
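
Illustrative note (not part of the patch): the hunk above introduces 32-bit variants of the GPU-utilisation word accessors (RGXFWIF_GPU_UTIL_TIME_MASK32, GET_TIME32, GET_STATE32), with a MAKE_WORD32 packer following in the next hunk. The sketch below is a self-contained demonstration of the same pack/unpack idea; the real state mask is defined in rgx_common.h and is not visible here, so a 2-bit stand-in mask is assumed purely for illustration.

/* Minimal, standalone sketch of the 32-bit time/state packing scheme.
 * DEMO_STATE_MASK32 is an assumed stand-in for RGXFWIF_GPU_UTIL_STATE_MASK32. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_STATE_MASK32   UINT32_C(0x3)
#define DEMO_TIME_MASK32    (UINT32_C(0xFFFFFFFF) & ~DEMO_STATE_MASK32)

#define DEMO_GET_TIME32(w)     ((uint32_t)(w) & DEMO_TIME_MASK32)
#define DEMO_GET_STATE32(w)    ((uint32_t)(w) & DEMO_STATE_MASK32)
#define DEMO_MAKE_WORD32(t, s) (DEMO_GET_TIME32(t) | DEMO_GET_STATE32(s))

int main(void)
{
	/* Pack a truncated timestamp and a state code into one 32-bit word */
	uint32_t word = DEMO_MAKE_WORD32(0x12345678u, 0x2u);

	printf("time=0x%08x state=0x%x\n", DEMO_GET_TIME32(word), DEMO_GET_STATE32(word));
	return 0;
}

For context, the following hunk notes that these 32-bit timestamps are stored in "approximately microseconds" (nanoseconds shifted right by RGXFWIF_DM_OS_TIMESTAMP_SHIFT = 10), so a 32-bit value wraps after roughly 2^32 x 1024 ns, about 4398 s or 73 minutes, which is why the per-DM wrap counters are added alongside them.
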
@@ -2159,6 +1722,9 @@ typedef struct #define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) +#define RGXFWIF_GPU_UTIL_MAKE_WORD32(time,state) \ + (RGXFWIF_GPU_UTIL_GET_TIME32(time) | RGXFWIF_GPU_UTIL_GET_STATE32(state)) + /* The timer correlation array must be big enough to ensure old entries won't be * overwritten before all the HWPerf events linked to those entries are processed @@ -2177,6 +1743,26 @@ typedef struct static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); +/* The time is stored in DM state time-stamps, and as a result in DMs states counters, in "approximately microseconds", + * dividing the time originally obtained in nanoseconds by 2^10 for the sake of reducing coremem usage */ +#define RGXFWIF_DM_OS_TIMESTAMP_SHIFT 10U + +typedef struct +{ + /* Last GPU DM per-OS states + OS time of the last state update */ + IMG_UINT32 RGXFW_ALIGN aui32DMOSLastWord[RGXFWIF_GPU_UTIL_DM_MAX]; + /* DMs time-stamps are cached in coremem - to reduce coremem usage we allocate 32 bits for each of them + * and save their values divided by 2^10, so they wrap around in ~73 mins, consequently + * we keep the count of the wrapping around instances */ + IMG_UINT32 RGXFW_ALIGN aui32DMOSLastWordWrap[RGXFWIF_GPU_UTIL_DM_MAX]; + /* Counters for the amount of time the GPU DMs were active or inactive(idle or blocked) */ + IMG_UINT32 RGXFW_ALIGN aaui32DMOSStatsCounters[RGXFWIF_GPU_UTIL_DM_MAX][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; + /* DMs Counters are cached in coremem - to reduce coremem usage we allocate 32 bits for each of them + * and save their values divided by 2^10, so they wrap around in ~73 mins, consequently + * we keep the count of the wrapping around instances */ + IMG_UINT32 RGXFW_ALIGN aaui32DMOSCountersWrap[RGXFWIF_GPU_UTIL_DM_MAX][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; +} RGXFWIF_GPU_STATS; + typedef struct { RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; @@ -2189,190 +1775,111 @@ typedef struct IMG_UINT64 RGXFW_ALIGN ui64GpuLastWord; /* Counters for the amount of time the GPU was active/idle/blocked */ IMG_UINT64 RGXFW_ALIGN aui64GpuStatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; + /* Device off period timestamp offset */ + IMG_INT64 RGXFW_ALIGN i64DeviceTimestampOffset; + /* Stats per OSID/DriverID */ + RGXFWIF_GPU_STATS sStats[RGXFW_MAX_NUM_OSIDS]; +} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FW; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_GPU_UTIL_FW) == 12584, + "RGXFWIF_GPU_UTIL_FW is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif - /* Last GPU DM per-OS states + OS time of the last state update */ - IMG_UINT64 RGXFW_ALIGN aaui64DMOSLastWord[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OS]; - /* Counters for the amount of time the GPU DMs were active/idle/blocked */ - IMG_UINT64 RGXFW_ALIGN aaaui64DMOSStatsCounters[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OS][RGXFWIF_GPU_UTIL_STATE_NUM]; -} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; +#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET +#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U -typedef struct -{ - IMG_UINT32 ui32RenderTargetIndex; //Render number - IMG_UINT32 ui32CurrentRenderTarget; //index in RTA - IMG_UINT32 ui32ActiveRenderTargets; //total active RTs - IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM - RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array 
of valid RT indices - RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target - IMG_UINT32 ui32MaxRTs; //Number of render targets in the array - IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_RTA_CTL; +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U -/*! - * @InGroup RenderTarget - * @Brief Firmware Freelist holding usage state of the Parameter Buffers - */ -typedef struct -{ - IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr; /*!< Freelist page table base */ - IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ - IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ - IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ - IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ - IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ - IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ - IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/ -#if defined(SUPPORT_SHADOW_FREELISTS) - IMG_UINT32 ui32HWRCounter; - PRGXFWIF_FWMEMCONTEXT psFWMemContext; -#endif - IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ - IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ - IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ - IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ -#if defined(SUPPORT_AGP) - IMG_UINT32 ui32PmGlobalPb; /*!< PM Global PB on which Freelist is loaded */ +#if defined(SUPPORT_TRUSTED_DEVICE) +#define RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE PVR_ALIGN((RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES), DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) +#else +#define RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE 0 #endif -} UNCACHED_ALIGN RGXFWIF_FREELIST; + +#define RGXFWIF_TDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES) +#define RGXFWIF_CDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES) /*! 
****************************************************************************** - * HWRTData + * Virtualisation and Security *****************************************************************************/ - -/* HWRTData flags */ -/* Deprecated flags 1:0 */ -#define HWRTDATA_HAS_LAST_TA (IMG_UINT32_C(1) << 2) -#define HWRTDATA_PARTIAL_RENDERED (IMG_UINT32_C(1) << 3) -#define HWRTDATA_DISABLE_TILE_REORDERING (IMG_UINT32_C(1) << 4) -#define HWRTDATA_NEED_BRN65101_BLIT (IMG_UINT32_C(1) << 5) -#define HWRTDATA_FIRST_BRN65101_STRIP (IMG_UINT32_C(1) << 6) -#define HWRTDATA_NEED_BRN67182_2ND_RENDER (IMG_UINT32_C(1) << 7) -#if defined(SUPPORT_AGP) -#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (IMG_UINT32_C(1) << 8) -#if defined(SUPPORT_AGP4) -#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (IMG_UINT32_C(1) << 9) -#endif -#define HWRTDATA_GEOM_NEEDS_RESUME (IMG_UINT32_C(1) << 10) +#if defined(SECURE_FW_CODE_OSID) +/* software workaround for SoCs without fw_code, fw_priv_data signals, MIPS only */ +#if defined(RGX_FEATURE_META) +#error "SECURE_FW_CODE_OSID is not supported on META cores" +#elif defined(RGX_FEATURE_RISCV_FW_PROCESSOR) +#error "SECURE_FW_CODE_OSID is not supported on RISC-V cores" +#elif (RGX_NUM_DRIVERS_SUPPORTED > 1) +#error "SECURE_FW_CODE_OSID is not supported on virtualization drivers" +#elif (SECURE_FW_CODE_OSID + 1 > 2) +#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) +#else +#define MIPS_FW_CODE_OSID (1U) #endif +#endif /* defined(SECURE_FW_CODE_OSID) */ + +static_assert((RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID), + " Invalid RGX_FW_HEAP_OSID_ASSIGNMENT value. Rogue cores support only the RGX_FW_HEAP_USES_FIRMWARE_OSID config"); + +/* Firmware and Host driver share the same OSID */ +#define FW_HEAP_OSID (FW_OSID) + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +/* virtualization without security support */ +#define DRIVER_ID(osid) (osid) +#define OSID(did) (did) +/* Time slice support */ +/* Bits 30 and 31 reserved by FW private driver priority */ +#define RGXFW_VZ_PRIORITY_MAX_SHIFT (30U) +#define RGXFW_VZ_PRIORITY_MASK ((1U << RGXFW_VZ_PRIORITY_MAX_SHIFT) - 1U) +#define RGXFW_VZ_TIME_SLICE_MAX (100U) +#define RGXFW_VZ_TIME_SLICE_MIN (5U) +#else +#define DRIVER_ID(osid) (0U) +#define OSID(did) (did) +#define RGXFW_VZ_PRIORITY_MASK (0U) +#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -typedef enum -{ - RGXFWIF_RTDATA_STATE_NONE = 0, - RGXFWIF_RTDATA_STATE_KICKTA, - RGXFWIF_RTDATA_STATE_KICKTAFIRST, - RGXFWIF_RTDATA_STATE_TAFINISHED, - RGXFWIF_RTDATA_STATE_KICK3D, - RGXFWIF_RTDATA_STATE_3DFINISHED, - RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, - RGXFWIF_RTDATA_STATE_TAOUTOFMEM, - RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, - /* In case of HWR, we can't set the RTDATA state to NONE, - * as this will cause any TA to become a first TA. 
- * To ensure all related TA's are skipped, we use the HWR state */ - RGXFWIF_RTDATA_STATE_HWR, - RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU -} RGXFWIF_RTDATA_STATE; - -typedef struct -{ - IMG_BOOL bTACachesNeedZeroing; - - IMG_UINT32 ui32ScreenPixelMax; - IMG_UINT64 RGXFW_ALIGN ui64MultiSampleCtl; - IMG_UINT64 ui64FlippedMultiSampleCtl; - IMG_UINT32 ui32TPCStride; - IMG_UINT32 ui32TPCSize; - IMG_UINT32 ui32TEScreen; - IMG_UINT32 ui32MTileStride; - IMG_UINT32 ui32TEAA; - IMG_UINT32 ui32TEMTILE1; - IMG_UINT32 ui32TEMTILE2; - IMG_UINT32 ui32ISPMergeLowerX; - IMG_UINT32 ui32ISPMergeLowerY; - IMG_UINT32 ui32ISPMergeUpperX; - IMG_UINT32 ui32ISPMergeUpperY; - IMG_UINT32 ui32ISPMergeScaleX; - IMG_UINT32 ui32ISPMergeScaleY; - IMG_UINT32 uiRgnHeaderSize; - IMG_UINT32 ui32ISPMtileSize; -} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; - -/*! - * @InGroup RenderTarget - * @Brief Firmware Render Target data i.e. HWRTDATA used to hold the PM context - */ -typedef struct -{ - IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */ - - IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[1]; /*!< VCE Page Catalogue base */ - IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[1]; - IMG_UINT64 RGXFW_ALIGN ui64TECatBase[1]; /*!< TE Page Catalogue base */ - IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[1]; - IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ - IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; - - IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ - IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ - - RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */ - - IMG_UINT32 ui32HWRTDataFlags; - RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ - - PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ - IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) - IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ +#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++) - RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Render target clean up state */ +#if defined(__KERNEL__) +/* Driver implementation */ +#define FOREACH_ACTIVE_DRIVER(devinfo, did) RGXFwSharedMemCacheOpValue(psFwSysData->asOsRuntimeFlagsMirror[RGXFW_HOST_DRIVER_ID], \ + INVALIDATE); \ + FOREACH_SUPPORTED_DRIVER(did) \ + { \ + if (devinfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[did].bfOsState != RGXFW_CONNECTION_FW_ACTIVE) continue; - RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ +#define END_FOREACH_ACTIVE_DRIVER } - IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sMacrotileArrayDevVAddr; /*!< Macrotiling array base */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sRgnHeaderDevVAddr; /*!< Region headers base */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sRTCDevVAddr; /*!< Render target cache base */ -#if defined(RGX_FIRMWARE) - struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom; #else - RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN pui32OwnerGeomNotUsedByHost; -#endif -#if defined(SUPPORT_TRP) - IMG_UINT32 ui32KickFlagsCopy; - IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */ - IMG_UINT32 ui32TEPageCopy; - IMG_UINT32 ui32VCEPageCopy; -#endif -#if defined(SUPPORT_AGP) - IMG_BOOL bTACachesNeedZeroing; -#endif -} UNCACHED_ALIGN 
RGXFWIF_HWRTDATA; +/* Firmware implementation */ +#define FOREACH_ACTIVE_DRIVER(did) do { \ + unsigned int idx; \ + for (idx = 0; idx < RGXFW_NUM_ACTIVE_DRIVERS; idx++) \ + { \ + (did) = gsRGXFWCtl.aui32ActiveDrivers[idx]; \ + { -/* Sync_checkpoint firmware object. - * This is the FW-addressable structure use to hold the sync checkpoint's - * state and other information which needs to be accessed by the firmware. - */ -typedef struct -{ - IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ - IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ -} SYNC_CHECKPOINT_FW_OBJ; +#define END_FOREACH_ACTIVE_DRIVER }}} while (false); +#endif /* defined(__KERNEL__) */ -/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ -#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) -#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET -#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U +#else +#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) <= RGXFW_HOST_DRIVER_ID; (did)++) -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES) -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U +#define FOREACH_ACTIVE_DRIVER(did) FOREACH_SUPPORTED_DRIVER(did) +#define END_FOREACH_ACTIVE_DRIVER -#define RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES) +#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -#define RGXFWIF_TDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES) -#define RGXFWIF_CDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES) +#define FOREACH_VALIDATION_OSID(osid) for ((osid)=0; (osid) < GPUVIRT_VALIDATION_NUM_OS; (osid)++) +#define FOREACH_HW_OSID(osid) for ((osid)=0; (osid) < RGXFW_MAX_NUM_OSIDS; (osid)++) +#define FOREACH_DRIVER_RAW_HEAP(did, _struct, dev) for ((did)=RGX_FIRST_RAW_HEAP_DRIVER_ID; (did) < ((PVRSRV_VZ_MODE_IS(NATIVE, _struct, dev) ? 1 : RGX_NUM_DRIVERS_SUPPORTED)); (did)++) #endif /* RGX_FWIF_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_shared.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_shared.h index 1cbe8d0d3ebb..ccab45ae8fc0 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_shared.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_fwif_shared.h @@ -84,11 +84,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) -typedef struct RGXFWIF_DEV_VIRTADDR_ -{ - IMG_UINT32 ui32Addr; -} RGXFWIF_DEV_VIRTADDR; - typedef struct { IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; @@ -208,16 +203,23 @@ typedef struct * fence dependencies are not met. */ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity in bytes of the CCB-1 */ -#if defined(SUPPORT_AGP) - IMG_UINT32 ui32ReadOffset2; -#if defined(SUPPORT_AGP4) - IMG_UINT32 ui32ReadOffset3; - IMG_UINT32 ui32ReadOffset4; -#endif -#endif + + IMG_UINT32 ui32ReadOffset2; /*!< Firmware 2nd read offset into CCB for AGP. + Points to the command that is + runnable on GPU, if R2!=W */ + IMG_UINT32 ui32ReadOffset3; /*!< Firmware 3rd read offset into CCB for AGP. 
+ Points to the command that is + runnable on GPU, if R3!=W */ + IMG_UINT32 ui32ReadOffset4; /*!< Firmware 4th read offset into CCB for AGP. + Points to the command that is + runnable on GPU, if R4!=W */ } UNCACHED_ALIGN RGXFWIF_CCCB_CTL; +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_CCCB_CTL) == 32, + "RGXFWIF_CCCB_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif typedef IMG_UINT32 RGXFW_FREELIST_TYPE; @@ -256,12 +258,6 @@ typedef struct IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK0; /*!< VDM context resume task 0 */ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK1; /*!< VDM context resume task 1 */ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK2; /*!< VDM context resume task 2 */ - - IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK3; - IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK4; - - IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK3; - IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK4; } asTAState[2]; } RGXFWIF_TAREGISTERS_CSWITCH; @@ -323,6 +319,12 @@ typedef enum RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ + RGX_CONTEXT_PVRIC_SIGNATURE_MISMATCH = 17, /*!< PVRIC Signature mismatch */ + RGX_CONTEXT_RESET_REASON_FW_PTE_PARITY_ERR = 18, /*!< Parity error in MMU Page Table Entry */ + RGX_CONTEXT_RESET_REASON_FW_PARITY_ERR = 19, /*!< Parity error in MH, system bus or Control Status registers */ + RGX_CONTEXT_RESET_REASON_GPU_PARITY_HWR = 20, /*!< Parity error in system bus or Control Status registers */ + RGX_CONTEXT_RESET_REASON_GPU_LATENT_HWR = 21, /*!< Latent/ICS signature mismatch error */ + RGX_CONTEXT_RESET_REASON_DCLS_ERR = 22, /*!< Dual Core Lock Step FW error detected */ } RGX_CONTEXT_RESET_REASON; /*! @@ -340,18 +342,797 @@ typedef struct #define RGX_HEAP_UM_USC_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY #define RGX_HEAP_UM_USC_RESERVED_REGION_OFFSET 0 -#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE +#define RGX_HEAP_KM_USC_RESERVED_REGION_OFFSET RGX_HEAP_UM_USC_RESERVED_SIZE #define RGX_HEAP_UM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY #define RGX_HEAP_UM_GENERAL_RESERVED_REGION_OFFSET 0 -#if defined(SUPPORT_TRUSTED_DEVICE) -#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY +#define RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET RGX_HEAP_UM_GENERAL_RESERVED_SIZE + +/*************************************************************************/ /*! 
+ Logging type +*/ /**************************************************************************/ +#define RGXFWIF_LOG_TYPE_NONE 0x00000000U +#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U +#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U +#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U +#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U +#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U +#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U +#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U +#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U +#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U +#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U +#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U +#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U +#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U +#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U +#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U +#define RGXFWIF_LOG_TYPE_GROUP_VZ 0x00008000U +#define RGXFWIF_LOG_TYPE_GROUP_SAFETY 0x00010000U +#define RGXFWIF_LOG_TYPE_GROUP_VERBOSE 0x00020000U +#define RGXFWIF_LOG_TYPE_GROUP_CUSTOMER 0x00040000U +#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U +#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x8007FFFEU +#define RGXFWIF_LOG_TYPE_MASK 0x8007FFFFU + +/* String used in pvrdebug -h output */ +#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,vz,safety,verbose,customer,debug" + +/* Table entry to map log group strings to log type value */ +typedef struct { + const IMG_CHAR* pszLogGroupName; + IMG_UINT32 ui32LogGroupType; +} RGXFWIF_LOG_GROUP_MAP_ENTRY; + +/* + Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup + table where needed. Keep log group names short, no more than 20 chars. +*/ +#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ + { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ + { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ + { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ + { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ + { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ + { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ + { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ + { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ + { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ + { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ + { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ + { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ + { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ + { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ + { "vz", RGXFWIF_LOG_TYPE_GROUP_VZ }, \ + { "safety", RGXFWIF_LOG_TYPE_GROUP_SAFETY }, \ + { "verbose", RGXFWIF_LOG_TYPE_GROUP_VERBOSE }, \ + { "customer",RGXFWIF_LOG_TYPE_GROUP_CUSTOMER }, \ + { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } + +/* Used in print statements to display log group state, one %s per group defined */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" + +/* Used in a print statement to display log group state, one per group */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U) ?("main ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U) ?("mts ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U) ?("cleanup ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U) ?("csw ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U) ?("bif ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U) ?("pm ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U) ?("rtd ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U) ?("spm ") :("")), \ + ((((types) & 
RGXFWIF_LOG_TYPE_GROUP_POW) != 0U) ?("pow ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U) ?("hwr ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U) ?("hwp ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U) ?("rpm ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U) ?("dma ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) != 0U) ?("misc ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_VZ) != 0U) ?("vz ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_SAFETY) != 0U) ?("safety ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_VERBOSE) != 0U) ?("verbose ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_CUSTOMER) != 0U)?("customer ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U) ?("debug ") :("")) + +/*! + ****************************************************************************** + * Trace Buffer + *****************************************************************************/ + +/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ +#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */ +#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */ +#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */ + +#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +#define RGXFW_THREAD_NUM 2U #else -#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE 0 +#define RGXFW_THREAD_NUM 1U #endif -#define RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET RGX_HEAP_UM_GENERAL_RESERVED_SIZE -#define RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGX_HEAP_KM_GENERAL_RESERVED_SIZE) +typedef struct +{ + IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_UINT32 ui32LineNum; +} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_FILE_INFO_BUF) == 408, + "RGXFWIF_FILE_INFO_BUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +/*! + * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface + * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing + * @{ + */ + +/*! + * @Brief Firmware trace buffer details + */ +typedef struct +{ + IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */ + IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */ + + RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ + + RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf; +} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; + +/*! @} End of Defgroup SRVAndFWTracing */ + +/*! 
+ * @InGroup SRVAndFWTracing + * @Brief Firmware trace control data + */ +typedef struct +{ + IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + RGXFWIF_TRACEBUF_SPACE sTraceBuf[MAX_THREAD_NUM]; /*!< FW Trace buffer */ +#else + RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ +#endif + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated + (in RGXTraceBufferInitOnDemandResources) */ + IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_TRACEBUF; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_TRACEBUF) == 880, + "RGXFWIF_TRACEBUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +/* Debug-info sub-fields */ +/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) + +/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) + +/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ +#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) +#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) + +/* Bit 3-15: Unused bits */ + +#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 +#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " +#define RGXFWT_DEBUG_INFO_STR_APPEND ")" + +/* Table of debug info sub-field's masks and corresponding message strings + * to be appended to firmware trace + * + * Mask : 16 bit mask to be applied to debug-info field + * String : debug info message string + */ + +#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ +/*Mask, String*/ \ +X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ +X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ +X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") + +/* Firmware trace time-stamp field breakup */ + +/* RGX_CR_TIMER register read (48 bits) value*/ +#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) +#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + +/* Extra debug-info (16 bits) */ +#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) +#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK + +typedef struct +{ + IMG_UINT bfOsState : 3; + IMG_UINT bfFLOk : 1; + IMG_UINT bfFLGrowPending : 1; + IMG_UINT bfReserved : 27; +} RGXFWIF_OS_RUNTIME_FLAGS; + +#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT64 RGXFW_ALIGN ui64Data; + RGXFWIF_FILE_INFO_BUF sFaultBuf; +} UNCACHED_ALIGN RGX_FWFAULTINFO; + +#define RGXFWIF_POW_STATES \ + X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ + X(RGXFWIF_POW_ON) /* running HW commands */ \ + X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ + X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ + +typedef enum +{ +#define X(NAME) NAME, + RGXFWIF_POW_STATES +#undef X +} RGXFWIF_POW_STATE; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +#define MAX_THREAD_NUM 2 + +static_assert(RGXFW_THREAD_NUM <= MAX_THREAD_NUM, + "RGXFW_THREAD_NUM is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER"); 
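/*
 * Illustrative sketch (assumptions noted below): how the log-group name/value
 * map and the ui32LogType field of RGXFWIF_TRACEBUF above are typically tied
 * together on the host side. The RGXFWIF_LOG_TYPE_* values and the map-entry
 * layout are copied from the hunk above; ParseLogGroups() and the sample
 * group list are hypothetical helpers for illustration, not the driver's own
 * AppHint/pvrdebug parsing code.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RGXFWIF_LOG_TYPE_TRACE       0x00000001U
#define RGXFWIF_LOG_TYPE_GROUP_MAIN  0x00000002U
#define RGXFWIF_LOG_TYPE_GROUP_POW   0x00000200U
#define RGXFWIF_LOG_TYPE_GROUP_HWR   0x00000400U

typedef struct {
	const char *pszLogGroupName;
	uint32_t    ui32LogGroupType;
} LOG_GROUP_MAP_ENTRY;

/* Subset of the RGXFWIF_LOG_GROUP_NAME_VALUE_MAP table, for the sketch only. */
static const LOG_GROUP_MAP_ENTRY asMap[] = {
	{ "main", RGXFWIF_LOG_TYPE_GROUP_MAIN },
	{ "pow",  RGXFWIF_LOG_TYPE_GROUP_POW  },
	{ "hwr",  RGXFWIF_LOG_TYPE_GROUP_HWR  },
};

/* Build a ui32LogType mask from a comma-separated group list, e.g. "main,hwr". */
static uint32_t ParseLogGroups(const char *pszList)
{
	uint32_t ui32Mask = 0;
	char acBuf[64];
	char *pszTok;

	strncpy(acBuf, pszList, sizeof(acBuf) - 1);
	acBuf[sizeof(acBuf) - 1] = '\0';

	for (pszTok = strtok(acBuf, ","); pszTok != NULL; pszTok = strtok(NULL, ",")) {
		size_t i;
		for (i = 0; i < sizeof(asMap) / sizeof(asMap[0]); i++) {
			if (strcmp(pszTok, asMap[i].pszLogGroupName) == 0)
				ui32Mask |= asMap[i].ui32LogGroupType;
		}
	}
	return ui32Mask;
}

int main(void)
{
	/* "main,hwr" plus trace output enabled -> 0x00000403 */
	uint32_t ui32LogType = ParseLogGroups("main,hwr") | RGXFWIF_LOG_TYPE_TRACE;
	printf("ui32LogType = 0x%08x\n", ui32LogType);
	return 0;
}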
+#endif + +/* Firmware HWR states */ +#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ +#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ +#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ +#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ +#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ +#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ +#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ + +#define RGXFWIF_PHR_MODE_OFF (0UL) +#define RGXFWIF_PHR_MODE_RD_RESET (1UL) + +typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; + +typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; + +/*! @Brief Firmware system data shared with the Host driver */ +typedef struct +{ + IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ + IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ + volatile RGXFWIF_POW_STATE ePowState; + struct { + volatile IMG_UINT32 ui32HWPerfRIdx; + volatile IMG_UINT32 ui32HWPerfWIdx; + volatile IMG_UINT32 ui32HWPerfWrapCount; + } sHWPerfCtrl; /* Struct used to inval/flush HWPerfCtrl members */ + IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ + IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ + + /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with + * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ + IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ + IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ + IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ + RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OSIDS];/*!< State flags for each Operating System mirrored from Fw coremem */ + RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ + IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + IMG_UINT32 aui32CrPollAddr[MAX_THREAD_NUM]; /*!< Failed poll address */ + IMG_UINT32 aui32CrPollMask[MAX_THREAD_NUM]; /*!< Failed poll mask */ + IMG_UINT32 aui32CrPollCount[MAX_THREAD_NUM]; /*!< Failed poll count */ +#else + IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ + IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ + IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ +#endif + IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; + IMG_UINT32 aui32TSMirror[RGXFW_MAX_NUM_OSIDS]; /*!< Array of time slice per OS Mirrored from the FW */ + +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) +#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) +#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) + IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; +#endif + RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ + RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ + IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ + IMG_UINT32 
ui32McConfig; /*!< Identify whether MC config is P-P or P-S */ + IMG_UINT32 ui32MemFaultCheck; /*!< Device mem fault check on PCI systems */ +} UNCACHED_ALIGN RGXFWIF_SYSDATA; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_SYSDATA) == 3624, + "RGXFWIF_SYSDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER == 3624"); +#endif + +#if defined(PVRSRV_STALLED_CCB_ACTION) +#define PVR_SLR_LOG_ENTRIES 10U +#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64Timestamp; + IMG_UINT32 ui32FWCtxAddr; + IMG_UINT32 ui32NumUFOs; + IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; +} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; +#endif + +#define RGXFWIF_OFFLINE_DATA_BUFFER_SIZE_IN_WORDS (128U) + +/*! + * @InGroup ContextSwitching + * @Brief Firmware per-os data and configuration + */ +typedef struct +{ + IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ + IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ + IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ +#if defined(PVRSRV_STALLED_CCB_ACTION) || defined(SUPPORT_OPEN_SOURCE_DRIVER) + IMG_UINT32 ui32ForcedUpdatesRequested; + IMG_UINT8 ui8SLRLogWp; + RGXFWIF_SLR_ENTRY sSLRLogFirst; + RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; + IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; +#endif +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + volatile IMG_UINT32 aui32InterruptCount[MAX_THREAD_NUM]; /*!< Interrupt count from Threads > */ +#else + volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ +#endif + IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ + RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ + IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ + IMG_UINT32 aui32OfflineBuffer[RGXFWIF_OFFLINE_DATA_BUFFER_SIZE_IN_WORDS]; +} UNCACHED_ALIGN RGXFWIF_OSDATA; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_OSDATA) == 584, + "RGXFWIF_OSDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif +/*! + ****************************************************************************** + * HWR Data + *****************************************************************************/ +/*! + * @Defgroup HWRInfo FW HWR shared data interface + * @Brief Types grouping data structures and defines used in realising the HWR record. + * @{ + */ + +#define RGXFW_PROCESS_NAME_LEN (16) + +/*! @Brief HWR Lockup types */ +typedef enum +{ + RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ + RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ + RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ + RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ + RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ + RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ + RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ + RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ + RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ + RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ + RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ +} RGX_HWRTYPE; + +#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 
0 : 1) + +#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) + +/************************ + * GPU HW error codes * + ************************/ +typedef enum +{ + RGX_HW_ERR_NA = 0x0, + RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, +} RGX_HW_ERR; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ + IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_BIFINFO; + +typedef struct +{ + IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ +} RGX_ECCINFO; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_MMUINFO; + +typedef struct +{ + IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ + IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ + IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ + IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} UNCACHED_ALIGN RGX_POLLINFO; + +typedef struct +{ + IMG_UINT32 ui32BadVAddr; /*!< VA address */ + IMG_UINT32 ui32EntryLo; +} RGX_TLBINFO; + +/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. */ +typedef struct +{ + union + { + RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ + RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ + RGX_POLLINFO sPollInfo; /*!< Poll failure details */ + RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ + RGX_ECCINFO sECCInfo; /*!< ECC failure details */ + } uHWRData; + + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ + IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ + IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ + IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ + IMG_UINT32 ui32HWRNumber; /*!< HWR number */ + IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ + IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ + RGX_HWRTYPE eHWRType; /*!< Type of lockup */ + RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ + IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ + RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ + IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ +} UNCACHED_ALIGN RGX_HWRINFO; + +#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + 
RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ +#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ + +/*! @Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */ +typedef struct +{ + RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ + IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ + IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ + IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ + IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ + IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ + IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ + IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ + IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ +} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_HWRINFOBUF) == 2336, + "RGXFWIF_HWRINFOBUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +/*! @} End of HWRInfo */ + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; + +/*! + * @InGroup RenderTarget + * @Brief Firmware Freelist holding usage state of the Parameter Buffers + */ +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr; /*!< Freelist page table base */ + IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ + IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ + IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ + IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ + IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ + IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ + IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/ +#if defined(SUPPORT_SHADOW_FREELISTS) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) + IMG_UINT32 ui32HWRCounter; + PRGXFWIF_FWMEMCONTEXT psFWMemContext; +#endif + IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ + IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ + IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ + IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ +#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER) + IMG_UINT32 ui32PmGlobalPb; /*!< PM Global PB on which Freelist is loaded */ +#endif +} UNCACHED_ALIGN RGXFWIF_FREELIST; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_FREELIST) == 64, + "RGXFWIF_FREELIST is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +typedef struct {RGXFWIF_DEV_VIRTADDR sNext; + RGXFWIF_DEV_VIRTADDR sPrev;} RGXFW_DLLIST_NODE; + +#define RGXFWIF_MAX_NUM_CANCEL_REQUESTS (8U) /* Maximum number of workload cancellation requests */ + +/*! 
+ * @InGroup WorkloadContexts + * @Brief Firmware Common Context (or FWCC) + */ +typedef struct RGXFWIF_FWCOMMONCONTEXT_ +{ + /* CCB details for this firmware context */ + PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ + PRGXFWIF_CCCB psCCB; /*!< CCB base */ + + /* Context suspend state */ + PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ + + /* Flags e.g. for context switching */ + IMG_UINT32 ui32FWComCtxFlags; + IMG_INT32 i32Priority; /*!< Priority level */ + IMG_UINT32 ui32PrioritySeqNum; + + /* Framework state */ + PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ + + /* Misc and compatibility flags */ + IMG_UINT32 ui32CompatFlags; + + /* Statistic updates waiting to be passed back to the host... */ + IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ + IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ + IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ + RGXFWIF_DM eDM; /*!< Data Master type */ + RGXFW_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ + IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ + + IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ + bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ + + RGXFW_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ + RGXFW_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ + RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ + + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + + /* References to the host side originators */ + IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ + IMG_UINT32 ui32PID; /*!< associated process ID */ + + IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ + IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ + IMG_UINT32 ui32DeferCount; /*!< Number of context defers before forced scheduling of context */ + IMG_UINT32 aui32FirstIntJobRefToCancel[RGXFWIF_MAX_NUM_CANCEL_REQUESTS]; /*!< Saved values of the beginning of range of IntJobRefs at and above which workloads will be discarded */ + IMG_UINT32 aui32FirstValidIntJobRef[RGXFWIF_MAX_NUM_CANCEL_REQUESTS]; /*!< Saved values of the end of range of IntJobRef below which workloads will be discarded */ + IMG_BOOL bCancelRangesActive; /*!< True if any active ranges in aui32FirstIntJobRefToCancel and aui32FirstValidIntJobRef arrays */ + IMG_BOOL bLastKickedCmdWasSafetyOnly; + IMG_UINT32 ui32UID; /*!< associated process UID used in FW managed gpu work period hwperf events */ +} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; + +static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U, + "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) == 168, + "RGXFWIF_FWCOMMONCONTEXT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +/*! 
+ ****************************************************************************** + * HWRTData + *****************************************************************************/ + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; + +/* HWRTData flags */ +/* Deprecated flags 1:0 */ +#define HWRTDATA_HAS_LAST_TA (IMG_UINT32_C(1) << 2) +#define HWRTDATA_PARTIAL_RENDERED (IMG_UINT32_C(1) << 3) +#define HWRTDATA_DISABLE_TILE_REORDERING (IMG_UINT32_C(1) << 4) +#define HWRTDATA_NEED_BRN65101_BLIT (IMG_UINT32_C(1) << 5) +#define HWRTDATA_FIRST_BRN65101_STRIP (IMG_UINT32_C(1) << 6) +#define HWRTDATA_NEED_BRN67182_2ND_RENDER (IMG_UINT32_C(1) << 7) +#if defined(SUPPORT_AGP) +#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (IMG_UINT32_C(1) << 8) +#if defined(SUPPORT_AGP4) +#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (IMG_UINT32_C(1) << 9) +#endif +#define HWRTDATA_GEOM_NEEDS_RESUME (IMG_UINT32_C(1) << 10) +#endif +#if defined(SUPPORT_TRP) +#define HWRTDATA_GEOM_TRP_IN_PROGRESS (IMG_UINT32_C(1) << 9) +#endif + +typedef enum +{ + RGXFWIF_RTDATA_STATE_NONE = 0, + RGXFWIF_RTDATA_STATE_KICKTA, + RGXFWIF_RTDATA_STATE_KICKTAFIRST, + RGXFWIF_RTDATA_STATE_TAFINISHED, + RGXFWIF_RTDATA_STATE_KICK3D, + RGXFWIF_RTDATA_STATE_3DFINISHED, + RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, + RGXFWIF_RTDATA_STATE_TAOUTOFMEM, + RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, + /* In case of HWR, we can't set the RTDATA state to NONE, + * as this will cause any TA to become a first TA. + * To ensure all related TA's are skipped, we use the HWR state */ + RGXFWIF_RTDATA_STATE_HWR, + RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU +} RGXFWIF_RTDATA_STATE; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +#define MAX_FREELISTS_SIZE 3 + +static_assert(RGXFW_MAX_FREELISTS <= MAX_FREELISTS_SIZE, + "RGXFW_MAX_FREELISTS is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +typedef struct +{ + IMG_BOOL bTACachesNeedZeroing; + + IMG_UINT32 ui32ScreenPixelMax; + IMG_UINT64 RGXFW_ALIGN ui64MultiSampleCtl; + IMG_UINT64 ui64FlippedMultiSampleCtl; + IMG_UINT32 ui32TPCStride; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32MTileStride; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; + IMG_UINT32 uiRgnHeaderSize; + IMG_UINT32 ui32ISPMtileSize; +} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_HWRTDATA_COMMON) == 88, + "RGXFWIF_HWRTDATA_COMMON is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +typedef struct +{ + IMG_UINT32 ui32RenderTargetIndex; //Render number + IMG_UINT32 ui32CurrentRenderTarget; //index in RTA + IMG_UINT32 ui32ActiveRenderTargets; //total active RTs + IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM + RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices + RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target + IMG_UINT32 ui32MaxRTs; //Number of render targets in the array + IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_RTA_CTL; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_RTA_CTL) == 32, + "RGXFWIF_RTA_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +/*! 
+ * @InGroup RenderTarget + * @Brief Firmware Render Target data i.e. HWRTDATA used to hold the PM context + */ +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */ + + IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[1]; /*!< VCE Page Catalogue base */ + IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[1]; + IMG_UINT64 RGXFW_ALIGN ui64TECatBase[1]; /*!< TE Page Catalogue base */ + IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[1]; + IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ + IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; + + IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ + IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ + + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */ + + IMG_UINT32 ui32HWRTDataFlags; + RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[MAX_FREELISTS_SIZE]; /*!< Freelist to use */ + IMG_UINT32 aui32FreeListHWRSnapshot[MAX_FREELISTS_SIZE]; +#else + PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ + IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; +#endif + + IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ + + RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ + + IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sMacrotileArrayDevVAddr; /*!< Macrotiling array base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sRgnHeaderDevVAddr; /*!< Region headers base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sRTCDevVAddr; /*!< Render target cache base */ +#if defined(RGX_FIRMWARE) + struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom; +#else + RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN pui32OwnerGeomNotUsedByHost; +#endif +#if defined(SUPPORT_TRP) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) + IMG_UINT32 ui32KickFlagsCopy; + IMG_UINT32 ui32TEPageCopy; + IMG_UINT32 ui32VCEPageCopy; +#endif +#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER) + IMG_BOOL bTACachesNeedZeroing; +#endif + + RGXFWIF_CLEANUP_CTL RGXFW_ALIGN_DCACHEL sCleanupState; /*!< Render target clean up state */ +} RGXFW_ALIGN_DCACHEL RGXFWIF_HWRTDATA; + +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) +static_assert(sizeof(RGXFWIF_HWRTDATA) == 256, + "RGXFWIF_HWRTDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); +#endif + +/* Sync_checkpoint firmware object. + * This is the FW-addressable structure used to hold the sync checkpoint's + * state and other information which needs to be accessed by the firmware. + */ +typedef struct +{ + IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ + IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ +} SYNC_CHECKPOINT_FW_OBJ; + +/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ +#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) + +typedef enum +{ + RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ + RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ + RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ + RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ + RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ + RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */ + RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ + RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. 
Keep as last element */ +} RGXFWIF_REG_CFG_TYPE; + +#define RGXFWIF_KM_USC_TQ_SHADER_CODE_OFFSET_BYTES RGX_HEAP_KM_USC_RESERVED_REGION_OFFSET +#define RGXFWIF_KM_USC_TQ_SHADER_CODE_MAX_SIZE_BYTES (1U << 19) + +#define RGX_HEAP_KM_USC_RESERVED_SIZE RGXFWIF_KM_USC_TQ_SHADER_CODE_MAX_SIZE_BYTES +#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE + RGX_HEAP_KM_USC_RESERVED_SIZE + #endif /* RGX_FWIF_SHARED_H */ /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_heaps.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_heaps.h index e41e4002b2c4..f8fcfe1fe27e 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_heaps.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_heaps.h @@ -52,7 +52,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ #define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ #define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX Vulkan capture replay buffer Heap Identifier */ -#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */ #define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ #define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ #define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_hwperf.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_hwperf.h index 99531bfe60fc..d164c20a2417 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_hwperf.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_hwperf.h @@ -61,9 +61,6 @@ extern "C" { #include "rgx_common.h" #include "rgx_hwperf_common.h" -#include "pvrsrv_tlcommon.h" -#include "pvrsrv_sync_km.h" - #if !defined(__KERNEL__) /* User-mode and Firmware definitions only */ @@ -142,9 +139,6 @@ static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPe /*! The number of counters supported in each non-mux counter block */ #define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U -/*! The number of directly-addressable counters allowed in non-mux counter blocks */ -#define RGX_CNTBLK_COUNTERS_MAX ((IMG_UINT32)PVRSRV_HWPERF_COUNTERS_PERBLK + 0U) - /****************************************************************************** * Data Stream Common Types @@ -154,1130 +148,18 @@ static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPe * list, it should be appended at the end to maintain backward compatibility * of HWPerf data. */ -typedef enum { - - RGX_HWPERF_DM_GP, - RGX_HWPERF_DM_2D, - RGX_HWPERF_DM_TA, - RGX_HWPERF_DM_3D, - RGX_HWPERF_DM_CDM, - RGX_HWPERF_DM_RTU, - RGX_HWPERF_DM_SHG, - RGX_HWPERF_DM_TDM, - - RGX_HWPERF_DM_LAST, - - RGX_HWPERF_DM_INVALID = 0x1FFFFFFF -} RGX_HWPERF_DM; - -/*! 
Define containing bit position for 32bit feature flags used in hwperf and api */ -typedef IMG_UINT32 RGX_HWPERF_FEATURE_FLAGS; -#define RGX_HWPERF_FEATURE_PERFBUS_FLAG 0x0001U -#define RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG 0x0002U -#define RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG 0x0004U -#define RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG 0x0008U -#define RGX_HWPERF_FEATURE_ROGUEXE_FLAG 0x0010U -#define RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG 0x0020U -#define RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG 0x0040U -#define RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION 0x0080U -#define RGX_HWPERF_FEATURE_MULTICORE_FLAG 0x0100U -#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG 0x0800U -#define RGX_HWPERF_FEATURE_ROGUE_FLAG 0x1000U -#define RGX_HWPERF_FEATURE_OCEANIC_FLAG 0x2000U - -/*! This structure holds the data of a firmware packet. */ -typedef struct -{ - RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ - IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ - IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ - IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ - IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ - IMG_UINT32 ui32Padding; /*!< Reserved */ -} RGX_HWPERF_FW_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); - -/*! This structure holds the data of a hardware packet, including counters. */ -typedef struct -{ - IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ - IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ - IMG_UINT32 ui32PID; /*!< Process identifier */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ - IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ - IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ - IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ - IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ - IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ - IMG_UINT32 ui32CtxPriority; /*!< Context priority */ - IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ - IMG_UINT32 ui32KickInfo; /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ - IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */ - IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */ - IMG_UINT32 ui32Padding2; /*!< Reserved. 
To ensure correct alignment (not written in the packet) */ -} RGX_HWPERF_HW_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); -RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream); - -typedef struct -{ - IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ - IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ - IMG_UINT32 ui32PID; /*!< Process identifier */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32WorkTarget[4]; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ - /*!< V2A Block count / Client driver context job reference used for tracking/debugging */ - /*!< RGX Data master context job reference used for tracking/debugging */ - /*!< V2 Block count / Index to the time correlation at the time the packet was generated */ -} RGX_HWPERF_HW_DATA_V2; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA_V2); - -/*! Mask for use with the aui32CountBlksStream field when decoding the - * counter block ID and mask word. */ -#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U -#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U - -/*! Obtains the counter block ID word from an aui32CountBlksStream field. - * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit - * within group (3-0) */ -#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) - -/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address - * and stream index. May be used in decoding the counter block stream words of - * a RGX_HWPERF_HW_DATA structure. */ -#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) - -/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ -#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) - -#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) - -/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address - * and stream index. May be used in decoding the counter block stream words - * of a RGX_HWPERF_HW_DATA structure. */ -#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) - -/*! Context switch packet event */ -typedef struct -{ - RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */ - IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ - IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ - IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ - IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ -} RGX_HWPERF_CSW_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); - -/*! Enumeration of clocks supporting this event */ -typedef enum -{ - RGX_HWPERF_CLKS_CHG_INVALID = 0, - - RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, - - RGX_HWPERF_CLKS_CHG_LAST, -} RGX_HWPERF_CLKS_CHG_NAME; - -/*! This structure holds the data of a clocks change packet. 
*/ -typedef struct -{ - IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ - RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ - IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ - IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ - IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and - correlated to OSTimeStamp */ -} RGX_HWPERF_CLKS_CHG_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); - -/*! Enumeration of GPU utilisation states supported by this event */ -typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; - -/*! This structure holds the data of a GPU utilisation state change packet. */ -typedef struct -{ - RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ - IMG_UINT32 uiUnused1; /*!< Padding */ - IMG_UINT32 uiUnused2; /*!< Padding */ - IMG_UINT32 uiUnused3; /*!< Padding */ -} RGX_HWPERF_GPU_STATE_CHG_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); - - -/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ -#define HWPERF_PWR_EST_V1_SIG 0x48504531 - -/*! Macros to obtain a component field from a counter ID word */ -#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) -#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28) -/*!< Obtains the GPU ID from a counter ID word */ -#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) -#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) - -#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31) -#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28) -#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U) -#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24) -#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU) -#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU) - -/*! This macro constructs a counter ID for a power estimate data stream from - * the component parts of: high word flag, unit id, GPU id, counter number */ -#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \ - ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<= RGX_BVNC_STR_SIZE_MAX), - "Space inside HWPerf packet data for BVNC string insufficient"); - -#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (16U) - -/*! BVNC Features */ -typedef struct -{ - /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ - IMG_UINT16 ui16BlockID; - - /*! Number of counters in this block type */ - IMG_UINT16 ui16NumCounters; - - /*! Number of blocks of this type */ - IMG_UINT16 ui16NumBlocks; - - /*! Reserved for future use */ - IMG_UINT16 ui16Reserved; -} RGX_HWPERF_BVNC_BLOCK; - -/*! BVNC Features */ -typedef struct -{ - IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */ - IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */ - IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */ - IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */ - RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */ -} RGX_HWPERF_BVNC; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); - -/*! Performance Counter Configuration data element. */ -typedef struct -{ - IMG_UINT32 ui32BlockID; /*!< Counter Block ID. 
See RGX_HWPERF_CNTBLK_ID */ - IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ - IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ -} RGX_HWPERF_COUNTER_CFG_DATA_EL; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); - -/*! Performance Counter Configuration data. */ -typedef struct -{ - IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ - RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ - IMG_UINT32 ui32Padding; /*!< reserved */ -} RGX_HWPERF_COUNTER_CFG; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); - -/*! Sub-event's data. */ -typedef union -{ - struct - { - RGX_HWPERF_DM eDM; /*!< Data Master ID. */ - RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ - IMG_UINT32 ui32DMContext; /*!< FW render context */ - } sHWR; /*!< HWR sub-event data. */ - - RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ - struct - { - IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ - IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ - } sEvMsk; /*!< HW Filter Mask */ - RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ -} RGX_HWPERF_FWACT_DETAIL; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); - -/*! This structure holds the data of a FW activity event packet */ -typedef struct -{ - RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ - RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ - IMG_UINT32 ui32Padding; /*!< Reserved. */ -} RGX_HWPERF_FWACT_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); - - -typedef enum { - RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */ - RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */ - RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */ - RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */ - RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */ - RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */ - - RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */ -} RGX_HWPERF_UFO_EV; - -/*! Data stream tuple. */ -typedef union -{ - struct - { - IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ - IMG_UINT32 ui32Value; /*!< Value of the UFO object */ - } sCheckSuccess; - struct - { - IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ - IMG_UINT32 ui32Value; /*!< Value of the UFO object */ - IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */ - } sCheckFail; - struct - { - IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ - IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */ - IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */ - } sUpdate; -} RGX_HWPERF_UFO_DATA_ELEMENT; - -/*! This structure holds the packet payload data for UFO event. */ -typedef struct -{ - RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */ - IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data - at the time the packet was generated. - Used to approximate Host timestamps for - these events. 
*/ - IMG_UINT32 ui32PID; /*!< Client process identifier */ - IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX - API to track submitted work (for - debugging/trace purposes) */ - IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track - submitted work (for debugging / trace - purposes) */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context. - RenderContext for TA and 3D, Common - Context for other DMs */ - IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the - stream and stream data offset in the - payload */ - RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */ - IMG_UINT32 ui32Padding; /*!< Unused, reserved */ - IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */ -} RGX_HWPERF_UFO_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); - - -/*! - * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent - * between KICK_START / KICK_END inclusively for all event types. - */ -typedef enum -{ - RGX_HWPERF_KICK_TYPE_RESERVED_0, /*!< Replaced by separate TA and 3D types (Deprecated) */ - RGX_HWPERF_KICK_TYPE_RESERVED_1, /*!< 2D TQ Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQ2D) */ - RGX_HWPERF_KICK_TYPE_RESERVED_2, /*!< 3D TQ Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQ3D) */ - RGX_HWPERF_KICK_TYPE_RESERVED_3, /*!< Compute Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_CDM) */ - RGX_HWPERF_KICK_TYPE_RESERVED_4, /*!< Ray Store Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_RS) */ - RGX_HWPERF_KICK_TYPE_RESERVED_5, /*!< Vertex Ray Data Master Kick (Deprecated) */ - RGX_HWPERF_KICK_TYPE_RESERVED_6, /*!< 2D Data Master TQ Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQTDM) */ - RGX_HWPERF_KICK_TYPE_RESERVED_7, /*!< Sync Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_SYNC) */ - RGX_HWPERF_KICK_TYPE_RESERVED_8, /*!< TA Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_GEOM) */ - RGX_HWPERF_KICK_TYPE_RESERVED_9, /*!< 3D Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_3D) */ - RGX_HWPERF_KICK_TYPE_RESERVED_10, - - RGX_HWPERF_KICK_TYPE_UNIFIED = 0x10, - - RGX_HWPERF_KICK_TYPE2_TQ2D, /*!< 2D TQ Kick */ - RGX_HWPERF_KICK_TYPE2_TQ3D, /*!< 3D TQ Kick */ - RGX_HWPERF_KICK_TYPE2_TQTDM, /*!< 2D Data Master TQ Kick */ - RGX_HWPERF_KICK_TYPE2_CDM, /*!< Compute Kick */ - RGX_HWPERF_KICK_TYPE2_GEOM, /*!< GEOM Kick */ - RGX_HWPERF_KICK_TYPE2_3D, /*!< 3D Kick */ - RGX_HWPERF_KICK_TYPE2_SYNC, /*!< Sync Kick */ - RGX_HWPERF_KICK_TYPE2_RS, /*!< Ray Store Kick */ - RGX_HWPERF_KICK_TYPE2_LAST, - - RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff -} RGX_HWPERF_KICK_TYPE; - -typedef struct -{ - RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for - scheduling on GPU hardware. 
- See RGX_HWPERF_KICK_TYPE */ - IMG_UINT32 ui32PID; /*!< Client process identifier */ - IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API - to track submitted work (for debugging / - trace purposes) */ - IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted - work (for debugging / trace purposes) */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32Padding; /*!< Unused, reserved */ - IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ - IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ - IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ - IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ - PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ - PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ - PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ - - /* Align structure size to 8 bytes */ -} RGX_HWPERF_HOST_ENQ_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef struct -{ - RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ - IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and - stream data offset in the payload */ -#ifdef __CHECKER__ - /* Since we're not conforming to the C99 standard by not using a flexible - * array member need to add a special case for Smatch static code analyser. */ - IMG_UINT32 aui32StreamData[]; -#else - IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - /*!< Series of tuples holding UFO objects data */ - - IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ -#endif -} RGX_HWPERF_HOST_UFO_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! - * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been - * Allocated, Freed or Modified. The values are used to determine which event - * data structure to use to decode the data from the event stream - */ -typedef enum -{ - RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ - RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ - RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, - /*!< Timeline resource packets are - now emitted in client hwperf buffer */ - RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ - RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ - RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ - - RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ -} RGX_HWPERF_HOST_RESOURCE_TYPE; - -typedef union -{ - /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer - * generated in the HOST stream. Timeline data is now provided in the - * CLIENT stream instead. 
- */ - struct - { - IMG_UINT32 uiPid; /*!< Identifier of owning process */ - IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - } sTimelineAlloc; - - /*! Data for TYPE_FENCE_PVR */ - struct - { - IMG_PID uiPID; /*!< Identifier of owning process */ - PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ - IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point - backing this fence on the GPU */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sFenceAlloc; - - /*! Data for TYPE_SYNC_CP */ - struct - { - IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ - PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ - IMG_PID uiPID; /*!< Identifier of owning process */ - PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sSyncCheckPointAlloc; - - /*! Data for TYPE_FENCE_SW */ - struct - { - IMG_PID uiPID; /*!< Identifier of owning process */ - PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ - PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ - IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sSWFenceAlloc; - - /*! Data for TYPE_SYNC */ - struct - { - IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sSyncAlloc; -} RGX_HWPERF_HOST_ALLOC_DETAIL; - -typedef struct -{ - RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; - /*!< This describes the type of the resource - allocated in the driver. See - RGX_HWPERF_HOST_RESOURCE_TYPE */ - RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; - /*!< Union of structures providing further - data regarding the resource allocated. - Size of data varies with union member that - is present, check ``ui32AllocType`` value - to decode */ -} RGX_HWPERF_HOST_ALLOC_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef union -{ - /*! Data for TYPE_TIMELINE (*Deprecated*) */ - struct - { - IMG_UINT32 uiPid; /*!< Identifier of owning process */ - IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - } sTimelineDestroy; - - /*! Data for TYPE_FENCE_PVR */ - struct - { - IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. */ - } sFenceDestroy; - - /*! Data for TYPE_SYNC_CP */ - struct - { - IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ - } sSyncCheckPointFree; - - /*! 
Data for TYPE_SYNC */ - struct - { - IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ - } sSyncFree; -} RGX_HWPERF_HOST_FREE_DETAIL; +#define RGX_HWPERF_DM_GP 0x00000000U +#define RGX_HWPERF_DM_2D 0x00000001U +#define RGX_HWPERF_DM_TA 0x00000002U +#define RGX_HWPERF_DM_3D 0x00000003U +#define RGX_HWPERF_DM_CDM 0x00000004U +#define RGX_HWPERF_DM_RTU 0x00000005U +#define RGX_HWPERF_DM_SHG 0x00000006U +#define RGX_HWPERF_DM_TDM 0x00000007U -typedef struct -{ - RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; - /*!< This describes the type of the resource - freed or released by the driver. See - RGX_HWPERF_HOST_RESOURCE_TYPE */ - RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; - /*!< Union of structures providing further data - regarding the resource freed. Size of data - varies with union member that is present, - check ``ui32FreeType`` value to decode */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -} RGX_HWPERF_HOST_FREE_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef struct -{ - IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of - the time domains correlation table */ - IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the - time domains correlation table */ - IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of - the time domains correlation table */ - IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ -} RGX_HWPERF_HOST_CLK_SYNC_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef union -{ - /*! Data for TYPE_FENCE_PVR */ - struct - { - IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence - resource that has been created */ - IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ - IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing - the fence on the GPU */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - } sFenceMerge; -} RGX_HWPERF_HOST_MODIFY_DETAIL; - -typedef struct -{ - RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; - /*!< Describes the type of the resource - modified by the driver. See - RGX_HWPERF_HOST_RESOURCE_TYPE */ - - RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; - /*!< Union of structures providing further - data regarding the resource modified. - Size of data varies with union member that - is present. - Check ``uiModifyType`` value to decode */ -} RGX_HWPERF_HOST_MODIFY_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ -static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef enum -{ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING,/*!< Device not responding to requests */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ - - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST -} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; - -typedef enum -{ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ - - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST -} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; - -/*! RGX_HWPERF_DEV_INFO_EV values */ -typedef enum -{ - RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ +#define RGX_HWPERF_DM_LAST 0x00000008U - RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ -} RGX_HWPERF_DEV_INFO_EV; - -/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing - * further data regarding the device's status - */ -typedef union -{ - /*! Data for device status event */ - struct - { - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; - /*!< Device's health status */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; - /*!< Reason for device's health status */ - } sDeviceStatus; -} RGX_HWPERF_HOST_DEV_INFO_DETAIL; - -/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ -typedef struct -{ - IMG_UINT32 ui32Padding; - /*!< Reserved. Align structure size to 8 bytes */ - RGX_HWPERF_DEV_INFO_EV eEvType; - /*!< Type of the sub-event. See - RGX_HWPERF_DEV_INFO_EV */ - RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; - /*!< Union of structures providing further data - regarding the device's status. Size of data - varies with union member that is present, - check ``eEvType`` value to decode */ -} RGX_HWPERF_HOST_DEV_INFO_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ -typedef enum -{ - RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */ - RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ -} RGX_HWPERF_INFO_EV; - -/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the - * RGX_HWPERF_HOST_INFO_DATA event. - */ -typedef union -{ - /*! 
Host Memory usage statistics */ - struct - { - IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */ - /*! Detailed memory usage */ - struct - { - IMG_UINT32 ui32Pid; /*!< Process ID */ - IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */ - IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */ - } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; - } sMemUsageStats; -} RGX_HWPERF_HOST_INFO_DETAIL; - -/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device - * memory usage information. - */ -typedef struct -{ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ - RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; - /*!< Union of structures providing further data - regarding memory usage. Size varies with union - member that is present, check ``eEvType`` - value to decode */ -} RGX_HWPERF_HOST_INFO_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! FENCE_WAIT_TYPE definitions */ -typedef enum -{ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ - - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; - -/*! FENCE_WAIT_RESULT definitions */ -typedef enum -{ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ - - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; - -/*! FENCE_WAIT_DETAIL Event Payload */ -typedef union -{ -/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ - struct - { - IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ - } sBegin; - - /*! Data for SYNC_FENCE_WAIT_TYPE_END */ - struct - { - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ - } sEnd; -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; - -/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure - * is received whenever the host driver handles a wait for sync event request. - */ -typedef struct -{ - IMG_PID uiPID; /*!< Identifier of the owning process */ - PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; - /*!< Type of the subevent, see - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; - /*!< Union of structures providing further data - regarding device's status. Size of data varies with - union member that is present, check ``eType`` value - to decode */ - -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; - -static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. - * Software Timeline Advanced Event Payload. This data structure is received - * whenever the host driver processes a Software Timeline Advanced event. 
- */ -typedef struct -{ - IMG_PID uiPID; /*!< Identifier of the owning process */ - PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ - IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the - timeline has advanced */ - -} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; - -static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef enum -{ - RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */ - RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */ - - RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */ -} RGX_HWPERF_HOST_CLIENT_INFO_TYPE; - -typedef struct -{ - IMG_PID uiClientPID; /*!< Client process identifier */ - IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */ - IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */ -} RGX_HWPERF_HOST_CLIENT_PROC_NAME; - -#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \ - ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen))) - -typedef union -{ - struct - { - IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */ - RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - } sProcName; -} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL; - -typedef struct -{ - IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */ - RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType; - /*!< Type of the subevent, see - RGX_HWPERF_HOST_CLIENT_INFO_TYPE */ - RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail; - /*!< Union of structures. Size of data - varies with union member that is present, - check ``eType`` value to decode */ - -} RGX_HWPERF_HOST_CLIENT_INFO_DATA; - -static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef enum -{ - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, - - RGX_HWPERF_RESOURCE_TYPE_COUNT -} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; - -typedef struct -{ - IMG_UINT32 ui32Height; - IMG_UINT32 ui32Width; - IMG_UINT32 ui32BPP; - IMG_UINT32 ui32PixFormat; -} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; - -typedef struct -{ - IMG_INT32 i32XOffset; /*!< render surface X shift */ - IMG_INT32 i32YOffset; /*!< render surface Y shift */ - IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ - IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ -} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; - -typedef union -{ - struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES - { - IMG_UINT32 ui32RenderSurfaceCount; - RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - } sRenderSurfaces; - - struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS - { - RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - } sTLTBuffers; -} RGX_RESOURCE_CAPTURE_DETAIL; - -typedef struct -{ - RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; - IMG_PID uPID; - IMG_UINT32 ui32ContextID; - IMG_UINT32 ui32FrameNum; - IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */ - IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the 
capture is originating from. */ - RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ -} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; - -#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) - -/*! Tile Lifetime Tracking header size. Only available if - * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via - * SUPPORT_TLT_PERF - */ -#define RGX_TLT_HARDWARE_HDR_SIZE (16U) - -/* PVRSRVGetHWPerfResourceCaptureResult */ -typedef enum -{ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ -} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; - -typedef struct -{ - IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ - IMG_UINT32 ui32CtxID; - RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, - unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ - IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ -} RGX_RESOURCE_CAPTURE_RESULT; - -/*! This type is a union of packet payload data structures associated with - * various FW and Host events */ -typedef union -{ - RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, - events ``0x01-0x06`` */ - RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, - events ``0x07-0x19``, ``0x28-0x29`` */ - RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet - data, events ``0x1A`` */ - RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state - change event packet data, - events ``0x1B`` */ - RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event - packet data, - events ``0x20-0x22`` */ - RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, - events ``0x23`` */ - RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, - events ``0x30-0x31`` */ - RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, - events ``0x32`` */ - RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ - RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event - packet data, - events ``0x39`` */ - /* */ - RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, - events ``0x01`` (Host) */ - RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, - events ``0x02`` (Host) */ - RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, - events ``0x03`` (Host) */ - RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, - events ``0x04`` (Host) */ - RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, - events ``0x05`` (Host) */ - RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, - events ``0x06`` (Host) */ - RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, - events ``0x07`` (Host) */ - RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, - events ``0x08`` (Host) */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, - events ``0x09`` (Host) */ - RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance - 
data, events ``0x0A`` (Host) */ - RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info, - events ``0x0B`` (Host) */ - -} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); - -#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) - -#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ - ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) +#define RGX_HWPERF_DM_INVALID 0x1FFFFFFFU /****************************************************************************** * API Types @@ -1435,19 +317,6 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; /*! The number of layout blocks defined with configurable multiplexed * performance counters, hence excludes custom counter blocks. */ -#if defined(RGX_FEATURE_HWPERF_OCEANIC) -#define RGX_HWPERF_MAX_MUX_BLKS (\ - (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ - RGX_CNTBLK_INDIRECT_COUNT(PBE, 0) ) - -#define RGX_HWPERF_MAX_DA_BLKS (\ - (IMG_UINT32)RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 0)+\ - RGX_CNTBLK_INDIRECT_COUNT(USC, 0) ) - -#define RGX_HWPERF_MAX_DEFINED_BLKS (\ - (IMG_UINT32)RGX_HWPERF_MAX_MUX_BLKS +\ - RGX_HWPERF_MAX_DA_BLKS ) -#else #define RGX_HWPERF_MAX_DEFINED_BLKS (\ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\ @@ -1458,7 +327,6 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) ) #define RGX_HWPERF_MAX_MUX_BLKS (\ RGX_HWPERF_MAX_DEFINED_BLKS ) -#endif static_assert( ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_options.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_options.h index 3f198989d6c4..df9c949f4a1f 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_options.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgx_options.h @@ -46,12 +46,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * The corresponding bit is set if the build option was enabled at compile * time. * - * In order to extract the enabled build flags the INTERNAL_TEST switch should - * be enabled in a client program which includes this header. Then the client - * can test specific build flags by reading the bit value at - * ##OPTIONNAME##_SET_OFFSET - * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. - * * IMPORTANT: add new options to unused bits or define a new dword * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield * remains backwards compatible. @@ -60,7 +54,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGX_OPTIONS_H #define RGX_OPTIONS_H -#define OPTIONS_NO_HARDWARE_EN (0x1UL << 0) +#define OPTIONS_OPEN_SOURCE_EN (0x1UL << 0) #define OPTIONS_PDUMP_EN (0x1UL << 1) #define OPTIONS_UNUSED1_EN (0x1UL << 2) #define OPTIONS_SECURE_ALLOC_KM_EN (0x1UL << 3) @@ -76,11 +70,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define OPTIONS_AUTOVZ_HW_REGS_EN (0x1UL << 13) #define OPTIONS_FW_IRQ_REG_COUNTERS_EN (0x1UL << 14) #define OPTIONS_VALIDATION_EN (0x1UL << 15) +#define OPTIONS_NO_HARDWARE_EN (0x1UL << 16) +#define OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN (0x1UL << 17) +#define OPTIONS_NUM_DRIVERS_SUPPORTED_MASK (0x1fUL << 18) +#define OPTIONS_NUM_DRIVERS_SUPPORTED_SHIFT (18) +/* Option bits[22:18] are used for max number of drivers supported by the FW. */ #define OPTIONS_PERCONTEXT_FREELIST_EN (0x1UL << 31) #define RGX_BUILD_OPTIONS_MASK_KM \ - (OPTIONS_NO_HARDWARE_EN | \ + (OPTIONS_OPEN_SOURCE_EN | \ OPTIONS_PDUMP_EN | \ OPTIONS_SECURE_ALLOC_KM_EN | \ OPTIONS_RGX_EN | \ @@ -94,10 +93,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. OPTIONS_AUTOVZ_EN | \ OPTIONS_AUTOVZ_HW_REGS_EN | \ OPTIONS_FW_IRQ_REG_COUNTERS_EN | \ - OPTIONS_VALIDATION_EN) + OPTIONS_VALIDATION_EN | \ + OPTIONS_NO_HARDWARE_EN | \ + OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN) #define RGX_BUILD_OPTIONS_MASK_FW \ (RGX_BUILD_OPTIONS_MASK_KM & \ + ~OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN & \ + ~OPTIONS_NUM_DRIVERS_SUPPORTED_MASK & \ ~OPTIONS_BUFFER_SYNC_EN) /* Build options that the FW must have if the present on the KM */ @@ -112,6 +115,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. OPTIONS_PERCONTEXT_FREELIST_EN) & \ ~(OPTIONS_DEBUG_EN | \ OPTIONS_WORKLOAD_ESTIMATION_EN | \ + OPTIONS_OPEN_SOURCE_EN | \ OPTIONS_PDVFS_EN)) /* Build options that the KM must have if the present on the UM */ @@ -120,20 +124,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~(OPTIONS_DEBUG_EN | \ OPTIONS_WORKLOAD_ESTIMATION_EN | \ OPTIONS_PDVFS_EN | \ + OPTIONS_OPEN_SOURCE_EN | \ OPTIONS_BUFFER_SYNC_EN)) -#define NO_HARDWARE_OPTION "NO_HARDWARE " -#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) - #define OPTIONS_BIT0 OPTIONS_NO_HARDWARE_EN +#define OPEN_SOURCE_OPTION "OPEN_SOURCE_DRIVER " +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + #define OPTIONS_BIT0 OPTIONS_OPEN_SOURCE_EN #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" #endif #else #define OPTIONS_BIT0 0x0UL -#endif /* NO_HARDWARE */ +#endif /* SUPPORT_OPEN_SOURCE_DRIVER */ #define PDUMP_OPTION "PDUMP " -#if defined(PDUMP) || defined(INTERNAL_TEST) +#if defined(PDUMP) #define OPTIONS_BIT1 OPTIONS_PDUMP_EN #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -164,7 +169,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_SECURE_ALLOC_KM */ #define RGX_OPTION " " -#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) +#if defined(SUPPORT_RGX) #define OPTIONS_BIT4 OPTIONS_RGX_EN #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -174,7 +179,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_RGX */ #define SECURE_EXPORT_OPTION "SECURE_EXPORTS " -#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) +#if defined(SUPPORT_SECURE_EXPORT) #define OPTIONS_BIT5 OPTIONS_SECURE_EXPORT_EN #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -184,7 +189,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#endif /* SUPPORT_SECURE_EXPORT */ #define INSECURE_EXPORT_OPTION "INSECURE_EXPORTS " -#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) +#if defined(SUPPORT_INSECURE_EXPORT) #define OPTIONS_BIT6 OPTIONS_INSECURE_EXPORT_EN #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -194,7 +199,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_INSECURE_EXPORT */ #define VFP_OPTION "VFP " -#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) +#if defined(SUPPORT_VFP) #define OPTIONS_BIT7 OPTIONS_VFP_EN #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -204,7 +209,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_VFP */ #define WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " -#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) +#if defined(SUPPORT_WORKLOAD_ESTIMATION) #define OPTIONS_BIT8 OPTIONS_WORKLOAD_ESTIMATION_EN #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -214,7 +219,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_WORKLOAD_ESTIMATION */ #define PDVFS_OPTION "PDVFS " -#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) +#if defined(SUPPORT_PDVFS) #define OPTIONS_BIT9 OPTIONS_PDVFS_EN #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -224,7 +229,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_PDVFS */ #define DEBUG_OPTION "DEBUG " -#if defined(DEBUG) || defined(INTERNAL_TEST) +#if defined(DEBUG) #define OPTIONS_BIT10 OPTIONS_DEBUG_EN #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -234,7 +239,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* DEBUG */ #define BUFFER_SYNC_OPTION "BUFFER_SYNC " -#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) +#if defined(SUPPORT_BUFFER_SYNC) #define OPTIONS_BIT11 OPTIONS_BUFFER_SYNC_EN #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -264,7 +269,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_AUTOVZ_HW_REGS */ #define RGX_FW_IRQ_OS_COUNTERS_OPTION "FW_IRQ_OS_COUNTERS " -#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST) +#if defined(RGX_FW_IRQ_OS_COUNTERS) #define OPTIONS_BIT14 OPTIONS_FW_IRQ_REG_COUNTERS_EN #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -274,14 +279,24 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#endif /* RGX_FW_IRQ_OS_COUNTERS */ #define VALIDATION_OPTION "VALIDATION " -#if defined(SUPPORT_VALIDATION) - #define OPTIONS_BIT15 OPTIONS_VALIDATION_EN - #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM + #define OPTIONS_BIT15 0x0UL + +#define NO_HARDWARE_OPTION "NO_HARDWARE " +#if defined(NO_HARDWARE) + #define OPTIONS_BIT16 OPTIONS_NO_HARDWARE_EN + #if OPTIONS_BIT16 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" #endif #else - #define OPTIONS_BIT15 0x0UL -#endif /* SUPPORT_VALIDATION */ + #define OPTIONS_BIT16 0x0UL +#endif /* NO_HARDWARE */ + +#define NUM_DRIVERS_SUPPORTED_CHECK_OPTION "NUM_DRIVERS_SUPPORTED_CHECK " + #define OPTIONS_BIT17 OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN + #if OPTIONS_BIT17 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif /* NUM_DRIVERS_SUPPORTED_CHECK */ +/* Option bits[22:18] are used for max number of drivers supported by the FW. */ #define OPTIONS_BIT31 OPTIONS_PERCONTEXT_FREELIST_EN #if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM @@ -304,13 +319,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. OPTIONS_BIT12 |\ OPTIONS_BIT13 |\ OPTIONS_BIT14 |\ - OPTIONS_BIT15) + OPTIONS_BIT15 |\ + OPTIONS_BIT16 |\ + OPTIONS_BIT17) #define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) #define RGX_BUILD_OPTIONS_LIST \ { \ - NO_HARDWARE_OPTION, \ + OPEN_SOURCE_OPTION, \ PDUMP_OPTION, \ INTERNAL_UNUSED1_OPTION, \ SECURE_ALLOC_KM_OPTION, \ @@ -325,7 +342,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. AUTOVZ_OPTION, \ AUTOVZ_HW_REGS_OPTION, \ RGX_FW_IRQ_OS_COUNTERS_OPTION, \ - VALIDATION_OPTION \ + VALIDATION_OPTION, \ + NO_HARDWARE_OPTION, \ + NUM_DRIVERS_SUPPORTED_CHECK_OPTION \ } #endif /* RGX_OPTIONS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig.h index 2f47eced342e..6825da5caa3b 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig.h @@ -46,12 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdefs_km.h" - -#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000) -#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) -#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000) - -#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000) +#define RGX_HEAP_SIZE_32KiB IMG_UINT64_C(0x0000008000) #define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000) #define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000) #define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000) @@ -101,7 +96,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. heaps should be added to this file, like BRN_63142 below. NOTE: All regular heaps below greater than 1GB require a BRN_65273 WA heap. - Base addresses have to be a multiple of 4MiB Heaps must not start at 0x0000000000, as this is reserved for internal use within device memory layer. Range comments, those starting in column 0 below are a section heading of @@ -117,17 +111,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
/* 0x00_0000_0000 ************************************************************/ -/* 0x00_0000_0000 - 0x00_0040_0000 **/ - /* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/ +/* 0x00_0000_0000 - 0x00_0020_0000 **/ + /* 0 MiB to 2 MiB, size of 2 MiB : RESERVED (only when General SVM + * doesn't exist) **/ /* BRN_65273 TQ3DPARAMETERS base 0x0000010000 */ /* BRN_65273 GENERAL base 0x65C0000000 */ /* BRN_65273 GENERAL_NON4K base 0x73C0000000 */ -/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/ - /* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/ - #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000) - #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB) +/* 0x00_0000_8000 - 0x7F_FFFF_8000 **/ + /* MAX(32 KiB, PAGE_SIZE) to 512 GiB, size of 512 GiB less MAX(32 KiB, PAGE_SIZE) : GENERAL_SVM_HEAP **/ + + /* The MAX is determined at runtime (PAGE_SIZE isn't available on all platforms) + * so the #define's must NOT be used directly. Use the heap config after initialisation. */ + #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000008000) + #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_32KiB) /* 0x80_0000_0000 ************************************************************/ @@ -217,12 +215,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/ /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/ -/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/ - /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/ - /* CDM Signals heap (31 signals less one reserved for Services). - * Size 960B rounded up to minimum heap size */ - #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) - #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE +/* 0xEA_0000_0000 - 0xEA_001F_FFFF **/ + /* 936 GiB to 937 GiB, size of 1 GiB : FREE **/ /* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ diff --git a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig_65273.h b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig_65273.h index 31f90fee9d42..66f44dc8c18b 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig_65273.h +++ b/drivers/gpu/drm/img/img-volcanic/include/rogue/rgxheapconfig_65273.h @@ -44,17 +44,22 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXHEAPCONFIG_65273_H #define RGXHEAPCONFIG_65273_H +#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) + /* RGX Device Virtual Address Space Definitions - This file defines the RGX virtual address replacement heaps that are used in - in application memory contexts for BRN_65273. + This file defines the RGX virtual address replacement heaps that are used + in application memory contexts when BRN_65273 is present. The addresses + used are specifically crafted to avoid the BRN - under no circumstances + can they be moved, altered in size or additional heaps added. The heaps defined for BRN_65273 _replace_ the non-BRN equivalents when this BRN WA is active on affected cores. This is different to most other BRNs and hence has been given its own header file for clarity. The SVM_HEAP is also disabled and unavailable when the WA is active. This is reflected in the device connection capability bits returned to user space. + NOTE: All regular heaps in rgxheapconfig.h greater than 1GB require a BRN_65273 WA heap. 
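For reference, a minimal sketch of the runtime calculation described in the GENERAL_SVM_HEAP comment above (base is MAX(32 KiB, PAGE_SIZE), size is 512 GiB less that base). This is illustrative only and not part of the patch itself; the helper names are hypothetical, the constants simply mirror RGX_HEAP_SIZE_32KiB / RGX_HEAP_SIZE_512GiB, and it assumes the kernel's PAGE_SIZE macro is in scope via <linux/mm.h>.

    #include <linux/types.h>   /* u64 */
    #include <linux/mm.h>      /* PAGE_SIZE (assumed visible through this header) */

    /* Local mirrors of RGX_HEAP_SIZE_32KiB and RGX_HEAP_SIZE_512GiB */
    #define SVM_MIN_BASE    0x0000008000ULL   /* 32 KiB */
    #define SVM_HEAP_LIMIT  0x8000000000ULL   /* 512 GiB */

    /* Effective SVM heap base: 32 KiB, or one CPU page if that is larger. */
    static inline u64 svm_heap_runtime_base(void)
    {
            return (PAGE_SIZE > SVM_MIN_BASE) ? (u64)PAGE_SIZE : SVM_MIN_BASE;
    }

    /* Effective SVM heap size: everything from the runtime base up to 512 GiB. */
    static inline u64 svm_heap_runtime_size(void)
    {
            return SVM_HEAP_LIMIT - svm_heap_runtime_base();
    }

With 4 KiB pages this gives the 0x00_0000_8000 base and the "512 GiB less 32 KiB" size quoted above; on a 64 KiB-page platform the base moves up to 0x10000 and the size shrinks accordingly, which is why the #define values must not be used directly and the heap config is to be consulted after initialisation.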
diff --git a/drivers/gpu/drm/img/img-volcanic/include/servicesext.h b/drivers/gpu/drm/img/img-volcanic/include/servicesext.h index 737c58fe12b3..f2887d48aed7 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/servicesext.h +++ b/drivers/gpu/drm/img/img-volcanic/include/servicesext.h @@ -105,44 +105,14 @@ typedef IMG_INT32 *PPVRSRV_DEV_POWER_STATE; /*!< Typedef for ptr to PVRSRV_DEV_P Power Flags Enum */ typedef IMG_UINT32 PVRSRV_POWER_FLAGS; -#define PVRSRV_POWER_FLAGS_NONE 0U /*!< No flags */ -#define PVRSRV_POWER_FLAGS_FORCED 1U << 0 /*!< Power the transition should not fail */ -#define PVRSRV_POWER_FLAGS_SUSPEND_REQ 1U << 1 /*!< Power transition is due to OS suspend request */ -#define PVRSRV_POWER_FLAGS_RESUME_REQ 1U << 2 /*!< Power transition is due to OS resume request */ - -/* Clock speed handler prototypes */ - -/*! - Typedef for a pointer to a Function that will be called before a transition - from one clock speed to another. See also PFN_POST_CLOCKSPEED_CHANGE. - */ -typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eCurrentPowerState); - -/*! - Typedef for a pointer to a Function that will be called after a transition - from one clock speed to another. See also PFN_PRE_CLOCKSPEED_CHANGE. - */ -typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eCurrentPowerState); - -/*! - Typedef for a pointer to a function that will be called to transition the - device to a forced idle state. Used in unison with (forced) power requests, - DVFS and cluster count changes. - */ -typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE hDevHandle, - IMG_BOOL bDeviceOffPermitted); - -/*! - Typedef for a pointer to a function that will be called to cancel a forced - idle state and return the firmware back to a state where the hardware can be - scheduled. - */ -typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE hDevHandle); - -typedef PVRSRV_ERROR (*PFN_GPU_UNITS_POWER_CHANGE) (IMG_HANDLE hDevHandle, - IMG_UINT32 ui32SESPowerState); +#define PVRSRV_POWER_FLAGS_NONE 0U /*!< No flags */ +#define PVRSRV_POWER_FLAGS_FORCED (1U << 0) /*!< Power the transition should not fail */ +#define PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ (1U << 1) /*!< Indicates an OS Power Management + transition (S3/S4) has been requested. + Allows system modules to save VRAM */ +#define PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ (1U << 2) /*!< Indicates an OS Power Management + transition has been requested. + Allows system modules to load VRAM */ /*! ***************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/include/sync_checkpoint_external.h b/drivers/gpu/drm/img/img-volcanic/include/sync_checkpoint_external.h index 19b5011aa8b7..bfab937a1809 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/sync_checkpoint_external.h +++ b/drivers/gpu/drm/img/img-volcanic/include/sync_checkpoint_external.h @@ -79,5 +79,10 @@ typedef IMG_UINT32 PVRSRV_SYNC_CHECKPOINT_STATE; * represents a foreign sync point or collection of foreign sync points. */ #define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U) +/*! 
+ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which + * represents a mirrored cross-device foreign sync point + */ +#define SYNC_CHECKPOINT_MIRRORED_CHECKPOINT ((PVRSRV_TIMELINE) - 3U) #endif /* SYNC_CHECKPOINT_EXTERNAL_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/sync_prim_internal.h b/drivers/gpu/drm/img/img-volcanic/include/sync_prim_internal.h index 77164c2356cd..c4dec49e9ca4 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/sync_prim_internal.h +++ b/drivers/gpu/drm/img/img-volcanic/include/sync_prim_internal.h @@ -71,7 +71,6 @@ typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP_TAG { #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1U << 0) #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1U << 1) - #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2)) uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */ uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/system/rgx_tc/odin_defs.h b/drivers/gpu/drm/img/img-volcanic/include/system/rgx_tc/odin_defs.h index 6234887a1bfd..10ca9b6d4382 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/system/rgx_tc/odin_defs.h +++ b/drivers/gpu/drm/img/img-volcanic/include/system/rgx_tc/odin_defs.h @@ -46,6 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PCI_VENDOR_ID_ODIN (0x1AEE) #define DEVICE_ID_ODIN (0x1010) +#define DEVICE_ID_VALI (0x2010) #define DEVICE_ID_TBA (0x1CF2) /* PCI BAR 0 contains the PDP regs and the Odin system regs */ @@ -72,7 +73,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */ #define ODN_DUT_SOCIF_BAR 2 #define ODN_DUT_SOCIF_OFFSET 0x000000000 + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +/* Reserve only the strictly required register range for each OSID */ +#define ODN_DUT_SOCIF_SIZE 0x000010000 /* 64k */ +#else +/* Grant the driver access to the entire register IO range */ #define ODN_DUT_SOCIF_SIZE 0x004000000 /* 64MB */ +#endif /* PCI BAR 4 contains the on-board 1GB DDR memory */ #define ODN_DDR_BAR 4 @@ -288,6 +296,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define ODN_INTERRUPT_STATUS_CDMA2 (1 << (ODN_INTERRUPT_STATUS_CDMA_SHIFT + 1)) #define ODN_INTERRUPT_CLEAR_CDMA2 (1 << (ODN_INTERRUPT_CLR_CDMA_SHIFT + 1)) +#define ODN_INTERRUPT_ENABLE_OSID(n) (1 << (ODN_INTERRUPT_ENABLE_OS_IRQ_SHIFT + (n))) +#define ODN_INTERRUPT_STATUS_OSID(n) (1 << (ODN_INTERRUPT_STATUS_OS_IRQ_SHIFT + (n))) +#define ODN_INTERRUPT_CLEAR_OSID(n) (1 << (ODN_INTERRUPT_CLR_OS_IRQ_SHIFT + (n))) + /* Other defines */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_alignchecks.h index c09b369f8d2c..411e984aaa9e 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_alignchecks.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_alignchecks.h @@ -139,7 +139,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ - offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ + offsetof(RGXFWIF_SYSINIT, aui32TPUTrilinearFracMask),\ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ offsetof(RGXFWIF_SYSINIT, sFwSysData), \ \ @@ -178,7 +178,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #if defined(SUPPORT_TRP) #define RGXFW_ALIGN_CHECKS_INIT_KM \ RGXFW_ALIGN_CHECKS_INIT_KM0, \ - offsetof(RGXFWIF_FWTDMCONTEXT, ui32TRPState), \ offsetof(RGXFWIF_FWTDMCONTEXT, aui64TRPChecksums2D) #else #define RGXFW_ALIGN_CHECKS_INIT_KM RGXFW_ALIGN_CHECKS_INIT_KM0 diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_customer.h similarity index 82% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.h rename to drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_customer.h index b1ea6f093d3a..60a9273570fe 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_customer.h @@ -1,8 +1,8 @@ /*************************************************************************/ /*! @File -@Title Header for DDK implementation of the Services abstraction layer +@Title RGX firmware interface structures used by pvrsrvkm @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Header for DDK implementation of the Services abstraction layer +@Description RGX firmware interface structures used by pvrsrvkm @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -41,21 +41,11 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ -#if !defined(RGXLAYER_IMPL_H) -#define RGXLAYER_IMPL_H +#if !defined(RGX_FWIF_CUSTOMER_H) +#define RGX_FWIF_CUSTOMER_H -#include "rgxlayer.h" -#include "device_connection.h" +/* + * Custom firmware data structures here + */ -typedef struct _RGX_LAYER_PARAMS_ -{ - void *psDevInfo; - void *psDevConfig; -#if defined(PDUMP) - IMG_UINT32 ui32PdumpFlags; #endif - - IMG_DEV_PHYADDR sPCAddr; -} RGX_LAYER_PARAMS; - -#endif /* RGXLAYER_IMPL_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_hwperf.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_hwperf.h index e430e86d62e6..e2287375fdc2 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_hwperf.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_hwperf.h @@ -60,8 +60,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define RGX_HWPERF_NUM_PBE_SHARED ((RGX_FEATURE_NUM_SPU)) #define RGX_HWPERF_NUM_SWIFT ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) #define RGX_HWPERF_NUM_TEXAS ((RGX_FEATURE_NUM_SPU)) -#if (RGX_FEATURE_RAY_TRACING_ARCH > 2) -#define RGX_HWPERF_NUM_RAC ((RGX_FEATURE_NUM_SPU)) +#if (RGX_FEATURE_RAY_TRACING_ARCH > 2) && (RGX_FEATURE_SPU0_RAC_PRESENT > 0) +#define RGX_HWPERF_NUM_RAC ((RGX_NUM_RAC)) #else #define RGX_HWPERF_NUM_RAC ((0)) #endif diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_km.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_km.h index 193fb519032b..45cdb2d4ca88 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_km.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_km.h @@ -50,82 +50,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "dllist.h" #include "rgx_hwperf.h" #include "rgxheapconfig.h" - - -/*************************************************************************/ /*! - Logging type -*/ /**************************************************************************/ -#define RGXFWIF_LOG_TYPE_NONE 0x00000000U -#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U -#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U -#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U -#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U -#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U -#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U -#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U -#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U -#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U -#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U -#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U -#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U -#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U -#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U -#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U -#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U -#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU -#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU - -/* String used in pvrdebug -h output */ -#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" - -/* Table entry to map log group strings to log type value */ -typedef struct { - const IMG_CHAR* pszLogGroupName; - IMG_UINT32 ui32LogGroupType; -} RGXFWIF_LOG_GROUP_MAP_ENTRY; - -/* - Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup - table where needed. Keep log group names short, no more than 20 chars. 
-*/ -#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ - { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ - { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ - { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ - { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ - { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ - { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ - { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ - { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ - { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ - { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ - { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ - { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ - { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ - { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ - { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } - - -/* Used in print statements to display log group state, one %s per group defined */ -#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" - -/* Used in a print statement to display log group state, one per group */ -#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) ?("misc ") :("")), \ - (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :("")) - +#include "rgx_fwif_customer.h" /************************************************************************ * RGX FW signature checks @@ -134,105 +59,8 @@ typedef struct { #define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) -/*! - ****************************************************************************** - * Trace Buffer - *****************************************************************************/ - -/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ -#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */ -#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */ -#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */ - -#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U -#if defined(RGXFW_META_SUPPORT_2ND_THREAD) -#define RGXFW_THREAD_NUM 2U -#else -#define RGXFW_THREAD_NUM 1U -#endif - #define RGXFW_POLL_TYPE_SET 0x80000000U -#define RGXFW_PROCESS_NAME_LEN (16) - -typedef struct -{ - IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; - IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; - IMG_UINT32 ui32LineNum; -} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; - -/*! - * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface - * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing - * @{ - */ - -/*! 
- * @Brief Firmware trace buffer details - */ -typedef struct -{ - IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */ - IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */ - -#if defined(RGX_FIRMWARE) - IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ -#else - RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/ -#endif - IMG_PUINT32 RGXFW_ALIGN pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ - - RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf; -} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; - -/*! @} End of Defgroup SRVAndFWTracing */ - -#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64CRTimer; - IMG_UINT64 RGXFW_ALIGN ui64OSTimer; - IMG_UINT32 RGXFW_ALIGN ui32Data; - IMG_UINT32 ui32Reserved; - RGXFWIF_FILE_INFO_BUF sFaultBuf; -} UNCACHED_ALIGN RGX_FWFAULTINFO; - - -#define RGXFWIF_POW_STATES \ - X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ - X(RGXFWIF_POW_ON) /* running HW commands */ \ - X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ - X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ - -typedef enum -{ -#define X(NAME) NAME, - RGXFWIF_POW_STATES -#undef X -} RGXFWIF_POW_STATE; - -/* Firmware HWR states */ -#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ -#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ -#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ -#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ -#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ -#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ -#define RGXFWIF_HWR_RESTART_REQUESTED (0x1U << 7U) /*!< The FW has requested the host to restart it */ - -#define RGXFWIF_PHR_STATE_SHIFT (8U) -#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ -#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ -#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) - -#define RGXFWIF_PHR_MODE_OFF (0UL) -#define RGXFWIF_PHR_MODE_RD_RESET (1UL) -#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) - -typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; - /* Firmware per-DM HWR states */ #define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ #define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ @@ -245,16 +73,22 @@ typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; #define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ #define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ #define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an uncorrected GPU ECC error */ 
+#define RGXFWIF_DM_STATE_GPU_PARITY_HWR (IMG_UINT32_C(0x1) << 11) /*!< DM was forced into HWR due to an uncorrected GPU PARITY error */ +#define RGXFWIF_DM_STATE_GPU_LATENT_HWR (IMG_UINT32_C(0x1) << 12) /*!< DM was forced into HWR due to an uncorrected GPU LATENT error */ +#define RGXFWIF_DM_STATE_ICS_HWR (IMG_UINT32_C(0x1) << 13) /*!< DM was forced into HWR due to a fault detected by Idle Cycle Stealing */ /* Firmware's connection state */ -typedef enum -{ - RGXFW_CONNECTION_FW_OFFLINE = 0, /*!< Firmware is offline */ - RGXFW_CONNECTION_FW_READY, /*!< Firmware is initialised */ - RGXFW_CONNECTION_FW_ACTIVE, /*!< Firmware connection is fully established */ - RGXFW_CONNECTION_FW_OFFLOADING, /*!< Firmware is clearing up connection data */ - RGXFW_CONNECTION_FW_STATE_COUNT -} RGXFWIF_CONNECTION_FW_STATE; +typedef IMG_UINT32 RGXFWIF_CONNECTION_FW_STATE; +#define RGXFW_CONNECTION_FW_OFFLINE 0U /*!< Firmware is offline */ +#define RGXFW_CONNECTION_FW_READY 1U /*!< Firmware is initialised */ +#define RGXFW_CONNECTION_FW_ACTIVE 2U /*!< Firmware connection is fully established */ +#define RGXFW_CONNECTION_FW_GRACEFUL_OFFLOAD 3U /*!< Firmware is clearing up connection data */ +#define RGXFW_CONNECTION_FW_FORCED_OFFLOAD 4U /*!< Firmware is clearing up connection data */ +#define RGXFW_CONNECTION_FW_COOLDOWN 5U /*!< Firmware connection is in cooldown period */ +#define RGXFW_CONNECTION_FW_STATE_COUNT 6U + +#define RGXFW_FORCED_OFFLOAD_HCS_DEADLINE_MS 2U /*!< Workloads of Guest being forcefully offloaded must be stopped quickly */ +#define RGXFW_GRACEFUL_OFFLOAD_HCS_DEADLINE_MS 1000U /*!< Workloads of Guest being gracefully offloaded are allowed more time to finish */ /* OS' connection state */ typedef enum @@ -265,350 +99,65 @@ typedef enum RGXFW_CONNECTION_OS_STATE_COUNT } RGXFWIF_CONNECTION_OS_STATE; -typedef struct -{ - IMG_UINT bfOsState : 3; - IMG_UINT bfFLOk : 1; - IMG_UINT bfFLGrowPending : 1; - IMG_UINT bfIsolatedOS : 1; - IMG_UINT bfReserved : 26; -} RGXFWIF_OS_RUNTIME_FLAGS; - -typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; - -#if defined(PVRSRV_STALLED_CCB_ACTION) -#define PVR_SLR_LOG_ENTRIES 10 -#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64Timestamp; - IMG_UINT32 ui32FWCtxAddr; - IMG_UINT32 ui32NumUFOs; - IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; -} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; -#endif - -/*! - * @InGroup SRVAndFWTracing - * @Brief Firmware trace control data - */ -typedef struct -{ - IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ - RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ - IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated - (in RGXTraceBufferInitOnDemandResources) */ - IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_TRACEBUF; - -/*! 
@Brief Firmware system data shared with the Host driver */ -typedef struct -{ - IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ - IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ - volatile RGXFWIF_POW_STATE ePowState; - volatile IMG_UINT32 ui32HWPerfRIdx; - volatile IMG_UINT32 ui32HWPerfWIdx; - volatile IMG_UINT32 ui32HWPerfWrapCount; - IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ - IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ - - /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with - * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ - IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ - IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ - IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ - RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ - RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ - IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ - IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ - IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ - IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ - IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; -#if defined(SUPPORT_POWMON_COMPONENT) -#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) - RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; - IMG_UINT32 ui32PowerMonBufSizeInDWords; -#endif -#endif - -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32RenderKillingCtl; /*!< Rasterisation DM Killing Configuration from host */ - IMG_UINT32 ui32CDMTDMKillingCtl; /*!< CDM/TDM Killing Configuration from host */ -#endif -#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) -#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) -#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) - IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; -#endif - RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ - RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ - IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_SYSDATA; - -/*! 
- * @InGroup ContextSwitching - * @Brief Firmware per-os data and configuration - */ -typedef struct -{ - IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ - IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ - IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ -#if defined(PVRSRV_STALLED_CCB_ACTION) - IMG_UINT32 ui32ForcedUpdatesRequested; - IMG_UINT8 ui8SLRLogWp; - RGXFWIF_SLR_ENTRY sSLRLogFirst; - RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; - IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; -#endif - volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ - IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ - RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ - IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_OSDATA; - -/* Firmware trace time-stamp field breakup */ - -/* RGX_CR_TIMER register read (48 bits) value*/ -#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) -#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) - -/* Extra debug-info (16 bits) */ -#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) -#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK - - -/* Debug-info sub-fields */ -/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ -#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) -#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) - -/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ -#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) -#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) - -/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ -#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) -#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) - -/* Bit 3-15: Unused bits */ - -#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 -#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " -#define RGXFWT_DEBUG_INFO_STR_APPEND ")" - -/* Table of debug info sub-field's masks and corresponding message strings - * to be appended to firmware trace - * - * Mask : 16 bit mask to be applied to debug-info field - * String : debug info message string - */ - -#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ -/*Mask, String*/ \ -X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ -X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ -X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") - -/*! - ****************************************************************************** - * HWR Data - *****************************************************************************/ -/*! - * @Defgroup HWRInfo FW HWR shared data interface - * @Brief Types grouping data structures and defines used in realising the HWR record. - * @{ - */ -/*! 
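The trace time-stamp layout above packs a 48-bit RGX_CR_TIMER value with 16 bits of debug info, and the debug-info bits are enumerated through the RGXFWT_DEBUG_INFO_MSKSTRLIST X-macro. A sketch of decoding a stamp and expanding that list (macro names are taken from this header; the decode helper and table type are assumptions):

/* Split a firmware trace time-stamp into its 48-bit timer value and the
 * 16 bits of extra debug info stored in the top of the word. */
static void RGXFWTDecodeTimestamp(IMG_UINT64 ui64Stamp,
                                  IMG_UINT64 *pui64Time,
                                  IMG_UINT16 *pui16DebugInfo)
{
    *pui64Time      = (ui64Stamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT;
    *pui16DebugInfo = (IMG_UINT16)(ui64Stamp >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT);
}

/* Expanding RGXFWT_DEBUG_INFO_MSKSTRLIST with a one-line X macro yields the
 * mask/string table used when appending "(debug info: ...)" to a trace line. */
typedef struct
{
    IMG_UINT16      ui16Mask;
    const IMG_CHAR *pszStr;
} RGXFWT_DEBUG_INFO_ENTRY;

static const RGXFWT_DEBUG_INFO_ENTRY asDebugInfoTable[] =
{
#define X(mask, str) { mask, str },
    RGXFWT_DEBUG_INFO_MSKSTRLIST
#undef X
};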
@Brief HWR Lockup types */ -typedef enum -{ - RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ - RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ - RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ - RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ - RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ - RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ - RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ - RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ - RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ - RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ - RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ -} RGX_HWRTYPE; - -#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) - -#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ - ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ - ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ - ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) - -/************************ - * GPU HW error codes * - ************************/ -typedef enum -{ - RGX_HW_ERR_NA = 0x0, - RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, -} RGX_HW_ERR; - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ - IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ - IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ - IMG_UINT64 RGXFW_ALIGN ui64Reserved; -} RGX_BIFINFO; - -typedef struct -{ - IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ -} RGX_ECCINFO; - -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ - IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ - IMG_UINT64 RGXFW_ALIGN ui64Reserved; -} RGX_MMUINFO; - -typedef struct -{ - IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ - IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ - IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ - IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ - IMG_UINT64 RGXFW_ALIGN ui64Reserved; -} UNCACHED_ALIGN RGX_POLLINFO; - -typedef struct -{ - IMG_UINT32 ui32BadVAddr; /*!< VA address */ - IMG_UINT32 ui32EntryLo; -} RGX_TLBINFO; - -typedef struct -{ - union - { - RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ - RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ - RGX_POLLINFO sPollInfo; /*!< Poll failure details */ - RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ - RGX_ECCINFO sECCInfo; /*!< ECC failure details */ - } uHWRData; - - IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ - IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ - IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ - IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ - IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ - IMG_UINT32 ui32HWRNumber; /*!< HWR number */ - IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ - IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ - RGX_HWRTYPE eHWRType; /*!< Type of lockup */ - RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ - IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ - RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ - IMG_UINT64 RGXFW_ALIGN 
ui64CRTimeHWResetStart; /*!< HW reset start time */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ - IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ - IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ - IMG_UINT32 RGXFW_ALIGN ui32PDSStalledDMs; /*!< DMs stalled on PDS Store space */ - IMG_UINT32 ui32PDSActiveDMUSCs; /*!< Per-DM USC PDS activity */ -} UNCACHED_ALIGN RGX_HWRINFO; - -#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ -#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ -#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ -#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ - -typedef struct -{ - RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ - IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ - IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ - IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ - IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ - IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ - IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ - IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ - IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ -} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; - -/*! @} End of HWRInfo */ - #define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) #define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) #define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) #define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) -#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) -#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) - #define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) #define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) /*! ****************************************************************************** * RGX firmware Init Config Data - * NOTE: Please be careful to keep backwards compatibility with DDKv1 for the - * CTXSWITCH controls. 
*****************************************************************************/ /* Flag definitions affecting the firmware globally */ -#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */ -#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) -#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) -#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */ -#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) -#define RGXFWIF_INICFG_SPU_CLOCK_GATE (IMG_UINT32_C(0x1) << 5) -#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) -#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) -#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) -#define RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES (IMG_UINT32_C(0x1) << 9) -/* 10 unused */ +#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */ +#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) +#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */ +#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) +#define RGXFWIF_INICFG_SPU_CLOCK_GATE (IMG_UINT32_C(0x1) << 5) +#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) +#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) +#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) +#define RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES_EN (IMG_UINT32_C(0x1) << 9) +#define RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN (IMG_UINT32_C(0x1) << 10) /* 11 unused */ -#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) -#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) -#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) +#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) +#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) +#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) /* 15 unused */ -#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) -#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) -#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) -#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) -#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) -#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) -#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) -#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) -#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) -#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) -#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << 
RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) -#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) -#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) -#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) -#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ - RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) -#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) -#define RGXFWIF_INICFG_ALL (0xFFFFF3FFU) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) +#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) +#define RGXFWIF_INICFG_INJECT_ICS_FAULT (IMG_UINT32_C(0x1) << 21) +#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) +#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) +#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) +#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) +#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) +/* 27 unused */ +/* 28 unused */ +#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) +#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ + RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) +#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) +#define RGXFWIF_INICFG_ALL (0xFFFFF7FFU) /* Extended Flag definitions affecting the firmware globally */ -#define RGXFWIF_INICFG_EXT_ALL (0x0U) +#define RGXFWIF_INICFG_EXT_ALL (0x0U) -#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ - RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) +#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ + RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) /* Flag definitions affecting only workloads submitted by a particular OS */ @@ -618,65 +167,78 @@ typedef struct * @Name Per-OS DM context switch configuration flags * @{ */ -#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */ -#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM-TA and GEOM-SHG context switch */ -#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */ -#define 
RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */ -#define RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN (IMG_UINT32_C(0x1) << 4) - -#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 5) -#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 6) -#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 7) -#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 8) -#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM (IMG_UINT32_C(0x1) << 9) +#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM-TA and GEOM-SHG context switch */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN (IMG_UINT32_C(0x1) << 4) -#define RGXFWIF_INICFG_OS_ALL (0x3FFU) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 5) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 6) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 7) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 8) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM (IMG_UINT32_C(0x1) << 9) -#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ - RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ - RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ - RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN | \ - RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN) +#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN) -#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) +#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) /*! * @} End of Per-OS Context switch configuration flags * @} End of AddToGroup ContextSwitching */ +/*! 
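These flags are plain shift/mask fields packed into the 32-bit init config words. A brief sketch of composing the global and the per-OS word (macro names are taken from this header; the chosen values and helper names are examples only):

/* Build an example system config word: HWPerf on, medium context switch
 * profile (3-bit field at bit 16), ISP scheduling mode VER1_IPP (field at
 * bit 29), then clamp to the bits the firmware actually accepts. */
static IMG_UINT32 RGXBuildExampleSysConfig(void)
{
    IMG_UINT32 ui32Config = 0U;

    ui32Config |= RGXFWIF_INICFG_HWPERF_EN;
    ui32Config |= RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM;
    ui32Config |= RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP;

    return ui32Config & RGXFWIF_INICFG_ALL;
}

/* Toggle the per-DM context switch enables in a per-OS config word. */
static IMG_UINT32 RGXUpdateOsCtxSwitch(IMG_UINT32 ui32OsConfig, IMG_BOOL bEnable)
{
    /* Strip the per-DM enables first, then re-apply them if requested. */
    ui32OsConfig &= RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK;
    if (bEnable)
    {
        ui32OsConfig |= RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL;
    }
    return ui32OsConfig & RGXFWIF_INICFG_OS_ALL;
}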
+ * @AddToGroup IdleCycleStealing + * @{ + * @Name Per-OS DM Idle Cycle Stealing configuration flags + * @{ + */ +#define RGXFWIF_INICFG_OS_ICS_TDM_EN (IMG_UINT32_C(0x1) << 10) /*!< Enables ICS for TDM */ +#define RGXFWIF_INICFG_OS_ICS_GEOM_EN (IMG_UINT32_C(0x1) << 11) /*!< Enables ICS for GEOM */ +#define RGXFWIF_INICFG_OS_ICS_3D_EN (IMG_UINT32_C(0x1) << 12) /*!< Enables ICS for FRAG */ +#define RGXFWIF_INICFG_OS_ICS_CDM_EN (IMG_UINT32_C(0x1) << 13) /*!< Enables ICS for CDM */ +#define RGXFWIF_INICFG_OS_ICS_RDM_EN (IMG_UINT32_C(0x1) << 14) /*!< Enables ICS for RDM */ -#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) -#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) -#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) +#define RGXFWIF_INICFG_OS_ICS_DM_ALL (RGXFWIF_INICFG_OS_ICS_TDM_EN | \ + RGXFWIF_INICFG_OS_ICS_GEOM_EN | \ + RGXFWIF_INICFG_OS_ICS_3D_EN | \ + RGXFWIF_INICFG_OS_ICS_CDM_EN | \ + RGXFWIF_INICFG_OS_ICS_RDM_EN) -typedef enum -{ - RGX_ACTIVEPM_FORCE_OFF = 0, - RGX_ACTIVEPM_FORCE_ON = 1, - RGX_ACTIVEPM_DEFAULT = 2 -} RGX_ACTIVEPM_CONF; +#define RGXFWIF_INICFG_OS_ICS_DM_ALL_SHIFT (10U) +#define RGXFWIF_INICFG_OS_ICS_DM_APPHINT (RGXFWIF_INICFG_OS_ICS_CDM_EN >> RGXFWIF_INICFG_OS_ICS_DM_ALL_SHIFT) -typedef enum -{ - RGX_RD_POWER_ISLAND_FORCE_OFF = 0, - RGX_RD_POWER_ISLAND_FORCE_ON = 1, - RGX_RD_POWER_ISLAND_DEFAULT = 2 -} RGX_RD_POWER_ISLAND_CONF; +#define RGXFWIF_INICFG_OS_ICS_CLRMSK ~(RGXFWIF_INICFG_OS_ICS_DM_ALL) +/*! + * @} End of Per-OS Idle Cycle Stealing configuration flags + * @} End of AddToGroup IdleCycleStealing + */ +#define RGXFWIF_INICFG_OS_FDTI_PROFILE_LONG (IMG_UINT32_C(0x1)) +#define RGXFWIF_INICFG_OS_FDTI_PROFILE_MEDIUM (IMG_UINT32_C(0x2)) +#define RGXFWIF_INICFG_OS_FDTI_PROFILE_SHORT (IMG_UINT32_C(0x3)) -typedef struct -{ - IMG_UINT16 ui16RegNum; /*!< Register number */ - IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ - IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ - IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ -} RGXFW_REGISTER_LIST; +#define RGXFWIF_INICFG_FDTI_PROFILE_SHIFT (16) +#define RGXFWIF_INICFG_FDTI_PROFILE_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_FDTI_PROFILE_SHIFT) -#if defined(RGX_FIRMWARE) -typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; -#else -typedef struct {RGXFWIF_DEV_VIRTADDR p; - RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; -#endif +#define RGXFWIF_INICFG_OS_ALL (0x3FFFFU) + +#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) +#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) + +typedef IMG_UINT32 RGX_ACTIVEPM_CONF; +#define RGX_ACTIVEPM_FORCE_OFF 0U +#define RGX_ACTIVEPM_FORCE_ON 1U +#define RGX_ACTIVEPM_DEFAULT 2U + +typedef IMG_UINT32 RGX_RD_POWER_ISLAND_CONF; +#define RGX_RD_POWER_ISLAND_FORCE_OFF 0U +#define RGX_RD_POWER_ISLAND_FORCE_ON 1U +#define RGX_RD_POWER_ISLAND_DEFAULT 2U typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; @@ -688,25 +250,31 @@ typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FW; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; typedef RGXFWIF_DEV_VIRTADDR 
PRGX_HWPERF_CONFIG_CNTBLK; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; + typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA; typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR; -typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; + +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) +/*! + * @Brief Buffer to store KM active client contexts + */ +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ +} RGXFWIF_ACTIVE_CONTEXT_BUF_DATA; +#endif /*! * This number is used to represent an invalid page catalogue physical address @@ -719,8 +287,15 @@ typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; #define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU /*! - Firmware memory context. -*/ + * This number is used to represent an invalid OS ID for the purpose of tracking PC set ownership + */ +#define RGXFW_BIF_INVALID_OSID 0xFFFFFFFFU + +#define RGXFWIF_FWMEMCONTEXT_FLAGS_USED_TESS (0x00000001U) + +/*! + * Firmware memory context. + */ typedef struct { IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ @@ -729,9 +304,10 @@ typedef struct IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ IMG_UINT64 RGXFW_ALIGN ui64FBCStateIDMask; /*!< FBCDC state descriptor IDs (non-zero means defer on mem context activation) */ + IMG_UINT64 RGXFW_ALIGN ui64SpillAddr; IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) IMG_UINT32 ui32OSid; IMG_BOOL bOSidAxiProt; #endif @@ -756,6 +332,7 @@ typedef struct IMG_UINT32 uTAReg_DCE_WRITE; IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW0; IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW1; + IMG_UINT64 ui64EnabledUnitsMask; IMG_UINT32 uTAReg_GTA_SO_PRIM[4]; IMG_UINT16 ui16TACurrentIdx; } UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM; @@ -808,67 +385,30 @@ typedef struct typedef struct { IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ + IMG_UINT64 ui64EnabledUnitsMask; } RGXFWIF_COMPUTECTX_STATE; -/*! - * @InGroup WorkloadContexts - * @Brief Firmware Common Context (or FWCC) - */ -typedef struct RGXFWIF_FWCOMMONCONTEXT_ +typedef struct { - /* CCB details for this firmware context */ - PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ - PRGXFWIF_CCCB psCCB; /*!< CCB base */ - RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; - - /* Context suspend state */ - PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ - - /* Flags e.g. 
for context switching */ - IMG_UINT32 ui32FWComCtxFlags; - IMG_INT32 i32Priority; /*!< Priority level */ - IMG_UINT32 ui32PrioritySeqNum; - - /* Framework state */ - PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ - - /* Statistic updates waiting to be passed back to the host... */ - IMG_BOOL bStatsPending; /*!< True when some stats are pending */ - IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ - IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ - IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ - RGXFWIF_DM eDM; /*!< Data Master type */ - IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ - IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ + bool bSaved; + IMG_UINT64 ui64CheckSum[4]; +}RGXFWIF_TRP_CHECKSUM_GEOM_ENTRY; - IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; - IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ - bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ - - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ - RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ - RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ - - PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ - - /* References to the host side originators */ - IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ - IMG_UINT32 ui32PID; /*!< associated process ID */ - - IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ - - IMG_UINT32 ui32PipelinedKicks; /*!< Number of kick from this CCB currently submitted to the DM pipeline */ - IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ -} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; - -static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256, - "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); +#define RGXFWIF_CONTEXT_COMPAT_FLAGS_STATS_PENDING (1U << 0) +#define RGXFWIF_CONTEXT_COMPAT_FLAGS_HAS_DEFER_COUNT (1U << 1) typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; -typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; +typedef RGXFWIF_TRP_CHECKSUM_GEOM_ENTRY RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES]; + +typedef struct +{ + IMG_UINT32 ui32ExtJobRefToDisableZSStore; + IMG_BOOL bDisableZStore; + IMG_BOOL bDisableSStore; +} RGXFWIF_DISABLE_ZSSTORE; + +#define MAX_ZSSTORE_DISABLE 8 /*! 
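GEOM TRP checksums now carry a per-core bSaved flag alongside the four checksum words. A sketch of a comparison that honours that flag (the types are taken from this header; the comparison policy itself is an assumption for illustration):

/* Compare two sets of GEOM TRP checksums, skipping cores that did not
 * capture a checksum in both passes. */
static IMG_BOOL RGXTRPGeomChecksumsMatch(const RGXFWIF_TRP_CHECKSUM_GEOM asA,
                                         const RGXFWIF_TRP_CHECKSUM_GEOM asB)
{
    IMG_UINT32 ui32Core, ui32Word;

    for (ui32Core = 0; ui32Core < RGX_TRP_MAX_NUM_CORES; ui32Core++)
    {
        if (!asA[ui32Core].bSaved || !asB[ui32Core].bSaved)
        {
            continue;
        }
        for (ui32Word = 0; ui32Word < 4; ui32Word++)
        {
            if (asA[ui32Core].ui64CheckSum[ui32Word] != asB[ui32Core].ui64CheckSum[ui32Word])
            {
                return IMG_FALSE;
            }
        }
    }
    return IMG_TRUE;
}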
* @InGroup WorkloadContexts @@ -881,12 +421,19 @@ typedef struct RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; + RGXFWIF_DISABLE_ZSSTORE sDisableZSStoreQueue[MAX_ZSSTORE_DISABLE]; + + IMG_UINT32 ui32ZSStoreQueueCount; + IMG_UINT32 ui32WriteOffsetOfDisableZSStore; + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ #if defined(SUPPORT_TRP) RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; /*!< Used by Firmware to store checksums during 3D WRR */ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; /*!< Used by Firmware to store checksums during TA WRR */ + RGXFWIF_DM eTRPGeomCoreAffinity; /* !< Represent the DM affinity for pending 2nd TRP pass of GEOM otherwise points RGXFWIF_DM_MAX. */ #endif } UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; @@ -903,7 +450,6 @@ typedef struct IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ - IMG_UINT32 ui32WGPState; IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES]; } UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; @@ -928,7 +474,6 @@ typedef struct IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ #if defined(SUPPORT_TRP) - IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */ RGXFWIF_TRP_CHECKSUM_2D RGXFW_ALIGN aui64TRPChecksums2D; /*!< Used by Firmware to store checksums during TDM WRR */ #endif @@ -960,7 +505,6 @@ typedef struct volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ - IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ } UNCACHED_ALIGN RGXFWIF_CCB_CTL; /*! @@ -994,14 +538,15 @@ typedef struct typedef struct { - PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ - IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ - IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ - IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ - IMG_UINT32 ui32BPDataFlags; - IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ - IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ - RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ + IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ + IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ + IMG_UINT32 ui32BPDataFlags; + IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ + IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ + IMG_UINT64 RGXFW_ALIGN ui64SpillAddr; + RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ } RGXFWIF_BPDATA; #define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ @@ -1039,6 +584,12 @@ typedef struct IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ } RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; + RGXFWIF_DISABLE_ZSSTORE sDisableZSStore; +} RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA; + /*! 
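A sketch of filling the payload for the new RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE command (struct and field names are taken from this header; the context pointer and job reference are caller-supplied placeholders):

/* Request that the FW disable the Z store, but keep the stencil store, for
 * the workload identified by the given external job reference. */
static void RGXPrepareDisableZSStoreData(RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA *psData,
                                         PRGXFWIF_FWCOMMONCONTEXT psFWContext,
                                         IMG_UINT32 ui32ExtJobRef)
{
    psData->psContext = psFWContext;
    psData->sDisableZSStore.ui32ExtJobRefToDisableZSStore = ui32ExtJobRef;
    psData->sDisableZSStore.bDisableZStore = IMG_TRUE;   /* drop the Z (depth) store */
    psData->sDisableZSStore.bDisableSStore = IMG_FALSE;  /* keep the stencil store */

    /* If the 3D workload is not running yet, the FW queues the request on the
     * render context; that queue holds at most MAX_ZSSTORE_DISABLE entries. */
}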
* @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command */ @@ -1095,8 +646,8 @@ typedef struct { struct { - IMG_UINT32 ui32PowUnitsStateMask; /*!< New power units state mask */ - IMG_UINT32 ui32RACStateMask; /*!< New RAC state mask */ + IMG_UINT32 ui32PowUnits; /*!< New power units state mask */ + IMG_UINT32 ui32RACUnits; /*!< New RAC state mask */ }; IMG_BOOL bForced; /*!< If the operation is mandatory */ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ @@ -1156,7 +707,7 @@ typedef struct IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ } RGXFWIF_CORECLKSPEEDCHANGE_DATA; -#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16 +#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16U /*! * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command @@ -1177,22 +728,6 @@ typedef struct IMG_BOOL bDone; /*!< action backing/unbacking succeeded */ } RGXFWIF_ZSBUFFER_BACKING_DATA; -#if defined(SUPPORT_VALIDATION) -typedef struct -{ - IMG_UINT32 ui32RegWidth; - IMG_BOOL bWriteOp; - IMG_UINT32 ui32RegAddr; - IMG_UINT64 RGXFW_ALIGN ui64RegVal; -} RGXFWIF_RGXREG_DATA; - -typedef struct -{ - IMG_UINT64 ui64BaseAddress; - PRGXFWIF_FWCOMMONCONTEXT psContext; - IMG_UINT32 ui32Size; -} RGXFWIF_GPUMAP_DATA; -#endif /*! * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command @@ -1205,7 +740,8 @@ typedef struct IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */ } RGXFWIF_FREELIST_GS_DATA; -#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U) +/* Max freelists must include freelists loaded (for all kick IDs) and freelists being setup. */ +#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 3U) #define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U /*! @@ -1229,22 +765,22 @@ typedef struct ****************************************************************************** * Proactive DVFS Structures *****************************************************************************/ -#define NUM_OPP_VALUES 16 +#define NUM_OPP_LEVELS 16 typedef struct { IMG_UINT32 ui32Volt; /* V */ IMG_UINT32 ui32Freq; /* Hz */ -} UNCACHED_ALIGN PDVFS_OPP; +} UNCACHED_ALIGN OPP_LEVEL; typedef struct { - PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; + OPP_LEVEL asOPPValues[NUM_OPP_LEVELS]; #if defined(DEBUG) IMG_UINT32 ui32MinOPPPoint; #endif IMG_UINT32 ui32MaxOPPPoint; -} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP; +} UNCACHED_ALIGN RGXFWIF_OPP_INFO; typedef struct { @@ -1271,15 +807,6 @@ typedef enum RGXFWIF_REGCFG_CMD_DISABLE = 104 } RGXFWIF_REGDATA_CMD_TYPE; -typedef IMG_UINT32 RGXFWIF_REG_CFG_TYPE; -#define RGXFWIF_REG_CFG_TYPE_PWR_ON 0U /* Sidekick power event */ -#define RGXFWIF_REG_CFG_TYPE_DUST_CHANGE 1U /* Rascal / dust power event */ -#define RGXFWIF_REG_CFG_TYPE_TA 2U /* TA kick */ -#define RGXFWIF_REG_CFG_TYPE_3D 3U /* 3D kick */ -#define RGXFWIF_REG_CFG_TYPE_CDM 4U /* Compute kick */ -#define RGXFWIF_REG_CFG_TYPE_TDM 5U /* TDM kick */ -#define RGXFWIF_REG_CFG_TYPE_ALL 6U /* Applies to all types. Keep as last element */ - typedef struct { IMG_UINT64 ui64Addr; @@ -1301,7 +828,7 @@ typedef struct * PDump WRW command write granularity is 32 bits. * Add padding to ensure array size is 32 bit granular. 
*/ - IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; + IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; } UNCACHED_ALIGN RGXFWIF_REG_CFG; @@ -1316,10 +843,42 @@ typedef enum */ typedef struct { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; RGXFWIF_OS_STATE_CHANGE eNewOSState; } UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; +typedef enum +{ + RGXFWIF_PWR_COUNTER_DUMP_START = 1, + RGXFWIF_PWR_COUNTER_DUMP_STOP, + RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, +} RGXFWIF_COUNTER_DUMP_REQUEST; + +typedef struct +{ + RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest; +} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA; + +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; + IMG_UINT32 ui32FirstIntJobRefToCancel; + IMG_UINT32 ui32LastIntJobRefToCancel; +} UNCACHED_ALIGN RGXFWIF_CANCEL_WORK_DATA; + +/*! + ****************************************************************************** + * Platform configuration structures + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32PlatformCmd; + + /* !! Customisable properties/values follow !! */ + IMG_UINT32 ui32ClockFrequencyExample; +} UNCACHED_ALIGN RGXFWIF_PLATFORM_DATA; + + /*! * @Brief List of command types supported by the Kernel CCB */ @@ -1329,46 +888,47 @@ typedef enum RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, - RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ - RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ - RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request (type specified in the command data) */ - RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ - RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ - RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ - RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ - /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */ - RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ - RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ - RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ - - RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ - RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. 
*/ - + RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ + RGXFWIF_KCCB_CMD_CLEANUP = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ + RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ + RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ + RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ + RGXFWIF_KCCB_CMD_HEALTH_CHECK = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ + RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */ + RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW to disable zs store of a running 3D or add it to queue of render context. */ + RGXFWIF_KCCB_CMD_CANCEL_WORK = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Cancel all work up to and including a given intjobref for a given context */ /* Commands only permitted to the native or host OS */ - RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, - RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ - RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ - RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ - RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ - RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */ - RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ - RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, - RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSID. 
It can only be serviced for the Host DDK */ - RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ - /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */ - /* RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */ - RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ - RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ -#endif - RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_KCCB_CMD_GPUMAP = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */ -#endif + RGXFWIF_KCCB_CMD_POW = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request (type specified in the command data) */ + RGXFWIF_KCCB_CMD_REGCONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ + RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ + RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the priority/group for a particular driver. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ + RGXFWIF_KCCB_CMD_PHR_CFG = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ + RGXFWIF_KCCB_CMD_WDG_CFG = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ + RGXFWIF_KCCB_CMD_COUNTER_DUMP = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ + RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the GPU time slice for a particular driver. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the GPU time slice interval for all drivers. It can only be serviced for the Host DDK */ + + /* HWPerf commands */ + RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 300U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 301U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ + RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 302U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 303U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */ + + /* Indirect platform command */ + /* !! Do not add internal commands to the 4xx range !! 
*/ + RGXFWIF_KCCB_CMD_PLATFORM_CMD = 400U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Send a platform specific command */ + } RGXFWIF_KCCB_CMD_TYPE; -#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) +#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_POW - 1) /*! @Brief Kernel CCB command packet */ typedef struct @@ -1404,11 +964,11 @@ typedef struct #endif RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ + RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */ + RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA sDisableZSStoreData; /*!< Data for disabling zs store of a 3D workload */ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */ - RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */ -#endif + RGXFWIF_CANCEL_WORK_DATA sCancelWorkData; /*!< Data for cancelling work */ + RGXFWIF_PLATFORM_DATA sPlatformData; /*!< Data for sending platform specific parameters */ } UNCACHED_ALIGN uCmdData; } UNCACHED_ALIGN RGXFWIF_KCCB_CMD; @@ -1468,7 +1028,7 @@ typedef struct RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ - IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ + IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ } RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; @@ -1503,17 +1063,9 @@ typedef enum \n Command data: None */ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */ -#if defined(SUPPORT_PDVFS) RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, -#endif RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart \n Command data: None */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, -#if defined(SUPPORT_SOC_TIMER) - RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, -#endif -#endif RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ } RGXFWIF_FWCCB_CMD_TYPE; @@ -1529,7 +1081,8 @@ typedef enum RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ - RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES, /*!< 
PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_RAY_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumRayStores stat */ } RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; /*! @@ -1549,21 +1102,6 @@ typedef struct IMG_UINT32 ui32CoreClkRate; } UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA; -#if defined(SUPPORT_VALIDATION) -typedef struct -{ - IMG_UINT64 ui64RegValue; -} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA; - -#if defined(SUPPORT_SOC_TIMER) -typedef struct -{ - IMG_UINT64 ui64timerGray; - IMG_UINT64 ui64timerBinary; - IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; -} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA; -#endif -#endif /*! ****************************************************************************** @@ -1583,12 +1121,6 @@ typedef struct RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for context reset notification */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData; -#if defined(SUPPORT_SOC_TIMER) - RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers; -#endif -#endif } RGXFW_ALIGN uCmdData; } RGXFW_ALIGN RGXFWIF_FWCCB_CMD; @@ -1603,6 +1135,7 @@ RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD); typedef struct { IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */ + IMG_UINT16 ui16CyclesTakenHigh; /*!< [39:32] from the cycle counter, which has up to 40-bit range. */ IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */ } RGXFWIF_WORKEST_FWCCB_CMD; @@ -1619,7 +1152,7 @@ typedef struct #define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) #define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) -#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1)) +#define RGX_CCB_FWALLOC_ALIGN(size) (PVR_ALIGN(size, RGXFWIF_FWALLOC_ALIGN)) typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; @@ -1650,17 +1183,22 @@ typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. */ #define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) -#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ -#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ +/* UNFENCED type is not supported but keeping defines as it is for now */ -#if defined(SUPPORT_VALIDATION) -#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) -#endif #define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ #define RGXFWIF_CCB_CMD_TYPE_RAY (222U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP (223U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Process a vulkan timestamp */ /*! 
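The Workload Estimation return data now splits an up-to-40-bit cycle count across ui16CyclesTakenHigh ([39:32]) and ui32CyclesTaken. A sketch of reassembling it, assuming the low 32 bits remain in ui32CyclesTaken:

/* Reassemble the 40-bit cycle count reported by the firmware. */
static IMG_UINT64 RGXWorkEstGetCyclesTaken(const RGXFWIF_WORKEST_FWCCB_CMD *psCmd)
{
    return ((IMG_UINT64)psCmd->ui16CyclesTakenHigh << 32) | psCmd->ui32CyclesTaken;
}

/* Note: RGX_CCB_FWALLOC_ALIGN now defers to PVR_ALIGN, so e.g. a 20-byte
 * payload is padded up to 24 bytes with the 8-byte (sizeof(IMG_UINT64))
 * allocation unit; the behaviour is unchanged, only the expression differs. */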
@} End of Client CCB command types */ + +#define RGXFWIF_TRP_STATUS_UNKNOWN 0x000U +#define RGXFWIF_TRP_STATUS_CHECKSUMS_OK 0x001U +#define RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR 0x002U + +#define RGXFWIF_CR_TRP_SIGNATURE_STATUS (RGX_CR_SCRATCH10) + + typedef struct { /* Index for the KM Workload estimation return data array */ @@ -1678,12 +1216,12 @@ typedef struct */ typedef struct { - RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ - IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ - IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ - IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ + RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ + IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ + IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ + IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ + RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ #endif } RGXFWIF_CCB_CMD_HEADER; @@ -1710,6 +1248,12 @@ typedef struct IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ } UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; +typedef struct +{ + PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */ + IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */ +} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL; + typedef struct { PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ @@ -1721,9 +1265,9 @@ typedef struct * RGX Compatibility checks *****************************************************************************/ -/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC is a subject of change, - following define should be increased by 1 to indicate to compatibility logic, - that layout has changed */ +/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the + following define should be increased by 1 to indicate to the + compatibility logic that layout has changed. */ #define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 typedef struct @@ -1746,13 +1290,12 @@ typedef struct do { \ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ (name).ui64BVNC = 0; \ - } while (0) + } while (false) typedef struct { RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ - IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ @@ -1760,27 +1303,38 @@ typedef struct IMG_BOOL bUpdated; /*!< Information is valid */ } UNCACHED_ALIGN RGXFWIF_COMPCHECKS; +typedef struct +{ + IMG_UINT32 ui32NumCores; + IMG_UINT32 ui32MulticoreInfo; +} UNCACHED_ALIGN RGXFWIF_MULTICORE_INFO; + /*! 
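The TRP signature status values above are reported through RGX_CR_SCRATCH10 (per RGXFWIF_CR_TRP_SIGNATURE_STATUS). A sketch of interpreting a status word once it has been read back; how the register is read is left to the caller, and the helper name is an assumption:

/* Translate a TRP signature status word into a printable verdict. */
static const IMG_CHAR *RGXTRPStatusToStr(IMG_UINT32 ui32Status)
{
    switch (ui32Status)
    {
        case RGXFWIF_TRP_STATUS_CHECKSUMS_OK:    return "TRP checksums OK";
        case RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR: return "TRP checksum mismatch";
        case RGXFWIF_TRP_STATUS_UNKNOWN:
        default:                                 return "TRP status unknown";
    }
}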
@Brief Firmware Runtime configuration data \ref RGXFWIF_RUNTIME_CFG * allocated by services and used by the Firmware on boot **/ typedef struct { - IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ - IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */ - IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */ - IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */ - IMG_UINT32 ui32PowUnitsStateMask; /*!< Power Unit state mask set by the host */ - IMG_UINT32 ui32RACStateMask; /*!< RAC state mask set by the host */ - IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */ - IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */ - IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */ - IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */ - PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */ + IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */ + IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */ + IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */ +#if defined(SUPPORT_SOC_TIMER) + IMG_UINT32 ui32SOCClockSpeed; /*!< SOC clock speed, used for deadline scheduling */ +#endif + IMG_UINT32 ui32PowUnitsState; /*!< Power Unit state mask set by the host */ + IMG_UINT32 ui32RACUnitsState; /*!< RAC state mask set by the host */ + IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */ + IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */ + IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */ + IMG_INT32 ai32DriverPriority[RGXFW_MAX_NUM_OSIDS]; /*!< Array of priorities per OS */ + IMG_UINT32 aui32DriverIsolationGroup[RGXFW_MAX_NUM_OSIDS]; /*!< Array of isolation groups per OS */ + IMG_UINT32 aui32TSPercentage[RGXFW_MAX_NUM_OSIDS]; /*!< Array of time slice per OS */ + IMG_UINT32 ui32TSIntervalMs; /*!< Time slice interval */ + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec; /*!< Vz Connection Cooldown period in secs */ + + PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */ RGXFWIF_DMA_ADDR sHWPerfDMABuf; RGXFWIF_DMA_ADDR sHWPerfCtlDMABuf; -#if defined(SUPPORT_VALIDATION) - IMG_BOOL bInjectFWFault; /*!< Injecting firmware fault to validate recovery through Host */ -#endif } RGXFWIF_RUNTIME_CFG; /*! 
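RGXFWIF_RUNTIME_CFG now carries per-driver priority, isolation-group and time-slice arrays sized by RGXFW_MAX_NUM_OSIDS, plus a time-slice interval and a VZ connection cooldown period. A sketch of filling them with an equal-share policy (field names are taken from this header; every value below is an example only):

/* Give all drivers the same priority, one shared isolation group and an
 * equal share of the GPU time slice. */
static void RGXRuntimeCfgSetExampleScheduling(RGXFWIF_RUNTIME_CFG *psRuntimeCfg)
{
    IMG_UINT32 ui32DriverID;

    for (ui32DriverID = 0; ui32DriverID < RGXFW_MAX_NUM_OSIDS; ui32DriverID++)
    {
        psRuntimeCfg->ai32DriverPriority[ui32DriverID]        = 0;
        psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = 0U;
        psRuntimeCfg->aui32TSPercentage[ui32DriverID]         = 100U / RGXFW_MAX_NUM_OSIDS;
    }

    psRuntimeCfg->ui32TSIntervalMs                    = 10U;  /* example interval only */
    psRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = 0U;   /* no cooldown */
}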
@@ -1803,7 +1357,7 @@ typedef enum typedef struct { IMG_PID uiPID; - IMG_UINT32 ui32OSID; + IMG_UINT32 ui32DriverID; } RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; typedef struct @@ -1848,6 +1402,7 @@ typedef enum RGXFWIF_TPU_DM_VDM = 1, RGXFWIF_TPU_DM_CDM = 2, RGXFWIF_TPU_DM_TDM = 3, + RGXFWIF_TPU_DM_RDM = 4, RGXFWIF_TPU_DM_LAST } RGXFWIF_TPU_DM; @@ -1873,14 +1428,12 @@ typedef enum RGXFWIF_GPIO_VAL_LAST } RGXFWIF_GPIO_VAL_MODE; -typedef enum -{ - FW_PERF_CONF_NONE = 0, - FW_PERF_CONF_ICACHE = 1, - FW_PERF_CONF_DCACHE = 2, - FW_PERF_CONF_JTLB_INSTR = 5, - FW_PERF_CONF_INSTRUCTIONS = 6 -} FW_PERF_CONF; +typedef IMG_UINT32 FW_PERF_CONF; +#define FW_PERF_CONF_NONE 0U +#define FW_PERF_CONF_ICACHE 1U +#define FW_PERF_CONF_DCACHE 2U +#define FW_PERF_CONF_JTLB_INSTR 5U +#define FW_PERF_CONF_INSTRUCTIONS 6U typedef enum { @@ -1951,6 +1504,7 @@ typedef struct PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ + RGXFWIF_MULTICORE_INFO sRGXMulticoreInfo; /*! < Multicore capability info */ } UNCACHED_ALIGN RGXFWIF_OSINIT; @@ -1976,13 +1530,14 @@ typedef struct IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; IMG_UINT32 RGXFW_ALIGN aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; + IMG_UINT64 ui64ClkCtrl0; + IMG_UINT64 ui64ClkCtrl1; + IMG_UINT32 ui32ClkCtrl2; + IMG_UINT32 ui32FilterFlags; +#if defined(PDUMP) RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */ -#if defined(SUPPORT_VALIDATION) - RGXFWIF_SIGBUF_CTL asValidationSigBufCtl[RGXFWIF_DM_MAX]; - IMG_UINT64 RGXFW_ALIGN ui64RCEDisableMask; - IMG_UINT32 RGXFW_ALIGN ui32PCGPktDropThresh; #endif PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */ @@ -1993,10 +1548,12 @@ typedef struct PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ #endif - PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ + PRGXFWIF_GPU_UTIL_FW sGpuUtilFWCtl; /*!< GPU utilization buffer */ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ + RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl; + #if defined(SUPPORT_FIRMWARE_GCOV) RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */ #endif @@ -2005,6 +1562,10 @@ typedef struct IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ +#if defined(SUPPORT_SOC_TIMER) + IMG_UINT32 ui32InitialSOCClockSpeed; /*!< System/SOC clock speed at FW boot time */ +#endif + IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ @@ -2013,14 +1574,12 @@ typedef struct IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ - IMG_UINT32 ui32JonesDisableMask; - RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance counter config */ -#if defined(SUPPORT_PDVFS) - RGXFWIF_PDVFS_OPP RGXFW_ALIGN sPDVFSOPPInfo; +#if defined(SUPPORT_FW_OPP_TABLE) + RGXFWIF_OPP_INFO RGXFW_ALIGN sOPPInfo; /** * FW Pointer to memory containing core clock rate in Hz. 
@@ -2044,7 +1603,11 @@ typedef struct RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; #endif -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + RGXFWIF_DEV_VIRTADDR sActiveContextBufBase; /*!< Active context buffer base */ +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) /* * Used when validation is enabled to allow the host to check * that MTS sent the correct sideband in response to a kick @@ -2062,13 +1625,22 @@ typedef struct IMG_UINT32 ui32VzWdgPeriod; #endif +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + /* notify firmware power-up on host-side recovery */ + IMG_BOOL bFwHostRecoveryMode; +#endif + #if defined(SUPPORT_SECURE_CONTEXT_SWITCH) RGXFWIF_DEV_VIRTADDR pbFwScratchBuf; #endif - +#if defined(SUPPORT_ICS) + IMG_UINT32 ui32FDTI; + IMG_UINT32 ui32ICSThreshold; + IMG_BOOL bTestModeOn; +#endif } UNCACHED_ALIGN RGXFWIF_SYSINIT; -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) #define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 #define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 #endif @@ -2134,9 +1706,12 @@ typedef struct /* See rgx_common.h for a list of GPU states */ #define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) +#define RGXFWIF_GPU_UTIL_TIME_MASK32 (IMG_UINT32_C(0xFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK32) -#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) -#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) +#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) +#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) +#define RGXFWIF_GPU_UTIL_GET_TIME32(word) ((IMG_UINT32)(word) & RGXFWIF_GPU_UTIL_TIME_MASK32) +#define RGXFWIF_GPU_UTIL_GET_STATE32(word) ((IMG_UINT32)(word) & RGXFWIF_GPU_UTIL_STATE_MASK32) /* The OS timestamps computed by the FW are approximations of the real time, * which means they could be slightly behind or ahead the real timer on the Host. 
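As a minimal usage sketch (illustrative only, not part of the patch): the new 32-bit utilisation macros in the hunk above follow the same pattern as the retained 64-bit GET_TIME/GET_STATE macros, with the GPU state held in the bits covered by RGXFWIF_GPU_UTIL_STATE_MASK32 and the timestamp in the remaining bits. The helper names below are hypothetical and assume this header (plus the GPU state list from rgx_common.h) is in scope.

/* Sketch only: pack a 32-bit OS timestamp and a GPU state into one word,
 * then split it again, using the macros added in the hunk above. */
static inline IMG_UINT32 ExamplePackGpuUtilWord32(IMG_UINT32 ui32Time, IMG_UINT32 ui32State)
{
	/* Keep only the timestamp bits of ui32Time and the state bits of
	 * ui32State, then combine them; the RGXFWIF_GPU_UTIL_MAKE_WORD32 macro
	 * added in the next hunk wraps exactly this expression. */
	return RGXFWIF_GPU_UTIL_GET_TIME32(ui32Time) |
	       RGXFWIF_GPU_UTIL_GET_STATE32(ui32State);
}

static inline void ExampleUnpackGpuUtilWord32(IMG_UINT32 ui32Word,
                                              IMG_UINT32 *pui32Time,
                                              IMG_UINT32 *pui32State)
{
	*pui32Time  = RGXFWIF_GPU_UTIL_GET_TIME32(ui32Word);  /* timestamp bits only */
	*pui32State = RGXFWIF_GPU_UTIL_GET_STATE32(ui32Word); /* state bits only */
}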
@@ -2150,6 +1725,9 @@ typedef struct #define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) +#define RGXFWIF_GPU_UTIL_MAKE_WORD32(time,state) \ + (RGXFWIF_GPU_UTIL_GET_TIME32(time) | RGXFWIF_GPU_UTIL_GET_STATE32(state)) + /* The timer correlation array must be big enough to ensure old entries won't be * overwritten before all the HWPerf events linked to those entries are processed @@ -2168,6 +1746,26 @@ typedef struct static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); +/* The time is stored in DM state counters in "approximately microseconds", + * dividing the time originally obtained in nanoseconds by 2^10 for the sake of reducing coremem usage */ +#define RGXFWIF_DM_OS_TIMESTAMP_SHIFT 10U + +typedef struct +{ + /* Last GPU DM per-OS states + OS time of the last state update */ + IMG_UINT32 RGXFW_ALIGN aui32DMOSLastWord[RGXFWIF_GPU_UTIL_DM_MAX]; + /* DMs time-stamps are cached in coremem - to reduce coremem usage we allocate 32 bits for each of them + * and save their values divided by 2^10, so they wrap around in ~73 mins, consequently + * we keep the count of the wrapping around instances */ + IMG_UINT32 RGXFW_ALIGN aui32DMOSLastWordWrap[RGXFWIF_GPU_UTIL_DM_MAX]; + /* Counters for the amount of time the GPU DMs were active or inactive(idle or blocked) */ + IMG_UINT32 RGXFW_ALIGN aaui32DMOSStatsCounters[RGXFWIF_GPU_UTIL_DM_MAX][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; + /* DMs Counters are cached in coremem - to reduce coremem usage we allocate 32 bits for each of them + * and save their values divided by 2^10, so they wrap around in ~73 mins, consequently + * we keep the count of the wrapping around instances */ + IMG_UINT32 RGXFW_ALIGN aaui32DMOSCountersWrap[RGXFWIF_GPU_UTIL_DM_MAX][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; +} RGXFWIF_GPU_STATS; + typedef struct { RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; @@ -2180,203 +1778,156 @@ typedef struct IMG_UINT64 RGXFW_ALIGN ui64GpuLastWord; /* Counters for the amount of time the GPU was active/idle/blocked */ IMG_UINT64 RGXFW_ALIGN aui64GpuStatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; - - /* Last GPU DM per-OS states + OS time of the last state update */ - IMG_UINT64 RGXFW_ALIGN aaui64DMOSLastWord[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OS]; - /* Counters for the amount of time the GPU DMs were active/idle/blocked */ - IMG_UINT64 RGXFW_ALIGN aaaui64DMOSStatsCounters[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OS][RGXFWIF_GPU_UTIL_STATE_NUM]; -} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; - -typedef struct -{ - IMG_UINT32 ui32RenderTargetIndex; //Render number - IMG_UINT32 ui32CurrentRenderTarget; //index in RTA - IMG_UINT32 ui32ActiveRenderTargets; //total active RTs - RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices - RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target - IMG_UINT32 ui32MaxRTs; //Number of render targets in the array - IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ -} UNCACHED_ALIGN RGXFWIF_RTA_CTL; - -/*! 
- * @InGroup RenderTarget - * @Brief Firmware Freelist holding usage state of the Parameter Buffers - */ -typedef struct -{ - IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListBaseDevVAddr; /*!< Freelist page table base address */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListStateDevVAddr; /*!< Freelist state buffer base address */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListLastGrowDevVAddr; /*!< Freelist base address at last grow */ - -#if defined(PM_INTERACTIVE_MODE) - IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ - IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ -#endif - - IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ - IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ - IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ -#if defined(PM_INTERACTIVE_MODE) - IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ - IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/ -#endif -#if defined(SUPPORT_SHADOW_FREELISTS) - IMG_UINT32 ui32HWRCounter; - PRGXFWIF_FWMEMCONTEXT psFWMemContext; -#endif - IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ - IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ - IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ - IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ - - IMG_BOOL bUpdatePending; - IMG_UINT32 ui32UpdateNewPages; - IMG_UINT32 ui32UpdateNewReadyPages; -} UNCACHED_ALIGN RGXFWIF_FREELIST; + /* Device off period timestamp offset */ + IMG_INT64 RGXFW_ALIGN i64DeviceTimestampOffset; + /* Stats per OSID/DriverID */ + RGXFWIF_GPU_STATS sStats[RGXFW_MAX_NUM_OSIDS]; +} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FW; /*! ****************************************************************************** - * HWRTData + * Virtualisation and Security *****************************************************************************/ -/* HWRTData flags */ -/* Deprecated flags 1:0 */ -#define HWRTDATA_HAS_LAST_TA (1UL << 2) -#define HWRTDATA_PARTIAL_RENDERED (1UL << 3) -#define HWRTDATA_KILLED (1UL << 4) -#define HWRTDATA_KILL_AFTER_TARESTART (1UL << 5) -#if defined(SUPPORT_AGP) -#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (1UL << 6) -#if defined(SUPPORT_AGP4) -#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (1UL << 7) -#endif -#define HWRTDATA_GEOM_NEEDS_RESUME (1UL << 8) -#endif - -typedef enum -{ - RGXFWIF_RTDATA_STATE_NONE = 0, - RGXFWIF_RTDATA_STATE_KICKTA, - RGXFWIF_RTDATA_STATE_KICKTAFIRST, - RGXFWIF_RTDATA_STATE_TAFINISHED, - RGXFWIF_RTDATA_STATE_KICK3D, - RGXFWIF_RTDATA_STATE_3DFINISHED, - RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, - RGXFWIF_RTDATA_STATE_TAOUTOFMEM, - RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, - /* In case of HWR, we can't set the RTDATA state to NONE, - * as this will cause any TA to become a first TA. 
- * To ensure all related TA's are skipped, we use the HWR state */ - RGXFWIF_RTDATA_STATE_HWR, - RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU -} RGXFWIF_RTDATA_STATE; - -typedef struct -{ - IMG_UINT32 ui32ScreenPixelMax; - IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl; - IMG_UINT32 ui32TEStride; - IMG_UINT32 ui32TPCSize; - IMG_UINT32 ui32TEScreen; - IMG_UINT32 ui32TEAA; - IMG_UINT32 ui32TEMTILE1; - IMG_UINT32 ui32TEMTILE2; - IMG_UINT32 ui32RgnStride; - IMG_UINT32 ui32ISPMergeLowerX; - IMG_UINT32 ui32ISPMergeLowerY; - IMG_UINT32 ui32ISPMergeUpperX; - IMG_UINT32 ui32ISPMergeUpperY; - IMG_UINT32 ui32ISPMergeScaleX; - IMG_UINT32 ui32ISPMergeScaleY; -} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; - -/*! - * @InGroup RenderTarget - * @Brief Firmware Render Target data i.e. HWRTDATA used to hold the PM context - */ -typedef struct -{ - IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[2]; /*!< VCE Page Catalogue base */ - IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[2]; - IMG_UINT64 RGXFW_ALIGN ui64TECatBase[2]; /*!< TE Page Catalogue base */ - IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[2]; - IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ - IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; - -#if defined(PM_INTERACTIVE_MODE) - IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sPMMListDevVAddr; /*!< Mlist table base */ +#define RGX_GP_SUPPORTS_SECURITY (0U) +#define RGX_TDM_SUPPORTS_SECURITY (1U) +#define RGX_GEOM_SUPPORTS_SECURITY (0U) +#define RGX_FRAG_SUPPORTS_SECURITY (1U) +#define RGX_COMPUTE_SUPPORTS_SECURITY (1U) +#define RGX_RAY_SUPPORTS_SECURITY (0U) + +#define RGX_DMS_WITH_SECURITY_COUNT (RGX_GP_SUPPORTS_SECURITY + \ + RGX_TDM_SUPPORTS_SECURITY + \ + RGX_GEOM_SUPPORTS_SECURITY + \ + RGX_FRAG_SUPPORTS_SECURITY + \ + RGX_COMPUTE_SUPPORTS_SECURITY + \ + RGX_RAY_SUPPORTS_SECURITY) + +#if defined(SUPPORT_TRUSTED_DEVICE) +#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_DEDICATED_OSID) || (defined(RGX_LEGACY_SECURE_OSID_SCHEME)) +/* OSIDs 0 and 1 reserved for Firmware */ +#define DRIVER_OSID_START_OFFSET (FW_OSID+2) #else - IMG_DEV_VIRTADDR RGXFW_ALIGN sPMRenderStateDevVAddr; /*!< Series8 PM State buffers */ - IMG_DEV_VIRTADDR RGXFW_ALIGN sPMSecureRenderStateDevVAddr; +/* OSIDs 1 reserved for Firmware */ +#define DRIVER_OSID_START_OFFSET (FW_OSID+1) #endif - PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ - IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; - IMG_BOOL bRenderStateNeedsReset; - - RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */ - - IMG_UINT32 ui32HWRTDataFlags; - RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ - - RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Render target clean up state */ - - RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ - - IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ -#if defined(RGX_FIRMWARE) - struct RGXFWIF_FWCOMMONCONTEXT_* psOwnerGeom; #else - RGXFWIF_DEV_VIRTADDR pui32OwnerGeomNotUsedByHost; +/* Firmware and Host driver share the same OSID */ +#define DRIVER_OSID_START_OFFSET (FW_OSID) +#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ + +#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_DEDICATED_OSID) +/* Firmware heaps reside in a dedicated non-secure IPA space. */ +#define FW_HEAP_OSID (FW_OSID+1) +#elif (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_HOST_OSID) +/* Firmware heaps reside in the Host driver's non-secure IPA space. 
*/ +#define FW_HEAP_OSID (DRIVER_OSID_START_OFFSET) +#elif (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID) +/* Firmware heaps reside in the IPA space as the Firmware. */ +#define FW_HEAP_OSID (FW_OSID) +#else +#error "RGX_FW_HEAP_OSID_ASSIGNMENT not configured correctly." #endif -#if defined(PM_INTERACTIVE_MODE) - IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ - IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ +#if !defined(RGX_NUM_DRIVERS_SUPPORTED) && !defined(__KERNEL__) +/* placeholder define for UM tools including this header */ +#define RGX_NUM_DRIVERS_SUPPORTED (1U) #endif -#if defined(SUPPORT_TRP) - IMG_UINT32 ui32KickFlagsCopy; - IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */ -#endif -} UNCACHED_ALIGN RGXFWIF_HWRTDATA; -/* Sync_checkpoint firmware object. - * This is the FW-addressable structure use to hold the sync checkpoint's - * state and other information which needs to be accessed by the firmware. - */ -typedef struct -{ - IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ - IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ -} SYNC_CHECKPOINT_FW_OBJ; +#if defined(SUPPORT_TRUSTED_DEVICE) + +#if defined(RGX_LEGACY_SECURE_OSID_SCHEME) +/* keep reverse compatibility with original scheme */ +static_assert((RGX_NUM_DRIVERS_SUPPORTED == 1), + "RGX_LEGACY_SECURE_OSID_SCHEME only applicable on native drivers (no VZ support)"); + +#define DRIVER_ID(osid) (0U) +#define OSID_SECURE(did) (1U) +#define OSID(did) (2U) + +#else /* defined(RGX_LEGACY_SECURE_OSID_SCHEME) */ +/* native and virtualized security support */ +#define LAST_OSID (RGXFW_MAX_NUM_OSIDS - 1) +#define DRIVER_ID(osid) (osid - DRIVER_OSID_START_OFFSET) +#define OSID(did) (did + DRIVER_OSID_START_OFFSET) +#define OSID_SECURE(did) \ + (did==0 ? (DRIVER0_SECURITY_SUPPORT ? (LAST_OSID) : OSID(did)) : \ + (did==1 ? (DRIVER1_SECURITY_SUPPORT ? (LAST_OSID - DRIVER0_SECURITY_SUPPORT) : OSID(did)) : \ + (did==2 ? (DRIVER2_SECURITY_SUPPORT ? (LAST_OSID - DRIVER0_SECURITY_SUPPORT - DRIVER1_SECURITY_SUPPORT) : OSID(did)) : \ + (did==3 ? (DRIVER3_SECURITY_SUPPORT ? (LAST_OSID - DRIVER0_SECURITY_SUPPORT - DRIVER1_SECURITY_SUPPORT - DRIVER2_SECURITY_SUPPORT) : OSID(did)) : \ + (did==4 ? (DRIVER4_SECURITY_SUPPORT ? (LAST_OSID - DRIVER0_SECURITY_SUPPORT - DRIVER1_SECURITY_SUPPORT - DRIVER2_SECURITY_SUPPORT - DRIVER3_SECURITY_SUPPORT) : OSID(did)) : \ + (did==5 ? (DRIVER5_SECURITY_SUPPORT ? (LAST_OSID - DRIVER0_SECURITY_SUPPORT - DRIVER1_SECURITY_SUPPORT - DRIVER2_SECURITY_SUPPORT - DRIVER3_SECURITY_SUPPORT - DRIVER4_SECURITY_SUPPORT) : OSID(did)) : \ + (did==6 ? (DRIVER6_SECURITY_SUPPORT ? (LAST_OSID - DRIVER0_SECURITY_SUPPORT - DRIVER1_SECURITY_SUPPORT - DRIVER2_SECURITY_SUPPORT - DRIVER3_SECURITY_SUPPORT - DRIVER4_SECURITY_SUPPORT - DRIVER5_SECURITY_SUPPORT) : OSID(did)) : \ + (DRIVER7_SECURITY_SUPPORT ? 
(LAST_OSID - DRIVER0_SECURITY_SUPPORT - DRIVER1_SECURITY_SUPPORT - DRIVER2_SECURITY_SUPPORT - DRIVER3_SECURITY_SUPPORT - DRIVER4_SECURITY_SUPPORT - DRIVER5_SECURITY_SUPPORT - DRIVER6_SECURITY_SUPPORT) : OSID(did))))))))) + +static_assert((RGX_NUM_DRIVERS_SUPPORTED + DRIVER_OSID_START_OFFSET + + DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT + DRIVER3_SECURITY_SUPPORT + + DRIVER4_SECURITY_SUPPORT + DRIVER5_SECURITY_SUPPORT + DRIVER6_SECURITY_SUPPORT + DRIVER7_SECURITY_SUPPORT) <= RGXFW_MAX_NUM_OSIDS, + "The GPU hardware is not equipped with enough hardware OSIDs to satisfy the requirements."); +#endif /* defined(RGX_LEGACY_SECURE_OSID_SCHEME) */ + +/* Time slice support */ +/* Bits 30 and 31 reserved by FW private driver priority */ +#define RGXFW_VZ_PRIORITY_MAX_SHIFT (30U) +#define RGXFW_VZ_PRIORITY_MASK ((1U << RGXFW_VZ_PRIORITY_MAX_SHIFT) - 1U) +#define RGXFW_VZ_TIME_SLICE_MAX (100U) +#define RGXFW_VZ_TIME_SLICE_MIN (5U) + +#elif defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +/* virtualization without security support */ +#define DRIVER_ID(osid) (osid - DRIVER_OSID_START_OFFSET) +#define OSID(did) (did + DRIVER_OSID_START_OFFSET) + +/* Time slice support */ +/* Bits 30 and 31 reserved by FW private driver priority */ +#define RGXFW_VZ_PRIORITY_MAX_SHIFT (30U) +#define RGXFW_VZ_PRIORITY_MASK ((1U << RGXFW_VZ_PRIORITY_MAX_SHIFT) - 1U) +#define RGXFW_VZ_TIME_SLICE_MAX (100U) +#define RGXFW_VZ_TIME_SLICE_MIN (5U) +#else +/* native without security support */ +#define DRIVER_ID(osid) (0U) +#define OSID(did) (did) +#define RGXFW_VZ_PRIORITY_MASK (0U) +#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ -/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ -#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -#define RGXFW_SCRATCH_BUF_SIZE (32768U) +#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++) -#define RGX_NUM_CORES (8U) +#if defined(__KERNEL__) +/* Driver implementation */ +#define FOREACH_ACTIVE_DRIVER(devinfo, did) FOREACH_SUPPORTED_DRIVER(did) \ + { \ + if (devinfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[did].bfOsState != RGXFW_CONNECTION_FW_ACTIVE) continue; -#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET -#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U +#define END_FOREACH_ACTIVE_DRIVER } -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES) -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U +#else +/* Firmware implementation */ +#define FOREACH_ACTIVE_DRIVER(did) do { \ + unsigned int idx; \ + for (idx = 0; idx < RGXFW_NUM_ACTIVE_DRIVERS; idx++) \ + { \ + (did) = gsRGXFWCtl.aui32ActiveDrivers[idx]; \ + { + +#define END_FOREACH_ACTIVE_DRIVER }}} while (false); +#endif /* defined(__KERNEL__) */ -#define RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES) -#define RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES 8192U -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES) -#define 
RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES (0x00004000U + 48U + 1023U) +#else +#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) <= RGXFW_HOST_DRIVER_ID; (did)++) -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES) -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES +#define FOREACH_ACTIVE_DRIVER(did) FOREACH_SUPPORTED_DRIVER(did) +#define END_FOREACH_ACTIVE_DRIVER -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES) -#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES (((0x00000080U + 127U) * RGX_NUM_CORES) + 127U) +#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -#define RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES PVR_ALIGN((RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES), PAGE_SIZE) +#define FOREACH_VALIDATION_OSID(osid) for ((osid)=0; (osid) < GPUVIRT_VALIDATION_NUM_OS; (osid)++) +#define FOREACH_HW_OSID(osid) for ((osid)=0; (osid) < RGXFW_MAX_NUM_OSIDS; (osid)++) +#define FOREACH_DRIVER_RAW_HEAP(did, _struct, dev) for ((did)=RGX_FIRST_RAW_HEAP_DRIVER_ID; (did) < ((PVRSRV_VZ_MODE_IS(NATIVE, _struct, dev) ? 1 : RGX_NUM_DRIVERS_SUPPORTED)); (did)++) #define RGXFWIF_TDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES) #define RGXFWIF_CDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES) @@ -2384,6 +1935,12 @@ typedef struct #define RGXFWIF_CDM_SECURE_SR_BUF_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES), 1024ULL) #define RGXFWIF_CDM_SECURE_SR_B_BUF_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES), 1024ULL) #define RGXFWIF_CDM_SECURE_CONTEXT_STATE_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_OFFSET_BYTES), 128ULL) +#define RGXFWIF_TDM_SECURE_CONTEXT_STATE_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_CONTEXT_OFFSET_BYTES), 128ULL) + +#define SAFETY_SELF_TEST_STEP_CLEAR 1 +#define SAFETY_SELF_TEST_STEP_ENABLE 2 +#define SAFETY_SELF_TEST_STEP_TEST 3 +#define SAFETY_SELF_TEST_STEP_DISABLE 4 #endif /* RGX_FWIF_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_shared.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_shared.h index 1a541b18b0c3..061f718cec62 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_shared.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_fwif_shared.h @@ -91,11 +91,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) -typedef struct RGXFWIF_DEV_VIRTADDR_ -{ - IMG_UINT32 ui32Addr; -} RGXFWIF_DEV_VIRTADDR; - typedef struct { IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; @@ -215,13 +210,17 @@ typedef struct fence dependencies are not met. 
*/ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity in bytes of the CCB-1 */ -#if defined(SUPPORT_AGP) - IMG_UINT32 ui32ReadOffset2; -#if defined(SUPPORT_AGP4) - IMG_UINT32 ui32ReadOffset3; - IMG_UINT32 ui32ReadOffset4; -#endif -#endif + + IMG_UINT32 ui32ReadOffset2; /*!< Firmware 2nd read offset into CCB for AGP. + Points to the command that is + runnable on GPU, if R2!=W */ + IMG_UINT32 ui32ReadOffset3; /*!< Firmware 3rd read offset into CCB for AGP. + Points to the command that is + runnable on GPU, if R3!=W */ + IMG_UINT32 ui32ReadOffset4; /*!< Firmware 4th read offset into CCB for AGP. + Points to the command that is + runnable on GPU, if R4!=W */ + } UNCACHED_ALIGN RGXFWIF_CCCB_CTL; @@ -376,6 +375,13 @@ typedef enum RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ + RGX_CONTEXT_PVRIC_SIGNATURE_MISMATCH = 17, /*!< PVRIC Signature mismatch */ + RGX_CONTEXT_RESET_REASON_FW_PTE_PARITY_ERR = 18, /*!< Parity error in MMU Page Table Entry */ + RGX_CONTEXT_RESET_REASON_FW_PARITY_ERR = 19, /*!< Parity error in MH, system bus or Control Status registers */ + RGX_CONTEXT_RESET_REASON_GPU_PARITY_HWR = 20, /*!< Parity error in system bus or Control Status registers */ + RGX_CONTEXT_RESET_REASON_GPU_LATENT_HWR = 21, /*!< Latent/ICS signature mismatch error */ + RGX_CONTEXT_RESET_REASON_DCLS_ERR = 22, /*!< Dual Core Lock Step FW error detected */ + RGX_CONTEXT_RESET_REASON_ICS_HWR = 23, /*!< ICS fault */ } RGX_CONTEXT_RESET_REASON; /*! @@ -393,19 +399,12 @@ typedef struct #define RGX_HEAP_UM_USC_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY #define RGX_HEAP_UM_USC_RESERVED_REGION_OFFSET 0 -#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE +#define RGX_HEAP_KM_USC_RESERVED_REGION_OFFSET RGX_HEAP_UM_USC_RESERVED_SIZE #define RGX_HEAP_UM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY #define RGX_HEAP_UM_GENERAL_RESERVED_REGION_OFFSET 0 -#if defined(SUPPORT_TRUSTED_DEVICE) -#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY -#else -#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE 0 -#endif #define RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET RGX_HEAP_UM_GENERAL_RESERVED_SIZE -#define RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGX_HEAP_KM_GENERAL_RESERVED_SIZE) - /* * 4 dwords reserved for shared register management. * The first dword is the number of shared register blocks to reload. @@ -413,6 +412,746 @@ typedef struct */ #define RGX_LLS_SHARED_REGS_RESERVE_SIZE (16U) +/*************************************************************************/ /*! 
+ Logging type +*/ /**************************************************************************/ +#define RGXFWIF_LOG_TYPE_NONE 0x00000000U +#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U +#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U +#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U +#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U +#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U +#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U +#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U +#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U +#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U +#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U +#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U +#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U +#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U +#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U +#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U +#define RGXFWIF_LOG_TYPE_GROUP_VZ 0x00008000U +#define RGXFWIF_LOG_TYPE_GROUP_SAFETY 0x00010000U +#define RGXFWIF_LOG_TYPE_GROUP_VERBOSE 0x00020000U +#define RGXFWIF_LOG_TYPE_GROUP_CUSTOMER 0x00040000U +#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U +#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x8007FFFEU +#define RGXFWIF_LOG_TYPE_MASK 0x8007FFFFU + +/* String used in pvrdebug -h output */ +#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,vz,safety,verbose,customer,debug" + +/* Table entry to map log group strings to log type value */ +typedef struct { + const IMG_CHAR* pszLogGroupName; + IMG_UINT32 ui32LogGroupType; +} RGXFWIF_LOG_GROUP_MAP_ENTRY; + +/* + Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup + table where needed. Keep log group names short, no more than 20 chars. +*/ +#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ + { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ + { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ + { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ + { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ + { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ + { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ + { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ + { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ + { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ + { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ + { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ + { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ + { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ + { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ + { "vz", RGXFWIF_LOG_TYPE_GROUP_VZ }, \ + { "safety", RGXFWIF_LOG_TYPE_GROUP_SAFETY }, \ + { "verbose", RGXFWIF_LOG_TYPE_GROUP_VERBOSE }, \ + { "customer",RGXFWIF_LOG_TYPE_GROUP_CUSTOMER }, \ + { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } + +/* Used in print statements to display log group state, one %s per group defined */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" + +/* Used in a print statement to display log group state, one per group */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U) ?("main ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U) ?("mts ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U) ?("cleanup ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U) ?("csw ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U) ?("bif ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U) ?("pm ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U) ?("rtd ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U) ?("spm ") :("")), \ + ((((types) & 
RGXFWIF_LOG_TYPE_GROUP_POW) != 0U) ?("pow ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U) ?("hwr ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U) ?("hwp ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U) ?("rpm ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U) ?("dma ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) != 0U) ?("misc ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_VZ) != 0U) ?("vz ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_SAFETY) != 0U) ?("safety ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_VERBOSE) != 0U) ?("verbose ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_CUSTOMER) != 0U)?("customer ") :("")), \ + ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U) ?("debug ") :("")) + +/*! + ****************************************************************************** + * Trace Buffer + *****************************************************************************/ + +/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ +#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */ +#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */ +#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */ + +#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +#define RGXFW_THREAD_NUM 2U +#else +#define RGXFW_THREAD_NUM 1U +#endif + +typedef struct +{ + IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_UINT32 ui32LineNum; +} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; + +/*! + * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface + * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing + * @{ + */ + +/*! + * @Brief Firmware trace buffer details + */ +typedef struct +{ + IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */ + IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */ + + RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ + + RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf; +} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; + +/*! @} End of Defgroup SRVAndFWTracing */ + +/*! 
+ * @InGroup SRVAndFWTracing + * @Brief Firmware trace control data + */ +typedef struct +{ + IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ + RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated + (in RGXTraceBufferInitOnDemandResources) */ + IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_TRACEBUF; + +/* Debug-info sub-fields */ +/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) + +/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) + +/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ +#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) +#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) + +/* Bit 3-15: Unused bits */ + +#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 +#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " +#define RGXFWT_DEBUG_INFO_STR_APPEND ")" + +/* Table of debug info sub-field's masks and corresponding message strings + * to be appended to firmware trace + * + * Mask : 16 bit mask to be applied to debug-info field + * String : debug info message string + */ + +#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ +/*Mask, String*/ \ +X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ +X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ +X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") + +/* Firmware trace time-stamp field breakup */ + +/* RGX_CR_TIMER register read (48 bits) value*/ +#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) +#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + +/* Extra debug-info (16 bits) */ +#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) +#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK + +typedef struct +{ + IMG_UINT bfOsState : 3; + IMG_UINT bfFLOk : 1; + IMG_UINT bfFLGrowPending : 1; + IMG_UINT bfReserved : 27; +} RGXFWIF_OS_RUNTIME_FLAGS; + +#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT64 RGXFW_ALIGN ui64Data; + RGXFWIF_FILE_INFO_BUF sFaultBuf; +} UNCACHED_ALIGN RGX_FWFAULTINFO; + +#define RGXFWIF_POW_STATES \ + X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ + X(RGXFWIF_POW_ON) /* running HW commands */ \ + X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ + X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ + +typedef enum +{ +#define X(NAME) NAME, + RGXFWIF_POW_STATES +#undef X +} RGXFWIF_POW_STATE; + +/* Firmware HWR states */ +#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ +#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ +#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ +#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ +#define RGXFWIF_HWR_DM_STALLING 
(IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ +#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ +#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ + +#define RGXFWIF_PHR_MODE_OFF (0UL) +#define RGXFWIF_PHR_MODE_RD_RESET (1UL) + +typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; + +typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; + +/*! @Brief Firmware system data shared with the Host driver */ +typedef struct +{ + IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ + IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ + volatile RGXFWIF_POW_STATE ePowState; + struct { + volatile IMG_UINT32 ui32HWPerfRIdx; + volatile IMG_UINT32 ui32HWPerfWIdx; + volatile IMG_UINT32 ui32HWPerfWrapCount; + } sHWPerfCtrl; /* Struct used to inval/flush HWPerfCtrl members */ + IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ + IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ + + /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with + * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ + IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ + IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ + IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ + RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OSIDS];/*!< State flags for each Operating System mirrored from Fw coremem */ + RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ + IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ + IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ + IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ + IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ + IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; + IMG_UINT32 aui32TSMirror[RGXFW_MAX_NUM_OSIDS]; /*!< Array of time slice per OS Mirrored from the FW */ + +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) +#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) +#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) + IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; +#endif + RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ + RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ + IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ + IMG_UINT32 ui32MemFaultCheck; /*!< Device mem fault check on PCI systems */ +} UNCACHED_ALIGN RGXFWIF_SYSDATA; + +#if defined(PVRSRV_STALLED_CCB_ACTION) +#define PVR_SLR_LOG_ENTRIES 10U +#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64Timestamp; + IMG_UINT32 ui32FWCtxAddr; + IMG_UINT32 ui32NumUFOs; + IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; +} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; +#endif + +#define RGXFWIF_OFFLINE_DATA_BUFFER_SIZE_IN_WORDS (128U) + +/*! 
+ * @InGroup ContextSwitching + * @Brief Firmware per-os data and configuration + */ +typedef struct +{ + IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ + IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ + IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ +#if defined(PVRSRV_STALLED_CCB_ACTION) + IMG_UINT32 ui32ForcedUpdatesRequested; + IMG_UINT8 ui8SLRLogWp; + RGXFWIF_SLR_ENTRY sSLRLogFirst; + RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; + IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; +#endif + volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ + IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ + RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ + IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ + IMG_UINT32 aui32OfflineBuffer[RGXFWIF_OFFLINE_DATA_BUFFER_SIZE_IN_WORDS]; +} UNCACHED_ALIGN RGXFWIF_OSDATA; + +/*! + ****************************************************************************** + * HWR Data + *****************************************************************************/ +/*! + * @Defgroup HWRInfo FW HWR shared data interface + * @Brief Types grouping data structures and defines used in realising the HWR record. + * @{ + */ + +#define RGXFW_PROCESS_NAME_LEN (16) + +/*! @Brief HWR Lockup types */ +typedef enum +{ + RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ + RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ + RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ + RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ + RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ + RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ + RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ + RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ + RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ + RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ + RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ + RGX_HWRTYPE_ICSFAULT = 11, /*!< Idle Cycle Stealing fault */ +} RGX_HWRTYPE; + +#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) + +#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? 
true : false) + +/************************ + * GPU HW error codes * + ************************/ +typedef enum +{ + RGX_HW_ERR_NA = 0x0, + RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, +} RGX_HW_ERR; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ + IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_BIFINFO; + +typedef struct +{ + IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ +} RGX_ECCINFO; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_MMUINFO; + +typedef struct +{ + IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ + IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ + IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ + IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} UNCACHED_ALIGN RGX_POLLINFO; + +typedef struct +{ + IMG_UINT32 ui32BadVAddr; /*!< VA address */ + IMG_UINT32 ui32EntryLo; +} RGX_TLBINFO; + +/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. */ +typedef struct +{ + union + { + RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ + RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ + RGX_POLLINFO sPollInfo; /*!< Poll failure details */ + RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ + RGX_ECCINFO sECCInfo; /*!< ECC failure details */ + } uHWRData; + + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ + IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ + IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ + IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ + IMG_UINT32 ui32HWRNumber; /*!< HWR number */ + IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ + IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ + RGX_HWRTYPE eHWRType; /*!< Type of lockup */ + RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ + IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ + RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ + IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ + IMG_UINT32 RGXFW_ALIGN ui32PDSStalledDMs; /*!< DMs stalled on PDS Store space */ + IMG_UINT32 ui32PDSActiveDMUSCs; /*!< Per-DM USC PDS activity */ +} UNCACHED_ALIGN RGX_HWRINFO; + +#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ +#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ + +/*! 
@Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */ +typedef struct +{ + RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ + IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ + IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ + IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ + IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ + IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ + IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ + IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ + IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ +} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; + +/*! @} End of HWRInfo */ + +/*! + * @InGroup RenderTarget + * @Brief Firmware Freelist holding usage state of the Parameter Buffers + */ +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListBaseDevVAddr; /*!< Freelist page table base address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListStateDevVAddr; /*!< Freelist state buffer base address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListLastGrowDevVAddr; /*!< Freelist base address at last grow */ + +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ + IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ +#endif + + IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ + IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ + IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ + IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/ +#endif +#if defined(SUPPORT_SHADOW_FREELISTS) + IMG_UINT32 ui32HWRCounter; + PRGXFWIF_FWMEMCONTEXT psFWMemContext; +#endif + IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ + IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ + IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ + IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ + + IMG_BOOL bUpdatePending; + IMG_UINT32 ui32UpdateNewPages; + IMG_UINT32 ui32UpdateNewReadyPages; +} UNCACHED_ALIGN RGXFWIF_FREELIST; + +typedef struct {RGXFWIF_DEV_VIRTADDR sNext; + RGXFWIF_DEV_VIRTADDR sPrev;} RGXFW_DLLIST_NODE; + +#define RGXFWIF_MAX_NUM_CANCEL_REQUESTS (8U) /* Maximum number of workload cancellation requests */ + +/*! + * @InGroup WorkloadContexts + * @Brief Firmware Common Context (or FWCC) + */ +typedef struct RGXFWIF_FWCOMMONCONTEXT_ +{ + /* CCB details for this firmware context */ + PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ + PRGXFWIF_CCCB psCCB; /*!< CCB base */ + RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; + + /* Context suspend state */ + PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ + + /* Flags e.g. 
for context switching */ + IMG_UINT32 ui32FWComCtxFlags; + IMG_INT32 i32Priority; /*!< Priority level */ + IMG_UINT32 ui32PrioritySeqNum; + + /* Framework state */ + PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ + + /* Misc and compatibility flags */ + IMG_UINT32 ui32CompatFlags; + + /* Statistic updates waiting to be passed back to the host... */ + IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ + IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ + IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ + RGXFWIF_DM eDM; /*!< Data Master type */ + RGXFW_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ + IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ + + IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ + bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ + IMG_BOOL bIPPContextNeedsReset; /*!< Following HWR during 3D, IPP context needs resetting */ + + RGXFW_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ + RGXFW_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ + RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ + + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + + /* References to the host side originators */ + IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ + IMG_UINT32 ui32PID; /*!< associated process ID */ + + IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ + + IMG_UINT32 ui32PipelinedKicks; /*!< Number of kick from this CCB currently submitted to the DM pipeline */ + IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ + + IMG_UINT32 ui32DeferCount; /*!< Number of context defers before forced scheduling of context */ + IMG_UINT32 aui32FirstIntJobRefToCancel[RGXFWIF_MAX_NUM_CANCEL_REQUESTS]; /*!< Saved values of the beginning of range of IntJobRefs at and above which workloads will be discarded */ + IMG_UINT32 aui32FirstValidIntJobRef[RGXFWIF_MAX_NUM_CANCEL_REQUESTS]; /*!< Saved values of the end of range of IntJobRef below which workloads will be discarded */ + IMG_BOOL bCancelRangesActive; /*!< True if any active ranges in aui32FirstIntJobRefToCancel and aui32FirstValidIntJobRef arrays */ + IMG_BOOL bLastKickedCmdWasSafetyOnly; + IMG_UINT32 ui32UID; /*!< associated process UID used in FW managed gpu work period hwperf events */ +} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; + +static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U, + "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); + +/*! 
+ ****************************************************************************** + * HWRTData + *****************************************************************************/ + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; + +/* HWRTData flags */ +/* Deprecated flags 1:0 */ +#define HWRTDATA_HAS_LAST_TA (IMG_UINT32_C(1) << 2) +#define HWRTDATA_PARTIAL_RENDERED (IMG_UINT32_C(1) << 3) +#define HWRTDATA_KILLED (IMG_UINT32_C(1) << 4) +#define HWRTDATA_KILL_AFTER_TARESTART (IMG_UINT32_C(1) << 5) +#if defined(SUPPORT_AGP) +#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (IMG_UINT32_C(1) << 6) +#if defined(SUPPORT_AGP4) +#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (IMG_UINT32_C(1) << 7) +#endif +#define HWRTDATA_GEOM_NEEDS_RESUME (IMG_UINT32_C(1) << 8) +#endif +#if defined(SUPPORT_TRP) +#define HWRTDATA_GEOM_TRP_IN_PROGRESS (IMG_UINT32_C(1) << 9) +#endif + +typedef enum +{ + RGXFWIF_RTDATA_STATE_NONE = 0, + RGXFWIF_RTDATA_STATE_KICKTA, + RGXFWIF_RTDATA_STATE_KICKTAFIRST, + RGXFWIF_RTDATA_STATE_TAFINISHED, + RGXFWIF_RTDATA_STATE_KICK3D, + RGXFWIF_RTDATA_STATE_3DFINISHED, + RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, + RGXFWIF_RTDATA_STATE_TAOUTOFMEM, + RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, + /* In case of HWR, we can't set the RTDATA state to NONE, + * as this will cause any TA to become a first TA. + * To ensure all related TA's are skipped, we use the HWR state */ + RGXFWIF_RTDATA_STATE_HWR, + RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU +} RGXFWIF_RTDATA_STATE; + +typedef struct +{ + IMG_UINT32 ui32ScreenPixelMax; + IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl; + IMG_UINT32 ui32TEStride; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32RgnStride; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; +} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; + +typedef struct +{ + IMG_UINT32 ui32RenderTargetIndex; //Render number + IMG_UINT32 ui32CurrentRenderTarget; //index in RTA + IMG_UINT32 ui32ActiveRenderTargets; //total active RTs + RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices + RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target + IMG_UINT32 ui32MaxRTs; //Number of render targets in the array + IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_RTA_CTL; + +/*! + * @InGroup RenderTarget + * @Brief Firmware Render Target data i.e. 
HWRTDATA used to hold the PM context + */ +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[2]; /*!< VCE Page Catalogue base */ + IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[2]; + IMG_UINT64 RGXFW_ALIGN ui64TECatBase[2]; /*!< TE Page Catalogue base */ + IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[2]; + IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ + IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; + +#if defined(PM_INTERACTIVE_MODE) + IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMMListDevVAddr; /*!< Mlist table base */ +#else + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMRenderStateDevVAddr; /*!< Series8 PM State buffers */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMSecureRenderStateDevVAddr; +#endif + + PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ + IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; + IMG_BOOL bRenderStateNeedsReset; + + IMG_UINT32 ui32HWRTDataFlags; + RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ + + RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ + + IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ +#if defined(RGX_FIRMWARE) + struct RGXFWIF_FWCOMMONCONTEXT_* psOwnerGeom; +#else + RGXFWIF_DEV_VIRTADDR pui32OwnerGeomNotUsedByHost; +#endif + +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ + IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ +#endif +#if defined(SUPPORT_TRP) + IMG_UINT32 ui32KickFlagsCopy; +#endif + + RGXFWIF_CLEANUP_CTL RGXFW_ALIGN_DCACHEL sCleanupState; /*!< Render target clean up state */ +} RGXFW_ALIGN_DCACHEL RGXFWIF_HWRTDATA; + +/* Sync_checkpoint firmware object. + * This is the FW-addressable structure used to hold the sync checkpoint's + * state and other information which needs to be accessed by the firmware. + */ +typedef struct +{ + IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ + IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ +} SYNC_CHECKPOINT_FW_OBJ; + +/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ +#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) + +typedef enum +{ + RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ + RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ + RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ + RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ + RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ + RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ + RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. 
Keep as last element */ +} RGXFWIF_REG_CFG_TYPE; + +#define RGXFWIF_KM_USC_TQ_SHADER_CODE_OFFSET_BYTES RGX_HEAP_KM_USC_RESERVED_REGION_OFFSET +#define RGXFWIF_KM_USC_TQ_SHADER_CODE_MAX_SIZE_BYTES (1U << 21) + +#define RGX_HEAP_KM_USC_RESERVED_SIZE RGXFWIF_KM_USC_TQ_SHADER_CODE_MAX_SIZE_BYTES +#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE + RGX_HEAP_KM_USC_RESERVED_SIZE + +#define RGXFW_SCRATCH_BUF_SIZE (1048576U) + +#define RGX_MAX_SECURE_ALLOC_NUM_CORES (8U) + +#define RGX_MAX_SECURE_ALLOC_NUM_CLUSTERS (12U) + +#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET +#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U + +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U + +#define RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES 8192U + +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES (0x00004000U * RGX_MAX_SECURE_ALLOC_NUM_CORES * RGX_MAX_SECURE_ALLOC_NUM_CLUSTERS + 48U + 1023U) + +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES + +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES (((0x00000080U + 127U) * RGX_MAX_SECURE_ALLOC_NUM_CORES) + 127U) + +#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_CONTEXT_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES) +#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_CONTEXT_MAX_SIZE_BYTES (((0x00000180U + 127U) * RGX_MAX_SECURE_ALLOC_NUM_CORES) + 127U) + +#if defined(SUPPORT_TRUSTED_DEVICE) +#define RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE PVR_ALIGN((RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_CONTEXT_MAX_SIZE_BYTES), DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) +#else +#define RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE 0 +#endif + #endif /* RGX_FWIF_SHARED_H */ /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_heaps.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_heaps.h index 00dcc9f8c903..4e72d8d1a7bb 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_heaps.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_heaps.h 
@@ -53,7 +53,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ #define RGX_USCCODE_BPH_HEAP_IDENT "BP Handler USC Code" /*!< RGX USC Code for breakpoint handlers Heap Identifier */ #define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX vulkan capture replay buffer Heap Identifier */ -#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Compute Signals Heap Identifier */ #define RGX_COMPONENT_CTRL_HEAP_IDENT "Component Control" /*!< RGX DCE Component Control Heap Identifier */ #define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ #define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf.h index f48b31d4b405..d0d4014a117a 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf.h @@ -61,9 +61,6 @@ extern "C" { #include "rgx_common.h" #include "rgx_hwperf_common.h" -#include "pvrsrv_tlcommon.h" -#include "pvrsrv_sync_km.h" - #if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) /* HWPerf interface assumption checks */ @@ -87,1125 +84,16 @@ static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, * list, it should be appended at the end to maintain backward compatibility * of HWPerf data. */ -typedef enum { - - RGX_HWPERF_DM_GP, - RGX_HWPERF_DM_TDM, - RGX_HWPERF_DM_GEOM, - RGX_HWPERF_DM_3D, - RGX_HWPERF_DM_CDM, - RGX_HWPERF_DM_RTU, - - RGX_HWPERF_DM_LAST, - - RGX_HWPERF_DM_INVALID = 0x1FFFFFFF -} RGX_HWPERF_DM; - -/*! Enum containing bit position for 32bit feature flags used in hwperf and api */ -typedef enum { - RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x0001, - RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x0002, - RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x0004, - RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x0008, - RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x0010, - RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x0020, - RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x0040, - RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x0080, - RGX_HWPERF_FEATURE_MULTICORE_FLAG = 0x0100, - RGX_HWPERF_FEATURE_RAYTRACING_FLAG = 0x0200, - RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG = 0x0400, - RGX_HWPERF_FEATURE_VOLCANIC_FLAG = 0x0800, - RGX_HWPERF_FEATURE_ROGUE_FLAG = 0x1000, - RGX_HWPERF_FEATURE_OCEANIC_FLAG = 0x2000 -} RGX_HWPERF_FEATURE_FLAGS; - -/*! This structure holds the data of a firmware packet. */ -typedef struct -{ - RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ - IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ - IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ - IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ - IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ - IMG_UINT32 ui32Padding; /*!< Reserved */ -} RGX_HWPERF_FW_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); - -/*! This structure holds the data of a hardware packet, including counters. 
*/ -typedef struct -{ - IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ - IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ - IMG_UINT32 ui32PID; /*!< Process identifier */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ - IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ - IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ - IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ - IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ - IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ - IMG_UINT32 ui32CtxPriority; /*!< Context priority */ - IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ - IMG_UINT32 ui32KickInfo; /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ - IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */ - IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */ - IMG_UINT32 ui32Padding2; /*!< Reserved. To ensure correct alignment (not written in the packet) */ -} RGX_HWPERF_HW_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); -RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream); - -/*! Mask for use with the aui32CountBlksStream field when decoding the - * counter block ID and mask word. */ -#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U -#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U - -/*! MAX value used in server handling of counter config arrays */ -#define RGX_CNTBLK_COUNTERS_MAX PVRSRV_HWPERF_COUNTERS_PERBLK - - -/*! Obtains the counter block ID word from an aui32CountBlksStream field. - * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit - * within group (3-0) */ -#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) - -/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address - * and stream index. May be used in decoding the counter block stream words of - * a RGX_HWPERF_HW_DATA structure. */ -#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) - -/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ -#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) - -#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) - -/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address - * and stream index. May be used in decoding the counter block stream words - * of a RGX_HWPERF_HW_DATA structure. */ -#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) - -/*! Context switch packet event */ -typedef struct -{ - RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */ - IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ - IMG_UINT32 ui32PerfCycle; /*!< Cycle count. 
Used to measure HW context store latency */ - IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ - IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ -} RGX_HWPERF_CSW_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); - -/*! Enumeration of clocks supporting this event */ -typedef enum -{ - RGX_HWPERF_CLKS_CHG_INVALID = 0, - - RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, - - RGX_HWPERF_CLKS_CHG_LAST, -} RGX_HWPERF_CLKS_CHG_NAME; - -/*! This structure holds the data of a clocks change packet. */ -typedef struct -{ - IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ - RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ - IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ - IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ - IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and - correlated to OSTimeStamp */ -} RGX_HWPERF_CLKS_CHG_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); - -/*! Enumeration of GPU utilisation states supported by this event */ -typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; - -/*! This structure holds the data of a GPU utilisation state change packet. */ -typedef struct -{ - RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ - IMG_UINT32 uiUnused1; /*!< Padding */ - IMG_UINT32 uiUnused2; /*!< Padding */ - IMG_UINT32 uiUnused3; /*!< Padding */ -} RGX_HWPERF_GPU_STATE_CHG_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); - - -/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ -#define HWPERF_PWR_EST_V1_SIG 0x48504531 - -/*! Macros to obtain a component field from a counter ID word */ -#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) -#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28) -/*!< Obtains the GPU ID from a counter ID word */ -#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) -#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) - -#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31) -#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28) -#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U) -#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24) -#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU) -#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU) - -/*! This macro constructs a counter ID for a power estimate data stream from - * the component parts of: high word flag, unit id, GPU id, counter number */ -#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \ - ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<= RGX_BVNC_STR_SIZE_MAX), - "Space inside HWPerf packet data for BVNC string insufficient"); - -#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U) - -/*! BVNC Features */ -typedef struct -{ - /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ - IMG_UINT16 ui16BlockID; - - /*! Number of counters in this block type */ - IMG_UINT16 ui16NumCounters; - - /*! Number of blocks of this type */ - IMG_UINT16 ui16NumBlocks; - - /*! Reserved for future use */ - IMG_UINT16 ui16Reserved; -} RGX_HWPERF_BVNC_BLOCK; - -/*! BVNC Features */ -typedef struct -{ - IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */ - IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */ - IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */ - IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */ - RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. 
See RGX_HWPERF_BVNC_BLOCK */ -} RGX_HWPERF_BVNC; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); - -/*! Performance Counter Configuration data element. */ -typedef struct -{ - IMG_UINT32 ui32BlockID; /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */ - IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ - IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ -} RGX_HWPERF_COUNTER_CFG_DATA_EL; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); - -/*! Performance Counter Configuration data. */ -typedef struct -{ - IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ - RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ - IMG_UINT32 ui32Padding; /*!< reserved */ -} RGX_HWPERF_COUNTER_CFG; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); - -/*! Sub-event's data. */ -typedef union -{ - struct - { - RGX_HWPERF_DM eDM; /*!< Data Master ID. */ - RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ - IMG_UINT32 ui32DMContext; /*!< FW render context */ - } sHWR; /*!< HWR sub-event data. */ - - RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ - struct - { - IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ - IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ - } sEvMsk; /*!< HW Filter Mask */ - RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ - - struct - { - RGX_HWPERF_DM eDM; /*!< Data Master ID. */ - IMG_UINT32 ui32DMContext; /*!< FW context */ - IMG_UINT32 ui32CoreMask; /*!< Multicore mask. */ - IMG_UINT32 ui32KickID; /*!< Kick Id cancelled. */ - } sKickCancel; /*!< Kick cancel sub-event data. */ -} RGX_HWPERF_FWACT_DETAIL; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); - -/*! This structure holds the data of a FW activity event packet */ -typedef struct -{ - RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ - RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ - IMG_UINT32 ui32Padding; /*!< Reserved. */ -} RGX_HWPERF_FWACT_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); - - -typedef enum { - RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */ - RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */ - RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */ - RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */ - RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */ - RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */ - - RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */ -} RGX_HWPERF_UFO_EV; - -/*! Data stream tuple. */ -typedef union -{ - struct - { - IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ - IMG_UINT32 ui32Value; /*!< Value of the UFO object */ - } sCheckSuccess; - struct - { - IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ - IMG_UINT32 ui32Value; /*!< Value of the UFO object */ - IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */ - } sCheckFail; - struct - { - IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ - IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */ - IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */ - } sUpdate; -} RGX_HWPERF_UFO_DATA_ELEMENT; - -/*! This structure holds the packet payload data for UFO event. 
*/ -typedef struct -{ - RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */ - IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data - at the time the packet was generated. - Used to approximate Host timestamps for - these events. */ - IMG_UINT32 ui32PID; /*!< Client process identifier */ - IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX - API to track submitted work (for - debugging/trace purposes) */ - IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track - submitted work (for debugging / trace - purposes) */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the - stream and stream data offset in the - payload */ - RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */ - IMG_UINT32 ui32Padding; /*!< Unused, reserved */ - IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */ -} RGX_HWPERF_UFO_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); - - -/*! - * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent - * between KICK_START / KICK_END inclusively for all event types. - */ -typedef enum -{ - RGX_HWPERF_KICK_TYPE_RESERVED_0, /*!< Replaced by separate TA and 3D types (Deprecated) */ - RGX_HWPERF_KICK_TYPE_RESERVED_1, /*!< Compute Data Master Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_CDM) */ - RGX_HWPERF_KICK_TYPE_RESERVED_2, /*!< Ray Store Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_RS) */ - RGX_HWPERF_KICK_TYPE_RESERVED_3, /*!< Scene Hierarchy Generator Kick (Deprecated) */ - RGX_HWPERF_KICK_TYPE_RESERVED_4, /*!< TQ 2D Data Master Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQTDM) */ - RGX_HWPERF_KICK_TYPE_RESERVED_5, /*!< Sync Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_SYNC) */ - RGX_HWPERF_KICK_TYPE_RESERVED_6, /*!< TA Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_GEOM) */ - RGX_HWPERF_KICK_TYPE_RESERVED_7, /*!< 3D Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_3D) */ - RGX_HWPERF_KICK_TYPE_RESERVED_8, - - RGX_HWPERF_KICK_TYPE_UNIFIED = 0x10, - - RGX_HWPERF_KICK_TYPE2_TQ2D, /*!< 2D TQ Kick */ - RGX_HWPERF_KICK_TYPE2_TQ3D, /*!< 3D TQ Kick */ - RGX_HWPERF_KICK_TYPE2_TQTDM, /*!< 2D Data Master TQ Kick */ - RGX_HWPERF_KICK_TYPE2_CDM, /*!< Compute Kick */ - RGX_HWPERF_KICK_TYPE2_GEOM, /*!< GEOM Kick */ - RGX_HWPERF_KICK_TYPE2_3D, /*!< 3D Kick */ - RGX_HWPERF_KICK_TYPE2_SYNC, /*!< Sync Kick */ - RGX_HWPERF_KICK_TYPE2_RS, /*!< Ray Store Kick */ - RGX_HWPERF_KICK_TYPE2_LAST, - - RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff -} RGX_HWPERF_KICK_TYPE; - -typedef struct -{ - RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for - scheduling on GPU hardware. 
- See RGX_HWPERF_KICK_TYPE */ - IMG_UINT32 ui32PID; /*!< Client process identifier */ - IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API - to track submitted work (for debugging / - trace purposes) */ - IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted - work (for debugging / trace purposes) */ - IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ - IMG_UINT32 ui32Padding; /*!< Unused, reserved */ - IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ - IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ - IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ - IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ - PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ - PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ - PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ - - /* Align structure size to 8 bytes */ -} RGX_HWPERF_HOST_ENQ_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef struct -{ - RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ - IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and - stream data offset in the payload */ -#ifdef __CHECKER__ - /* Since we're not conforming to the C99 standard by not using a flexible - * array member need to add a special case for Smatch static code analyser. */ - IMG_UINT32 aui32StreamData[]; -#else - IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - /*!< Series of tuples holding UFO objects data */ - - IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ -#endif -} RGX_HWPERF_HOST_UFO_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! - * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been - * Allocated, Freed or Modified. The values are used to determine which event - * data structure to use to decode the data from the event stream - */ -typedef enum -{ - RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ - RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ - RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, - /*!< Timeline resource packets are - now emitted in client hwperf buffer */ - RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ - RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ - RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ - - RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ -} RGX_HWPERF_HOST_RESOURCE_TYPE; - -typedef union -{ - /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer - * generated in the HOST stream. Timeline data is now provided in the - * CLIENT stream instead. 
- */ - struct - { - IMG_UINT32 uiPid; /*!< Identifier of owning process */ - IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - } sTimelineAlloc; - - /*! Data for TYPE_FENCE_PVR */ - struct - { - IMG_PID uiPID; /*!< Identifier of owning process */ - PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ - IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point - backing this fence on the GPU */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sFenceAlloc; - - /*! Data for TYPE_SYNC_CP */ - struct - { - IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ - PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ - IMG_PID uiPID; /*!< Identifier of owning process */ - PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sSyncCheckPointAlloc; - - /*! Data for TYPE_FENCE_SW */ - struct - { - IMG_PID uiPID; /*!< Identifier of owning process */ - PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ - PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ - IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sSWFenceAlloc; - - /*! Data for TYPE_SYNC */ - struct - { - IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - } sSyncAlloc; -} RGX_HWPERF_HOST_ALLOC_DETAIL; - -typedef struct -{ - RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; - /*!< This describes the type of the resource - allocated in the driver. See - RGX_HWPERF_HOST_RESOURCE_TYPE */ - RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; - /*!< Union of structures providing further - data regarding the resource allocated. - Size of data varies with union member that - is present, check ``ui32AllocType`` value - to decode */ -} RGX_HWPERF_HOST_ALLOC_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef union -{ - /*! Data for TYPE_TIMELINE (*Deprecated*) */ - struct - { - IMG_UINT32 uiPid; /*!< Identifier of owning process */ - IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - } sTimelineDestroy; - - /*! Data for TYPE_FENCE_PVR */ - struct - { - IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. */ - } sFenceDestroy; - - /*! Data for TYPE_SYNC_CP */ - struct - { - IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ - } sSyncCheckPointFree; - - /*! 
Data for TYPE_SYNC */ - struct - { - IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ - } sSyncFree; -} RGX_HWPERF_HOST_FREE_DETAIL; - -typedef struct -{ - RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; - /*!< This describes the type of the resource - freed or released by the driver. See - RGX_HWPERF_HOST_RESOURCE_TYPE */ - RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; - /*!< Union of structures providing further data - regarding the resource freed. Size of data - varies with union member that is present, - check ``ui32FreeType`` value to decode */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -} RGX_HWPERF_HOST_FREE_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef struct -{ - IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of - the time domains correlation table */ - IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the - time domains correlation table */ - IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of - the time domains correlation table */ - IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ -} RGX_HWPERF_HOST_CLK_SYNC_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef union -{ - /*! Data for TYPE_FENCE_PVR */ - struct - { - IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence - resource that has been created */ - IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ - IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing - the fence on the GPU */ - IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; - /*!< Label or name given to the sync resource */ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - } sFenceMerge; -} RGX_HWPERF_HOST_MODIFY_DETAIL; - -typedef struct -{ - RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; - /*!< Describes the type of the resource - modified by the driver. See - RGX_HWPERF_HOST_RESOURCE_TYPE */ - - RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; - /*!< Union of structures providing further - data regarding the resource modified. - Size of data varies with union member that - is present. - Check ``uiModifyType`` value to decode */ -} RGX_HWPERF_HOST_MODIFY_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ -static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef enum -{ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING,/*!< Device not responding to requests */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ - - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST -} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; - -typedef enum -{ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ - - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST -} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; - -/*! RGX_HWPERF_DEV_INFO_EV values */ -typedef enum -{ - RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ - - RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ -} RGX_HWPERF_DEV_INFO_EV; - -/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing - * further data regarding the device's status - */ -typedef union -{ - /*! Data for device status event */ - struct - { - RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; - /*!< Device's health status */ - RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; - /*!< Reason for device's health status */ - } sDeviceStatus; -} RGX_HWPERF_HOST_DEV_INFO_DETAIL; - -/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ -typedef struct -{ - IMG_UINT32 ui32Padding; - /*!< Reserved. Align structure size to 8 bytes */ - RGX_HWPERF_DEV_INFO_EV eEvType; - /*!< Type of the sub-event. See - RGX_HWPERF_DEV_INFO_EV */ - RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; - /*!< Union of structures providing further data - regarding the device's status. Size of data - varies with union member that is present, - check ``eEvType`` value to decode */ -} RGX_HWPERF_HOST_DEV_INFO_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ -typedef enum -{ - RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */ - RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ -} RGX_HWPERF_INFO_EV; - -/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the - * RGX_HWPERF_HOST_INFO_DATA event. - */ -typedef union -{ - /*! Host Memory usage statistics */ - struct - { - IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */ - /*! 
Detailed memory usage */ - struct - { - IMG_UINT32 ui32Pid; /*!< Process ID */ - IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */ - IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */ - } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; - } sMemUsageStats; -} RGX_HWPERF_HOST_INFO_DETAIL; - -/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device - * memory usage information. - */ -typedef struct -{ - IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ - RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ - RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; - /*!< Union of structures providing further data - regarding memory usage. Size varies with union - member that is present, check ``eEvType`` - value to decode */ -} RGX_HWPERF_HOST_INFO_DATA; - -/* Payload size must be multiple of 8 bytes to align start of next packet. */ -static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! FENCE_WAIT_TYPE definitions */ -typedef enum -{ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ - - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; - -/*! FENCE_WAIT_RESULT definitions */ -typedef enum -{ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ - - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; - -/*! FENCE_WAIT_DETAIL Event Payload */ -typedef union -{ -/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ - struct - { - IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ - } sBegin; - - /*! Data for SYNC_FENCE_WAIT_TYPE_END */ - struct - { - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ - } sEnd; -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; - -/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure - * is received whenever the host driver handles a wait for sync event request. - */ -typedef struct -{ - IMG_PID uiPID; /*!< Identifier of the owning process */ - PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; - /*!< Type of the subevent, see - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; - /*!< Union of structures providing further data - regarding device's status. Size of data varies with - union member that is present, check ``eType`` value - to decode */ - -} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; - -static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. - * Software Timeline Advanced Event Payload. This data structure is received - * whenever the host driver processes a Software Timeline Advanced event. 
- */ -typedef struct -{ - IMG_PID uiPID; /*!< Identifier of the owning process */ - PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ - IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the - timeline has advanced */ - -} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; - -static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef enum -{ - RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */ - RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */ - - RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */ -} RGX_HWPERF_HOST_CLIENT_INFO_TYPE; - -typedef struct -{ - IMG_PID uiClientPID; /*!< Client process identifier */ - IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */ - IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */ -} RGX_HWPERF_HOST_CLIENT_PROC_NAME; - -#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \ - ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen))) - -typedef union -{ - struct - { - IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */ - RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - } sProcName; -} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL; - -typedef struct -{ - IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */ - RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType; - /*!< Type of the subevent, see - RGX_HWPERF_HOST_CLIENT_INFO_TYPE */ - RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail; - /*!< Union of structures. Size of data - varies with union member that is present, - check ``eType`` value to decode */ - -} RGX_HWPERF_HOST_CLIENT_INFO_DATA; - -static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, - "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); - -typedef enum -{ - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, - RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, - - RGX_HWPERF_RESOURCE_TYPE_COUNT -} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; - -typedef struct -{ - IMG_UINT32 ui32Height; - IMG_UINT32 ui32Width; - IMG_UINT32 ui32BPP; - IMG_UINT32 ui32PixFormat; -} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; - -typedef struct -{ - IMG_INT32 i32XOffset; /*!< render surface X shift */ - IMG_INT32 i32YOffset; /*!< render surface Y shift */ - IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ - IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ -} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; - -typedef union -{ - struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES - { - IMG_UINT32 ui32RenderSurfaceCount; - RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - } sRenderSurfaces; - - struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS - { - RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; - } sTLTBuffers; -} RGX_RESOURCE_CAPTURE_DETAIL; - -typedef struct -{ - RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; - IMG_PID uPID; - IMG_UINT32 ui32ContextID; - IMG_UINT32 ui32FrameNum; - IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */ - IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the 
capture is originating from. */ - RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ -} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; - -#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) - -/*! Tile Lifetime Tracking header size. Only available if - * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via - * SUPPORT_TLT_PERF - */ -#define RGX_TLT_HARDWARE_HDR_SIZE (16U) - -/* PVRSRVGetHWPerfResourceCaptureResult */ -typedef enum -{ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */ - RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ -} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; - -typedef struct -{ - IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ - IMG_UINT32 ui32CtxID; - RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, - unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ - IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ -} RGX_RESOURCE_CAPTURE_RESULT; - -/*! This type is a union of packet payload data structures associated with - * various FW and Host events */ -typedef union -{ - RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, - events ``0x01-0x06`` */ - RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, - events ``0x07-0x19``, ``0x28-0x29`` - See RGX_HWPERF_HW_DATA */ - RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet - data, events ``0x1A`` */ - RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state - change event packet data, - events ``0x1B`` */ - RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event - packet data, - events ``0x20-0x22`` */ - RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, - events ``0x23`` */ - RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, - events ``0x30-0x31`` */ - RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, - events ``0x32`` */ - RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ - RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event - packet data, - events ``0x39`` */ - /* */ - RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, - events ``0x01`` (Host) */ - RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, - events ``0x02`` (Host) */ - RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, - events ``0x03`` (Host) */ - RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, - events ``0x04`` (Host) */ - RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, - events ``0x05`` (Host) */ - RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, - events ``0x06`` (Host) */ - RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, - events ``0x07`` (Host) */ - RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, - events ``0x08`` (Host) */ - RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, - events ``0x09`` (Host) */ - RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host 
SW-timeline advance - data, events ``0x0A`` (Host) */ - RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info, - events ``0x0B`` (Host) */ -} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA; - -RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); +#define RGX_HWPERF_DM_GP 0x00000000U +#define RGX_HWPERF_DM_TDM 0x00000001U +#define RGX_HWPERF_DM_GEOM 0x00000002U +#define RGX_HWPERF_DM_3D 0x00000003U +#define RGX_HWPERF_DM_CDM 0x00000004U +#define RGX_HWPERF_DM_RTU 0x00000005U -#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) +#define RGX_HWPERF_DM_LAST 0x00000006U -#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ - ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) +#define RGX_HWPERF_DM_INVALID 0x1FFFFFFFU /****************************************************************************** * API Types @@ -1244,6 +132,8 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; #define RGX_CNTBLK_ID_ISP5 0x0015U #define RGX_CNTBLK_ID_ISP6 0x0016U #define RGX_CNTBLK_ID_ISP7 0x0017U +#define RGX_CNTBLK_ID_ISP8 0x0018U +#define RGX_CNTBLK_ID_ISP9 0x0019U #define RGX_CNTBLK_ID_ISP_ALL 0x4010U #define RGX_CNTBLK_ID_MERCER0 0x0020U /*!< MERCER 1..N MERCER */ @@ -1254,6 +144,8 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; #define RGX_CNTBLK_ID_MERCER5 0x0025U #define RGX_CNTBLK_ID_MERCER6 0x0026U #define RGX_CNTBLK_ID_MERCER7 0x0027U +#define RGX_CNTBLK_ID_MERCER8 0x0028U +#define RGX_CNTBLK_ID_MERCER9 0x0029U #define RGX_CNTBLK_ID_MERCER_ALL 0x4020U #define RGX_CNTBLK_ID_PBE0 0x0030U /*!< PBE 1..N PBE_PER_SPU x N SPU */ @@ -1280,6 +172,8 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; #define RGX_CNTBLK_ID_USC5 0x0055U #define RGX_CNTBLK_ID_USC6 0x0056U #define RGX_CNTBLK_ID_USC7 0x0057U +#define RGX_CNTBLK_ID_USC8 0x0058U +#define RGX_CNTBLK_ID_USC9 0x0059U #define RGX_CNTBLK_ID_USC_ALL 0x4050U #define RGX_CNTBLK_ID_TPU0 0x0060U /*!< TPU 1..N TPU */ @@ -1290,6 +184,8 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; #define RGX_CNTBLK_ID_TPU5 0x0065U #define RGX_CNTBLK_ID_TPU6 0x0066U #define RGX_CNTBLK_ID_TPU7 0x0067U +#define RGX_CNTBLK_ID_TPU8 0x0068U +#define RGX_CNTBLK_ID_TPU9 0x0069U #define RGX_CNTBLK_ID_TPU_ALL 0x4060U #define RGX_CNTBLK_ID_SWIFT0 0x0070U /*!< SWIFT 1..N SWIFT */ @@ -1300,6 +196,8 @@ typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; #define RGX_CNTBLK_ID_SWIFT5 0x0075U #define RGX_CNTBLK_ID_SWIFT6 0x0076U #define RGX_CNTBLK_ID_SWIFT7 0x0077U +#define RGX_CNTBLK_ID_SWIFT8 0x0078U +#define RGX_CNTBLK_ID_SWIFT9 0x0079U #define RGX_CNTBLK_ID_SWIFT_ALL 0x4070U #define RGX_CNTBLK_ID_TEXAS0 0x0080U /*!< TEXAS 1..N TEXAS */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf_table.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf_table.h index 5c206e951ea0..9364d500e011 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf_table.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_hwperf_table.h @@ -180,8 +180,7 @@ static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkTyp /*NOTREACHED*/ break; default: - if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && - (ui32NumDustsEnabled > 0U)) + if (rgxfw_pow_is_rd_on() && (ui32NumDustsEnabled > 0U)) { return IMG_TRUE; } @@ -197,8 +196,8 @@ static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkTyp #else /* !defined(RGX_FIRMWARE) || 
!defined(RGX_FEATURE_PERFBUS) */ -# define rgxfw_hwperf_pow_st_direct ((void *)NULL) -# define rgxfw_hwperf_pow_st_indirect ((void *)NULL) +# define rgxfw_hwperf_pow_st_direct ((PFN_RGXFW_HWPERF_CNTBLK_POWERED)NULL) +# define rgxfw_hwperf_pow_st_indirect ((PFN_RGXFW_HWPERF_CNTBLK_POWERED)NULL) #endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ @@ -316,7 +315,10 @@ static inline IMG_BOOL rgx_hwperf_blk_present(const RGXFW_HWPERF_CNTBLK_TYPE_MOD if (ui32RTArchVal > 2U) { psRtInfo->uiNumUnits = - PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU0_RAC_PRESENT) + + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU1_RAC_PRESENT) + + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU2_RAC_PRESENT) + + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU3_RAC_PRESENT); } else { diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_options.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_options.h index fc2f94c9ee9b..3f489fc7fdee 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_options.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgx_options.h @@ -46,12 +46,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * The corresponding bit is set if the build option was enabled at compile * time. * - * In order to extract the enabled build flags the INTERNAL_TEST switch should - * be enabled in a client program which includes this header. Then the client - * can test specific build flags by reading the bit value at - * ##OPTIONNAME##_SET_OFFSET - * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. - * * IMPORTANT: add new options to unused bits or define a new dword * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield * remains backwards compatible. @@ -60,7 +54,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGX_OPTIONS_H #define RGX_OPTIONS_H -#define OPTIONS_NO_HARDWARE_EN (0x1UL << 0) +#define OPTIONS_OPEN_SOURCE_EN (0x1UL << 0) #define OPTIONS_PDUMP_EN (0x1UL << 1) #define OPTIONS_SECURE_CONTEXT_SWITCH_EN (0x1UL << 2) #define OPTIONS_SECURE_ALLOC_KM_EN (0x1UL << 3) @@ -76,11 +70,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define OPTIONS_AUTOVZ_HW_REGS_EN (0x1UL << 13) #define OPTIONS_UNUSED2_EN (0x1UL << 14) #define OPTIONS_VALIDATION_EN (0x1UL << 15) +#define OPTIONS_NO_HARDWARE_EN (0x1UL << 16) +#define OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN (0x1UL << 17) +#define OPTIONS_NUM_DRIVERS_SUPPORTED_MASK (0x1fUL << 18) +#define OPTIONS_NUM_DRIVERS_SUPPORTED_SHIFT (18) +/* Option bits[22:18] are used for max number of drivers supported by the FW. */ #define OPTIONS_PERCONTEXT_FREELIST_EN (0x1UL << 31) #define RGX_BUILD_OPTIONS_MASK_KM \ - (OPTIONS_NO_HARDWARE_EN | \ + (OPTIONS_OPEN_SOURCE_EN | \ OPTIONS_PDUMP_EN | \ OPTIONS_SECURE_CONTEXT_SWITCH_EN | \ OPTIONS_SECURE_ALLOC_KM_EN | \ @@ -94,10 +93,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
OPTIONS_BUFFER_SYNC_EN | \ OPTIONS_AUTOVZ_EN | \ OPTIONS_AUTOVZ_HW_REGS_EN | \ - OPTIONS_VALIDATION_EN) + OPTIONS_VALIDATION_EN | \ + OPTIONS_NO_HARDWARE_EN | \ + OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN) #define RGX_BUILD_OPTIONS_MASK_FW \ (RGX_BUILD_OPTIONS_MASK_KM & \ + ~OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN & \ + ~OPTIONS_NUM_DRIVERS_SUPPORTED_MASK & \ ~OPTIONS_BUFFER_SYNC_EN) /* Build options that the FW must have if the present on the KM */ @@ -112,6 +115,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. OPTIONS_PERCONTEXT_FREELIST_EN) & \ ~(OPTIONS_DEBUG_EN | \ OPTIONS_WORKLOAD_ESTIMATION_EN | \ + OPTIONS_OPEN_SOURCE_EN | \ OPTIONS_PDVFS_EN)) /* Build options that the KM must have if the present on the UM */ @@ -120,20 +124,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~(OPTIONS_DEBUG_EN | \ OPTIONS_WORKLOAD_ESTIMATION_EN | \ OPTIONS_PDVFS_EN | \ + OPTIONS_OPEN_SOURCE_EN | \ OPTIONS_BUFFER_SYNC_EN)) -#define NO_HARDWARE_OPTION "NO_HARDWARE " -#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) - #define OPTIONS_BIT0 OPTIONS_NO_HARDWARE_EN +#define OPEN_SOURCE_OPTION "OPEN_SOURCE_DRIVER " +#if defined(SUPPORT_OPEN_SOURCE_DRIVER) + #define OPTIONS_BIT0 OPTIONS_OPEN_SOURCE_EN #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" #endif #else #define OPTIONS_BIT0 0x0UL -#endif /* NO_HARDWARE */ +#endif /* SUPPORT_OPEN_SOURCE_DRIVER */ #define PDUMP_OPTION "PDUMP " -#if defined(PDUMP) || defined(INTERNAL_TEST) +#if defined(PDUMP) #define OPTIONS_BIT1 OPTIONS_PDUMP_EN #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -163,7 +168,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_SECURE_ALLOC_KM */ #define RGX_OPTION " " -#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) +#if defined(SUPPORT_RGX) #define OPTIONS_BIT4 OPTIONS_RGX_EN #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -173,7 +178,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_RGX */ #define SECURE_EXPORT_OPTION "SECURE_EXPORTS " -#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) +#if defined(SUPPORT_SECURE_EXPORT) #define OPTIONS_BIT5 OPTIONS_SECURE_EXPORT_EN #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -183,7 +188,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_SECURE_EXPORT */ #define INSECURE_EXPORT_OPTION "INSECURE_EXPORTS " -#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) +#if defined(SUPPORT_INSECURE_EXPORT) #define OPTIONS_BIT6 OPTIONS_INSECURE_EXPORT_EN #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -193,7 +198,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_INSECURE_EXPORT */ #define VFP_OPTION "VFP " -#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) +#if defined(SUPPORT_VFP) #define OPTIONS_BIT7 OPTIONS_VFP_EN #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -203,7 +208,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#endif /* SUPPORT_VFP */ #define WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " -#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) +#if defined(SUPPORT_WORKLOAD_ESTIMATION) #define OPTIONS_BIT8 OPTIONS_WORKLOAD_ESTIMATION_EN #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -213,7 +218,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_WORKLOAD_ESTIMATION */ #define PDVFS_OPTION "PDVFS " -#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) +#if defined(SUPPORT_PDVFS) #define OPTIONS_BIT9 OPTIONS_PDVFS_EN #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -223,7 +228,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* SUPPORT_PDVFS */ #define DEBUG_OPTION "DEBUG " -#if defined(DEBUG) || defined(INTERNAL_TEST) +#if defined(DEBUG) #define OPTIONS_BIT10 OPTIONS_DEBUG_EN #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -233,7 +238,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* DEBUG */ #define BUFFER_SYNC_OPTION "BUFFER_SYNC " -#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) +#if defined(SUPPORT_BUFFER_SYNC) #define OPTIONS_BIT11 OPTIONS_BUFFER_SYNC_EN #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" @@ -274,14 +279,24 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif #define VALIDATION_OPTION "VALIDATION " -#if defined(SUPPORT_VALIDATION) - #define OPTIONS_BIT15 OPTIONS_VALIDATION_EN - #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM + #define OPTIONS_BIT15 0x0UL + +#define NO_HARDWARE_OPTION "NO_HARDWARE " +#if defined(NO_HARDWARE) + #define OPTIONS_BIT16 OPTIONS_NO_HARDWARE_EN + #if OPTIONS_BIT16 > RGX_BUILD_OPTIONS_MASK_KM #error "Bit exceeds reserved range" #endif #else - #define OPTIONS_BIT15 0x0UL -#endif /* SUPPORT_VALIDATION */ + #define OPTIONS_BIT16 0x0UL +#endif /* NO_HARDWARE */ + +#define NUM_DRIVERS_SUPPORTED_CHECK_OPTION "NUM_DRIVERS_SUPPORTED_CHECK " + #define OPTIONS_BIT17 OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN + #if OPTIONS_BIT17 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif /* NUM_DRIVERS_SUPPORTED_CHECK */ +/* Option bits[22:18] are used for max number of drivers supported by the FW. */ #define OPTIONS_BIT31 OPTIONS_PERCONTEXT_FREELIST_EN #if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM @@ -304,13 +319,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. OPTIONS_BIT12 |\ OPTIONS_BIT13 |\ OPTIONS_BIT14 |\ - OPTIONS_BIT15) + OPTIONS_BIT15 |\ + OPTIONS_BIT16 |\ + OPTIONS_BIT17) #define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) #define RGX_BUILD_OPTIONS_LIST \ { \ - NO_HARDWARE_OPTION, \ + OPEN_SOURCE_OPTION, \ PDUMP_OPTION, \ SECURE_CONTEXT_SWITCH_OPTION, \ SECURE_ALLOC_KM_OPTION, \ @@ -325,7 +342,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
AUTOVZ_OPTION, \ AUTOVZ_HW_REGS_OPTION, \ INTERNAL_UNUSED2_OPTION, \ - VALIDATION_OPTION \ + VALIDATION_OPTION, \ + NO_HARDWARE_OPTION, \ + NUM_DRIVERS_SUPPORTED_CHECK_OPTION \ } #endif /* RGX_OPTIONS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgxheapconfig.h b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgxheapconfig.h index fdbad2d5308e..00801fcaae84 100644 --- a/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgxheapconfig.h +++ b/drivers/gpu/drm/img/img-volcanic/include/volcanic/rgxheapconfig.h @@ -46,12 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdefs_km.h" - -#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000) -#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) -#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000) - -#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000) +#define RGX_HEAP_SIZE_32KiB IMG_UINT64_C(0x0000008000) #define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000) #define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000) #define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000) @@ -71,9 +66,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* RGX Device Virtual Address Space Definitions - NOTES: - Base addresses have to be a multiple of 4MiB - This file defines the RGX virtual address heaps that are used in application memory contexts. It also shows where the Firmware memory heap fits into this, but the firmware heap is only ever created in the @@ -105,13 +97,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* 0x00_0000_0000 ************************************************************/ -/* 0x00_0000_0000 - 0x00_0040_0000 **/ - /* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/ +/* 0x00_0000_0000 - 0x00_0020_0000 **/ + /* 0 MiB to 2 MiB, size of 2 MiB : RESERVED (only when General SVM + * doesn't exist) **/ + +/* 0x00_0000_8000 - 0x7F_FFFF_8000 **/ + /* MAX(32 KiB, PAGE_SIZE) to 512 GiB, size of 512 GiB less MAX(32 KiB, PAGE_SIZE) : GENERAL_SVM_HEAP **/ -/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/ - /* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/ - #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000) - #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB) + /* The MAX is determined at runtime (PAGE_SIZE isn't available on all platforms) + * so the #define's must NOT be used directly. Use the heap config after initialisation. */ + #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000008000) + #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_32KiB) /* 0x80_0000_0000 ************************************************************/ @@ -213,12 +209,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/ /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/ -/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/ - /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/ - /* CDM Signals heap (31 signals less one reserved for Services). 
- * Size 960B rounded up to minimum heap size */ - #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) - #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE +/* 0xEA_0000_0000 - 0xEA_001F_FFFF **/ + /* 936 GiB to 937 GiB, size of 1 GiB : FREE **/ /* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/dma_flags.h b/drivers/gpu/drm/img/img-volcanic/services/include/dma_flags.h index 587e41b3c892..fa6e77fa28c8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/dma_flags.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/dma_flags.h @@ -41,7 +41,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef DMA_FLAGS_H #define DMA_FLAGS_H -/* these should match flags in pvrsrv_dma.h */ +/* These should match flags in pvrsrv_dma.h */ #define DMA_FLAG_MEM_TO_DEV (1U<<0) #define DMA_FLAG_DEV_TO_MEM (0U<<0) diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_sf.h b/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_sf.h index 9042de20cd8f..83699a2d9547 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_sf.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_sf.h @@ -99,54 +99,60 @@ extern "C" { */ #define HTB_LOG_SFIDLIST \ /*id, gid, sym name, string, # arguments */ \ -X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \ +X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0, 0) \ \ -X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \ -X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \ -X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \ -X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \ -X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \ -X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ -X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ -X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \ -X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \ -X( 10, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_SCALE, "Text not used", 6)\ +X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1, 0) \ +X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1, 0) \ +X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1, 0) \ +X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1, 0) \ +X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1, 0) \ +X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5, 0) \ +X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5, 0) \ +X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1, 0) \ +X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1, 0) \ +X( 10, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_SCALE, "Text not used", 6, 0)\ \ -X( 
1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \ -X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \ -X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \ -X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ -X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ -X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \ +X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7, 0) \ +X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4, 0) \ +X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4, 0) \ +X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4, 0) \ +X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4, 0) \ +X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2, 0) \ \ -X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \ -X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \ -X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \ -X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \ -X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \ -X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \ +X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1, 0) \ +X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1, 0) \ +X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4, 0) \ +X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3, 0) \ +X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1, 0) \ +X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1, 0) \ \ -X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx %08X @ %d\n", 2) \ -X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx %08X @ %d\n", 2) \ -X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM_DEPRECATED,"Kick CDM: FWCtx %08X @ %d\n", 2) \ -X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \ -X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \ -X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D_DEPRECATED, "Kick 2D: FWCtx %08X @ %d\n", 2) \ -X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \ -X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) \ -X( 9, HTB_GROUP_MAIN, HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \ -X(10, HTB_GROUP_MAIN, HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \ -X(11, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, 
"Kick TA: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -X(12, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -X(13, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -X(14, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ +X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx %08X @ %d\n", 2, 0) \ +X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx %08X @ %d\n", 2, 0) \ +X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM_DEPRECATED,"Kick CDM: FWCtx %08X @ %d\n", 2, 0) \ +X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2, 0) \ +X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2, 0) \ +X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D_DEPRECATED, "Kick 2D: FWCtx %08X @ %d\n", 2, 0) \ +X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0, 0) \ +X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1, 0) \ +X( 9, HTB_GROUP_MAIN, HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3, 0) \ +X(10, HTB_GROUP_MAIN, HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3, 0) \ +X(11, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5, 0) \ +X(12, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5, 0) \ +X(13, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5, 0) \ +X(14, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5, 0) \ +X(15, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_ERROR, "Error: (%u) in file: %s:%u\n", 3, 2) \ +X(16, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_MSGLVL_ERROR, "Log error in file: %s:%u\n", 2, 1) \ +X(17, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_COND_ERROR_T, "Error: (%u) Conditional is unexpectedly true in file: %s:%u\n", 3, 2) \ +X(18, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_COND_ERROR_F, "Error: (%u) Conditional is unexpectedly false in file: %s:%u\n", 3, 2) \ +X(19, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_WARNING, "Warning: (%u) in file: %s @ line: %u\n", 3, 2) \ +X(20, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_MSGLVL_WARN, "Log warning in file: %s:%u\n", 2, 1) \ \ -X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \ -X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \ +X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3, 0) \ +X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4, 0) \ \ -X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \ +X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2, 0) \ \ -X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15) +X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15, 0) @@ -184,20 +190,22 @@ typedef enum _HTB_LOG_TYPE { * 0-11: id number * 12-15: group id number * 16-19: number of parameters - * 20-27: unused + * 20-24: If the format contains a string argument, argument + * index is passed. 
If there isn't a string argument, 0 is passed + * 25-27: unused * 28-30: active: identify SF packet, otherwise regular int32 * 31: reserved for signed/unsigned compatibility * * The following macro assigns those values to the enum generated SF ids list. */ #define HTB_LOG_IDMARKER (0x70000000) -#define HTB_LOG_CREATESFID(a,b,e) (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER) +#define HTB_LOG_CREATESFID(a,b,e,f) (((a) | (b << 12) | (e << 16)) | (f << 20) | HTB_LOG_IDMARKER) #define HTB_LOG_IDMASK (0xFFF00000) #define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER ) typedef enum HTB_LOG_SFids { -#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e), +#define X(a, b, c, d, e, f) c = HTB_LOG_CREATESFID(a,b,e,f), HTB_LOG_SFIDLIST #undef X } HTB_LOG_SFids; @@ -210,6 +218,9 @@ typedef enum HTB_LOG_SFids { * (enum generated) id requires. */ #define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf) +/* Returns the index of the argument that contains a string, 1 indexed so 0 means no string argument. + */ +#define HTB_SF_STRNUM(x) (((x)>>20) & 0xf) /* Returns the id of given enum */ #define HTB_SF_ID(x) (x & 0xfff) @@ -218,10 +229,30 @@ typedef enum HTB_LOG_SFids { #define HTB_LOG_HEADER_SIZE 5 #define HTB_LOG_MAX_PARAMS 15 +/* HTB supports string lengths of up to 23 characters + \0 */ +#define HTB_LOG_STR_ARG_NUM_WORDS (6) +#define HTB_LOG_STR_ARG_SIZE (HTB_LOG_STR_ARG_NUM_WORDS * sizeof(IMG_UINT32)) + #if defined(__cplusplus) } #endif +#if defined(__GNUC__) +#if GCC_VERSION_AT_LEAST(12, 1) +#define HTB_FILE_NAME __FILE_NAME__ +#else +#define HTB_FILE_NAME "n/a" +#endif +#elif defined(__clang__) +#if CLANG_VERSION_AT_LEAST(10) +#define HTB_FILE_NAME __FILE_NAME__ +#else +#define HTB_FILE_NAME "n/a" +#endif +#else +#define HTB_FILE_NAME "n/a" +#endif + /* Defines for handling MARK_SCALE special case */ #define HTB_GID_CTRL 1 #define HTB_ID_MARK_SCALE 10 diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_types.h b/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_types.h index a404bf8b7b10..fd061eb428d7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_types.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/htbuffer_types.h @@ -55,10 +55,14 @@ extern "C" { #include "htbuffer_sf.h" /* The group flags array of ints large enough to store all the group flags */ +#if defined(PVRSRV_ENABLE_HTB) #define HTB_FLAG_NUM_EL (((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1) extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL]; #define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF))) +#else +#define HTB_GROUP_ENABLED(SF) IMG_FALSE +#endif /*************************************************************************/ /*! Host Trace Buffer operation mode diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/info_page_client.h b/drivers/gpu/drm/img/img-volcanic/services/include/info_page_client.h index 9df2461b55fb..56d394dd99a3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/info_page_client.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/info_page_client.h @@ -65,9 +65,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
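The new 6-argument X() entries above pack a string-argument index into each SF id alongside the id, group and parameter count; the sketch below shows how a packed id decodes with the accessor macros defined in htbuffer_sf.h. It is illustrative only and assumes the headers are on the include path.

/* Illustrative sketch: decoding a packed SF id. HTB_SF_MAIN_DBG_ERROR is
 * generated from X(15, HTB_GROUP_MAIN, ..., 3, 2): id 15, three parameters,
 * string argument index 2 (1-indexed; 0 means no string argument). */
#include "img_types.h"
#include "htbuffer_sf.h"

static void ExampleDecodeSF(void)
{
	IMG_UINT32 ui32SF     = (IMG_UINT32) HTB_SF_MAIN_DBG_ERROR;
	IMG_UINT32 ui32Id     = HTB_SF_ID(ui32SF);       /* -> 15 */
	IMG_UINT32 ui32Params = HTB_SF_PARAMNUM(ui32SF); /* -> 3  */
	IMG_UINT32 ui32StrIdx = HTB_SF_STRNUM(ui32SF);   /* -> 2 (the "%s" is the 2nd argument) */

	(void)ui32Id;
	(void)ui32Params;
	(void)ui32StrIdx;
}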
static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection) { #if defined(__KERNEL__) + PVR_UNREFERENCED_PARAMETER(hDevConnection); + return (PVRSRVGetPVRSRVData())->pui32InfoPage; #else - return hDevConnection->pui32InfoPage; + return hDevConnection->pui32InfoPage; #endif } diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/info_page_defs.h b/drivers/gpu/drm/img/img-volcanic/services/include/info_page_defs.h index d3bc1538a0c7..949afcfd869d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/info_page_defs.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/info_page_defs.h @@ -3,8 +3,8 @@ @Title Kernel/User mode general purpose shared memory. @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved @Description General purpose shared memory (i.e. information page) mapped by - kernel space driver and user space clients. All information page - entries are sizeof(IMG_UINT32) on both 32/64-bit environments. + kernel space driver and user space clients. All information page + entries are sizeof(IMG_UINT32) on both 32/64-bit environments. @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -46,46 +46,88 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef INFO_PAGE_DEFS_H #define INFO_PAGE_DEFS_H +/* Info page is divided in "blocks" of size INFO_PAGE_CHUNK_SIZE. Each block + * should start with the INFO_PAGE_[NAME]_BLOCK_START macro which takes the + * value of previous block (except for the first block which starts from 0). + * + * Each last value of the block (INFO_PAGE_[NAME]_BLOCK_END) should be unused + * within that block since it's a first value of the next block. This value + * should be a multiple of INFO_PAGE_CHUNK_SIZE. + * + * Care must be taken to not go over allowed number of elements in each block + * which is marked with the INFO_PAGE_[NAME]_BLOCK_END macro. + * + * Blocks consist of entries that are defined with the INFO_PAGE_ENTRY() macro. + * Each entry must define a unique index within the block and as mentioned + * can't go over the INFO_PAGE_[NAME]_BLOCK_END limit. + * + * Always add blocks to the end of the existing list and update + * INFO_PAGE_TOTAL_SIZE after. + * + * See current usage of the Info Page below for examples. 
+ */ + +#define INFO_PAGE_CHUNK_SIZE 8 +#define INFO_PAGE_BLOCK_END(start,size) ((start) + (size) * INFO_PAGE_CHUNK_SIZE) +#define INFO_PAGE_ENTRY(start,index) ((start) + (index)) +#define INFO_PAGE_SIZE_IN_BYTES(end) ((end) * sizeof(IMG_UINT32)) + +#define INFO_PAGE_CACHEOP_BLOCK_START 0 +#define INFO_PAGE_CACHEOP_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_CACHEOP_BLOCK_START, 1) +#define INFO_PAGE_HWPERF_BLOCK_START INFO_PAGE_CACHEOP_BLOCK_END +#define INFO_PAGE_HWPERF_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_HWPERF_BLOCK_START, 1) +#define INFO_PAGE_TIMEOUT_BLOCK_START INFO_PAGE_HWPERF_BLOCK_END +#define INFO_PAGE_TIMEOUT_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_TIMEOUT_BLOCK_START, 2) +#define INFO_PAGE_BRIDGE_BLOCK_START INFO_PAGE_TIMEOUT_BLOCK_END +#define INFO_PAGE_BRIDGE_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_BRIDGE_BLOCK_START, 1) +#define INFO_PAGE_DEBUG_BLOCK_START INFO_PAGE_BRIDGE_BLOCK_END +#define INFO_PAGE_DEBUG_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_DEBUG_BLOCK_START, 1) +#define INFO_PAGE_DEVMEM_BLOCK_START INFO_PAGE_DEBUG_BLOCK_END +#define INFO_PAGE_DEVMEM_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_DEVMEM_BLOCK_START, 1) + +/* IMPORTANT: Make sure this always uses the last INFO_PAGE_[NAME]_BLOCK_END definition.*/ +#define INFO_PAGE_TOTAL_SIZE INFO_PAGE_SIZE_IN_BYTES(INFO_PAGE_DEVMEM_BLOCK_END) /* CacheOp information page entries */ -#define CACHEOP_INFO_IDX_START 0x00 -#define CACHEOP_INFO_UMKMTHRESHLD (CACHEOP_INFO_IDX_START + 1) /*!< UM=>KM routing threshold in bytes */ -#define CACHEOP_INFO_KMDFTHRESHLD (CACHEOP_INFO_IDX_START + 2) /*!< KM/DF threshold in bytes */ -#define CACHEOP_INFO_LINESIZE (CACHEOP_INFO_IDX_START + 3) /*!< CPU data cache line size */ -#define CACHEOP_INFO_PGSIZE (CACHEOP_INFO_IDX_START + 4) /*!< CPU MMU page size */ -#define CACHEOP_INFO_IDX_END (CACHEOP_INFO_IDX_START + 5) + +#define CACHEOP_INFO_UMKMTHRESHLD INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 0) /*!< UM=>KM routing threshold in bytes */ +#define CACHEOP_INFO_KMDFTHRESHLD INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 1) /*!< KM/DF threshold in bytes */ +#define CACHEOP_INFO_LINESIZE INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 2) /*!< CPU data cache line size */ +#define CACHEOP_INFO_PGSIZE INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 3) /*!< CPU MMU page size */ /* HWPerf information page entries */ -#define HWPERF_INFO_IDX_START (CACHEOP_INFO_IDX_END) -#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0) -#define HWPERF_FILTER_EGL_IDX (HWPERF_INFO_IDX_START + 1) -#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2) -#define HWPERF_FILTER_OPENCL_IDX (HWPERF_INFO_IDX_START + 3) -#define HWPERF_FILTER_VULKAN_IDX (HWPERF_INFO_IDX_START + 4) -#define HWPERF_FILTER_OPENGL_IDX (HWPERF_INFO_IDX_START + 5) -#define HWPERF_INFO_IDX_END (HWPERF_INFO_IDX_START + 6) - -/* timeout values */ -#define TIMEOUT_INFO_IDX_START (HWPERF_INFO_IDX_END) -#define TIMEOUT_INFO_VALUE_RETRIES (TIMEOUT_INFO_IDX_START + 0) -#define TIMEOUT_INFO_VALUE_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 1) -#define TIMEOUT_INFO_CONDITION_RETRIES (TIMEOUT_INFO_IDX_START + 2) -#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 3) -#define TIMEOUT_INFO_TASK_QUEUE_RETRIES (TIMEOUT_INFO_IDX_START + 4) -#define TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 5) -#define TIMEOUT_INFO_IDX_END (TIMEOUT_INFO_IDX_START + 6) + +#define HWPERF_FILTER_SERVICES_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 0) +#define HWPERF_FILTER_EGL_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 
1) +#define HWPERF_FILTER_OPENGLES_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 2) +#define HWPERF_FILTER_OPENCL_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 3) +#define HWPERF_FILTER_VULKAN_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 4) +#define HWPERF_FILTER_OPENGL_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 5) + +/* Timeout values */ + +#define TIMEOUT_INFO_VALUE_RETRIES INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 0) +#define TIMEOUT_INFO_VALUE_TIMEOUT_MS INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 1) +#define TIMEOUT_INFO_CONDITION_RETRIES INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 2) +#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 3) +#define TIMEOUT_INFO_TASK_QUEUE_RETRIES INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 4) +#define TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 5) /* Bridge Info */ -#define BRIDGE_INFO_IDX_START (TIMEOUT_INFO_IDX_END) -#define BRIDGE_INFO_RGX_BRIDGES (BRIDGE_INFO_IDX_START + 0) -#define BRIDGE_INFO_PVR_BRIDGES (BRIDGE_INFO_IDX_START + 1) -#define BRIDGE_INFO_IDX_END (BRIDGE_INFO_IDX_START + 2) + +#define BRIDGE_INFO_RGX_BRIDGES INFO_PAGE_ENTRY(INFO_PAGE_BRIDGE_BLOCK_START, 0) +#define BRIDGE_INFO_PVR_BRIDGES INFO_PAGE_ENTRY(INFO_PAGE_BRIDGE_BLOCK_START, 1) /* Debug features */ -#define DEBUG_FEATURE_FLAGS (BRIDGE_INFO_IDX_END) -#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED 0x1 -#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED 0x2 -#define DEBUG_FEATURE_FLAGS_IDX_END (DEBUG_FEATURE_FLAGS + 1) +#define DEBUG_FEATURE_FLAGS INFO_PAGE_ENTRY(INFO_PAGE_DEBUG_BLOCK_START, 0) + +#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED 0x1 /* flag - not part of info page */ +#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED 0x2 /* flag - not part of info page */ + +/* Device memory related information */ + +/* This value is 64-bits wide, next value should have index larger by 2 */ +#define DEVMEM_INFO_PHYS_BUF_MAX_SIZE INFO_PAGE_ENTRY(INFO_PAGE_DEVMEM_BLOCK_START, 0) #endif /* INFO_PAGE_DEFS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/km_apphint_defs_common.h b/drivers/gpu/drm/img/img-volcanic/services/include/km_apphint_defs_common.h index 4c56921443f7..ad38c76793d4 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/km_apphint_defs_common.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/km_apphint_defs_common.h @@ -56,6 +56,8 @@ X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE, ALWAYS ) \ X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE, ALWAYS ) \ X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE, ALWAYS ) \ +X(DevmemHistoryBufSizeLog2, UINT32, ALWAYS, PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) \ +X(DevmemHistoryMaxEntries, UINT32, ALWAYS, PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES, NO_PARAM_TABLE, ALWAYS ) /* ******************************************************************************* @@ -72,13 +74,15 @@ X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_ X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE, ALWAYS ) \ \ X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ -X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ 
+X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE, ALWAYS ) \ \ X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE, ALWAYS ) \ \ -X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE, ALWAYS ) \ +X(DriverMode, STRING, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE, ALWAYS ) \ +X(AutoVzGPUPowerdown, BOOL, ALWAYS, PVRSRV_APPHINT_AUTOVZGPUPOWERDOWN, NO_PARAM_TABLE, ALWAYS ) \ +X(GuestFWHeapStride, UINT64, ALWAYS, PVRSRV_APPHINT_GUESTFWHEAPSTRIDE, NO_PARAM_TABLE, ALWAYS ) \ \ X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE, ALWAYS ) \ \ @@ -86,7 +90,6 @@ X(HWPerfFWBufSizeInKB, UINT32, PDUMP, PVRSRV_APPHINT_ X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE, ALWAYS ) \ X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE, ALWAYS ) \ \ -X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE, ALWAYS ) \ X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE, ALWAYS ) \ X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE, ALWAYS ) \ X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE, ALWAYS ) \ @@ -96,10 +99,11 @@ X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_ X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE, ALWAYS ) \ X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, NO_PARAM_TABLE, ALWAYS ) \ \ -X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0x7, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0x7, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0x7, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0x7, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUTrilinearFracMaskRDM, UINT32, VALIDATION, 0x7, NO_PARAM_TABLE, ALWAYS ) \ X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE, ALWAYS ) \ X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE, ALWAYS ) \ \ @@ -116,7 +120,26 @@ X(RiscvDmiTest, BOOL, VALIDATION, PVRSRV_APPHINT_ X(DevMemFWHeapPolicy, UINT32, ALWAYS, PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY, NO_PARAM_TABLE, ALWAYS ) \ \ X(EnableAPMAll, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE, ALWAYS ) \ -X(KernelCCBSizeLog2, UINT32, VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) +X(KernelCCBSizeLog2, UINT32, VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(SyncCheckpointPoolMaxLog2, UINT32, ALWAYS, PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2, NO_PARAM_TABLE, ALWAYS ) \ +X(SyncCheckpointPoolInitLog2, UINT32, ALWAYS, PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2, NO_PARAM_TABLE, ALWAYS ) \ +X(PhysHeapMinMemOnConnection, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION, NO_PARAM_TABLE, ALWAYS ) \ +\ 
+X(RestrictGpuLocalPhysHeapSizeMB, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ +X(PhysHeapHybridDefault2CpuLocal, BOOL, ALWAYS, 0, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(DebugDumpFWTLogType, UINT32, ALWAYS, PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(EnableIdleCycleStealing, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEIDLECYCLESTEALING, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(FaultDetectionTimeInterval, UINT32, VALIDATION, PVRSRV_APPHINT_FAULTDETECTIONTIMEINTERVAL_USEC, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(ICSTimeIntervalThreshold, UINT32, VALIDATION, PVRSRV_APPHINT_ICSTIMEINTERVAL_THRESHOLD, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(ICSTestModeOn, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(FaultInjection, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) /* ******************************************************************************* @@ -149,7 +172,7 @@ X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_ X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl, ALWAYS ) \ X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl, ALWAYS ) \ X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE, ALWAYS ) \ -X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl, ALWAYS ) \ +X(SecondaryOSClockSource, UINT32List, ALWAYS, PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE, timecorr_clk_tbl, ALWAYS ) \ X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE, ALWAYS ) \ /* Device host config */ \ X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE, ALWAYS ) \ @@ -217,13 +240,8 @@ X(NEVER) #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE #define apphint_modparam_class_PDUMP(a, b, c) #endif -#if defined(SUPPORT_VALIDATION) - #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE - #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c) -#else #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE #define apphint_modparam_class_VALIDATION(a, b, c) -#endif #if defined(SUPPORT_GPUVIRT_VALIDATION) #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c) @@ -253,7 +271,7 @@ X(NEVER) #else #define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE #endif -#if defined(DEBUG) || defined(SUPPORT_VALIDATION) +#if defined(DEBUG) #define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_TRUE #else #define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_FALSE diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/physheap.h b/drivers/gpu/drm/img/img-volcanic/services/include/physheap.h index 13b60128cc92..9cca0dd25286 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/physheap.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/physheap.h @@ -49,13 +49,39 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
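As a worked example of the INFO_PAGE block macros introduced in info_page_defs.h above: CACHEOP occupies slots [0,8), HWPERF [8,16), TIMEOUT [16,32) (two chunks), BRIDGE [32,40), DEBUG [40,48) and DEVMEM [48,56), so INFO_PAGE_TOTAL_SIZE is 56 * sizeof(IMG_UINT32) = 224 bytes and, for instance, HWPERF_FILTER_VULKAN_IDX resolves to slot 12. A compile-time sketch, assuming static_assert is available as it is used elsewhere in these headers:

#include "info_page_defs.h"

static_assert(INFO_PAGE_DEVMEM_BLOCK_END == 56, "unexpected info page block layout");
static_assert(HWPERF_FILTER_VULKAN_IDX   == 12, "unexpected HWPERF filter slot");
static_assert(TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS == 21, "unexpected timeout slot");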
#include "pmr_impl.h" #include "physheap_config.h" #include "pvrsrv_device.h" +#include "ra.h" #ifndef PHYSHEAP_H #define PHYSHEAP_H +#define B2KB(x) ((x) >> 10) +#define B2MB(x) ((x) >> 20) + +static inline IMG_UINT64 KB2B(IMG_UINT64 ui64Kilobytes) { return ui64Kilobytes << 10; } +static inline IMG_UINT64 MB2B(IMG_UINT64 ui64Megabytes) { return ui64Megabytes << 20; } + typedef struct _PHYS_HEAP_ PHYS_HEAP; #define INVALID_PHYS_HEAP 0xDEADDEAD +/* PMB (Physical Memory Block) */ +typedef struct _PMB_ PMB; + +typedef IMG_UINT32 PHYS_HEAP_POLICY; + +/* Heap has default allocation policy and does not require + * any additional OS Functionality. Physically contiguous + * allocations are required for this physheap. + */ +#define PHYS_HEAP_POLICY_DEFAULT (0U) + +/* + * Heap has allocation strategy that may produce non + * physically contiguous allocations, additional OS functionality + * is required to map these allocations into the kernel. + */ +#define PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG (1U) +#define PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG_MASK (1U) + struct _CONNECTION_DATA_; typedef struct _PG_HANDLE_ @@ -135,6 +161,29 @@ typedef IMG_UINT32 (*PFN_GET_PAGE_SHIFT)(void); */ /**************************************************************************/ typedef void (*PFN_GET_MEM_STATS)(PHEAP_IMPL_DATA, IMG_UINT64 *, IMG_UINT64 *); +/*************************************************************************/ /*! +@Function Callback function PFN_GET_HEAP_STATS_STR_ITER +@Description Get string of heap memory spans constituting the heap. This + function can be iterated on to print sequential lines of the + heap data. Iterate until IMG_FALSE is returned +@Input PHEAP_IMPL_DATA Pointer to implementation data. +@InOut IMG_CHAR Pointer to string buffer to be populated + with sequential heap data. +@Input IMG_UINT32 Size of the string buffer. +@InOut void** Iter handle. +@Return IMG_BOOL +*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_GET_HEAP_SPANS_STR_ITER)(PHEAP_IMPL_DATA, IMG_CHAR *, IMG_UINT32, void **); + +/*************************************************************************/ /*! +@Function Callback function PFN_GET_HEAP_DLM_BACKING +@Description Extract reference to DLM heap backing the current IMA heap. +@Input PHEAP_IMPL_DATA Pointer to implementation data. +@InOut PHYS_HEAP** Pointer to DLM backing heap. +@Return void +*/ /**************************************************************************/ +typedef void (*PFN_GET_HEAP_DLM_BACKING)(PHEAP_IMPL_DATA, PHYS_HEAP **); + #if defined(SUPPORT_GPUVIRT_VALIDATION) typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC_GPV)(PHYS_HEAP *psPhysHeap, size_t uiSize, PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, @@ -190,6 +239,27 @@ typedef PVRSRV_ERROR (*PFN_CREATE_PMR)(PHYS_HEAP *psPhysHeap, PMR **ppsPMRPtr, IMG_UINT32 ui32PDumpFlags); +/*************************************************************************/ /*! +@Function Callback function PFN_CREATE_PMB +@Description Create a PMB physical allocation and back with Card memory + on creation, if required. The card memory comes + directly from the DLM Phys Heap's associated pool of memory. +@Input psPhysHeap Pointer to Phys Heap to create the PMB on, + physheap should be DLM type. +@Input uiSize Allocation size. +@Input pszAnnotation Annotation. +@Output ppsPMBPtr Pointer to PMB created. +@Output puiBase Out pointer to RA Base of PMB. 
+@Output puiSize Out pointer to size of PMB +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_CREATE_PMB)(PHYS_HEAP *psPhysHeap, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszAnnotation, + PMB **ppsPMBPtr, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiSize); + /*! Implementation specific function table */ typedef struct PHEAP_IMPL_FUNCS_TAG { @@ -198,8 +268,11 @@ typedef struct PHEAP_IMPL_FUNCS_TAG PFN_GET_CPU_PADDR pfnGetCPUPAddr; PFN_GET_SIZE pfnGetSize; PFN_GET_PAGE_SHIFT pfnGetPageShift; - PFN_GET_MEM_STATS pfnGetPMRFactoryMemStats; + PFN_GET_MEM_STATS pfnGetFactoryMemStats; + PFN_GET_HEAP_SPANS_STR_ITER pfnGetHeapSpansStringIter; + PFN_GET_HEAP_DLM_BACKING pfnGetHeapDLMBacking; PFN_CREATE_PMR pfnCreatePMR; + PFN_CREATE_PMB pfnCreatePMB; #if defined(SUPPORT_GPUVIRT_VALIDATION) PFN_PAGES_ALLOC_GPV pfnPagesAllocGPV; #endif @@ -233,7 +306,7 @@ void PhysHeapDeInitDeviceHeaps(PPVRSRV_DEVICE_NODE psDeviceNode); on heap type. @Input psDevNode Pointer to device node struct. @Input psConfig Heap configuration. -@Output ppsPhysHeap Pointer to the created heap. +@Output ppsPhysHeap Optional pointer to the created heap. Can be NULL @Return PVRSRV_ERROR PVRSRV_OK or error code */ /**************************************************************************/ PVRSRV_ERROR @@ -247,15 +320,16 @@ PhysHeapCreateHeapFromConfig(PPVRSRV_DEVICE_NODE psDevNode, Destroy with PhysHeapDestroy when no longer required. @Input psDevNode Pointer to device node struct @Input psConfig Heap configuration. +@Input uiPolicy Phys heap allocation policy. @Input pvImplData Implementation specific data. Can be NULL. @Input psImplFuncs Implementation specific function table. Must be a valid pointer. -@Output ppsPhysHeap Pointer to the created heap. Must be a valid - pointer. +@Output ppsPhysHeap Optional pointer to the created heap. Can be NULL @Return PVRSRV_ERROR PVRSRV_OK or error code */ /**************************************************************************/ PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP_POLICY uiPolicy, PHEAP_IMPL_DATA pvImplData, PHEAP_IMPL_FUNCS *psImplFuncs, PHYS_HEAP **ppsPhysHeap); @@ -273,28 +347,16 @@ void PhysHeapDestroy(PHYS_HEAP *psPhysHeap); PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap); /*************************************************************************/ /*! -@Function PhysHeapAcquireByUsage -@Description Acquire PhysHeap by usage flag. -@Input ui32UsageFlag PhysHeap usage flag -@Input psDevNode Pointer to device node struct -@Output ppsPhysHeap PhysHeap if found. -@Return PVRSRV_ERROR PVRSRV_OK or error code -*/ /**************************************************************************/ -PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag, - PPVRSRV_DEVICE_NODE psDevNode, - PHYS_HEAP **ppsPhysHeap); - -/*************************************************************************/ /*! -@Function PhysHeapAcquireByDevPhysHeap +@Function PhysHeapAcquireByID @Description Acquire PhysHeap by DevPhysHeap. @Input eDevPhysHeap Device Phys Heap. @Input psDevNode Pointer to device node struct @Output ppsPhysHeap PhysHeap if found. 
@Return PVRSRV_ERROR PVRSRV_OK or error code */ /**************************************************************************/ -PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap, - PPVRSRV_DEVICE_NODE psDevNode, - PHYS_HEAP **ppsPhysHeap); +PVRSRV_ERROR PhysHeapAcquireByID(PVRSRV_PHYS_HEAP eDevPhysHeap, + PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP **ppsPhysHeap); void PhysHeapRelease(PHYS_HEAP *psPhysHeap); @@ -309,6 +371,14 @@ PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap); PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap); +/*************************************************************************/ /*! +@Function PhysHeapGetPolicy +@Description Get phys heap allocation policy flags. +@Input psPhysHeap Pointer to physical heap. +@Return PHYS_HEAP_POLICY Phys heap policy flags. +*/ /**************************************************************************/ +PHYS_HEAP_POLICY PhysHeapGetPolicy(PHYS_HEAP *psPhysHeap); + /*************************************************************************/ /*! @Function PhysHeapGetFlags @Description Get phys heap usage flags. @@ -319,6 +389,14 @@ PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap); IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode); +#if defined(SUPPORT_STATIC_IPA) +IMG_UINT32 PhysHeapGetIPAValue(PHYS_HEAP *psPhysHeap); + +IMG_UINT32 PhysHeapGetIPAMask(PHYS_HEAP *psPhysHeap); + +IMG_UINT32 PhysHeapGetIPAShift(PHYS_HEAP *psPhysHeap); +#endif + PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, IMG_CPU_PHYADDR *psCpuPAddr); @@ -326,16 +404,6 @@ PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, IMG_UINT64 *puiSize); -/*************************************************************************/ /*! -@Function PVRSRVGetDevicePhysHeapCount -@Description Get the physical heap count supported by the device. -@Input psDevNode Device node, the heap count is requested for. -@Output pui32PhysHeapCount Buffer that holds the heap count -@Return None -*/ /**************************************************************************/ -void PVRSRVGetDevicePhysHeapCount(PPVRSRV_DEVICE_NODE psDevNode, - IMG_UINT32 *pui32PhysHeapCount); - /*************************************************************************/ /*! @Function PhysHeapGetMemInfo @Description Get phys heap memory statistics for a given physical heap ID. @@ -347,23 +415,9 @@ void PVRSRVGetDevicePhysHeapCount(PPVRSRV_DEVICE_NODE psDevNode, */ /**************************************************************************/ PVRSRV_ERROR PhysHeapGetMemInfo(PPVRSRV_DEVICE_NODE psDevNode, - IMG_UINT32 ui32PhysHeapCount, - PVRSRV_PHYS_HEAP *paePhysHeapID, - PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats); - -/*************************************************************************/ /*! -@Function PhysheapGetPhysMemUsage -@Description Get memory statistics for a given physical heap. -@Input psPhysHeap Physical heap -@Output pui64TotalSize Buffer that holds the total memory size of the - given physical heap. -@Output pui64FreeSize Buffer that holds the free memory available in - a given physical heap. 
-@Return none -*/ /**************************************************************************/ -void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap, - IMG_UINT64 *pui64TotalSize, - IMG_UINT64 *pui64FreeSize); + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP *paePhysHeapID, + PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats); PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, IMG_DEV_PHYADDR *psDevPAddr); @@ -380,6 +434,8 @@ void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap); +const IMG_CHAR *PhysHeapName(PHYS_HEAP *psPhysHeap); + /*************************************************************************/ /*! @Function PhysHeapCreatePMR @Description Function calls an implementation-specific function pointer. @@ -397,10 +453,21 @@ PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, const IMG_CHAR *pszAnnotation, IMG_PID uiPid, PMR **ppsPMRPtr, - IMG_UINT32 ui32PDumpFlags); + IMG_UINT32 ui32PDumpFlags, + PVRSRV_MEMALLOCFLAGS_T *uiOutFlags); -PVRSRV_ERROR PhysHeapInit(void); -void PhysHeapDeinit(void); +/*************************************************************************/ /*! +@Function PhysHeapCreatePMB +@Description Function calls an implementation-specific function pointer. + See @Ref PFN_CREATE_PMB "PFN_CREATE_PMB" for details. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapCreatePMB(PHYS_HEAP *psPhysHeap, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszAnnotation, + PMB **ppsPMRPtr, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiSize); /*************************************************************************/ /*! @Function PhysHeapDeviceNode @@ -411,12 +478,12 @@ void PhysHeapDeinit(void); PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap); /*************************************************************************/ /*! -@Function PhysHeapPVRLayerAcquire -@Description Is phys heap to be acquired in PVR layer? +@Function PhysHeapInitByPVRLayer +@Description Is phys heap to be initialised in PVR layer? @Input ePhysHeap phys heap @Return IMG_BOOL return IMG_TRUE if yes */ /**************************************************************************/ -IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap); +IMG_BOOL PhysHeapInitByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap); /*************************************************************************/ /*! @Function PhysHeapUserModeAlloc @@ -466,4 +533,32 @@ PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap, */ /**************************************************************************/ IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap); +/*************************************************************************/ /*! +@Function PhysHeapFreeMemCheck +@Description Check a physheap has the required amount of free memory. + +@Input psPhysHeap Pointer to physical heap. +@Input ui64MinRequiredMem The minimum free memory for success (bytes). +@Output pui64FreeMem The free memory in the physical heap (bytes). + +@Return PVRSRV_ERROR If successful PVRSRV_OK else a PVRSRV_ERROR code. +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapFreeMemCheck(PHYS_HEAP *psPhysHeap, + IMG_UINT64 ui64MinRequiredMem, + IMG_UINT64 *pui64FreeMem); + + +#if defined(PVRSRV_ENABLE_XD_MEM) +/*************************************************************************/ /*! 
+@Function PhysHeapSpasWithDevice +@Description Check to see if the device can "see" the SPAS region of the heap. + Or in other words, check if any physheap exists + in the same SPAS region as `psFromPhysHeap` and is tied to `psToDevNode`. +@Input psFromPhysHeap The physheap whose SPAS region to match. +@Input psToDevNode The device node the other physheap must be tied to. +@Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapSpasWithDevice(PHYS_HEAP* psFromPhysHeap, PPVRSRV_DEVICE_NODE psToDevNode); +#endif + #endif /* PHYSHEAP_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/physheap_config.h b/drivers/gpu/drm/img/img-volcanic/services/include/physheap_config.h index 9d4d786dd078..53d4c909331f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/physheap_config.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/physheap_config.h @@ -47,23 +47,54 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PHYSHEAP_CONFIG_H #include "img_types.h" +#include "pvr_debug.h" +#include "lock_types.h" +#include "dllist.h" +#include "pvrsrv_error.h" #include "pvrsrv_memallocflags.h" #include "pvrsrv_memalloc_physheap.h" typedef IMG_UINT32 PHYS_HEAP_USAGE_FLAGS; -#define PHYS_HEAP_USAGE_GPU_LOCAL (1<uConfig.sLMA) == sizeof(((PHYS_HEAP_CONFIG*)0)->uConfig.sDMA), + "PHYS_HEAP_CONFIG sizeof sLMA != sDMA, Type has potentially changed"); + +static INLINE IMG_UINT64 PhysHeapConfigGetSize(PHYS_HEAP_CONFIG *psConfig) +{ + switch (psConfig->eType) + { + case PHYS_HEAP_TYPE_LMA: + return psConfig->uConfig.sLMA.uiSize; + case PHYS_HEAP_TYPE_IMA: + PVR_ASSERT(!"IMA Config has no size member"); + return 0; +#if defined(__KERNEL__) + case PHYS_HEAP_TYPE_DMA: + return psConfig->uConfig.sDMA.uiSize; +#endif + case PHYS_HEAP_TYPE_DLM: + return psConfig->uConfig.sDLM.uiSize; + case PHYS_HEAP_TYPE_UMA: + PVR_ASSERT(!"UMA Config has no size member"); + return 0; + default: + PVR_ASSERT(!"Not Implemented for Config Type"); + return 0; + } +} + +static INLINE IMG_CPU_PHYADDR PhysHeapConfigGetStartAddr(PHYS_HEAP_CONFIG *psConfig) +{ + IMG_CPU_PHYADDR sUnsupportedPhyAddr = {0}; + switch (psConfig->eType) + { + case PHYS_HEAP_TYPE_LMA: + return psConfig->uConfig.sLMA.sStartAddr; + case PHYS_HEAP_TYPE_IMA: + PVR_ASSERT(!"IMA Config has no StartAddr member"); + return sUnsupportedPhyAddr; +#if defined(__KERNEL__) + case PHYS_HEAP_TYPE_DMA: + return psConfig->uConfig.sDMA.sStartAddr; +#endif + case PHYS_HEAP_TYPE_DLM: + return psConfig->uConfig.sDLM.sStartAddr; + case PHYS_HEAP_TYPE_UMA: + PVR_ASSERT(!"UMA Config has no StartAddr member"); + return sUnsupportedPhyAddr; + default: + PVR_ASSERT(!"Not Implemented for Config Type"); + return sUnsupportedPhyAddr; + } +} + +static INLINE IMG_DEV_PHYADDR PhysHeapConfigGetCardBase(PHYS_HEAP_CONFIG *psConfig) +{ + IMG_DEV_PHYADDR sUnsupportedPhyAddr = {0}; + switch (psConfig->eType) + { + case PHYS_HEAP_TYPE_LMA: + return psConfig->uConfig.sLMA.sCardBase; + case PHYS_HEAP_TYPE_IMA: + PVR_ASSERT(!"IMA Config has no CardBase member"); + return sUnsupportedPhyAddr; +#if defined(__KERNEL__) + case PHYS_HEAP_TYPE_DMA: + return psConfig->uConfig.sDMA.sCardBase; +#endif + case PHYS_HEAP_TYPE_UMA: + return psConfig->uConfig.sUMA.sCardBase; + case PHYS_HEAP_TYPE_DLM: + return psConfig->uConfig.sDLM.sCardBase; + default: + PVR_ASSERT(!"Not Implemented for Config Type"); + return sUnsupportedPhyAddr; + } +} + +#define 
PHYS_HEAP_NAME_SIZE 24 + #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/pvr_bridge.h b/drivers/gpu/drm/img/img-volcanic/services/include/pvr_bridge.h index dc3cf769f7ee..dafc51dd57f0 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/pvr_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/pvr_bridge.h @@ -82,7 +82,7 @@ extern "C" { #if defined(SUPPORT_SECURE_EXPORT) #include "common_smm_bridge.h" #endif -#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#if defined(PVRSRV_ENABLE_HTB) #include "common_htbuffer_bridge.h" #endif #include "common_pvrtl_bridge.h" @@ -94,9 +94,6 @@ extern "C" { #include "common_validation_bridge.h" #endif -#if defined(PVR_TESTING_UTILS) -#include "common_tutils_bridge.h" -#endif #include "common_devicememhistory_bridge.h" #include "common_synctracking_bridge.h" @@ -272,13 +269,8 @@ extern "C" { /* 18: TUTILS interface functions */ #define PVRSRV_BRIDGE_TUTILS 18UL -#if defined(PVR_TESTING_UTILS) -#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1) -#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST) -#else #define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0 #define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST) -#endif /* 19: DevMem history interface functions */ #define PVRSRV_BRIDGE_DEVICEMEMHISTORY 19UL @@ -287,13 +279,13 @@ extern "C" { /* 20: Host Trace Buffer interface functions */ #define PVRSRV_BRIDGE_HTBUFFER 20UL -#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#if defined(PVRSRV_ENABLE_HTB) #define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1) #define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST) -#else +#else /* !PVRSRV_ENABLE_HTB */ #define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0 #define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST) -#endif +#endif /* PVRSRV_ENABLE_HTB */ /* 21: Non-Linux Display functions */ #define PVRSRV_BRIDGE_DCPLAT 21UL @@ -390,15 +382,9 @@ static const IMG_UINT32 gui32PVRBridges = | (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST)) #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) | (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) -#endif -#if defined(SUPPORT_VALIDATION) - | (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST)) -#endif -#if defined(PVR_TESTING_UTILS) - | (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST)) #endif | (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) -#if defined(SUPPORT_HTBUFFER) +#if defined(PVRSRV_ENABLE_HTB) | (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST)) #endif #if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE) diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/rgx_bridge.h b/drivers/gpu/drm/img/img-volcanic/services/include/rgx_bridge.h index fa4ca1ff50b3..5168f6f23ea5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/rgx_bridge.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/rgx_bridge.h @@ -50,8 +50,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
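The new PHYS_HEAP_CONFIG accessors and PHYS_HEAP_POLICY flags above can be combined roughly as in the sketch below. It is illustrative only (ExampleDescribeHeap is not a driver function) and assumes physheap.h, physheap_config.h and pvr_debug.h are available.

#include "physheap.h"
#include "physheap_config.h"
#include "pvr_debug.h"

static void ExampleDescribeHeap(PHYS_HEAP_CONFIG *psConfig, PHYS_HEAP *psPhysHeap)
{
	/* Size/card base are only meaningful for config types that carry them
	 * (LMA/DMA/DLM); the accessors above assert and return zero otherwise. */
	IMG_UINT64      ui64Size  = PhysHeapConfigGetSize(psConfig);
	IMG_DEV_PHYADDR sCardBase = PhysHeapConfigGetCardBase(psConfig);

	if (PhysHeapGetPolicy(psPhysHeap) & PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG_MASK)
	{
		/* Allocations may be physically non-contiguous, so extra OS support
		 * is needed to map them into the kernel (see the policy comment above). */
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: base=0x%llx size=%llu MB",
	         PhysHeapName(psPhysHeap),
	         (unsigned long long)sCardBase.uiAddr,
	         (unsigned long long)B2MB(ui64Size)));
}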
extern "C" { #endif -#include "rgx_fwif_km.h" - #define RGXFWINITPARAMS_VERSION 1 #define RGXFWINITPARAMS_EXTENSION 128 @@ -74,7 +72,9 @@ extern "C" { #if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) #include "common_rgxregconfig_bridge.h" #endif +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) #include "common_rgxkicksync_bridge.h" +#endif #include "common_rgxtimerquery_bridge.h" #if defined(SUPPORT_RGXRAY_BRIDGE) #include "common_rgxray_bridge.h" @@ -168,9 +168,13 @@ extern "C" { /* 136: RGX kicksync interface */ #define PVRSRV_BRIDGE_RGXKICKSYNC 136UL +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) #define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1) #define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST) - +#else +#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST) +#endif /* 137: RGX TQ2 interface */ #define PVRSRV_BRIDGE_RGXTQ2 137UL #if defined(SUPPORT_FASTRENDER_DM) @@ -213,6 +217,9 @@ static const IMG_UINT32 gui32RGXBridges = | (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST)) #if defined(PDUMP) | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST)) +#endif +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) + | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST)) #endif | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST)) #if defined(SUPPORT_REGCONFIG) diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/rgx_fw_info.h b/drivers/gpu/drm/img/img-volcanic/services/include/rgx_fw_info.h index 2f012d59ba5a..fb31ac440024 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/rgx_fw_info.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/rgx_fw_info.h @@ -105,10 +105,14 @@ typedef enum * FILE_SIZE */ -#define FW_INFO_VERSION (1) +#define FW_INFO_VERSION (2) + +/* Firmware is built for open source driver and uses open source version numbering */ +#define FW_INFO_FLAGS_OPEN_SOURCE (1U) typedef struct { + /* FW_INFO_VERSION 1 */ IMG_UINT32 ui32InfoVersion; /* FW info version */ IMG_UINT32 ui32HeaderLen; /* Header length */ IMG_UINT32 ui32LayoutEntryNum; /* Number of entries in the layout table */ @@ -116,6 +120,11 @@ typedef struct IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */ IMG_UINT32 ui32FwPageSize; /* Page size of processor on which firmware executes */ IMG_UINT32 ui32Flags; /* Compatibility flags */ + + /* FW_INFO_VERSION 2 */ + IMG_UINT16 ui16PVRVersionMajor; /* DDK major version number */ + IMG_UINT16 ui16PVRVersionMinor; /* DDK minor version number */ + IMG_UINT32 ui32PVRVersionBuild; /* DDK build number */ } RGX_FW_INFO_HEADER; typedef struct diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/rgx_tq_shared.h b/drivers/gpu/drm/img/img-volcanic/services/include/rgx_tq_shared.h index dc10b6eecc91..2db19224c441 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/rgx_tq_shared.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/rgx_tq_shared.h @@ -51,8 +51,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
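The gui32RGXBridges mask built above records, one bit per group relative to PVRSRV_BRIDGE_RGX_FIRST, which RGX bridges this build dispatches. A minimal sketch of testing it (ExampleIsRGXBridgeCompiledIn is hypothetical):

#include "img_defs.h"
#include "rgx_bridge.h"

static IMG_BOOL ExampleIsRGXBridgeCompiledIn(IMG_UINT32 ui32BridgeGroup)
{
	/* e.g. PVRSRV_BRIDGE_RGXKICKSYNC only has its bit set when
	 * SUPPORT_RGXKICKSYNC_BRIDGE is defined, matching the dispatch
	 * ranges declared above. */
	return (gui32RGXBridges & (1U << (ui32BridgeGroup - PVRSRV_BRIDGE_RGX_FIRST)))
	           ? IMG_TRUE : IMG_FALSE;
}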
#define TQ_PREP_FLAGS_COMMAND_MASK (0xfU) #define TQ_PREP_FLAGS_COMMAND_SHIFT 0 #define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1U << 4) -#define TQ_PREP_FLAGS_START (1U << 5) -#define TQ_PREP_FLAGS_END (1U << 6) #define TQ_PREP_FLAGS_COMMAND_SET(m) \ ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK) diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/rgxtransfer_shader.h b/drivers/gpu/drm/img/img-volcanic/services/include/rgxtransfer_shader.h index b7b3a645e453..f261d2d04075 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/rgxtransfer_shader.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/rgxtransfer_shader.h @@ -55,15 +55,13 @@ typedef struct _RGX_SHADER_HEADER_ IMG_UINT32 ui32Version; IMG_UINT32 ui32NumFragment; IMG_UINT32 ui32SizeFragment; - IMG_UINT32 ui32NumTDMFragment; - IMG_UINT32 ui32SizeTDMFragment; IMG_UINT32 ui32SizeClientMem; } RGX_SHADER_HEADER; /* TQ shaders version is used to check compatibility between the binary TQ shaders file and the DDK. This number should be incremented if a change to the TQ shader factory breaks compatibility. */ -#define RGX_TQ_SHADERS_VERSION 1U +#define RGX_TQ_SHADERS_VERSION 2U #define RGX_TQ_SHADERS_VERSION_PACK \ (((RGX_TQ_SHADERS_VERSION & 0xFFU) << 16) | ((PVRVERSION_MAJ & 0xFFU) << 8) | ((PVRVERSION_MIN & 0xFFU) << 0)) diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/rogue/km_apphint_defs.h b/drivers/gpu/drm/img/img-volcanic/services/include/rogue/km_apphint_defs.h index d9e0b16c65b0..f2ffad0030cd 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/rogue/km_apphint_defs.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/rogue/km_apphint_defs.h @@ -81,6 +81,8 @@ X(ECCRAMErrInj, UINT32, VALIDATION, 0, X(TFBCCompressionControlGroup, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP, NO_PARAM_TABLE, ALWAYS ) \ X(TFBCCompressionControlScheme, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME, NO_PARAM_TABLE, ALWAYS ) \ X(TFBCCompressionControlYUVFormat, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ +X(TFBCCompressionControlLossyMinChannel, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ +X(TFBCVersionDowngrade, UINT32, ALWAYS, PVRSRV_APPHINT_TFBCVERSION, NO_PARAM_TABLE, ALWAYS ) \ /* ******************************************************************************* diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/rogue/rgxapi_km.h b/drivers/gpu/drm/img/img-volcanic/services/include/rogue/rgxapi_km.h index 65ba85d20b4b..7d7efd0afb5e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/rogue/rgxapi_km.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/rogue/rgxapi_km.h @@ -44,22 +44,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXAPI_KM_H #define RGXAPI_KM_H -#if defined(SUPPORT_SHARED_SLC) -/*************************************************************************/ /*! -@Function RGXInitSLC -@Description Init the SLC after a power up. It is required to call this - function if using SUPPORT_SHARED_SLC. Otherwise, it shouldn't - be called. 
- -@Input hDevHandle RGX Device Node -@Return PVRSRV_ERROR System error code -*/ /**************************************************************************/ -PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle); -#endif - #include "rgx_hwperf.h" - /****************************************************************************** * RGX HW Performance Profiling Control API(s) *****************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/sync_checkpoint_internal.h b/drivers/gpu/drm/img/img-volcanic/services/include/sync_checkpoint_internal.h index ce178474112c..2514ff3b6973 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/sync_checkpoint_internal.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/sync_checkpoint_internal.h @@ -54,7 +54,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "lock.h" #include "devicemem.h" #include "rgx_fwif_shared.h" -#include "rgx_fwif_km.h" struct SYNC_CHECKPOINT_RECORD; @@ -64,35 +63,18 @@ struct SYNC_CHECKPOINT_RECORD; typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL; -typedef struct SYNC_CHECKPOINT_CONTEXT_TAG -{ - PPVRSRV_DEVICE_NODE psDevNode; - IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the RA */ - RA_ARENA *psSubAllocRA; /*!< RA context */ - IMG_CHAR azSpanName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the span RA */ - RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ - ATOMIC_T hRefCount; /*!< Ref count for this context */ - ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */ - POS_LOCK hLock; - _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl; -#if defined(PDUMP) - DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/ - POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/ - DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/ -#endif -} _SYNC_CHECKPOINT_CONTEXT; +typedef struct SYNC_CHECKPOINT_CONTEXT_TAG _SYNC_CHECKPOINT_CONTEXT; typedef struct _SYNC_CHECKPOINT_BLOCK_ { ATOMIC_T hRefCount; /*!< Ref count for this sync block */ POS_LOCK hLock; _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Our copy of the services connection */ - PPVRSRV_DEVICE_NODE psDevNode; IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */ volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */ - IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */ + RA_BASE_T uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */ #if defined(PDUMP) DLLIST_NODE sListNode; /*!< List node for the sync chkpt blocks */ #endif @@ -102,7 +84,6 @@ typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE; typedef struct SYNC_CHECKPOINT_TAG { - //_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< pointer to the parent context of this checkpoint */ /* A sync checkpoint is assigned a unique ID, to avoid any confusion should * the same memory be re-used later for a different checkpoint */ @@ -110,18 +91,22 @@ typedef struct SYNC_CHECKPOINT_TAG ATOMIC_T hRefCount; /*!< Ref count for this sync */ ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */ - IMG_UINT64 uiSpanAddr; /*!< Span address of the 
sync */ + RA_BASE_T uiAllocatedAddr; /*!< Allocated address of the sync */ volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */ PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */ IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */ PVRSRV_TIMELINE hTimeline; /*!< Timeline on which this sync checkpoint was created */ - IMG_UINT32 ui32ValidationCheck; IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */ PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */ DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */ DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */ IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */ +#if defined(PDUMP) PDUMP_FLAGS_T ui32PDumpFlags; /*!< Pdump Capture mode to be used for POL*/ +#endif +#if defined(DEBUG) + IMG_UINT32 ui32ValidationCheck; /*!< Structure validity pattern */ +#endif } SYNC_CHECKPOINT; diff --git a/drivers/gpu/drm/img/img-volcanic/services/include/volcanic/km_apphint_defs.h b/drivers/gpu/drm/img/img-volcanic/services/include/volcanic/km_apphint_defs.h index a32c1d1a2a8a..81397088bddb 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/include/volcanic/km_apphint_defs.h +++ b/drivers/gpu/drm/img/img-volcanic/services/include/volcanic/km_apphint_defs.h @@ -81,6 +81,7 @@ X(HWValEnableSPUPowerMaskChange, BOOL, VALIDATION, PVRSRV_APPHINT_ X(HWValAvailableSPUMask, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALAVAILABLESPUMASK, NO_PARAM_TABLE, ALWAYS ) \ X(HWValAvailableRACMask, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALAVAILABLERACMASK, NO_PARAM_TABLE, ALWAYS ) \ X(EnableSPUClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESPUCLOCKGATING, NO_PARAM_TABLE, ALWAYS ) \ +X(HWValTpuF20BilPrecision, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALTPUF20BILPRECISION, NO_PARAM_TABLE, ALWAYS ) \ \ X(HWPerfDisableCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER, NO_PARAM_TABLE, ALWAYS ) \ \ @@ -96,20 +97,26 @@ X(USRMNumRegionsTDM, UINT32, VALIDATION, 0, X(UVBRMNumRegionsVDM, UINT64, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ X(UVBRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ \ -X(CDMArbitrationOverride, UINT32, ALWAYS, PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE, NO_PARAM_TABLE, ALWAYS ) \ -\ -X(DualLockstepFWProcessor, BOOL, VALIDATION, 1, NO_PARAM_TABLE, ALWAYS ) \ X(GPUStatePin, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ X(PowerDomainKickInterval, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ \ X(RCEDisableMask, UINT64, VALIDATION, PVRSRV_APPHINT_RCEDISABLEMASK, NO_PARAM_TABLE, ALWAYS ) \ X(PCGPktDropThresh, UINT32, VALIDATION, PVRSRV_APPHINT_PCGPKTDROPTHRESH, NO_PARAM_TABLE, ALWAYS ) \ +X(RaySLCMMUAutoCacheOps, UINT32, VALIDATION, PVRSRV_APPHINT_RAYSLCMMUAUTOCACHEOPS, NO_PARAM_TABLE, ALWAYS ) \ +\ +X(ClkCtrl0, UINT64, VALIDATION, PVRSRV_APPHINT_CLKCTRL0, NO_PARAM_TABLE, ALWAYS ) \ +X(ClkCtrl1, UINT64, VALIDATION, PVRSRV_APPHINT_CLKCTRL1, NO_PARAM_TABLE, ALWAYS ) \ +X(ClkCtrl2, UINT32, VALIDATION, PVRSRV_APPHINT_CLKCTRL2, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUTAGCtrl, UINT32, VALIDATION, 0x10, NO_PARAM_TABLE, ALWAYS ) \ +X(TPUMADDCtrl, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ + /* ******************************************************************************* Debugfs parameters (volcanic-specific) - driver configuration 
******************************************************************************/ #define APPHINT_LIST_DEBUGINFO \ /* name, type, class, default, helper, guest, */ \ +X(MMUVal_ParityFlipPMR, STRING, VALIDATION, PVRSRV_APPHINT_MMUVAL_PARITYFLIPPMR, NO_PARAM_TABLE, ALWAYS ) \ /* ******************************************************************************* diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/cache_km.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/cache_km.c index 71ed28342d3d..f9c4ce986e18 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/cache_km.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/cache_km.c @@ -70,7 +70,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv_sync_server.h" #include "km_apphint_defs.h" #include "km_apphint_defs_common.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "di_server.h" /* This header must always be included last */ @@ -159,7 +159,6 @@ typedef struct _CACHEOP_WORK_QUEUE_ IMG_UINT32 uiLineSize; IMG_UINT32 uiLineShift; IMG_UINT32 uiPageShift; - OS_CACHE_OP_ADDR_TYPE uiCacheOpAddrType; PMR *psInfoPagePMR; IMG_UINT32 *pui32InfoPage; @@ -269,7 +268,8 @@ static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem) 1, gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, &sDevPAddr, - &bValid); + &bValid, + CPU_USE); eLockError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); PVR_LOG_IF_ERROR(eLockError, "PMRUnlockSysPhysAddresses"); @@ -830,6 +830,8 @@ static INLINE PVRSRV_ERROR CacheOpValidateUMVA(PMR *psPMR, #if !defined(__linux__) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); pvAddr = NULL; #else /* Validate VA, assume most basic address limit access_ok() check */ @@ -895,6 +897,10 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, PVRSRV_ERROR eError = PVRSRV_OK; IMG_BYTE *pbCpuVirtAddr = NULL; IMG_BOOL *pbValid = abValid; + OS_CACHE_OP_ADDR_TYPE eCacheOpAddrType; + + psDevNode = PMR_DeviceNode(psPMR); + eCacheOpAddrType = OSCPUCacheOpAddressType(psDevNode); if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE) { @@ -903,13 +909,18 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, if (! 
bIsRequestValidated) { - IMG_DEVMEM_SIZE_T uiLPhysicalSize; - /* Need to validate parameters before proceeding */ - eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize); - PVR_LOG_RETURN_IF_ERROR(eError, "uiLPhysicalSize"); - - PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); + /* Check for size + offset overflow */ + PVR_LOG_RETURN_IF_FALSE(((uiOffset + uiSize) >= uiSize), + "Overflow detected on offset + size parameters", + PVRSRV_ERROR_INVALID_PARAMS); + /* Since size + offset is later aligned to page size check for overflow with alignment */ + PVR_LOG_RETURN_IF_FALSE((((uiOffset + uiSize) + gsCwq.uiPageSize - 1) >= (uiOffset + uiSize)), + "Overflow detected on offset + size parameters with applied alignment", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= PMR_PhysicalSize(psPMR)), + CACHEOP_DEVMEM_OOR_ERROR_STRING, + PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); eError = PMRLockSysPhysAddresses(psPMR); PVR_LOG_RETURN_IF_ERROR(eError, "PMRLockSysPhysAddresses"); @@ -921,7 +932,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, { pvAddress = pbCpuVirtAddr; - if (pvAddress && gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + if (pvAddress && eCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) { CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp); @@ -955,7 +966,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, */ CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL); - if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + if (eCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL) { PVR_DPF((PVR_DBG_WARNING, "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, using paddress", @@ -1005,7 +1016,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); PVR_LOG_GOTO_WITH_ERROR("PMRReleaseKernelMappingData", eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); } - else if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + else if (eCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) { PVR_DPF((PVR_DBG_WARNING, "%s: Bad vaddress 0x%p in CPU d-cache maint. op, using reacquired vaddress 0x%p", @@ -1049,7 +1060,6 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, /* Need this for kernel mapping */ bPMRIsSparse = PMR_IsSparse(psPMR); - psDevNode = PMR_DeviceNode(psPMR); /* Round the incoming offset down to the nearest cache-line / page aligned-address */ uiCLAlignedEndOffset = uiOffset + uiSize; @@ -1079,7 +1089,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, { pbValid = abValid; } - else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + else if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) { psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR)); if (! 
psCpuPhyAddr) @@ -1091,12 +1101,15 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, } } + /* Lock the PMR while we are obtaining and using phys addrs */ + PMRLockPMR(psPMR); + /* We always retrieve PMR data in bulk, up-front if number of pages is within PMR_MAX_TRANSLATION_STACK_ALLOC limits else we check to ensure that a dynamic buffer has been allocated to satisfy requests outside limits */ if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid) { - if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) { /* Look-up PMR CpuPhyAddr once, if possible */ eError = PMR_CpuPhysAddr(psPMR, @@ -1104,7 +1117,8 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, ui32NumOfPages, uiPgAlignedStartOffset, psCpuPhyAddr, - pbValid); + pbValid, + CPU_USE | MAPPING_USE); if (eError == PVRSRV_OK) { bIsPMRInfoValid = IMG_TRUE; @@ -1133,15 +1147,16 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, /* Never cross page boundary without looking up corresponding PMR page physical address and/or page validity if these were not looked-up, in bulk, up-front */ ui32PageIndex = 0; - if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) { eError = PMR_CpuPhysAddr(psPMR, gsCwq.uiPageShift, 1, uiPgAlignedOffset, psCpuPhyAddr, - pbValid); - PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", e0); + pbValid, + CPU_USE | MAPPING_USE); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", ErrUnlockPMR); } else { @@ -1150,7 +1165,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, 1, uiPgAlignedOffset, pbValid); - PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e0); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", ErrUnlockPMR); } } @@ -1168,7 +1183,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset)); } /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */ - else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + else if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) { if (bPMRIsSparse) { @@ -1179,7 +1194,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, (void **)&pbCpuVirtAddr, &uiOutSize, &hPrivOut); - PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", ErrUnlockPMR); } else { @@ -1190,7 +1205,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, (void **)&pbCpuVirtAddr, &uiOutSize, &hPrivOut); - PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrUnlockPMR); } } @@ -1198,7 +1213,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, CacheOpExecRangeBased(psDevNode, uiCacheOp, pbCpuVirtAddr, - (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ? + (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ? psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0], uiPgAlignedOffset, uiCLAlignedStartOffset, @@ -1207,7 +1222,7 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, if (! 
pvAddress) { /* The caller has not supplied either a KM/UM CpuVA, release mapping */ - if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) { eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); @@ -1215,6 +1230,8 @@ static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, } } +ErrUnlockPMR: + PMRUnlockPMR(psPMR); e0: if (psCpuPhyAddr != asCpuPhyAddr) { @@ -1590,7 +1607,6 @@ PVRSRV_ERROR CacheOpInit (void) gsCwq.uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize); PVR_LOG_RETURN_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE); - gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType(); #if defined(CACHEOP_DEBUG) /* debugfs file read-out is not concurrent, so lock protects against this */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/connection_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/connection_server.c index d455e5cb140a..b8d3994bcce1 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/connection_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/connection_server.c @@ -54,6 +54,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "osfunc.h" #include "tlstream.h" #include "rgxhwperf_common.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif /* PID associated with Connection currently being purged by Cleanup thread */ static IMG_PID gCurrentPurgeConnectionPid; @@ -64,6 +67,7 @@ static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) PROCESS_HANDLE_BASE *psProcessHandleBase; IMG_UINT64 ui64MaxBridgeTime; PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); if (psPVRSRVData->bUnload) { @@ -92,14 +96,14 @@ static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) if (psProcessHandleBase != NULL) { - /* PVRSRVReleaseProcessHandleBase() calls PVRSRVFreeKernelHendles() + /* PVRSRVReleaseProcessHandleBase() calls PVRSRVFreeKernelHandles() * and PVRSRVFreeHandleBase() for the process handle base. * Releasing kernel handles can never return RETRY error because - * release function for those handles are NOPs and PVRSRVFreeKernelHendles() + * release function for those handles are NOPs and PVRSRVFreeKernelHandles() * doesn't even call pfnReleaseData() callback. * Process handles can potentially return RETRY hence additional check * below. 
*/ - eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, psConnection->pid, + eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, ui64MaxBridgeTime); if (PVRSRVIsRetryError(eError)) { @@ -146,12 +150,13 @@ static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) if (psConnection->psPDumpConnectionData != NULL) { - PDumpUnregisterConnection(psConnection->psPDumpConnectionData); + PDumpUnregisterConnection(psDevNode, + psConnection->psPDumpConnectionData); psConnection->psPDumpConnectionData = NULL; } #if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVStatsDeviceDisconnect(OSGetDevNode(psConnection)); + PVRSRVStatsDeviceDisconnect(psDevNode); #endif /* Call environment specific connection data deinit function */ @@ -174,6 +179,21 @@ static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) OSFreeMemNoStats(psConnection); +#if defined(SUPPORT_PMR_DEFERRED_FREE) + /* Kick the Firmware to invalidate caches to clear all the zombie PMRs. + * If there are no zombie PMRs or no mappings were freed the kick will not + * be executed. + * + * This is needed: + * - when the process is killed and the connection cleanup has to clean up + * all dangling handles. + * - when there are any outstanding PMRs in the zombie list due to no + * invalidation being executed before connection destruction + */ + eError = MMU_CacheInvalidateKick(psDevNode, NULL); + PVR_LOG_IF_ERROR(eError, "MMU_CacheInvalidateKick"); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + return PVRSRV_OK; } @@ -182,6 +202,7 @@ PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) CONNECTION_DATA *psConnection; PVRSRV_ERROR eError; PROCESS_HANDLE_BASE *psProcessHandleBase; + PVRSRV_DEVICE_NODE *psDevNode; /* Allocate connection data area, no stats since process not registered yet */ psConnection = OSAllocZMemNoStats(sizeof(*psConnection)); @@ -197,20 +218,25 @@ PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData); PVR_LOG_GOTO_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure); + /* Must come after OSConnectionPrivateDataInit */ + psDevNode = OSGetDevNode(psConnection); + PVR_LOG_GOTO_IF_NOMEM(psDevNode, eError, failure); + + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot connect to the device during deinit")); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_DEVICE, failure); + } + psConnection->pid = OSGetCurrentClientProcessIDKM(); psConnection->vpid = OSGetCurrentVirtualProcessID(); psConnection->tid = (IMG_UINT32)OSGetCurrentClientThreadIDKM(); - OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN); + OSStringSafeCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN); #if defined(SUPPORT_DMA_TRANSFER) - OSLockCreate(&psConnection->hDmaReqLock); - - eError = OSEventObjectCreate("Dma transfer cleanup event object", - &psConnection->hDmaEventObject); - PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", failure); - - OSAtomicWrite(&psConnection->ui32NumDmaTransfersInFlight, 0); - psConnection->bAcceptDmaRequests = IMG_TRUE; + eError = PVRSRVInitialiseDMA(psDevNode, psConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitialiseDMA", failure); #endif /* Register this connection with the sync core */ @@ -221,9 +247,10 @@ PVRSRV_ERROR 
PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) * Register this connection and Sync PDump callback with * the pdump core. Pass in the Sync connection data. */ - eError = PDumpRegisterConnection(psConnection->psSyncConnectionData, - SyncConnectionPDumpSyncBlocks, - &psConnection->psPDumpConnectionData); + eError = PDumpRegisterConnection(psDevNode, + psConnection->psSyncConnectionData, + SyncConnectionPDumpSyncBlocks, + &psConnection->psPDumpConnectionData); PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure); /* Allocate handle base for this connection */ @@ -231,15 +258,14 @@ PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) PVRSRV_HANDLE_BASE_TYPE_CONNECTION); PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure); - /* get process handle base (if it doesn't exist it will be allocated) */ - eError = PVRSRVAcquireProcessHandleBase(psConnection->pid, &psProcessHandleBase); + /* get process handle base for the current process (if it doesn't exist it will be allocated) */ + eError = PVRSRVAcquireProcessHandleBase(&psProcessHandleBase); PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireProcessHandleBase", failure); /* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */ { IMG_BOOL bHostStreamIsNull; PVRSRV_RGXDEV_INFO *psRgxDevInfo; - PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); #if defined(PVRSRV_ENABLE_PROCESS_STATS) eError = PVRSRVStatsDeviceConnect(psDevNode); @@ -254,7 +280,7 @@ PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) #endif OSLockRelease(psDevNode->hConnectionsLock); - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) { psRgxDevInfo = _RGX_DEVICE_INFO_FROM_NODE(psDevNode); @@ -316,50 +342,6 @@ static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData) return eErrorConnection; } -#if defined(SUPPORT_DMA_TRANSFER) -static void WaitForOutstandingDma(CONNECTION_DATA *psConnectionData) -{ - - PVRSRV_ERROR eError; - IMG_HANDLE hEvent; - IMG_UINT32 ui32Tries = 100; - -#if defined(DMA_VERBOSE) - PVR_DPF((PVR_DBG_ERROR, - "Waiting on %d DMA transfers in flight...", OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight))); -#endif - - eError = OSEventObjectOpen(psConnectionData->hDmaEventObject, &hEvent); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); - return; - } - - while (OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight) != 0) - { - /* - #define DMA_TRANSFER_TIMEOUT_US (5000000ULL) - - This currently doesn't work properly. Wait time is not as requested. 
- Using OSSleepms instead - - OSEventObjectWaitKernel(hEvent, DMA_TRANSFER_TIMEOUT_US); - */ - OSSleepms(50); - if (!ui32Tries) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timeout while waiting on outstanding DMA transfers!", __func__)); - break; - } - - ui32Tries--; - } - - OSEventObjectClose(hEvent); -} -#endif - void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) { CONNECTION_DATA *psConnectionData = pvDataPtr; @@ -374,17 +356,36 @@ void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) { PDumpDisconnectionNotify(psDevNode); } -#if defined(SUPPORT_DMA_TRANSFER) - OSLockAcquire(psConnectionData->hDmaReqLock); - psConnectionData->bAcceptDmaRequests = IMG_FALSE; + /* Add a HOST_CLIENT_INFO event to match the one on connection */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) + { + IMG_BOOL bHostStreamIsNull; + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + + psRgxDevInfo = _RGX_DEVICE_INFO_FROM_NODE(psDevNode); - OSLockRelease(psConnectionData->hDmaReqLock); + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + bHostStreamIsNull = (IMG_BOOL)(psRgxDevInfo->hHWPerfHostStream == NULL); + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); - WaitForOutstandingDma(psConnectionData); + if (!bHostStreamIsNull) + { + if (TLStreamIsOpenForReading(psRgxDevInfo->hHWPerfHostStream)) + { + /* Announce this client connection in the host stream, if event mask is set */ + RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(psDevNode, psConnectionData->pid, psConnectionData->pszProcName); + } + } + } - OSEventObjectDestroy(psConnectionData->hDmaEventObject); - OSLockDestroy(psConnectionData->hDmaReqLock); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Mark remaining resources for driver to free */ + RIConnectionClosed(psConnectionData); +#endif + +#if defined(SUPPORT_DMA_TRANSFER) + PVRSRVDeInitialiseDMA(psDevNode, psConnectionData); #endif #if defined(DEBUG) || defined(PDUMP) @@ -399,10 +400,14 @@ void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) /* Defer the release of the connection data */ psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData; psConnectionData->sCleanupThreadFn.pvData = psConnectionData; - psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE; + /* Some resources in HANDLE_BASE may need FW idle confirmation + * hence setting to TRUE to use the global EO for retries which is + * signalled by the device MISR */ + psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; + psConnectionData->sCleanupThreadFn.eCleanupType = PVRSRV_CLEANUP_TYPE_CONNECTION; CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn, CLEANUP_THREAD_RETRY_COUNT_DEFAULT); - PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn); + PVRSRVCleanupThreadAddWork(psDevNode, &psConnectionData->sCleanupThreadFn); } } @@ -428,7 +433,8 @@ void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, * The mutex is initialised as part of DeviceInitialize() which occurs * on first access to the device node. 
*/ - if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + if ((psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) && + (psDevNode->eDevState != PVRSRV_DEVICE_STATE_FROZEN)) { PVR_DUMPDEBUG_LOG("Connections: No Devices: No active connections"); return; @@ -439,7 +445,7 @@ void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, { PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections", (unsigned char)psDevNode->sDevId.ui32InternalID, - (unsigned char)psDevNode->sDevId.i32OsDeviceID); + (unsigned char)psDevNode->sDevId.i32KernelDeviceID); } else { @@ -453,8 +459,8 @@ void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, MAX_CONNECTIONS_PREFIX, CONNECTIONS_PREFIX, (unsigned char)psDevNode->sDevId.ui32InternalID, - (unsigned char)psDevNode->sDevId.i32OsDeviceID); - OSStringLCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize); + (unsigned char)psDevNode->sDevId.i32KernelDeviceID); + OSStringSafeCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize); /* Move the write offset to the end of the current string */ uiPos += i; @@ -471,7 +477,7 @@ void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i); bPrinted = IMG_FALSE; - OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize); + OSStringSafeCopy(sActiveConnections+uiPos, sTmpBuff, uiSize); /* Move the write offset to the end of the current string */ uiPos += i; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/dc_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/dc_server.c deleted file mode 100644 index dcb07d901677..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/dc_server.c +++ /dev/null @@ -1,2330 +0,0 @@ -/**************************************************************************/ /*! -@File -@Title Server side Display Class functions -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Implements the server side functions of the Display Class - interface. -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". 
- -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ /***************************************************************************/ - -#include "allocmem.h" -#include "lock.h" -#include "osfunc.h" -#include "img_types.h" -#include "img_defs.h" -#include "scp.h" -#include "dc_server.h" -#include "kerneldisplay.h" -#include "pvr_debug.h" -#include "pvr_notifier.h" -#include "pmr.h" -#include "sync_server.h" -#include "pvrsrv.h" -#include "process_stats.h" -#include "cache_km.h" -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -#include "ri_server.h" -#endif - -struct _DC_DISPLAY_CONTEXT_ -{ - DC_DEVICE *psDevice; - SCP_CONTEXT *psSCPContext; - IMG_HANDLE hDisplayContext; - IMG_UINT32 ui32ConfigsInFlight; - ATOMIC_T sRefCount; - POS_LOCK hLock; - POS_LOCK hConfigureLock; // Guard against concurrent calls to pfnContextConfigure during DisplayContextFlush - IMG_UINT32 ui32TokenOut; - IMG_UINT32 ui32TokenIn; - - IMG_HANDLE hCmdCompNotify; - - IMG_BOOL bIssuedNullFlip; - IMG_HANDLE hMISR; - IMG_HANDLE hDebugNotify; - void *hTimer; - - IMG_BOOL bPauseMISR; - DLLIST_NODE sListNode; -}; - -struct _DC_DEVICE_ -{ - PVRSRV_DEVICE_NODE *psDevNode; - const DC_DEVICE_FUNCTIONS *psFuncTable; - IMG_UINT32 ui32MaxConfigsInFlight; - IMG_HANDLE hDeviceData; - ATOMIC_T sRefCount; - IMG_UINT32 ui32Index; - IMG_HANDLE psEventList; - IMG_HANDLE hSystemBuffer; - PMR *psSystemBufferPMR; - PHYS_HEAP *psPhysHeap; - DC_DISPLAY_CONTEXT sSystemContext; - DLLIST_NODE sListNode; /* List of devices */ -}; - -typedef enum _DC_BUFFER_TYPE_ -{ - DC_BUFFER_TYPE_UNKNOWN = 0, - DC_BUFFER_TYPE_ALLOC, - DC_BUFFER_TYPE_IMPORT, - DC_BUFFER_TYPE_SYSTEM, -} DC_BUFFER_TYPE; - -typedef struct _DC_BUFFER_ALLOC_DATA_ -{ - PMR *psPMR; -} DC_BUFFER_ALLOC_DATA; - -typedef struct _DC_BUFFER_IMPORT_DATA_ -{ - /* - Required as the DC doesn't need to map the PMR during the import call we - need to make sure that the PMR doesn't get freed before the DC maps it - by taking an ref on the PMR during the import and drop it on the unimport. 
- */ - IMG_UINT32 ui32NumPlanes; - PMR *apsImport[3]; -} DC_BUFFER_IMPORT_DATA; - -struct _DC_BUFFER_ -{ - DC_DISPLAY_CONTEXT *psDisplayContext; - DC_BUFFER_TYPE eType; - union { - DC_BUFFER_ALLOC_DATA sAllocData; - DC_BUFFER_IMPORT_DATA sImportData; - } uBufferData; - IMG_HANDLE hBuffer; - IMG_UINT32 ui32MapCount; - ATOMIC_T sRefCount; - POS_LOCK hMapLock; -}; - -typedef struct _DC_CMD_RDY_DATA_ -{ - DC_DISPLAY_CONTEXT *psDisplayContext; - IMG_UINT32 ui32BufferCount; - PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib; - IMG_HANDLE *pahBuffer; - IMG_UINT32 ui32DisplayPeriod; -} DC_CMD_RDY_DATA; - -typedef struct _DC_CMD_COMP_DATA_ -{ - DC_DISPLAY_CONTEXT *psDisplayContext; - IMG_UINT32 ui32BufferCount; - DC_BUFFER **apsBuffer; - IMG_UINT32 ui32Token; - IMG_BOOL bDirectNullFlip; -} DC_CMD_COMP_DATA; - -typedef struct _DC_BUFFER_PMR_DATA_ -{ - DC_BUFFER *psBuffer; /*!< The buffer this PMR private data refers to */ - IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize; /*!< Log 2 of the buffers pagesize */ - IMG_UINT32 ui32PageCount; /*!< Number of pages in this buffer */ - IMG_DEV_PHYADDR *pasDevPAddr; /*!< Pointer to an array of device physical addresses */ - void *pvLinAddr; /*!< CPU virtual pointer or NULL if the DC driver didn't have one */ -} DC_BUFFER_PMR_DATA; - -static POS_LOCK g_hDCListLock; -static POS_LOCK g_hDCDevListLock; /*!< Mutex for device list access */ - -static DLLIST_NODE g_sDCDeviceListHead; /*!< List of DC devices found */ -static IMG_UINT32 g_ui32DCDeviceCount; -static IMG_UINT32 g_ui32DCNextIndex; -static POS_LOCK g_hDCDisplayContextsListLock; -static DLLIST_NODE g_sDisplayContextsListHead; - - -#if defined(DC_DEBUG) && defined(REFCOUNT_DEBUG) -#define DC_REFCOUNT_PRINT(fmt, ...) \ - PVRSRVDebugPrintf(PVR_DBG_WARNING, \ - __FILE__, \ - __LINE__, \ - fmt, \ - __VA_ARGS__) -#else -#define DC_REFCOUNT_PRINT(fmt, ...) -#endif - -#if defined(DC_DEBUG) -#define DC_DEBUG_PRINT(fmt, ...) \ - PVRSRVDebugPrintf(PVR_DBG_WARNING, \ - __FILE__, \ - __LINE__, \ - fmt, \ - __VA_ARGS__) -#else -#define DC_DEBUG_PRINT(fmt, ...) 
-#endif - -/***************************************************************************** - * Private functions * - *****************************************************************************/ - -static void _DCDeviceAcquireRef(DC_DEVICE *psDevice) -{ - IMG_INT iRefCount = OSAtomicIncrement(&psDevice->sRefCount); - - DC_REFCOUNT_PRINT("%s: DC device %p, refcount = %d", __func__, psDevice, - iRefCount); - PVR_UNREFERENCED_PARAMETER(iRefCount); -} - -static void _DCDeviceReleaseRef(DC_DEVICE *psDevice) -{ - IMG_UINT iRefCount = OSAtomicDecrement(&psDevice->sRefCount); - if (iRefCount == 0) - { - OSLockAcquire(g_hDCDevListLock); - dllist_remove_node(&psDevice->sListNode); - OSLockRelease(g_hDCDevListLock); - - OSLockAcquire(g_hDCListLock); - g_ui32DCDeviceCount--; - OSLockRelease(g_hDCListLock); - } - else - { - /* Signal this devices event list as the unload might be blocked on it */ - OSEventObjectSignal(psDevice->psEventList); - } - - DC_REFCOUNT_PRINT("%s: DC device %p, refcount = %d", __func__, psDevice, - iRefCount); -} - -static void _DCDisplayContextAcquireRef(DC_DISPLAY_CONTEXT *psDisplayContext) -{ - IMG_INT iRefCount = OSAtomicIncrement(&psDisplayContext->sRefCount); - - DC_REFCOUNT_PRINT("%s: DC display context %p, refcount = %d", __func__, - psDisplayContext, iRefCount); - PVR_UNREFERENCED_PARAMETER(iRefCount); -} - -static void _DCDisplayContextReleaseRef(DC_DISPLAY_CONTEXT *psDisplayContext) -{ - IMG_INT iRefCount = OSAtomicDecrement(&psDisplayContext->sRefCount); - if (iRefCount == 0) - { - DC_DEVICE *psDevice = psDisplayContext->psDevice; - - PVRSRVUnregisterDeviceDbgRequestNotify(psDisplayContext->hDebugNotify); - - OSLockAcquire(g_hDCDisplayContextsListLock); - dllist_remove_node(&psDisplayContext->sListNode); - OSLockRelease(g_hDCDisplayContextsListLock); - - /* unregister the device from cmd complete notifications */ - PVRSRVUnregisterCmdCompleteNotify(psDisplayContext->hCmdCompNotify); - psDisplayContext->hCmdCompNotify = NULL; - - OSUninstallMISR(psDisplayContext->hMISR); - SCPDestroy(psDisplayContext->psSCPContext); - psDevice->psFuncTable->pfnContextDestroy(psDisplayContext->hDisplayContext); - _DCDeviceReleaseRef(psDevice); - OSLockDestroy(psDisplayContext->hConfigureLock); - OSLockDestroy(psDisplayContext->hLock); - OSFreeMem(psDisplayContext); - } - - DC_REFCOUNT_PRINT("%s: DC display context %p, refcount = %d", __func__, - psDisplayContext, iRefCount); -} - -static void _DCBufferAcquireRef(DC_BUFFER *psBuffer) -{ - IMG_INT iRefCount = OSAtomicIncrement(&psBuffer->sRefCount); - - DC_REFCOUNT_PRINT("%s: DC buffer %p, refcount = %d", __func__, psBuffer, - iRefCount); - PVR_UNREFERENCED_PARAMETER(iRefCount); -} - - -static void _DCFreeAllocedBuffer(DC_BUFFER *psBuffer) -{ - DC_DISPLAY_CONTEXT *psDisplayContext = psBuffer->psDisplayContext; - DC_DEVICE *psDevice = psDisplayContext->psDevice; - - psDevice->psFuncTable->pfnBufferFree(psBuffer->hBuffer); - _DCDisplayContextReleaseRef(psDisplayContext); -} - -static void _DCFreeImportedBuffer(DC_BUFFER *psBuffer) -{ - DC_DISPLAY_CONTEXT *psDisplayContext = psBuffer->psDisplayContext; - DC_DEVICE *psDevice = psDisplayContext->psDevice; - IMG_UINT32 i; - - for (i=0;i<psBuffer->uBufferData.sImportData.ui32NumPlanes;i++) - { - PMRUnrefPMR(psBuffer->uBufferData.sImportData.apsImport[i]); - } - psDevice->psFuncTable->pfnBufferFree(psBuffer->hBuffer); - _DCDisplayContextReleaseRef(psDisplayContext); -} - -static void _DCFreeSystemBuffer(DC_BUFFER *psBuffer) -{ - DC_DISPLAY_CONTEXT *psDisplayContext = psBuffer->psDisplayContext; - 
DC_DEVICE *psDevice = psDisplayContext->psDevice; - - psDevice->psFuncTable->pfnBufferSystemRelease(psBuffer->hBuffer); - _DCDeviceReleaseRef(psDevice); -} - -/* - Drop a reference on the buffer. Last person gets to free it - */ -static void _DCBufferReleaseRef(DC_BUFFER *psBuffer) -{ - IMG_INT iRefCount = OSAtomicDecrement(&psBuffer->sRefCount); - if (iRefCount == 0) - { - switch (psBuffer->eType) - { - case DC_BUFFER_TYPE_ALLOC: - _DCFreeAllocedBuffer(psBuffer); - break; - case DC_BUFFER_TYPE_IMPORT: - _DCFreeImportedBuffer(psBuffer); - break; - case DC_BUFFER_TYPE_SYSTEM: - _DCFreeSystemBuffer(psBuffer); - break; - default: - PVR_ASSERT(IMG_FALSE); - } - OSLockDestroy(psBuffer->hMapLock); - OSFreeMem(psBuffer); - } - - DC_REFCOUNT_PRINT("%s: DC buffer %p, refcount = %d", __func__, psBuffer, - iRefCount); - PVR_UNREFERENCED_PARAMETER(iRefCount); -} - -static PVRSRV_ERROR _DCBufferMap(DC_BUFFER *psBuffer) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - - OSLockAcquire(psBuffer->hMapLock); - if (psBuffer->ui32MapCount++ == 0) - { - DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice; - - if (psDevice->psFuncTable->pfnBufferMap) - { - eError = psDevice->psFuncTable->pfnBufferMap(psBuffer->hBuffer); - PVR_GOTO_IF_ERROR(eError, out_unlock); - } - - _DCBufferAcquireRef(psBuffer); - } - - DC_REFCOUNT_PRINT("%s: DC buffer %p, MapCount = %d", - __func__, psBuffer, psBuffer->ui32MapCount); - -out_unlock: - OSLockRelease(psBuffer->hMapLock); - return eError; -} - -static void _DCBufferUnmap(DC_BUFFER *psBuffer) -{ - DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice; - IMG_UINT32 ui32MapCount; - - OSLockAcquire(psBuffer->hMapLock); - ui32MapCount = --psBuffer->ui32MapCount; - OSLockRelease(psBuffer->hMapLock); - - if (ui32MapCount == 0) - { - if (psDevice->psFuncTable->pfnBufferUnmap) - { - psDevice->psFuncTable->pfnBufferUnmap(psBuffer->hBuffer); - } - - _DCBufferReleaseRef(psBuffer); - } - DC_REFCOUNT_PRINT("%s: DC Buffer %p, MapCount = %d", - __func__, psBuffer, ui32MapCount); -} - -static PVRSRV_ERROR _DCDeviceBufferArrayCreate(IMG_UINT32 ui32BufferCount, - DC_BUFFER **papsBuffers, - IMG_HANDLE **pahDeviceBuffers) -{ - IMG_HANDLE *ahDeviceBuffers; - IMG_UINT32 i; - - /* Create an array of the DC's private Buffer handles */ - ahDeviceBuffers = OSAllocZMem(sizeof(IMG_HANDLE) * ui32BufferCount); - PVR_RETURN_IF_NOMEM(ahDeviceBuffers); - - for (i=0;i<ui32BufferCount;i++) - { - ahDeviceBuffers[i] = papsBuffers[i]->hBuffer; - } - - *pahDeviceBuffers = ahDeviceBuffers; - - return PVRSRV_OK; -} - -static void _DCDeviceBufferArrayDestroy(IMG_HANDLE ahDeviceBuffers) -{ - OSFreeMem(ahDeviceBuffers); -} - -static IMG_BOOL _DCDisplayContextReady(void *hReadyData) -{ - DC_CMD_RDY_DATA *psReadyData = (DC_CMD_RDY_DATA *) hReadyData; - DC_DISPLAY_CONTEXT *psDisplayContext = psReadyData->psDisplayContext; - DC_DEVICE *psDevice = psDisplayContext->psDevice; - - if (psDisplayContext->ui32ConfigsInFlight >= psDevice->ui32MaxConfigsInFlight) - { - /* - We're at the DC's max commands in-flight so don't take this command - off the queue - */ - return IMG_FALSE; - } - - return IMG_TRUE; -} - -#if defined(SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG) -static void _RetireTimeout(void *pvData) -{ - DC_CMD_COMP_DATA *psCompleteData = pvData; - DC_DISPLAY_CONTEXT *psDisplayContext = psCompleteData->psDisplayContext; - - PVR_DPF((PVR_DBG_ERROR, "Timeout fired for operation %d", psCompleteData->ui32Token)); - SCPDumpStatus(psDisplayContext->psSCPContext, NULL); - - OSDisableTimer(psDisplayContext->hTimer); - OSRemoveTimer(psDisplayContext->hTimer); - psDisplayContext->hTimer = NULL; -} -#endif 
/* SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG */ - -static void _DCDisplayContextConfigure(void *hReadyData, - void *hCompleteData) -{ - DC_CMD_RDY_DATA *psReadyData = (DC_CMD_RDY_DATA *) hReadyData; - DC_DISPLAY_CONTEXT *psDisplayContext = psReadyData->psDisplayContext; - DC_DEVICE *psDevice = psDisplayContext->psDevice; - - OSLockAcquire(psDisplayContext->hLock); - psDisplayContext->ui32ConfigsInFlight++; - -#if defined(SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG) - if (psDisplayContext->ui32ConfigsInFlight == psDevice->ui32MaxConfigsInFlight) - { - /* - We've just sent out a new config which has filled the DC's pipeline. - This means that we expect a retire within a VSync period, start - a timer that will print out a message if we haven't got a complete - within a reasonable period (200ms) - */ - PVR_ASSERT(psDisplayContext->hTimer == NULL); - psDisplayContext->hTimer = OSAddTimer(_RetireTimeout, hCompleteData, 200); - OSEnableTimer(psDisplayContext->hTimer); - } -#endif - - OSLockRelease(psDisplayContext->hLock); - -#if defined(DC_DEBUG) - { - DC_DEBUG_PRINT("_DCDisplayContextConfigure: Send command (%d) out", - ((DC_CMD_COMP_DATA*) hCompleteData)->ui32Token); - } -#endif /* DC_DEBUG */ - - /* - * Note: A risk exists that _DCDisplayContextConfigure may be called simultaneously - * from both SCPRun (MISR context) and DCDisplayContextFlush. - * This lock ensures no concurrent calls are made to pfnContextConfigure. - */ - OSLockAcquire(psDisplayContext->hConfigureLock); - /* - Note: We've already done all the acquire refs at - DCDisplayContextConfigure time. - */ - psDevice->psFuncTable->pfnContextConfigure(psDisplayContext->hDisplayContext, - psReadyData->ui32BufferCount, - psReadyData->pasSurfAttrib, - psReadyData->pahBuffer, - psReadyData->ui32DisplayPeriod, - hCompleteData); - OSLockRelease(psDisplayContext->hConfigureLock); - -} - -/* - _DCDisplayContextRun - - Kick the MISR which will check for any commands which can be processed - */ -static INLINE void _DCDisplayContextRun(DC_DISPLAY_CONTEXT *psDisplayContext) -{ - OSScheduleMISR(psDisplayContext->hMISR); -} - -/* - DC_MISRHandler_DisplaySCP - - This gets called when this MISR is fired - */ -static void DC_MISRHandler_DisplaySCP(void *pvData) -{ - DC_DISPLAY_CONTEXT *psDisplayContext = pvData; - - if ( !psDisplayContext->bPauseMISR ) - { - SCPRun(psDisplayContext->psSCPContext); - } -} - -/* - * PMR related functions and structures - */ - -/* - Callback function for locking the system physical page addresses. - As we acquire the display memory at PMR create time there is nothing - to do here. 
- */ -static PVRSRV_ERROR _DCPMRLockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv) -{ - DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv; - DC_BUFFER *psBuffer = psPMRPriv->psBuffer; - DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice; - PVRSRV_ERROR eError; - - psPMRPriv->pasDevPAddr = OSAllocZMem(sizeof(IMG_DEV_PHYADDR) * - psPMRPriv->ui32PageCount); - PVR_GOTO_IF_NOMEM(psPMRPriv->pasDevPAddr, eError, fail_alloc); - - eError = psDevice->psFuncTable->pfnBufferAcquire(psBuffer->hBuffer, - psPMRPriv->pasDevPAddr, - &psPMRPriv->pvLinAddr); - PVR_GOTO_IF_ERROR(eError, fail_query); - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if defined(PVRSRV_ENABLE_MEMORY_STATS) - { - IMG_UINT32 i; - for (i = 0; i < psPMRPriv->ui32PageCount; i++) - { - IMG_CPU_PHYADDR sCPUPhysAddr; - PVRSRV_MEM_ALLOC_TYPE eAllocType; - - if (PhysHeapGetType(psDevice->psPhysHeap) == PHYS_HEAP_TYPE_LMA) { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; - } else { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; - } - - sCPUPhysAddr.uiAddr = ((uintptr_t)psPMRPriv->pvLinAddr) + i * (1 << psPMRPriv->uiLog2PageSize); - PVRSRVStatsAddMemAllocRecord(eAllocType, - NULL, - sCPUPhysAddr, - 1 << psPMRPriv->uiLog2PageSize, - OSGetCurrentClientProcessIDKM() - DEBUG_MEMSTATS_VALUES); - } - } -#else - { - PVRSRV_MEM_ALLOC_TYPE eAllocType; - - if (PhysHeapGetType(psDevice->psPhysHeap) == PHYS_HEAP_TYPE_LMA) { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; - } else { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; - } - - PVRSRVStatsIncrMemAllocStat(eAllocType, - psPMRPriv->ui32PageCount * (1 << psPMRPriv->uiLog2PageSize), - OSGetCurrentClientProcessIDKM()); - } -#endif -#endif - - return PVRSRV_OK; - -fail_query: - OSFreeMem(psPMRPriv->pasDevPAddr); -fail_alloc: - return eError; -} - -static PVRSRV_ERROR _DCPMRUnlockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv) -{ - DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv; - DC_BUFFER *psBuffer = psPMRPriv->psBuffer; - DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice; - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - { - PVRSRV_MEM_ALLOC_TYPE eAllocType; - - if (PhysHeapGetType(psDevice->psPhysHeap) == PHYS_HEAP_TYPE_LMA) { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; - } else { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; - } - - PVRSRVStatsDecrMemAllocStat(eAllocType, - psPMRPriv->ui32PageCount * (1 << psPMRPriv->uiLog2PageSize), - OSGetCurrentClientProcessIDKM()); - } -#else - { - PVRSRV_MEM_ALLOC_TYPE eAllocType; - IMG_UINT32 i; - - if (PhysHeapGetType(psDevice->psPhysHeap) == PHYS_HEAP_TYPE_LMA) { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; - } else { - eAllocType = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; - } - - for (i = 0; i < psPMRPriv->ui32PageCount; i++) - { - IMG_CPU_PHYADDR sCPUPhysAddr; - - sCPUPhysAddr.uiAddr = ((uintptr_t)psPMRPriv->pvLinAddr) + i * (1 << psPMRPriv->uiLog2PageSize); - PVRSRVStatsRemoveMemAllocRecord(eAllocType, - sCPUPhysAddr.uiAddr, - OSGetCurrentClientProcessIDKM()); - } - } -#endif -#endif - - psDevice->psFuncTable->pfnBufferRelease(psBuffer->hBuffer); - OSFreeMem(psPMRPriv->pasDevPAddr); - - return PVRSRV_OK; -} - -static PVRSRV_ERROR _DCPMRDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv, - IMG_UINT32 ui32Log2PageSize, - IMG_UINT32 ui32NumOfPages, - IMG_DEVMEM_OFFSET_T *puiOffset, - IMG_BOOL *pbValid, - IMG_DEV_PHYADDR *psDevAddrPtr) -{ - DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv; - IMG_UINT32 uiPageIndex; - IMG_UINT32 uiInPageOffset; - IMG_DEV_PHYADDR sDevAddr; - IMG_UINT32 idx; - - 
PVR_RETURN_IF_INVALID_PARAM(psPMRPriv->uiLog2PageSize == ui32Log2PageSize); - - for (idx=0; idx < ui32NumOfPages; idx++) - { - if (pbValid[idx]) - { - /* verify the cast - N.B. Strictly... this could be triggered by an illegal uiOffset arg too. */ - uiPageIndex = (IMG_UINT32)(puiOffset[idx] >> ui32Log2PageSize); - PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiPageIndex << ui32Log2PageSize == puiOffset[idx]); - - uiInPageOffset = (IMG_UINT32)(puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << ui32Log2PageSize)); - PVR_ASSERT(puiOffset[idx] == ((IMG_DEVMEM_OFFSET_T)uiPageIndex << ui32Log2PageSize) + uiInPageOffset); - PVR_ASSERT(uiPageIndex < psPMRPriv->ui32PageCount); - PVR_ASSERT(uiInPageOffset < (1U << ui32Log2PageSize)); - - sDevAddr.uiAddr = psPMRPriv->pasDevPAddr[uiPageIndex].uiAddr; - PVR_ASSERT((sDevAddr.uiAddr & ((1UL << ui32Log2PageSize) - 1U)) == 0U); - - psDevAddrPtr[idx] = sDevAddr; - psDevAddrPtr[idx].uiAddr += uiInPageOffset; - } - } - - return PVRSRV_OK; -} - -#if defined(INTEGRITY_OS) -static PVRSRV_ERROR _DCPMRAcquireKernelMappingData(PMR_IMPL_PRIVDATA pvPriv, - size_t uiOffset, - size_t uiSize, - void **ppvKernelAddressOut, - IMG_HANDLE *phHandleOut, - PMR_FLAGS_T ulFlags) -{ - DC_BUFFER_PMR_DATA *psPMRPriv = (DC_BUFFER_PMR_DATA *)pvPriv; - DC_BUFFER *psBuffer = NULL; - DC_DEVICE *psDevice = NULL; - IMG_HANDLE hMapping = NULL; - void *pvKernelAddr = NULL; - PVRSRV_ERROR eError = PVRSRV_OK; - - PVR_LOG_RETURN_IF_INVALID_PARAM(psPMRPriv, "psPMRPriv"); - - psBuffer = psPMRPriv->psBuffer; - psDevice = psBuffer->psDisplayContext->psDevice; - - eError = psDevice->psFuncTable->pfnAcquireKernelMappingData(psBuffer->hBuffer, &hMapping, &pvKernelAddr); - PVR_LOG_RETURN_IF_ERROR(eError, "pfnAcquireKernelMappingData"); - - *phHandleOut = hMapping; - *ppvKernelAddressOut = pvKernelAddr; - - return eError; -} - -static void _DCPMRReleaseKernelMappingData(PMR_IMPL_PRIVDATA pvPriv, - IMG_HANDLE hHandle) -{ - PVR_UNREFERENCED_PARAMETER(pvPriv); - PVR_UNREFERENCED_PARAMETER(hHandle); -} -#endif - -static PVRSRV_ERROR _DCPMRFinalize(PMR_IMPL_PRIVDATA pvPriv) -{ - DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv; - - _DCBufferReleaseRef(psPMRPriv->psBuffer); - OSFreeMem(psPMRPriv); - - return PVRSRV_OK; -} - -static PVRSRV_ERROR _DCPMRReadBytes(PMR_IMPL_PRIVDATA pvPriv, - IMG_DEVMEM_OFFSET_T uiOffset, - IMG_UINT8 *pcBuffer, - size_t uiBufSz, - size_t *puiNumBytes) -{ - DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv; - DC_BUFFER *psBuffer = psPMRPriv->psBuffer; - DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice; - IMG_CPU_PHYADDR sCpuPAddr; - size_t uiBytesCopied = 0; - size_t uiBytesToCopy = uiBufSz; - size_t uiBytesCopyableFromPage; - void *pvMapping; - IMG_UINT8 *pcKernelPointer; - size_t uiBufferOffset = 0; - size_t uiPageIndex; - size_t uiInPageOffset; - - /* If we already have a CPU mapping just us it */ - if (psPMRPriv->pvLinAddr) - { - pcKernelPointer = psPMRPriv->pvLinAddr; - OSDeviceMemCopy(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); - *puiNumBytes = uiBufSz; - return PVRSRV_OK; - } - - /* Copy the data page by page */ - while (uiBytesToCopy > 0) - { - /* we have to kmap one page in at a time */ - uiPageIndex = TRUNCATE_64BITS_TO_SIZE_T(uiOffset >> psPMRPriv->uiLog2PageSize); - - uiInPageOffset = TRUNCATE_64BITS_TO_SIZE_T(uiOffset - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psPMRPriv->uiLog2PageSize)); - uiBytesCopyableFromPage = uiBytesToCopy; - if (uiBytesCopyableFromPage + uiInPageOffset > (1U<<psPMRPriv->uiLog2PageSize)) - { - uiBytesCopyableFromPage = (1 << psPMRPriv->uiLog2PageSize)-uiInPageOffset; - } - - 
PhysHeapDevPAddrToCpuPAddr(psDevice->psPhysHeap, 1, &sCpuPAddr, &psPMRPriv->pasDevPAddr[uiPageIndex]); - - pvMapping = OSMapPhysToLin(sCpuPAddr, - 1 << psPMRPriv->uiLog2PageSize, - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); - PVR_ASSERT(pvMapping != NULL); - pcKernelPointer = pvMapping; - OSDeviceMemCopy(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInPageOffset], uiBytesCopyableFromPage); - OSUnMapPhysToLin(pvMapping, 1 << psPMRPriv->uiLog2PageSize); - - uiBufferOffset += uiBytesCopyableFromPage; - uiBytesToCopy -= uiBytesCopyableFromPage; - uiOffset += uiBytesCopyableFromPage; - uiBytesCopied += uiBytesCopyableFromPage; - } - - *puiNumBytes = uiBytesCopied; - return PVRSRV_OK; -} - -static PMR_IMPL_FUNCTAB sDCPMRFuncTab = { - .pfnLockPhysAddresses = _DCPMRLockPhysAddresses, - .pfnUnlockPhysAddresses = _DCPMRUnlockPhysAddresses, - .pfnDevPhysAddr = _DCPMRDevPhysAddr, -#if !defined(INTEGRITY_OS) - .pfnAcquireKernelMappingData = NULL, - .pfnReleaseKernelMappingData = NULL, -#else - .pfnAcquireKernelMappingData = _DCPMRAcquireKernelMappingData, - .pfnReleaseKernelMappingData = _DCPMRReleaseKernelMappingData, -#endif - .pfnReadBytes = _DCPMRReadBytes, - .pfnWriteBytes = NULL, - .pfnChangeSparseMem = NULL, - .pfnChangeSparseMemCPUMap = NULL, - .pfnMMap = NULL, - .pfnFinalize = _DCPMRFinalize -}; - -static PVRSRV_ERROR _DCCreatePMR(IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, - IMG_UINT32 ui32PageCount, - PHYS_HEAP *psPhysHeap, - DC_BUFFER *psBuffer, - PMR **ppsPMR, - IMG_BOOL bSystemBuffer, - const IMG_CHAR *pszAnnotation) -{ - DC_BUFFER_PMR_DATA *psPMRPriv; - IMG_DEVMEM_SIZE_T uiBufferSize; - PVRSRV_ERROR eError; - IMG_UINT32 uiMappingTable = 0; - - PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap, "psPhysHeap"); - - /* - Create the PMR for this buffer. - - Note: At this stage we don't need to know the physical pages just - the page size and the size of the PMR. The 1st call that needs the - physical pages will cause a request into the DC driver (pfnBufferQuery) - */ - psPMRPriv = OSAllocZMem(sizeof(DC_BUFFER_PMR_DATA)); - PVR_GOTO_IF_NOMEM(psPMRPriv, eError, fail_privalloc); - - /* Take a reference on the buffer (for the copy in the PMR) */ - _DCBufferAcquireRef(psBuffer); - - /* Fill in the private data for the PMR */ - psPMRPriv->uiLog2PageSize = uiLog2PageSize; - psPMRPriv->ui32PageCount = ui32PageCount; - psPMRPriv->pasDevPAddr = NULL; - psPMRPriv->psBuffer = psBuffer; - - uiBufferSize = (1 << uiLog2PageSize) * ui32PageCount; - - /* Create the PMR for the MM layer */ - eError = PMRCreatePMR(psPhysHeap, - uiBufferSize, - 1, - 1, - &uiMappingTable, - uiLog2PageSize, - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_UNCACHED_WC, - pszAnnotation, - &sDCPMRFuncTab, - psPMRPriv, - PMR_TYPE_DC, - ppsPMR, - bSystemBuffer ? 
PDUMP_PERSIST : PDUMP_NONE); - PVR_GOTO_IF_ERROR(eError, fail_pmrcreate); - - return PVRSRV_OK; - -fail_pmrcreate: - _DCBufferReleaseRef(psBuffer); - OSFreeMem(psPMRPriv); -fail_privalloc: - return eError; -} - -static void _DCDisplayContextNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) -{ - DC_DISPLAY_CONTEXT *psDisplayContext = (DC_DISPLAY_CONTEXT*) hCmdCompHandle; - - _DCDisplayContextRun(psDisplayContext); -} - -static void _DCDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, - IMG_UINT32 ui32VerbLevel, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - DC_DISPLAY_CONTEXT *psDisplayContext = (DC_DISPLAY_CONTEXT*) hDebugRequestHandle; - - PVR_DUMPDEBUG_LOG("Configs in-flight = %d", psDisplayContext->ui32ConfigsInFlight); - - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) - { - PVR_DUMPDEBUG_LOG("------[ Display context SCP status ]------"); - SCPDumpStatus(psDisplayContext->psSCPContext, pfnDumpDebugPrintf, pvDumpDebugFile); - } -} - -/***************************************************************************** - * Public interface functions exposed through the bridge to services client * - *****************************************************************************/ - -PVRSRV_ERROR DCDevicesQueryCount(IMG_UINT32 *pui32DeviceCount) -{ - *pui32DeviceCount = g_ui32DCDeviceCount; - return PVRSRV_OK; -} - -PVRSRV_ERROR DCDevicesEnumerate(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32DeviceArraySize, - IMG_UINT32 *pui32DeviceCount, - IMG_UINT32 *paui32DeviceIndex) -{ - IMG_UINT32 ui32DeviceCount = 0; - DLLIST_NODE *psNode, *psNext; - - PVR_UNREFERENCED_PARAMETER(psConnection); - - /* Check that we don't have any NULL pointers / zero-sized arrays passed */ - PVR_LOG_RETURN_IF_FALSE((0U != ui32DeviceArraySize), "ui32DeviceArraySize invalid", PVRSRV_ERROR_INVALID_PARAMS); - PVR_LOG_RETURN_IF_FALSE((NULL != pui32DeviceCount), "pui32DeviceCount invalid", PVRSRV_ERROR_INVALID_PARAMS); - PVR_LOG_RETURN_IF_FALSE((NULL != paui32DeviceIndex), "paui32DeviceIndex invalid", PVRSRV_ERROR_INVALID_PARAMS); - - /* Iterate over whole list using dllist_foreach() */ - - OSLockAcquire(g_hDCDevListLock); - dllist_foreach_node(&g_sDCDeviceListHead, psNode, psNext) - { - DC_DEVICE *psTmp = IMG_CONTAINER_OF(psNode, DC_DEVICE, sListNode); - - if (psTmp->psDevNode == psDevNode) - { - paui32DeviceIndex[ui32DeviceCount] = psTmp->ui32Index; - ui32DeviceCount++; - } - } - OSLockRelease(g_hDCDevListLock); - - OSLockAcquire(g_hDCListLock); - *pui32DeviceCount = ui32DeviceCount; - OSLockRelease(g_hDCListLock); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCDeviceAcquire(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32DeviceIndex, - DC_DEVICE **ppsDevice) -{ - DLLIST_NODE *psNode, *psNext; - DC_DEVICE *psDevice; - PVRSRV_ERROR eError = PVRSRV_ERROR_NO_DC_DEVICES_FOUND; - IMG_BOOL bFound = IMG_FALSE; - - OSLockAcquire(g_hDCDevListLock); - if (!dllist_is_empty(&g_sDCDeviceListHead)) - { - dllist_foreach_node(&g_sDCDeviceListHead, psNode, psNext) - { - psDevice = IMG_CONTAINER_OF(psNode, DC_DEVICE, sListNode); - if ((psDevice->ui32Index == ui32DeviceIndex) && - (psDevice->psDevNode == psDevNode)) - { - bFound = IMG_TRUE; - eError = PVRSRV_OK; - break; - } - } - } - OSLockRelease(g_hDCDevListLock); - - if (bFound) - { - _DCDeviceAcquireRef(psDevice); - *ppsDevice = psDevice; - } - - return eError; -} - -PVRSRV_ERROR DCDeviceRelease(DC_DEVICE *psDevice) -{ - _DCDeviceReleaseRef(psDevice); - - return PVRSRV_OK; -} - 
-PVRSRV_ERROR DCGetInfo(DC_DEVICE *psDevice, - DC_DISPLAY_INFO *psDisplayInfo) -{ - psDevice->psFuncTable->pfnGetInfo(psDevice->hDeviceData, - psDisplayInfo); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCPanelQueryCount(DC_DEVICE *psDevice, - IMG_UINT32 *pui32NumPanels) -{ - psDevice->psFuncTable->pfnPanelQueryCount(psDevice->hDeviceData, - pui32NumPanels); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCPanelQuery(DC_DEVICE *psDevice, - IMG_UINT32 ui32PanelsArraySize, - IMG_UINT32 *pui32NumPanels, - PVRSRV_PANEL_INFO *pasPanelInfo) -{ - psDevice->psFuncTable->pfnPanelQuery(psDevice->hDeviceData, - ui32PanelsArraySize, - pui32NumPanels, - pasPanelInfo); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCFormatQuery(DC_DEVICE *psDevice, - IMG_UINT32 ui32FormatArraySize, - PVRSRV_SURFACE_FORMAT *pasFormat, - IMG_UINT32 *pui32Supported) -{ - psDevice->psFuncTable->pfnFormatQuery(psDevice->hDeviceData, - ui32FormatArraySize, - pasFormat, - pui32Supported); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCDimQuery(DC_DEVICE *psDevice, - IMG_UINT32 ui32DimSize, - PVRSRV_SURFACE_DIMS *pasDim, - IMG_UINT32 *pui32Supported) -{ - psDevice->psFuncTable->pfnDimQuery(psDevice->hDeviceData, - ui32DimSize, - pasDim, - pui32Supported); - - return PVRSRV_OK; -} - - -PVRSRV_ERROR DCSetBlank(DC_DEVICE *psDevice, - IMG_BOOL bEnabled) -{ - PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED; - if (psDevice->psFuncTable->pfnSetBlank) - { - eError = psDevice->psFuncTable->pfnSetBlank(psDevice->hDeviceData, - bEnabled); - } - - return eError; -} - -PVRSRV_ERROR DCSetVSyncReporting(DC_DEVICE *psDevice, - IMG_BOOL bEnabled) -{ - PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED; - if (psDevice->psFuncTable->pfnSetVSyncReporting) - { - eError = psDevice->psFuncTable->pfnSetVSyncReporting(psDevice->hDeviceData, - bEnabled); - } - - return eError; -} - -PVRSRV_ERROR DCLastVSyncQuery(DC_DEVICE *psDevice, - IMG_INT64 *pi64Timestamp) -{ - PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED; - if (psDevice->psFuncTable->pfnLastVSyncQuery) - { - eError = psDevice->psFuncTable->pfnLastVSyncQuery(psDevice->hDeviceData, - pi64Timestamp); - } - - return eError; -} - -/* - The system buffer breaks the rule of only calling DC callbacks on first - ref and last deref. For the pfnBufferSystemAcquire this is expected - as each call could get back a different buffer, but calls to - pfnBufferAcquire and pfnBufferRelease could happen multiple times - for the same buffer - */ -PVRSRV_ERROR DCSystemBufferAcquire(DC_DEVICE *psDevice, - IMG_UINT32 *pui32ByteStride, - DC_BUFFER **ppsBuffer) -{ - DC_BUFFER *psNew; - PMR *psPMR; - PVRSRV_ERROR eError; - IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize; - IMG_UINT32 ui32PageCount; - - if (psDevice->psFuncTable->pfnBufferSystemAcquire == NULL) - { - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_NO_SYSTEM_BUFFER, fail_nopfn); - } - - psNew = OSAllocZMem(sizeof(DC_BUFFER)); - PVR_GOTO_IF_NOMEM(psNew, eError, fail_alloc); - - eError = OSLockCreate(&psNew->hMapLock); - PVR_GOTO_IF_ERROR(eError, fail_maplock); - - eError = psDevice->psFuncTable->pfnBufferSystemAcquire(psDevice->hDeviceData, - &uiLog2PageSize, - &ui32PageCount, - pui32ByteStride, - &psNew->hBuffer); - PVR_GOTO_IF_ERROR(eError, fail_bufferacquire); - - psNew->psDisplayContext = &psDevice->sSystemContext; - psNew->eType = DC_BUFFER_TYPE_SYSTEM; - psNew->ui32MapCount = 0; - OSAtomicWrite(&psNew->sRefCount, 1); - - /* - Creating the PMR for the system buffer is a bit tricky as there is no - "create" call for it. 
- We should only ever have one PMR for the same buffer and so we can't - just create one every call to this function. We also have to deal with - the system buffer changing (mode change) so we can't just create the PMR - once at DC driver register time. - So what we do is cache the DC's handle to the system buffer and check if - this call the handle has changed (indicating a mode change) and create - a new PMR in this case. - */ - if (psNew->hBuffer != psDevice->hSystemBuffer) - { - DC_DISPLAY_INFO sDisplayInfo; - IMG_INT32 i32RITextSize; - IMG_CHAR pszRIText[DEVMEM_ANNOTATION_MAX_LEN]; - if (psDevice->psSystemBufferPMR) - { - /* - Mode change: - We've already got a system buffer but the DC has given us a new - one so we need to drop the 2nd reference we took on it as a - different system buffer will be freed as DC unregister time - */ - PMRUnrefPMR(psDevice->psSystemBufferPMR); - } - - DCGetInfo(psDevice, &sDisplayInfo); - - i32RITextSize = OSSNPrintf((IMG_CHAR *)pszRIText, DEVMEM_ANNOTATION_MAX_LEN, "%s: SysBuf", (IMG_CHAR *)sDisplayInfo.szDisplayName); - if (i32RITextSize < 0) { - pszRIText[0] = '\0'; - i32RITextSize = 0; - } - else - { - pszRIText[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0'; - } - - eError = _DCCreatePMR(uiLog2PageSize, - ui32PageCount, - psDevice->psPhysHeap, - psNew, - &psPMR, - IMG_TRUE, - pszRIText); - - PVR_GOTO_IF_ERROR(eError, fail_createpmr); - -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) - { - /* Dummy handle - we don't need to store the reference to the PMR RI entry. Its deletion is handled internally. */ - - eError = RIWritePMREntryKM (psPMR); - } -#endif - - psNew->uBufferData.sAllocData.psPMR = psPMR; - psDevice->hSystemBuffer = psNew->hBuffer; - psDevice->psSystemBufferPMR = psPMR; - - /* - Take a 2nd reference on the PMR as we always drop a reference - in the release call but we don't want the PMR to be freed until - either a new system buffer as been acquired or the DC device gets - unregistered - */ - PMRRefPMR(psDevice->psSystemBufferPMR); - } - else - { - /* - A PMR for the system buffer as already been created so just - take a reference to the PMR to make sure it doesn't go away - */ - PMRRefPMR(psDevice->psSystemBufferPMR); - psNew->uBufferData.sAllocData.psPMR = psDevice->psSystemBufferPMR; - } - - /* - The system buffer is tied to the device unlike all other buffers - which are tied to a display context. 
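The comment above describes caching a single PMR per system buffer and rebuilding it only when the DC hands back a different buffer handle (a mode change), while keeping one extra reference alive in the cache. A condensed sketch of that handle-keyed caching with generic names (not the services API) is below.

#include <stdlib.h>

struct pmr { int refs; };

static void pmr_ref(struct pmr *p)   { p->refs++; }
static void pmr_unref(struct pmr *p) { if (--p->refs == 0) free(p); }

struct dc_device {
    void *cached_handle;     /* last system-buffer handle seen */
    struct pmr *cached_pmr;  /* wrapper the cache keeps a reference on */
};

/* Return a referenced wrapper for the current system buffer, rebuilding it
 * only when the handle differs from the cached one (i.e. a mode change). */
static struct pmr *system_buffer_pmr(struct dc_device *dev, void *handle)
{
    if (handle != dev->cached_handle) {
        if (dev->cached_pmr)
            pmr_unref(dev->cached_pmr);   /* drop the cache's stale reference */

        struct pmr *pmr = calloc(1, sizeof(*pmr));
        if (!pmr)
            return NULL;
        pmr->refs = 1;                    /* caller's reference */

        dev->cached_handle = handle;
        dev->cached_pmr = pmr;
        pmr_ref(pmr);                     /* extra reference owned by the cache */
        return pmr;
    }

    pmr_ref(dev->cached_pmr);             /* same handle: reuse the cached wrapper */
    return dev->cached_pmr;
}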
- */ - _DCDeviceAcquireRef(psDevice); - - *ppsBuffer = psNew; - - return PVRSRV_OK; - -fail_createpmr: -fail_bufferacquire: - OSLockDestroy(psNew->hMapLock); -fail_maplock: - OSFreeMem(psNew); -fail_alloc: -fail_nopfn: - return eError; -} - -PVRSRV_ERROR DCSystemBufferRelease(DC_BUFFER *psBuffer) -{ - PMRUnrefPMR(psBuffer->uBufferData.sAllocData.psPMR); - _DCBufferReleaseRef(psBuffer); - return PVRSRV_OK; -} - -PVRSRV_ERROR DCDisplayContextCreate(DC_DEVICE *psDevice, - DC_DISPLAY_CONTEXT **ppsDisplayContext) -{ - DC_DISPLAY_CONTEXT *psDisplayContext; - PVRSRV_ERROR eError; - - psDisplayContext = OSAllocMem(sizeof(DC_DISPLAY_CONTEXT)); - PVR_RETURN_IF_NOMEM(psDisplayContext); - - psDisplayContext->psDevice = psDevice; - psDisplayContext->hDisplayContext = NULL; - psDisplayContext->ui32TokenOut = 0; - psDisplayContext->ui32TokenIn = 0; - psDisplayContext->ui32ConfigsInFlight = 0; - psDisplayContext->bIssuedNullFlip = IMG_FALSE; - psDisplayContext->hTimer = NULL; - psDisplayContext->bPauseMISR = IMG_FALSE; - OSAtomicWrite(&psDisplayContext->sRefCount, 1); - - eError = OSLockCreate(&psDisplayContext->hLock); - PVR_GOTO_IF_ERROR(eError, FailLock); - - eError = OSLockCreate(&psDisplayContext->hConfigureLock); - PVR_GOTO_IF_ERROR(eError, FailLock2); - - /* Create a Software Command Processor with 4K CCB size. - * With the HWC it might be possible to reach the limit off the buffer. - * This could be bad when the buffers currently on the screen can't be - * flipped to the new one, cause the command for them doesn't fit into the - * queue (Deadlock). This situation should properly detected to make at - * least the debugging easier. */ - eError = SCPCreate(psDevice->psDevNode, 12, &psDisplayContext->psSCPContext); - PVR_GOTO_IF_ERROR(eError, FailSCP); - - eError = psDevice->psFuncTable->pfnContextCreate(psDevice->hDeviceData, - &psDisplayContext->hDisplayContext); - - PVR_GOTO_IF_ERROR(eError, FailDCDeviceContext); - - _DCDeviceAcquireRef(psDevice); - - /* Create an MISR for our display context */ - eError = OSInstallMISR(&psDisplayContext->hMISR, - DC_MISRHandler_DisplaySCP, - psDisplayContext, - "DC_DisplaySCP"); - PVR_GOTO_IF_ERROR(eError, FailMISR); - - /* - Register for the command complete callback. - - Note: - After calling this function our MISR can be called at any point. 
- */ - eError = PVRSRVRegisterCmdCompleteNotify(&psDisplayContext->hCmdCompNotify, _DCDisplayContextNotify, psDisplayContext); - PVR_GOTO_IF_ERROR(eError, FailRegisterCmdComplete); - - /* Register our debug request notify callback */ - eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDisplayContext->hDebugNotify, - psDevice->psDevNode, - _DCDebugRequest, - DEBUG_REQUEST_DC, - psDisplayContext); - PVR_GOTO_IF_ERROR(eError, FailRegisterDbgRequest); - - *ppsDisplayContext = psDisplayContext; - - OSLockAcquire(g_hDCDisplayContextsListLock); - /* store pointer to first/only display context, required for DCDisplayContextFlush */ - dllist_add_to_tail(&g_sDisplayContextsListHead, &psDisplayContext->sListNode); - OSLockRelease(g_hDCDisplayContextsListLock); - - return PVRSRV_OK; - -FailRegisterDbgRequest: - PVRSRVUnregisterCmdCompleteNotify(psDisplayContext->hCmdCompNotify); -FailRegisterCmdComplete: - OSUninstallMISR(psDisplayContext->hMISR); -FailMISR: - _DCDeviceReleaseRef(psDevice); - psDevice->psFuncTable->pfnContextDestroy(psDisplayContext->hDisplayContext); -FailDCDeviceContext: - SCPDestroy(psDisplayContext->psSCPContext); -FailSCP: - OSLockDestroy(psDisplayContext->hConfigureLock); -FailLock2: - OSLockDestroy(psDisplayContext->hLock); -FailLock: - OSFreeMem(psDisplayContext); - return eError; -} - -PVRSRV_ERROR DCDisplayContextConfigureCheck(DC_DISPLAY_CONTEXT *psDisplayContext, - IMG_UINT32 ui32PipeCount, - PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib, - DC_BUFFER **papsBuffers) -{ - DC_DEVICE *psDevice = psDisplayContext->psDevice; - PVRSRV_ERROR eError; - IMG_HANDLE *ahBuffers; - - _DCDisplayContextAcquireRef(psDisplayContext); - - /* Create an array of private device specific buffer handles */ - eError = _DCDeviceBufferArrayCreate(ui32PipeCount, - papsBuffers, - &ahBuffers); - PVR_GOTO_IF_ERROR(eError, FailBufferArrayCreate); - - /* Do we need to check if this is valid config? 
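The display-context create path above unwinds partially initialised state through a chain of labels, releasing resources in the reverse order they were acquired. A minimal sketch of that goto-ladder idiom with plain C resources (invented names standing in for the locks, SCP context and callback registrations created here):

#include <pthread.h>
#include <stdlib.h>

struct ctx {
    pthread_mutex_t lock_a;
    pthread_mutex_t lock_b;
    void *scratch;
};

/* Each acquisition has a matching label; a failure jumps to the label that
 * undoes everything acquired so far, newest first. */
static struct ctx *ctx_create(void)
{
    struct ctx *c = malloc(sizeof(*c));
    if (!c)
        goto fail_alloc;

    if (pthread_mutex_init(&c->lock_a, NULL) != 0)
        goto fail_lock_a;

    if (pthread_mutex_init(&c->lock_b, NULL) != 0)
        goto fail_lock_b;

    c->scratch = malloc(4096);
    if (!c->scratch)
        goto fail_scratch;

    return c;

fail_scratch:
    pthread_mutex_destroy(&c->lock_b);
fail_lock_b:
    pthread_mutex_destroy(&c->lock_a);
fail_lock_a:
    free(c);
fail_alloc:
    return NULL;
}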
*/ - if (psDevice->psFuncTable->pfnContextConfigureCheck) - { - - eError = psDevice->psFuncTable->pfnContextConfigureCheck(psDisplayContext->hDisplayContext, - ui32PipeCount, - pasSurfAttrib, - ahBuffers); - PVR_GOTO_IF_ERROR(eError, FailConfigCheck); - } - - _DCDeviceBufferArrayDestroy(ahBuffers); - _DCDisplayContextReleaseRef(psDisplayContext); - return PVRSRV_OK; - -FailConfigCheck: - _DCDeviceBufferArrayDestroy(ahBuffers); -FailBufferArrayCreate: - _DCDisplayContextReleaseRef(psDisplayContext); - - PVR_ASSERT(eError != PVRSRV_OK); - return eError; -} - - -static void _DCDisplayContextFlush(PDLLIST_NODE psNode) -{ - DC_CMD_RDY_DATA sReadyData; - DC_CMD_COMP_DATA sCompleteData; - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32NumConfigsInSCP, ui32GoodRuns, ui32LoopCount; - - DC_DISPLAY_CONTEXT * psDisplayContext = IMG_CONTAINER_OF(psNode, DC_DISPLAY_CONTEXT, sListNode); - - /* Make the NULL flip command data */ - sReadyData.psDisplayContext = psDisplayContext; - sReadyData.ui32DisplayPeriod = 0; - sReadyData.ui32BufferCount = 0; - sReadyData.pasSurfAttrib = NULL; - sReadyData.pahBuffer = NULL; - - sCompleteData.psDisplayContext = psDisplayContext; - sCompleteData.ui32BufferCount = 0; - sCompleteData.ui32Token = 0; - sCompleteData.bDirectNullFlip = IMG_TRUE; - - /* Stop the MISR to stop the SCP from running outside of our control */ - psDisplayContext->bPauseMISR = IMG_TRUE; - - /* - * Flush loop control: - * take the total number of Configs owned by the SCP including those - * "in-flight" with the DC, then multiply by 2 to account for any padding - * commands in the SCP buffer - */ - ui32NumConfigsInSCP = psDisplayContext->ui32TokenOut - psDisplayContext->ui32TokenIn; - ui32NumConfigsInSCP *= 2; - ui32GoodRuns = 0; - ui32LoopCount = 0; - - /* - * Calling SCPRun first, ensures that any call to SCPRun from the MISR - * context completes before we insert any NULL flush direct to the DC. - * SCPRun returns PVRSRV_OK (0) if the run command (Configure) executes OR there - * is no work to be done OR it consumes a padding command. - * By counting a "good" SCPRun for each of the ui32NumConfigsInSCP we ensure - * that all Configs currently in the SCP are flushed to the DC. - * - * In the case where we fail dependencies (PVRSRV_ERROR_FAILED_DEPENDENCIES (15)) - * but there are outstanding ui32ConfigsInFlight that may satisfy them, - * we just loop and try again. - * In the case where there is still more work but the DC is full - * (PVRSRV_ERROR_NOT_READY (254)), we just loop and try again. 
- * - * During a flush, NULL flips may be inserted if waiting for the 3D (not - * actually deadlocked), but this should be benign - */ - while ( ui32GoodRuns < ui32NumConfigsInSCP && ui32LoopCount < 500 ) - { - eError = SCPRun( psDisplayContext->psSCPContext ); - - if ( 0 == ui32LoopCount && PVRSRV_ERROR_FAILED_DEPENDENCIES != eError && 1 != psDisplayContext->ui32ConfigsInFlight ) - { - PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextFlush: called when not required")); - break; - } - - if ( PVRSRV_OK == eError ) - { - ui32GoodRuns++; - } - else if ( PVRSRV_ERROR_FAILED_DEPENDENCIES == eError && 1 == psDisplayContext->ui32ConfigsInFlight ) - { - PVR_DPF((PVR_DBG_WARNING, "DCDisplayContextFlush: inserting NULL flip")); - - /* The next Config may be dependent on the single Config currently in the DC */ - /* Issue a NULL flip to free it */ - _DCDisplayContextAcquireRef(psDisplayContext); - _DCDisplayContextConfigure( (void *)&sReadyData, (void *)&sCompleteData ); - } - - /* Give up the timeslice to let something happen */ - OSSleepms(1); - ui32LoopCount++; - } - - if ( ui32LoopCount >= 500 ) - { - PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextFlush: Failed to flush after > 500 milliseconds")); - } - - PVR_DPF((PVR_DBG_WARNING, "DCDisplayContextFlush: inserting final NULL flip")); - - /* The next Config may be dependent on the single Config currently in the DC */ - /* Issue a NULL flip to free it */ - _DCDisplayContextAcquireRef(psDisplayContext); - _DCDisplayContextConfigure( (void *)&sReadyData, (void *)&sCompleteData ); - - /* re-enable the MISR/SCP */ - psDisplayContext->bPauseMISR = IMG_FALSE; -} - - -PVRSRV_ERROR DCDisplayContextFlush(void) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - - OSLockAcquire(g_hDCDisplayContextsListLock); - - if ( !dllist_is_empty(&g_sDisplayContextsListHead) ) - { - DLLIST_NODE *psNode, *psNext; - dllist_foreach_node(&g_sDisplayContextsListHead, psNode, psNext) - { - _DCDisplayContextFlush(psNode); - } - } - else - { - PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextFlush: No display contexts found")); - eError = PVRSRV_ERROR_INVALID_CONTEXT; - } - - OSLockRelease(g_hDCDisplayContextsListLock); - - return eError; -} - - -PVRSRV_ERROR DCDisplayContextConfigure(DC_DISPLAY_CONTEXT *psDisplayContext, - IMG_UINT32 ui32PipeCount, - PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib, - DC_BUFFER **papsBuffers, - IMG_UINT32 ui32DisplayPeriod, - IMG_UINT32 ui32MaxDepth, - PVRSRV_FENCE iAcquireFence, - PVRSRV_TIMELINE iReleaseFenceTimeline, - PVRSRV_FENCE *piReleaseFence) -{ - DC_DEVICE *psDevice = psDisplayContext->psDevice; - PVRSRV_ERROR eError; - IMG_HANDLE *ahBuffers; - IMG_UINT32 ui32BuffersMapped = 0; - IMG_UINT32 i; - IMG_UINT32 ui32CmdRdySize; - IMG_UINT32 ui32CmdCompSize; - IMG_UINT32 ui32CopySize; - IMG_PUINT8 pui8ReadyData; - void *pvCompleteData; - DC_CMD_RDY_DATA *psReadyData; - DC_CMD_COMP_DATA *psCompleteData; - - _DCDisplayContextAcquireRef(psDisplayContext); - - if (ui32MaxDepth == 1) - { - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DC_INVALID_MAXDEPTH, FailMaxDepth); - } - else if (ui32MaxDepth > 0) - { - /* ui32TokenOut/In wrap-around case takes care of itself. 
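The flush loop above drains the SCP by counting successful runs against a bound derived from the number of queued configs, retrying on failed dependencies, unblocking with a NULL flip when a single config is left in flight, and giving up after 500 one-millisecond sleeps. A generic sketch of that bounded drain-with-fallback shape, with simplified return codes and caller-supplied helpers rather than the SCP API:

#include <stdbool.h>
#include <unistd.h>   /* usleep */

enum run_status { RUN_OK, RUN_BLOCKED, RUN_NOT_READY };

/* Callbacks supplied by the caller in this sketch. */
struct queue_ops {
    enum run_status (*run)(void *q);   /* execute one queued item if possible */
    void (*unblock)(void *q);          /* e.g. insert a NULL flip */
    unsigned (*in_flight)(void *q);    /* items currently handed to the hardware */
};

/* Drain up to `expected` items, sleeping 1 ms between attempts and giving
 * up after `max_loops` iterations so a wedged queue cannot hang the caller. */
static bool drain_queue(void *q, const struct queue_ops *ops,
                        unsigned expected, unsigned max_loops)
{
    unsigned done = 0;

    for (unsigned loops = 0; done < expected && loops < max_loops; loops++) {
        enum run_status st = ops->run(q);

        if (st == RUN_OK) {
            done++;
        } else if (st == RUN_BLOCKED && ops->in_flight(q) == 1) {
            /* The next item may depend on the only outstanding one. */
            ops->unblock(q);
        }

        usleep(1000);   /* yield for 1 ms between attempts, as the flush loop does */
    }

    return done >= expected;
}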
*/ - if (psDisplayContext->ui32TokenOut - psDisplayContext->ui32TokenIn >= ui32MaxDepth) - { - _DCDisplayContextRun(psDisplayContext); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_RETRY, FailMaxDepth); - } - } - - /* Reset the release fence */ - if (piReleaseFence) - *piReleaseFence = PVRSRV_NO_FENCE; - - /* If we get sent a NULL flip then we don't need to do the check or map */ - if (ui32PipeCount != 0) - { - /* Create an array of private device specific buffer handles */ - eError = _DCDeviceBufferArrayCreate(ui32PipeCount, - papsBuffers, - &ahBuffers); - PVR_GOTO_IF_ERROR(eError, FailBufferArrayCreate); - - /* Do we need to check if this is valid config? */ - if (psDevice->psFuncTable->pfnContextConfigureCheck) - { - eError = psDevice->psFuncTable->pfnContextConfigureCheck(psDisplayContext->hDisplayContext, - ui32PipeCount, - pasSurfAttrib, - ahBuffers); - PVR_GOTO_IF_ERROR(eError, FailConfigCheck); - } - - /* Map all the buffers that are going to be used */ - for (i=0;ipsSCPContext, - iAcquireFence, - _DCDisplayContextReady, - _DCDisplayContextConfigure, - ui32CmdRdySize, - ui32CmdCompSize, - (void **)&pui8ReadyData, - &pvCompleteData, - iReleaseFenceTimeline, - piReleaseFence); - - PVR_GOTO_IF_ERROR(eError, FailCommandAlloc); - - /* - Set up command ready data - */ - psReadyData = (DC_CMD_RDY_DATA *)(void *)pui8ReadyData; - pui8ReadyData += sizeof(DC_CMD_RDY_DATA); - - psReadyData->ui32DisplayPeriod = ui32DisplayPeriod; - psReadyData->psDisplayContext = psDisplayContext; - psReadyData->ui32BufferCount = ui32PipeCount; - - /* Copy over surface attribute array */ - if (ui32PipeCount != 0) - { - psReadyData->pasSurfAttrib = (PVRSRV_SURFACE_CONFIG_INFO *)(void *)pui8ReadyData; - ui32CopySize = sizeof(PVRSRV_SURFACE_CONFIG_INFO) * ui32PipeCount; - OSCachedMemCopy(psReadyData->pasSurfAttrib, pasSurfAttrib, ui32CopySize); - pui8ReadyData = pui8ReadyData + ui32CopySize; - } - else - { - psReadyData->pasSurfAttrib = NULL; - } - - /* Copy over device buffer handle buffer array */ - if (ui32PipeCount != 0) - { - psReadyData->pahBuffer = (IMG_HANDLE)pui8ReadyData; - ui32CopySize = sizeof(IMG_HANDLE) * ui32PipeCount; - OSCachedMemCopy(psReadyData->pahBuffer, ahBuffers, ui32CopySize); - } - else - { - psReadyData->pahBuffer = NULL; - } - - /* - Set up command complete data - */ - psCompleteData = pvCompleteData; - pvCompleteData = (IMG_PUINT8)pvCompleteData + sizeof(DC_CMD_COMP_DATA); - - psCompleteData->psDisplayContext = psDisplayContext; - psCompleteData->ui32Token = psDisplayContext->ui32TokenOut++; - psCompleteData->ui32BufferCount = ui32PipeCount; - psCompleteData->bDirectNullFlip = IMG_FALSE; - - if (ui32PipeCount != 0) - { - /* Copy the buffer pointers */ - psCompleteData->apsBuffer = pvCompleteData; - for (i=0;iapsBuffer[i] = papsBuffers[i]; - } - } - - /* Submit the command */ - eError = SCPSubmitCommand(psDisplayContext->psSCPContext); - - /* Check for new work on this display context */ - _DCDisplayContextRun(psDisplayContext); - - /* The only way this submit can fail is if there is a bug in this module */ - PVR_ASSERT(eError == PVRSRV_OK); - - if (ui32PipeCount != 0) - { - _DCDeviceBufferArrayDestroy(ahBuffers); - } - - return PVRSRV_OK; - -FailCommandAlloc: -FailMapBuffer: - if (ui32PipeCount != 0) - { - for (i=0;ibIssuedNullFlip) - { - eError = DCDisplayContextConfigure(psDisplayContext, - 0, - NULL, - NULL, - 0, - 0, - PVRSRV_NO_FENCE, - PVRSRV_NO_TIMELINE, - PVRSRV_NO_FENCE_PTR); - - PVR_RETURN_IF_ERROR(eError); - psDisplayContext->bIssuedNullFlip = IMG_TRUE; - } - - /* - Flush out 
everything from SCP - - This will ensure that the MISR isn't dropping the last reference - which would cause a deadlock during cleanup - */ - eError = SCPFlush(psDisplayContext->psSCPContext); - PVR_RETURN_IF_ERROR(eError); - - _DCDisplayContextReleaseRef(psDisplayContext); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCBufferAlloc(DC_DISPLAY_CONTEXT *psDisplayContext, - DC_BUFFER_CREATE_INFO *psSurfInfo, - IMG_UINT32 *pui32ByteStride, - DC_BUFFER **ppsBuffer) -{ - DC_DEVICE *psDevice = psDisplayContext->psDevice; - DC_BUFFER *psNew; - PMR *psPMR; - PVRSRV_ERROR eError; - IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize; - IMG_UINT32 ui32PageCount; - DC_DISPLAY_INFO sDisplayInfo; - IMG_INT32 i32RITextSize; - IMG_CHAR pszRIText[DEVMEM_ANNOTATION_MAX_LEN]; - - psNew = OSAllocZMem(sizeof(DC_BUFFER)); - PVR_RETURN_IF_NOMEM(psNew); - - eError = OSLockCreate(&psNew->hMapLock); - PVR_GOTO_IF_ERROR(eError, fail_maplock); - - eError = psDevice->psFuncTable->pfnBufferAlloc(psDisplayContext->hDisplayContext, - psSurfInfo, - &uiLog2PageSize, - &ui32PageCount, - pui32ByteStride, - &psNew->hBuffer); - PVR_GOTO_IF_ERROR(eError, fail_bufferalloc); - - /* - Fill in the basic info for our buffer - (must be before _DCCreatePMR) - */ - psNew->psDisplayContext = psDisplayContext; - psNew->eType = DC_BUFFER_TYPE_ALLOC; - psNew->ui32MapCount = 0; - OSAtomicWrite(&psNew->sRefCount, 1); - - DCGetInfo(psDevice, &sDisplayInfo); - - i32RITextSize = OSSNPrintf((IMG_CHAR *)pszRIText, DEVMEM_ANNOTATION_MAX_LEN, "%s: BufAlloc", (IMG_CHAR *)sDisplayInfo.szDisplayName); - if (i32RITextSize < 0) { - pszRIText[0] = '\0'; - i32RITextSize = 0; - } - else - { - pszRIText[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0'; - } - - eError = _DCCreatePMR(uiLog2PageSize, - ui32PageCount, - psDevice->psPhysHeap, - psNew, - &psPMR, - IMG_FALSE, - pszRIText); - PVR_GOTO_IF_ERROR(eError, fail_createpmr); - -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) - { - /* Dummy handle - we don't need to store the reference to the PMR RI entry. Its deletion is handled internally. */ - eError = RIWritePMREntryKM (psPMR); - } -#endif - - psNew->uBufferData.sAllocData.psPMR = psPMR; - _DCDisplayContextAcquireRef(psDisplayContext); - - *ppsBuffer = psNew; - - return PVRSRV_OK; - -fail_createpmr: - psDevice->psFuncTable->pfnBufferFree(psNew->hBuffer); -fail_bufferalloc: - OSLockDestroy(psNew->hMapLock); -fail_maplock: - OSFreeMem(psNew); - return eError; -} - -PVRSRV_ERROR DCBufferFree(DC_BUFFER *psBuffer) -{ - /* - Only drop the reference on the PMR if this is a DC allocated - buffer. 
In the case of imported buffers the 3rd party DC - driver manages the PMR's "directly" - */ - if (psBuffer->eType == DC_BUFFER_TYPE_ALLOC) - { - PMRUnrefPMR(psBuffer->uBufferData.sAllocData.psPMR); - } - _DCBufferReleaseRef(psBuffer); - - return PVRSRV_OK; -} - -PVRSRV_ERROR DCBufferImport(DC_DISPLAY_CONTEXT *psDisplayContext, - IMG_UINT32 ui32NumPlanes, - PMR **papsImport, - DC_BUFFER_IMPORT_INFO *psSurfAttrib, - DC_BUFFER **ppsBuffer) -{ - DC_DEVICE *psDevice = psDisplayContext->psDevice; - DC_BUFFER *psNew; - PVRSRV_ERROR eError; - IMG_UINT32 i; - - if (psDevice->psFuncTable->pfnBufferImport == NULL) - { - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_NOT_SUPPORTED, FailEarlyError); - } - - psNew = OSAllocZMem(sizeof(DC_BUFFER)); - PVR_GOTO_IF_NOMEM(psNew, eError, FailEarlyError); - - eError = OSLockCreate(&psNew->hMapLock); - PVR_GOTO_IF_ERROR(eError, FailMapLock); - - eError = psDevice->psFuncTable->pfnBufferImport(psDisplayContext->hDisplayContext, - ui32NumPlanes, - (IMG_HANDLE **)papsImport, - psSurfAttrib, - &psNew->hBuffer); - PVR_GOTO_IF_ERROR(eError, FailBufferImport); - - /* - Take a reference on the PMR to make sure it can't be released before - we've finished with it - */ - for (i=0;iuBufferData.sImportData.apsImport[i] = papsImport[i]; - } - - _DCDisplayContextAcquireRef(psDisplayContext); - psNew->psDisplayContext = psDisplayContext; - psNew->eType = DC_BUFFER_TYPE_IMPORT; - psNew->uBufferData.sImportData.ui32NumPlanes = ui32NumPlanes; - psNew->ui32MapCount = 0; - OSAtomicWrite(&psNew->sRefCount, 1); - - *ppsBuffer = psNew; - - return PVRSRV_OK; - -FailBufferImport: - OSLockDestroy(psNew->hMapLock); -FailMapLock: - OSFreeMem(psNew); - -FailEarlyError: - return eError; -} - -PVRSRV_ERROR DCBufferUnimport(DC_BUFFER *psBuffer) -{ - _DCBufferReleaseRef(psBuffer); - return PVRSRV_OK; -} - - -PVRSRV_ERROR DCBufferAcquire(DC_BUFFER *psBuffer, PMR **ppsPMR) -{ - PMR *psPMR = psBuffer->uBufferData.sAllocData.psPMR; - PVRSRV_ERROR eError; - - PVR_LOG_GOTO_IF_INVALID_PARAM(psBuffer->eType != DC_BUFFER_TYPE_IMPORT, eError, fail_typecheck); - PMRRefPMR(psPMR); - - *ppsPMR = psPMR; - return PVRSRV_OK; - -fail_typecheck: - return eError; -} - -PVRSRV_ERROR DCBufferRelease(PMR *psPMR) -{ - /* - Drop our reference on the PMR. 
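DCBufferFree above drops the PMR reference only for buffers the DC allocated itself; for imported buffers the external DC driver manages the PMRs. A small sketch of that type-gated release, using generic types rather than the services structures:

#include <stdlib.h>

enum buf_type { BUF_ALLOC, BUF_IMPORT };

struct pmr { int refs; };

static void pmr_unref(struct pmr *p)
{
    if (--p->refs == 0)
        free(p);
}

struct dc_buffer {
    enum buf_type type;
    struct pmr *pmr;      /* owned by this module only when type == BUF_ALLOC */
};

/* Release a buffer: the PMR reference belongs to this module only for
 * locally allocated buffers, so imports skip the unref. */
static void buffer_free(struct dc_buffer *buf)
{
    if (buf->type == BUF_ALLOC && buf->pmr)
        pmr_unref(buf->pmr);

    free(buf);    /* stands in for dropping the buffer's own refcount */
}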
If we're the last one then the PMR - will be freed and are _DCPMRFinalize function will be called where - we drop our reference on the buffer - */ - PMRUnrefPMR(psPMR); - return PVRSRV_OK; -} - -PVRSRV_ERROR DCBufferPin(DC_BUFFER *psBuffer, DC_PIN_HANDLE *phPin) -{ - *phPin = psBuffer; - return _DCBufferMap(psBuffer); -} - -PVRSRV_ERROR DCBufferUnpin(DC_PIN_HANDLE hPin) -{ - DC_BUFFER *psBuffer = hPin; - - _DCBufferUnmap(psBuffer); - return PVRSRV_OK; -} - - -/***************************************************************************** - * Public interface functions for 3rd party display class devices * - *****************************************************************************/ - -PVRSRV_ERROR DCRegisterDevice(DC_DEVICE_FUNCTIONS *psFuncTable, - IMG_UINT32 ui32MaxConfigsInFlight, - IMG_HANDLE hDeviceData, - IMG_HANDLE *phSrvHandle) -{ - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - DC_DEVICE *psNew; - PVRSRV_ERROR eError; - - if (!psPVRSRVData || !psPVRSRVData->psDeviceNodeList) - { - return PVRSRV_ERROR_RETRY; - } - - psNew = OSAllocMem(sizeof(DC_DEVICE)); - PVR_GOTO_IF_NOMEM(psNew, eError, FailAlloc); - - /* Associate display devices to the first device node */ - /* Iterate over psDeviceNodeList to get the correct dev_t for this - * instance - Use psNext to traverse the list - */ - psNew->psDevNode = PVRSRVGetDeviceInstance(g_ui32DCDeviceCount); - PVR_GOTO_IF_INVALID_PARAM(psNew->psDevNode != NULL, eError, FailEventObject); - psNew->psFuncTable = psFuncTable; - psNew->ui32MaxConfigsInFlight = ui32MaxConfigsInFlight; - psNew->hDeviceData = hDeviceData; - psNew->ui32Index = g_ui32DCNextIndex++; - dllist_init(&psNew->sListNode); - OSAtomicWrite(&psNew->sRefCount, 1); - eError = OSEventObjectCreate("DC_EVENT_OBJ", &psNew->psEventList); - PVR_GOTO_IF_ERROR(eError, FailEventObject); - - eError = PhysHeapAcquireByUsage((PHYS_HEAP_USAGE_FLAGS)PHYS_HEAP_USAGE_DISPLAY, psNew->psDevNode, &psNew->psPhysHeap); - if (eError == PVRSRV_ERROR_PHYSHEAP_ID_INVALID) - { - /* DC based system layers must provide a DISPLAY Phys Heap or a - * GPU_LOCAL heap which the display controller can operate with. This - * can not use the default defined heap for the device as a CPU_LOCAL - * value might not be accessible by the display controller HW. 
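Registration above first tries to acquire a DISPLAY physical heap and falls back to GPU_LOCAL only on the specific "no such heap" error, as the comment explains. A stripped-down sketch of that acquire-with-fallback control flow; the error codes, heap names and stub acquire function are invented for illustration (the real call is PhysHeapAcquireByUsage with services error codes):

#include <stddef.h>

enum err { ERR_OK, ERR_NO_SUCH_HEAP, ERR_OTHER };
enum heap_usage { HEAP_DISPLAY, HEAP_GPU_LOCAL };

struct heap { enum heap_usage usage; };

/* Stub standing in for the real acquire call: this imaginary platform only
 * defines a GPU_LOCAL heap, so DISPLAY requests fail with a specific error. */
static enum err heap_acquire(enum heap_usage usage, struct heap **out)
{
    static struct heap gpu_local = { HEAP_GPU_LOCAL };

    if (usage != HEAP_GPU_LOCAL)
        return ERR_NO_SUCH_HEAP;

    *out = &gpu_local;
    return ERR_OK;
}

/* Prefer the DISPLAY heap, but fall back to GPU_LOCAL only when a DISPLAY
 * heap simply does not exist; any other failure is returned unchanged. */
static enum err acquire_display_heap(struct heap **out)
{
    enum err e = heap_acquire(HEAP_DISPLAY, out);

    if (e == ERR_NO_SUCH_HEAP)
        e = heap_acquire(HEAP_GPU_LOCAL, out);

    return e;
}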
*/ - eError = PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_GPU_LOCAL, psNew->psDevNode, &psNew->psPhysHeap); - } - PVR_LOG_GOTO_IF_ERROR(eError, "DCRegisterDevice: DISPLAY heap not found", FailPhysHeap); - - /* Init state required for system surface */ - psNew->hSystemBuffer = NULL; - psNew->psSystemBufferPMR = NULL; - psNew->sSystemContext.psDevice = psNew; - psNew->sSystemContext.hDisplayContext = hDeviceData; - - OSLockAcquire(g_hDCDevListLock); - dllist_add_to_tail(&g_sDCDeviceListHead, &psNew->sListNode); - OSLockRelease(g_hDCDevListLock); - - OSLockAcquire(g_hDCListLock); - g_ui32DCDeviceCount++; - OSLockRelease(g_hDCListLock); - - *phSrvHandle = (IMG_HANDLE) psNew; - - return PVRSRV_OK; - -FailPhysHeap: - OSEventObjectDestroy(psNew->psEventList); -FailEventObject: - OSFreeMem(psNew); -FailAlloc: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; -} - -void DCUnregisterDevice(IMG_HANDLE hSrvHandle) -{ - DC_DEVICE *psDevice = (DC_DEVICE *) hSrvHandle; - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - PVRSRV_ERROR eError; - IMG_INT iRefCount; - - /* - If the system buffer was acquired and a PMR created for it, release - it before releasing the device as the PMR will have a reference to - the device - */ - if (psDevice->psSystemBufferPMR) - { - PMRUnrefPMR(psDevice->psSystemBufferPMR); - } - - /* - * At this stage the DC driver wants to unload, if other things have - * reference to the DC device we need to block here until they have - * been release as when this function returns the DC driver code could - * be unloaded. - */ - - iRefCount = OSAtomicRead(&psDevice->sRefCount); - if (iRefCount != 1) - { - /* If the driver is in a bad state we just free resources regardless */ - if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) - { - IMG_HANDLE hEvent; - - eError = OSEventObjectOpen(psDevice->psEventList, &hEvent); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to open event object (%d), will busy wait", - __func__, eError)); - hEvent = NULL; - } - - while (OSAtomicRead(&psDevice->sRefCount) != 1) - { - if (hEvent) - { - OSEventObjectWait(hEvent); - } - } - - OSEventObjectClose(hEvent); - } - else - { - PVR_DPF((PVR_DBG_ERROR, "%s: Services is in a bad state. Force unregister DC device.", - __func__)); - } - } - - _DCDeviceReleaseRef(psDevice); - - if (OSAtomicRead(&psDevice->sRefCount) == 0) - { - PhysHeapRelease(psDevice->psPhysHeap); - OSEventObjectDestroy(psDevice->psEventList); - OSFreeMem(psDevice); - } - else - { - /* there are stuck references so it is not safe to free the memory */ - PVR_DPF((PVR_DBG_ERROR, "%s: DC device has stuck references. 
Not freeing memory.", __func__)); - } -} - -void DCDisplayConfigurationRetired(IMG_HANDLE hConfigData) -{ - DC_CMD_COMP_DATA *psData = hConfigData; - DC_DISPLAY_CONTEXT *psDisplayContext = psData->psDisplayContext; - IMG_UINT32 i; - - DC_DEBUG_PRINT("DCDisplayConfigurationRetired: Command (%d) received", psData->ui32Token); - /* Check the config is as expected */ - if (!psData->bDirectNullFlip && psData->ui32Token != psDisplayContext->ui32TokenIn) - { - PVR_DPF((PVR_DBG_ERROR, - "Display config retired in unexpected order (was %d, expecting %d)", - psData->ui32Token, psDisplayContext->ui32TokenIn)); - PVR_ASSERT(IMG_FALSE); - } - - OSLockAcquire(psDisplayContext->hLock); - if ( !psData->bDirectNullFlip ) - { - psDisplayContext->ui32TokenIn++; - } - -#if defined(SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG) - if (psDisplayContext->hTimer) - { - OSDisableTimer(psDisplayContext->hTimer); - OSRemoveTimer(psDisplayContext->hTimer); - psDisplayContext->hTimer = NULL; - } -#endif /* SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG */ - - psDisplayContext->ui32ConfigsInFlight--; - OSLockRelease(psDisplayContext->hLock); - - for (i = 0; i < psData->ui32BufferCount; i++) - { - _DCBufferUnmap(psData->apsBuffer[i]); - } - - _DCDisplayContextReleaseRef(psDisplayContext); - - /* - Note: - - We must call SCPCommandComplete here and not before as we need - to ensure that we're not the last to hold the reference as - we can't destroy the display context from the MISR which we - can be called from. - - Ignore any fence checks if doing a null flip (e.g. when trying to unblock - stalled applications). - */ - SCPCommandComplete(psDisplayContext->psSCPContext, psData->bDirectNullFlip); - - /* Notify devices (including ourself) in case some item has been unblocked */ - PVRSRVCheckStatus(NULL); -} - -IMG_BOOL DCDisplayHasPendingCommand(IMG_HANDLE hConfigData) -{ - DC_CMD_COMP_DATA *psData = hConfigData; - DC_DISPLAY_CONTEXT *psDisplayContext = psData->psDisplayContext; - IMG_BOOL bRet; - - _DCDisplayContextAcquireRef(psDisplayContext); - bRet = SCPHasPendingCommand(psDisplayContext->psSCPContext); - _DCDisplayContextReleaseRef(psDisplayContext); - - return bRet; -} - -PVRSRV_ERROR DCImportBufferAcquire(IMG_HANDLE hImport, - IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, - IMG_UINT32 *pui32PageCount, - IMG_DEV_PHYADDR **ppasDevPAddr) -{ - PMR *psPMR = hImport; - IMG_DEV_PHYADDR *pasDevPAddr; - IMG_DEVMEM_SIZE_T uiLogicalSize; - size_t uiPageCount; - IMG_BOOL *pbValid; - PVRSRV_ERROR eError; -#if defined(DEBUG) - IMG_UINT32 i; -#endif - - PMR_LogicalSize(psPMR, &uiLogicalSize); - - uiPageCount = TRUNCATE_64BITS_TO_SIZE_T(uiLogicalSize >> uiLog2PageSize); - - pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) * uiPageCount); - PVR_GOTO_IF_NOMEM(pasDevPAddr, eError, e0); - - pbValid = OSAllocMem(uiPageCount * sizeof(IMG_BOOL)); - PVR_GOTO_IF_NOMEM(pbValid, eError, e1); - - /* Lock the pages */ - eError = PMRLockSysPhysAddresses(psPMR); - PVR_GOTO_IF_ERROR(eError, e2); - - /* Get page physical addresses */ - eError = PMR_DevPhysAddr(psPMR, uiLog2PageSize, uiPageCount, 0, - pasDevPAddr, pbValid); - PVR_GOTO_IF_ERROR(eError, e3); - -#if defined(DEBUG) - /* The DC import function doesn't support - sparse allocations */ - for (i=0; ihDisplayContext; -} - -IMG_UINT32 DCDeviceGetIndex(IMG_HANDLE hDeviceData) -{ - IMG_UINT32 ui32Index = 0; - DLLIST_NODE *psNode, *psNext; - - OSLockAcquire(g_hDCDevListLock); - dllist_foreach_node(&g_sDCDeviceListHead, psNode, psNext) - { - DC_DEVICE *psDevice = IMG_CONTAINER_OF(psNode, DC_DEVICE, sListNode); - if 
(psDevice->hDeviceData == hDeviceData) - { - ui32Index = psDevice->ui32Index; - break; - } - } - OSLockRelease(g_hDCDevListLock); - - return ui32Index; -} - -IMG_HANDLE DCDeviceGetDeviceAtIndex(IMG_UINT32 ui32DeviceIndex) -{ - IMG_HANDLE hDeviceData = NULL; - DLLIST_NODE *psNode, *psNext; - - OSLockAcquire(g_hDCDevListLock); - dllist_foreach_node(&g_sDCDeviceListHead, psNode, psNext) - { - DC_DEVICE *psDevice = IMG_CONTAINER_OF(psNode, DC_DEVICE, sListNode); - - if (psDevice->ui32Index == ui32DeviceIndex) - { - hDeviceData = psDevice->hDeviceData; - break; - } - } - OSLockRelease(g_hDCDevListLock); - - return hDeviceData; -} - -#endif - -/***************************************************************************** - * Public interface functions for services * - *****************************************************************************/ - -PVRSRV_ERROR DCResetDevice(DC_DEVICE *psDevice) -{ - PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED; - if (psDevice->psFuncTable->pfnResetDevice != NULL) - { - eError = psDevice->psFuncTable->pfnResetDevice(psDevice->hDeviceData); - } - - return eError; -} - -PVRSRV_ERROR DCInit(void) -{ - PVRSRV_ERROR eError; - - g_ui32DCNextIndex = 0; - dllist_init(&g_sDisplayContextsListHead); - dllist_init(&g_sDCDeviceListHead); - - eError = OSLockCreate(&g_hDCListLock); - PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", err_out); - - eError = OSLockCreate(&g_hDCDisplayContextsListLock); - PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:2", err_hDCDisplayContextsListLock); - - eError = OSLockCreate(&g_hDCDevListLock); - PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:3", err_hDCDevListLock); - - return PVRSRV_OK; - -err_hDCDevListLock: - OSLockDestroy(g_hDCDisplayContextsListLock); -err_hDCDisplayContextsListLock: - OSLockDestroy(g_hDCListLock); -err_out: - return eError; -} - -PVRSRV_ERROR DCDeInit(void) -{ - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - - if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) - { - PVR_ASSERT(dllist_is_empty(&g_sDCDeviceListHead)); - } - - OSLockDestroy(g_hDCDevListLock); - OSLockDestroy(g_hDCDisplayContextsListLock); - OSLockDestroy(g_hDCListLock); - - return PVRSRV_OK; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/debug_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/debug_common.c index ab2127c2f0c9..4499ed6de993 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/debug_common.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/debug_common.c @@ -53,10 +53,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_options.h" #include "allocmem.h" #include "rgxfwutils.h" +#include "rgxfwriscv.h" +#include "osfunc.h" +#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +#include "rgxfwdbg.h" +#endif #ifdef SUPPORT_RGX #include "rgxdevice.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgxinit.h" #include "rgxmmudefs_km.h" #endif @@ -64,13 +69,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
static DI_ENTRY *gpsVersionDIEntry; static DI_ENTRY *gpsStatusDIEntry; -#ifdef SUPPORT_VALIDATION -static DI_ENTRY *gpsTestMemLeakDIEntry; -#endif /* SUPPORT_VALIDATION */ #if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) static DI_ENTRY *gpsDebugLevelDIEntry; #endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ +#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +struct DI_VZ_DATA { + PVRSRV_DEVICE_NODE *psDevNode; + IMG_UINT32 ui32DriverID; +}; +#endif + static void _DumpDebugDIPrintfWrapper(void *pvDumpDebugFile, const IMG_CHAR *pszFormat, ...) { IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; @@ -157,7 +166,7 @@ static void *_VersionDINext(OSDI_IMPL_ENTRY *psEntry,void *pvPriv, #define STR_DEBUG "debug" #define STR_RELEASE "release" -#if defined(DEBUG) || defined(SUPPORT_VALIDATION) +#if defined(DEBUG) #define BUILD_OPT_LEN 80 static inline void _AppendOptionStr(IMG_CHAR pszBuildOptions[], const IMG_CHAR* str, OSDI_IMPL_ENTRY *psEntry, IMG_UINT32* pui32BuildOptionLen) @@ -174,7 +183,7 @@ static inline void _AppendOptionStr(IMG_CHAR pszBuildOptions[], const IMG_CHAR* } if (strLen < optStrLen) { - OSStringLCopy(pszBuildOptions+ui32BuildOptionLen, str, strLen); + OSStringSafeCopy(pszBuildOptions+ui32BuildOptionLen, str, strLen); ui32BuildOptionLen += strLen - 1; } *pui32BuildOptionLen = ui32BuildOptionLen; @@ -234,7 +243,7 @@ static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; #ifdef SUPPORT_RGX PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -#if defined(DEBUG) || defined(SUPPORT_VALIDATION) +#if defined(DEBUG) IMG_CHAR pszBuildOptions[BUILD_OPT_LEN]; IMG_UINT32 ui32BuildOptionLen = 0; static const char* aszOptions[] = RGX_BUILD_OPTIONS_LIST; @@ -245,7 +254,7 @@ static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName); DIPrintf(psEntry, "Device ID: %u:%d\n", psDevNode->sDevId.ui32InternalID, - psDevNode->sDevId.i32OsDeviceID); + psDevNode->sDevId.i32KernelDeviceID); if (psDevConfig->pszVersion) { @@ -266,56 +275,96 @@ static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) } } -#ifdef SUPPORT_RGX - /* print device's firmware version info */ - if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) { - /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */ - if (psDevInfo->psRGXFWIfOsInit != NULL) +#ifdef SUPPORT_RGX + /* print device's firmware version info */ + if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL) { - if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */ + if (psDevInfo->psRGXFWIfOsInit != NULL) { - const RGXFWIF_COMPCHECKS *psRGXCompChecks = - &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks; - IMG_UINT32 ui32DDKVer = psRGXCompChecks->ui32DDKVersion; - - DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, - "Firmware", - PVRVERSION_UNPACK_MAJ(ui32DDKVer), - PVRVERSION_UNPACK_MIN(ui32DDKVer), - psRGXCompChecks->ui32DDKBuild, - ((psRGXCompChecks->ui32BuildOptions & - OPTIONS_DEBUG_EN) ? 
STR_DEBUG : STR_RELEASE), - psRGXCompChecks->ui32BuildOptions, - PVR_BUILD_DIR); - bFwVersionInfoPrinted = IMG_TRUE; - -#if defined(DEBUG) || defined(SUPPORT_VALIDATION) - DIPrintf(psEntry, "Firmware Build Options:\n"); - - for (i = 0; i < ARRAY_SIZE(aszOptions); i++) + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, + INVALIDATE); + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) { - if ((psRGXCompChecks->ui32BuildOptions & 1<psRGXFWIfOsInit->sRGXCompChecks; + IMG_UINT32 ui32DDKVer = psRGXCompChecks->ui32DDKVersion; + + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "Firmware", + PVRVERSION_UNPACK_MAJ(ui32DDKVer), + PVRVERSION_UNPACK_MIN(ui32DDKVer), + psRGXCompChecks->ui32DDKBuild, + ((psRGXCompChecks->ui32BuildOptions & + OPTIONS_DEBUG_EN) ? STR_DEBUG : STR_RELEASE), + psRGXCompChecks->ui32BuildOptions, + PVR_BUILD_DIR); + bFwVersionInfoPrinted = IMG_TRUE; + +#if defined(DEBUG) + DIPrintf(psEntry, "Firmware Build Options:\n"); + + for (i = 0; i < ARRAY_SIZE(aszOptions); i++) { - _AppendOptionStr(pszBuildOptions, aszOptions[i], psEntry, &ui32BuildOptionLen); + if ((psRGXCompChecks->ui32BuildOptions & 1<sFWInfoHeader; + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "Firmware", + psFWInfoHeader->ui16PVRVersionMajor, + psFWInfoHeader->ui16PVRVersionMinor, + psFWInfoHeader->ui32PVRVersionBuild, + ((psFWInfoHeader->ui32Flags & + OPTIONS_DEBUG_EN) ? STR_DEBUG : STR_RELEASE), + psFWInfoHeader->ui32Flags, + PVR_BUILD_DIR); + + bFwVersionInfoPrinted = IMG_TRUE; +#if defined(DEBUG) + DIPrintf(psEntry, "Firmware Build Options:\n"); + + for (i = 0; i < ARRAY_SIZE(aszOptions); i++) + { + if ((psFWInfoHeader->ui32Flags & 1<pvDevice, RGXFWIF_DM_GP, &sCounterDumpCmd, - 0, PDUMP_FLAGS_CONTINUOUS, pui32kCCBCommandSlot); PVR_LOG_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); @@ -367,7 +415,7 @@ static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; IMG_UINT32 ui32kCCBCommandSlot; - PVRSRV_ERROR eError = PVRSRV_OK; + int eError = 0; PVR_UNREFERENCED_PARAMETER(pvData); @@ -404,9 +452,16 @@ static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) /* Read back the buffer */ { IMG_UINT32* pui32PowerBuffer; - IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod; + IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod, ui32NumOfCores; IMG_UINT32 i, j; + if (!psDevInfo->psCounterBufferMemDesc) + { + PVR_DPF((PVR_DBG_ERROR, "Counter buffer not allocated!")); + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EINVAL; + } + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc, (void**)&pui32PowerBuffer); if (eError != PVRSRV_OK) @@ -416,10 +471,20 @@ static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) return -EIO; } + RGXFwSharedMemCacheOpExec(pui32PowerBuffer, PAGE_SIZE, PVRSRV_CACHE_OP_INVALIDATE); + ui32NumOfRegs = *pui32PowerBuffer++; ui32SamplePeriod = *pui32PowerBuffer++; + ui32NumOfCores = *pui32PowerBuffer++; + PVR_DPF((PVR_DBG_MESSAGE, "Number of power counters: %u.", ui32NumOfRegs)); + + if (ui32NumOfCores == 0) + { + PVR_DPF((PVR_DBG_ERROR, "No GPU cores enabled!")); + eError = -EINVAL; + } - if (ui32NumOfRegs) + if (ui32NumOfRegs && ui32NumOfCores) { DIPrintf(psEntry, "Power counter data for device\n"); DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod); @@ -434,13 +499,24 @@ static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) DIPrintf(psEntry, "0x%08x:", 
ui32RegOffset); - for (j = 0; j < ui32NumOfInstances; j++) + for (j = 0; j < ui32NumOfInstances * ui32NumOfCores; j++) { ui32Low = *pui32PowerBuffer++; - ui32High = *pui32PowerBuffer++; +#if defined(RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CATURIX_XTP_TOP_INFRASTRUCTURE)) + { + /* Power counters have 32-bit range */ + DIPrintf(psEntry, " 0x%08x", ui32Low); + } + else +#endif + { + /* Power counters have 64-bit range */ + ui32High = *pui32PowerBuffer++; - DIPrintf(psEntry, " 0x%016llx", - (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32); + DIPrintf(psEntry, " 0x%016" IMG_UINT64_FMTSPECx, + (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32); + } } DIPrintf(psEntry, "\n"); @@ -565,6 +641,50 @@ static void *_DebugStatusDINext(OSDI_IMPL_ENTRY *psEntry, *pui64Pos); OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); +#ifdef SUPPORT_RGX + if (psDeviceNode && !PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDevInfo && psDevInfo->pfnGetGpuUtilStats) + { + PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); + + if (eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) + { + PVRSRV_ERROR eError; +#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM) + static IMG_BOOL bFirstTime = IMG_TRUE; +#endif + + OSLockAcquire(psDevInfo->hGpuUtilStatsLock); + + eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, + psDebugInfo->hGpuUtilUserDebugFS, + &psDevInfo->sGpuUtilStats); + + OSLockRelease(psDevInfo->hGpuUtilStatsLock); + + if (eError != PVRSRV_OK) + { +#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM) + if (bFirstTime) + { + bFirstTime = IMG_FALSE; +#endif /* defined(EMULATOR) || defined(VIRTUAL_PLATFORM) */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get GPU statistics (%s)", + __func__, PVRSRVGetErrorString(eError))); +#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM) + } +#endif /* defined(EMULATOR) || defined(VIRTUAL_PLATFORM) */ + } + } + } + } +#endif + return psDeviceNode; } @@ -603,7 +723,7 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) PVRSRV_DEVICE_HEALTH_REASON eHealthReason; DIPrintf(psEntry, "\nDevice ID: %u:%d\n", psDeviceNode->sDevId.ui32InternalID, - psDeviceNode->sDevId.i32OsDeviceID); + psDeviceNode->sDevId.i32KernelDeviceID); /* Update the health status now if possible... 
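The power-counter dump above walks a buffer of 32-bit words and, for counters with 64-bit range, stitches two consecutive words back together with (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32. A tiny self-contained example of that reconstruction; the buffer layout here is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit counter from two consecutive 32-bit words,
 * low word first, as the dump loop does. */
static uint64_t read_counter64(const uint32_t **cursor)
{
    uint32_t lo = *(*cursor)++;
    uint32_t hi = *(*cursor)++;
    return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
    /* Invented layout: two 64-bit counters packed as lo/hi word pairs. */
    const uint32_t buf[] = { 0xDEADBEEF, 0x00000001, 0x00000000, 0x00000002 };
    const uint32_t *p = buf;

    printf("0x%016llx\n", (unsigned long long)read_counter64(&p));
    printf("0x%016llx\n", (unsigned long long)read_counter64(&p));
    return 0;
}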
*/ if (psDeviceNode->pfnUpdateHealthStatus) @@ -634,6 +754,7 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " (Idling)"; break; case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " (Restarting)"; break; case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " (Missing interrupts)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR: pszReason = " (PCI error)"; break; default: pszReason = " (Unknown reason)"; break; } @@ -653,13 +774,19 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) { #ifdef SUPPORT_RGX PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGXFWIF_HWRINFOBUF *psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + const RGXFWIF_HWRINFOBUF *psHWRInfoBuf; + const RGXFWIF_SYSDATA *psFwSysData; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, INVALIDATE); + psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; #ifdef PVRSRV_DEBUG_LISR_EXECUTION /* Show the detected #LISR, #MISR scheduled calls */ - DIPrintf(psEntry, "RGX #LISR: %llu\n", psDeviceNode->ui64nLISR); - DIPrintf(psEntry, "RGX #MISR: %llu\n", psDeviceNode->ui64nMISR); + DIPrintf(psEntry, "RGX #LISR: %" IMG_UINT64_FMTSPEC "\n", psDeviceNode->ui64nLISR); + DIPrintf(psEntry, "RGX #MISR: %" IMG_UINT64_FMTSPEC "\n", psDeviceNode->ui64nMISR); #endif /* PVRSRV_DEBUG_LISR_EXECUTION */ /* Calculate the number of HWR events in total across all the DMs... */ @@ -679,6 +806,8 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) DIPrintf(psEntry, "CRR Event Count: %d\n", ui32CRREventCount); #ifdef PVRSRV_STALLED_CCB_ACTION /* Write the number of Sync Lockup Recovery (SLR) events... */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested, + INVALIDATE); DIPrintf(psEntry, "SLR Event Count: %d\n", psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested); #endif /* PVRSRV_STALLED_CCB_ACTION */ } @@ -694,7 +823,7 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) * - Perform actual on-chip GPU power/dvfs management. * - As a result no more information can be provided. 
*/ - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { if (psFwSysData != NULL) { @@ -708,57 +837,46 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) if (psDevInfo->pfnGetGpuUtilStats && eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) { - PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; - RGXFWIF_GPU_UTIL_STATS *psGpuUtilStats = OSAllocMem(sizeof(*psGpuUtilStats)); - PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_STATS *psGpuUtilStats = &psDevInfo->sGpuUtilStats; - if (psGpuUtilStats == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate GPU stats memory", __func__)); - goto return_; - } + OSLockAcquire(psDevInfo->hGpuUtilStatsLock); - eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, - psDebugInfo->hGpuUtilUserDebugFS, - psGpuUtilStats); - - if ((eError == PVRSRV_OK) && - ((IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative)) + if ((IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative) { const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "GEOM", "3D", "CDM", "RAY", "GEOM2", "GEOM3", "GEOM4"}; IMG_UINT64 util; IMG_UINT32 rem; - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; RGXFWIF_DM eDM; + IMG_INT iDM_Util = 0; -#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) { apszDmNames[RGXFWIF_DM_TDM] = "2D"; } -#endif util = 100 * psGpuUtilStats->ui64GpuStatActive; util = OSDivide64(util, (IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative, &rem); DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util); - DIPrintf(psEntry, " "); + DIPrintf(psEntry, "DM Utilisation:"); - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - DIPrintf(psEntry, " VM%u", ui32OSid); + DIPrintf(psEntry, " VM%u", ui32DriverID); } DIPrintf(psEntry, "\n"); - for (eDM = RGXFWIF_DM_TDM; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) + for (eDM = RGXFWIF_DM_TDM; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++,iDM_Util++) { - DIPrintf(psEntry, "%-5s Utilisation: ", apszDmNames[eDM]); + DIPrintf(psEntry, " %5s: ", apszDmNames[eDM]); - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - IMG_UINT32 uiDivisor = (IMG_UINT32)psGpuUtilStats->aaui64DMOSStatCumulative[eDM][ui32OSid]; + IMG_UINT32 uiDivisor = (IMG_UINT32)psGpuUtilStats->aaui64DMOSStatCumulative[iDM_Util][ui32DriverID]; if (uiDivisor == 0U) { @@ -766,7 +884,7 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) continue; } - util = 100 * psGpuUtilStats->aaui64DMOSStatActive[eDM][ui32OSid]; + util = 100 * psGpuUtilStats->aaui64DMOSStatActive[iDM_Util][ui32DriverID]; util = OSDivide64(util, uiDivisor, &rem); DIPrintf(psEntry, "%3u%% ", (IMG_UINT32)util); @@ -781,19 +899,18 @@ static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) DIPrintf(psEntry, "GPU Utilisation: -\n"); } - OSFreeMem(psGpuUtilStats); + OSLockRelease(psDevInfo->hGpuUtilStatsLock); + } } #endif /* SUPPORT_RGX */ } } -#ifdef SUPPORT_RGX -return_: -#endif return 0; } +#if defined(DEBUG) static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, void *pvData) { @@ -810,6 +927,57 @@ static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, *pui64Pos += ui64Count; return ui64Count; } +#endif + +const IMG_CHAR *PVRSRVGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) +{ 
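The state/health/reason lookup helpers added below derive their string tables from X-macro lists, so the enum values and their printed names cannot drift apart. A self-contained illustration of the technique with an invented state list (not the PVRSRV lists themselves):

#include <stdio.h>

/* One list drives both the enum and the string table. */
#define STATE_LIST \
    X(STATE_INIT)  \
    X(STATE_ACTIVE)\
    X(STATE_BAD)

enum state {
#define X(_name) _name,
    STATE_LIST
#undef X
    STATE_LAST
};

static const char *const state_names[] = {
#define X(_name) #_name,
    STATE_LIST
#undef X
};

static const char *state_to_string(enum state s)
{
    if (s < 0 || s >= STATE_LAST)
        return "Undefined";        /* out-of-range values get a fixed name */
    return state_names[s];
}

int main(void)
{
    printf("%s\n", state_to_string(STATE_ACTIVE));   /* prints STATE_ACTIVE */
    printf("%s\n", state_to_string((enum state)42)); /* prints Undefined */
    return 0;
}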
+ static const char *const _pszDeviceStateStrings[] = { + #define X(_name) #_name, + PVRSRV_DEVICE_STATE_LIST + #undef X + }; + + if (eDevState < 0 || eDevState >= PVRSRV_DEVICE_STATE_LAST) + { + return "Undefined"; + } + + return _pszDeviceStateStrings[eDevState]; +} + + +const IMG_CHAR *PVRSRVGetDebugHealthStatusString(PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus) +{ + static const char *const _pszDeviceHealthStatusStrings[] = { + #define X(_name) #_name, + PVRSRV_DEVICE_HEALTH_STATUS_LIST + #undef X + }; + + if (eHealthStatus < 0 || eHealthStatus >= PVRSRV_DEVICE_HEALTH_STATUS_LAST) + { + return "Undefined"; + } + + return _pszDeviceHealthStatusStrings[eHealthStatus]; +} + + +const IMG_CHAR *PVRSRVGetDebugHealthReasonString(PVRSRV_DEVICE_HEALTH_REASON eHealthReason) +{ + static const char *const _pszDeviceHealthReasonStrings[] = { + #define X(_name) #_name, + PVRSRV_DEVICE_HEALTH_REASON_LIST + #undef X + }; + + if (eHealthReason < 0 || eHealthReason >= PVRSRV_DEVICE_HEALTH_REASON_LAST) + { + return "Undefined"; + } + + return _pszDeviceHealthReasonStrings[eHealthReason]; +} /*************************************************************************/ /*! Dump Debug DebugFS entry @@ -841,6 +1009,8 @@ static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(pvData); + if (psDevInfo != NULL) { RGXDumpFirmwareTrace(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo); @@ -853,58 +1023,16 @@ static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) Firmware Translated Page Tables DebugFS entry */ /**************************************************************************/ -static void _DocumentFwMapping(OSDI_IMPL_ENTRY *psEntry, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32FwVA, - IMG_CPU_PHYADDR sCpuPA, - IMG_DEV_PHYADDR sDevPA, - IMG_UINT64 ui64PTE) -{ -#if defined(RGX_FEATURE_MIPS_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - DIPrintf(psEntry, "| 0x%08X | " - "0x%016" IMG_UINT64_FMTSPECX " | " - "0x%016" IMG_UINT64_FMTSPECX " | " - "%s%s%s |\n", - ui32FwVA, - (IMG_UINT64) sCpuPA.uiAddr, - sDevPA.uiAddr, - gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(ui64PTE)], - gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(ui64PTE)], - gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(ui64PTE)]); - } - else -#endif - { - /* META and RISCV use a subset of the GPU's virtual address space */ - DIPrintf(psEntry, "| 0x%08X | " - "0x%016" IMG_UINT64_FMTSPECX " | " - "0x%016" IMG_UINT64_FMTSPECX " | " - "%s%s%s%s%s%s |\n", - ui32FwVA, - (IMG_UINT64) sCpuPA.uiAddr, - sDevPA.uiAddr, - BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN) ? "P" : " ", - BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_PM_SRC_EN) ? "PM" : " ", -#if defined(RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) - BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) ? "B" : " ", -#else - " ", -#endif - BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_CC_EN) ? "C" : " ", - BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_READ_ONLY_EN) ? "RO" : "RW", - BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_VALID_EN) ? 
"V" : " "); - } -} - +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) { PVRSRV_DEVICE_NODE *psDeviceNode; PVRSRV_RGXDEV_INFO *psDevInfo; IMG_UINT32 ui32FwVA; IMG_UINT32 ui32FwPageSize; - IMG_UINT32 ui32OSID; + IMG_UINT32 ui32DriverID; + + PVR_UNREFERENCED_PARAMETER(pvData); psDeviceNode = DIGetPrivData(psEntry); @@ -950,14 +1078,14 @@ static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); } - for (ui32OSID = 0; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) ((RGX_FIRMWARE_RAW_HEAP_BASE + - (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX); + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX); IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_SIZE & UINT_MAX); DIPrintf(psEntry, "| OS ID %u |\n" - "+-----------------+------------------------+------------------------+--------------+\n", ui32OSID); + "+-----------------+------------------------+------------------------+--------------+\n", ui32DriverID); for (ui32FwVA = ui32FwHeapBase; ui32FwVA < ui32FwHeapEnd; @@ -972,7 +1100,8 @@ static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) if (eError == PVRSRV_OK) { - _DocumentFwMapping(psEntry, psDevInfo, ui32FwVA, sCpuPA, sDevPA, ui64PTE); + RGXDocumentFwMapping(psDevInfo, _DumpDebugDIPrintfWrapper, psEntry, + ui32FwVA, sCpuPA, sDevPA, ui64PTE); } else if (eError != PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) { @@ -983,14 +1112,17 @@ static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n"); - if (PVRSRV_VZ_MODE_IS(NATIVE)) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) { break; } +#endif } return 0; } +#endif #ifdef SUPPORT_FIRMWARE_GCOV @@ -1052,336 +1184,368 @@ static int _FirmwareGcovDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) #endif /* SUPPORT_FIRMWARE_GCOV */ -#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS -/*************************************************************************/ /*! 
- Power monitoring DebugFS entry -*/ /**************************************************************************/ +#if defined(SUPPORT_RISCV_GDB) +#define RISCV_DMI_SIZE (8U) -static int _PowMonTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +static IMG_INT64 _RiscvDmiRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) { - PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; - PVR_UNREFERENCED_PARAMETER(pvData); + ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); + memcpy(pcBuffer, &psDebugInfo->ui64RiscvDmi, ui64Count); - if (psDevInfo != NULL) + return ui64Count; +} + +static IMG_INT64 _RiscvDmiWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; + + if (psDevInfo == NULL) { - RGXDumpPowerMonitoring(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo); + PVR_DPF((PVR_DBG_ERROR, "%s: devinfo is NULL", __func__)); + return 0; } - return 0; -} + ui64Count -= 1; /* Drop `\0` */ + ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); -#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ + memcpy(&psDebugInfo->ui64RiscvDmi, pcBuffer, ui64Count); -#ifdef SUPPORT_VALIDATION + RGXRiscvDmiOp(psDevInfo, &psDebugInfo->ui64RiscvDmi); -#ifndef SYS_RGX_DEV_UNMAPPED_FW_REG -#define SYS_RGX_DEV_UNMAPPED_FW_REG 0XFFFFFFFF + return ui64Count; +} #endif -#define DI_RGXREGS_TIMEOUT_MS 1000 + +#endif /* SUPPORT_RGX */ + + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) /*************************************************************************/ /*! 
- RGX Registers Dump DebugFS entry + Debug level DebugFS entry */ /**************************************************************************/ -static IMG_INT64 _RgxRegsSeek(IMG_UINT64 ui64Offset, void *pvData) +static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) { - PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; - PVRSRV_RGXDEV_INFO *psDevInfo; + DIPrintf(psEntry, "%u\n", OSDebugLevel()); - PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -1); + return 0; +} - psDevInfo = psDeviceNode->pvDevice; +#ifndef __GNUC__ +static int __builtin_ffsl(long int x) +{ + for (size_t i = 0; i < sizeof(x) * 8; i++) + { + if (x & (1 << i)) + { + return i + 1; + } + } + return 0; +} +#endif /* __GNUC__ */ + +static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + const IMG_UINT uiMaxBufferSize = 6; + IMG_UINT32 ui32Level; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + if (sscanf(pcBuffer, "%u", &ui32Level) == 0) + { + return -EINVAL; + } - PVR_LOG_RETURN_IF_FALSE(ui64Offset <= (psDevInfo->ui32RegSize - 4), - "register offset is too big", -1); + OSSetDebugLevel(ui32Level & ((1 << __builtin_ffsl(DBGPRIV_LAST)) - 1)); - return ui64Offset; + *pui64Pos += ui64Count; + return ui64Count; } +#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ -static IMG_INT64 _RgxRegsRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, - IMG_UINT64 *pui64Pos, void *pvData) +#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +static int VZPriorityDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) { - PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT64 ui64RegVal = 0; + DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); PVRSRV_RGXDEV_INFO *psDevInfo; - IMG_UINT64 ui64CompRes; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg; + IMG_UINT32 ui32DriverID; - PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO); - PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, - "wrong RGX register size", -EIO); - PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), - "register read offset isn't aligned", -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); - psDevInfo = psDeviceNode->pvDevice; + psDevInfo = psVZDriverData->psDevNode->pvDevice; + PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); - if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) - { - if (!psDevInfo->bFirmwareInitialised) - { - PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " - "Firmware isn't yet initialised\n")); - return -EIO; - } + psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); - reinit_completion(&psDevInfo->sFwRegs.sRegComp); + ui32DriverID = psVZDriverData->ui32DriverID; + PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED), + -EINVAL); - eError = RGXScheduleRgxRegCommand(psDevInfo, - 0x00, - ui64Count, - (IMG_UINT32) *pui64Pos, - IMG_FALSE); + RGXFwSharedMemCacheOpValue(psRuntimeCfg->ai32DriverPriority[ui32DriverID], INVALIDATE); + DIPrintf(psEntry, "%u\n", psRuntimeCfg->ai32DriverPriority[ui32DriverID]); - if (eError != PVRSRV_OK) - { - 
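DebugLevelSet above treats the user write as untrusted input: it rejects empty or oversized writes, requires the buffer to be NUL-terminated at count-1, and only then parses the integer. A compact sketch of the same validation order, using plain errno-style returns instead of the PVR_RETURN_IF_FALSE macros:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Parse an unsigned level from a write of `count` bytes into `buf`.
 * Returns 0 and stores the value, or a negative errno on bad input. */
static int parse_level(const char *buf, size_t count, size_t max, uint32_t *out)
{
    unsigned value;

    if (buf == NULL)
        return -EIO;
    if (count == 0 || count >= max)
        return -EINVAL;                 /* empty or oversized write */
    if (buf[count - 1] != '\0')
        return -EINVAL;                 /* must arrive NUL-terminated */
    if (sscanf(buf, "%u", &value) != 1)
        return -EINVAL;                 /* not a number */

    *out = value;
    return 0;
}

int main(void)
{
    uint32_t level;
    const char input[] = "5";           /* includes the trailing '\0' */

    if (parse_level(input, sizeof(input), 6, &level) == 0)
        printf("level = %u\n", level);
    return 0;
}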
PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); - return -EIO; - } + return 0; +} - ui64CompRes = wait_for_completion_timeout(&psDevInfo->sFwRegs.sRegComp, - msecs_to_jiffies(DI_RGXREGS_TIMEOUT_MS)); - if (!ui64CompRes) - { - PVR_DPF((PVR_DBG_ERROR, "FW RGX Register access timeout %#x\n", - (IMG_UINT32) *pui64Pos)); - return -EIO; - } +static IMG_INT64 VZPrioritySet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; + const IMG_UINT32 uiMaxBufferSize = 12; + IMG_UINT32 ui32Priority; + PVRSRV_ERROR eError; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); - OSCachedMemCopy(pcBuffer, &psDevInfo->sFwRegs.ui64RegVal, ui64Count); + if (OSStringToUINT32(pcBuffer, 10, &ui32Priority) != PVRSRV_OK) + { + return -EINVAL; } - else + + eError = PVRSRVRGXFWDebugSetDriverPriorityKM(NULL, psVZDriverData->psDevNode, + psVZDriverData->ui32DriverID, ui32Priority); + if (eError != PVRSRV_OK) { - ui64RegVal = ui64Count == 4 ? - OSReadHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos) : - OSReadHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos); - OSCachedMemCopy(pcBuffer, &ui64RegVal, ui64Count); + return -EIO; } + *pui64Pos += ui64Count; return ui64Count; } -static IMG_INT64 _RgxRegsWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, - IMG_UINT64 *pui64Pos, void *pvData) +static int VZTimeSliceIntervalDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) { - PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT64 ui64RegVal = 0; + DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg; - /* ignore the '\0' character */ - ui64Count -= 1; + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); - PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO); - PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, - "wrong RGX register size", -EIO); - PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), - "register read offset isn't aligned", -EINVAL); + psDevInfo = psVZDriverData->psDevNode->pvDevice; + PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); - psDevInfo = psDeviceNode->pvDevice; + psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); - if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) - { - if (!psDevInfo->bFirmwareInitialised) - { - PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " - "Firmware isn't yet initialised\n")); - return -EIO; - } + DIPrintf(psEntry, "%u ms (0: disable)\n", psRuntimeCfg->ui32TSIntervalMs); - if (ui64Count == 4) - ui64RegVal = (IMG_UINT64) *((IMG_UINT32 *) pcBuffer); - else - ui64RegVal = *((IMG_UINT64 *) pcBuffer); + return 0; +} - eError = RGXScheduleRgxRegCommand(psDevInfo, - ui64RegVal, - ui64Count, - (IMG_UINT32) *pui64Pos, - IMG_TRUE); - if (eError != PVRSRV_OK) - { - PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); - return -EIO; - } +static IMG_INT64 VZTimeSliceIntervalSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; + const IMG_UINT32 
uiMaxBufferSize = 12; + IMG_UINT32 ui32TSIntervalMs; + PVRSRV_ERROR eError; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); + if (OSStringToUINT32(pcBuffer, 10, &ui32TSIntervalMs) != PVRSRV_OK) + { + return -EINVAL; } - else + + eError = PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(NULL, psVZDriverData->psDevNode, + ui32TSIntervalMs); + if (eError != PVRSRV_OK) { - if (ui64Count == 4) - { - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos, - *((IMG_UINT32 *) (void *) pcBuffer)); - } - else - { - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos, - *((IMG_UINT64 *) (void *) pcBuffer)); - } + return -EIO; } + *pui64Pos += ui64Count; return ui64Count; } -#endif /* SUPPORT_VALIDATION */ +static int VZTimeSliceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg; + IMG_UINT32 ui32DriverID; -#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) -#define RISCV_DMI_SIZE (8U) + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -static IMG_INT64 _RiscvDmiRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, - IMG_UINT64 *pui64Pos, void *pvData) -{ - PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; - PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; + psDevInfo = psVZDriverData->psDevNode->pvDevice; + PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); - ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); - memcpy(pcBuffer, &psDebugInfo->ui64RiscvDmi, ui64Count); + psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); - return ui64Count; + ui32DriverID = psVZDriverData->ui32DriverID; + PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED), + -EINVAL); + + DIPrintf(psEntry, "%u (0: auto; 1%% to 100%%)\n", psRuntimeCfg->aui32TSPercentage[ui32DriverID]); + + return 0; } -static IMG_INT64 _RiscvDmiWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, - IMG_UINT64 *pui64Pos, void *pvData) +static IMG_INT64 VZTimeSliceSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) { - PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; + const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; + const IMG_UINT32 uiMaxBufferSize = 12; + IMG_UINT32 ui32TSPercentage; + PVRSRV_ERROR eError; - if (psDevInfo == NULL) + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); + + if (OSStringToUINT32(pcBuffer, 10, &ui32TSPercentage) != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: devinfo is NULL", __func__)); - return 0; + return -EINVAL; } - ui64Count -= 1; /* Drop `\0` */ - ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); - - memcpy(&psDebugInfo->ui64RiscvDmi, pcBuffer, ui64Count); - - RGXRiscvDmiOp(psDevInfo, &psDebugInfo->ui64RiscvDmi); + eError = 
PVRSRVRGXFWDebugSetDriverTimeSliceKM(NULL, psVZDriverData->psDevNode, + psVZDriverData->ui32DriverID, ui32TSPercentage); + if (eError != PVRSRV_OK) + { + return -EIO; + } + *pui64Pos += ui64Count; return ui64Count; } -#endif -#endif /* SUPPORT_RGX */ +static int VZIsolationGroupDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg; + IMG_UINT32 ui32DriverID; -#ifdef SUPPORT_VALIDATION + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -static int TestMemLeakDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -{ - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + psDevInfo = psVZDriverData->psDevNode->pvDevice; + PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); - PVR_UNREFERENCED_PARAMETER(pvData); + psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); - PVR_RETURN_IF_FALSE(pvData != NULL, -EINVAL); + ui32DriverID = psVZDriverData->ui32DriverID; + PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED), + -EINVAL); - DIPrintf(psEntry, "os: %s, %u\ngpu: %s, %u\nmmu: %s, %u\n", - psPVRSRVData->sMemLeakIntervals.ui32OSAlloc ? "enabled" : "disabled", - psPVRSRVData->sMemLeakIntervals.ui32OSAlloc, - psPVRSRVData->sMemLeakIntervals.ui32GPU ? "enabled" : "disabled", - psPVRSRVData->sMemLeakIntervals.ui32GPU, - psPVRSRVData->sMemLeakIntervals.ui32MMU ? "enabled" : "disabled", - psPVRSRVData->sMemLeakIntervals.ui32MMU); + RGXFwSharedMemCacheOpValue(psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID], INVALIDATE); + DIPrintf(psEntry, "%u\n", psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]); return 0; } -static IMG_INT64 TestMemLeakDISet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, - IMG_UINT64 *pui64Pos, void *pvData) +static IMG_INT64 VZIsolationGroupSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) { - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - IMG_CHAR *pcTemp; - unsigned long ui32MemLeakInterval; - - PVR_UNREFERENCED_PARAMETER(pvData); + const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; + const IMG_UINT32 uiMaxBufferSize = 12; + IMG_UINT32 ui32IsolationGroup; + PVRSRV_ERROR eError; PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); - PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); - PVR_RETURN_IF_FALSE(ui64Count <= 16, -EINVAL); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); - pcTemp = strchr(pcBuffer, ','); - - if (kstrtoul(pcTemp+1, 0, &ui32MemLeakInterval) != 0) + if (OSStringToUINT32(pcBuffer, 10, &ui32IsolationGroup) != PVRSRV_OK) { return -EINVAL; } - if (strncmp(pcBuffer, "os", pcTemp-pcBuffer) == 0) - { - psPVRSRVData->sMemLeakIntervals.ui32OSAlloc = ui32MemLeakInterval; - } - else if (strncmp(pcBuffer, "gpu", pcTemp-pcBuffer) == 0) - { - psPVRSRVData->sMemLeakIntervals.ui32GPU = ui32MemLeakInterval; - } - else if (strncmp(pcBuffer, "mmu", pcTemp-pcBuffer) == 0) - { - psPVRSRVData->sMemLeakIntervals.ui32MMU = ui32MemLeakInterval; - } - else + eError = PVRSRVRGXFWDebugSetDriverIsolationGroupKM(NULL, psVZDriverData->psDevNode, + psVZDriverData->ui32DriverID, ui32IsolationGroup); + if (eError != PVRSRV_OK) { - return -EINVAL; + return -EIO; } 
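The VZ*Set write handlers above all follow the same contract: reject a NULL buffer, require 0 < count < a small maximum, require the payload to be NUL terminated, parse a base-10 32-bit value, and advance *pui64Pos by the full write size on success. A standalone sketch of that contract using libc follows; parse_u32_write() is a hypothetical helper, and the driver's own checks go through PVR_RETURN_IF_FALSE and OSStringToUINT32 rather than strtoul.

/* Standalone sketch of the validate-then-parse contract shared by the
 * VZ*Set write handlers above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns 0 on success, a negative errno on failure, mirroring the checks
 * performed before the value is handed to the firmware debug call. */
static int parse_u32_write(const char *buf, uint64_t count, uint64_t max_size,
                           uint32_t *out)
{
    char *end = NULL;
    unsigned long val;

    if (buf == NULL)
        return -EIO;
    if (count == 0 || count >= max_size)
        return -EINVAL;
    if (buf[count - 1] != '\0')          /* writer must NUL terminate */
        return -EINVAL;

    errno = 0;
    val = strtoul(buf, &end, 10);
    if (errno != 0 || end == buf || *end != '\0' || val > UINT32_MAX)
        return -EINVAL;

    *out = (uint32_t)val;
    return 0;
}

int main(void)
{
    const char acWrite[] = "1024";       /* includes the terminating NUL */
    uint32_t ui32Value = 0;

    if (parse_u32_write(acWrite, sizeof(acWrite), 12, &ui32Value) == 0)
    {
        /* the real handler would now call the PVRSRVRGXFWDebugSet*KM entry
         * point and advance *pui64Pos by the full write size */
        printf("parsed %u\n", ui32Value);
    }
    return 0;
}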
*pui64Pos += ui64Count; return ui64Count; } -#endif /* SUPPORT_VALIDATION */ +static int VZConnectionCooldownPeriodDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -/*************************************************************************/ /*! - Debug level DebugFS entry -*/ /**************************************************************************/ + psDevInfo = psVZDriverData->psDevNode->pvDevice; + PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); -static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -{ - DIPrintf(psEntry, "%u\n", OSDebugLevel()); + psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); - return 0; -} + DIPrintf(psEntry, "%u sec\n", psRuntimeCfg->ui32VzConnectionCooldownPeriodInSec); -#ifndef __GNUC__ -static int __builtin_ffsl(long int x) -{ - for (size_t i = 0; i < sizeof(x) * 8; i++) - { - if (x & (1 << i)) - { - return i + 1; - } - } return 0; } -#endif /* __GNUC__ */ -static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, - IMG_UINT64 *pui64Pos, void *pvData) +static IMG_INT64 VZConnectionCooldownPeriodSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) { - const IMG_UINT uiMaxBufferSize = 6; - IMG_UINT32 ui32Level; + const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; + const IMG_UINT32 uiMaxBufferSize = 12; + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec; + PVRSRV_ERROR eError; PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); - PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); + PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); - if (sscanf(pcBuffer, "%u", &ui32Level) == 0) + if (OSStringToUINT32(pcBuffer, 10, &ui32VzConnectionCooldownPeriodInSec) != PVRSRV_OK) { return -EINVAL; } - OSSetDebugLevel(ui32Level & ((1 << __builtin_ffsl(DBGPRIV_LAST)) - 1)); + eError = PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM(NULL, psVZDriverData->psDevNode, + ui32VzConnectionCooldownPeriodInSec); + if (eError != PVRSRV_OK) + { + return -EIO; + } *pui64Pos += ui64Count; return ui64Count; } -#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ + +#endif PVRSRV_ERROR DebugCommonInitDriver(void) { @@ -1419,8 +1583,10 @@ PVRSRV_ERROR DebugCommonInitDriver(void) .pfnStop = _DebugStatusDIStop, .pfnNext = _DebugStatusDINext, .pfnShow = _DebugStatusDIShow, +#if defined(DEBUG) .pfnWrite = DebugStatusSet, //'K' expected + Null terminator +#endif .ui32WriteLenMax= ((1U)+1U) }; eError = DICreateEntry("status", NULL, &sIterator, psPVRSRVData, @@ -1428,19 +1594,6 @@ PVRSRV_ERROR DebugCommonInitDriver(void) PVR_GOTO_IF_ERROR(eError, return_error_); } -#ifdef SUPPORT_VALIDATION - { - DI_ITERATOR_CB sIterator = { - .pfnShow = TestMemLeakDIShow, - .pfnWrite = TestMemLeakDISet, - //Function only allows max 15 chars + Null terminator - .ui32WriteLenMax = ((15U)+1U) - }; - eError = DICreateEntry("test_memleak", NULL, &sIterator, psPVRSRVData, - DI_ENTRY_TYPE_GENERIC, &gpsTestMemLeakDIEntry); - PVR_GOTO_IF_ERROR(eError, return_error_); - } -#endif 
/* SUPPORT_VALIDATION */ #if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) { @@ -1473,12 +1626,6 @@ void DebugCommonDeInitDriver(void) } #endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ -#ifdef SUPPORT_VALIDATION - if (gpsTestMemLeakDIEntry != NULL) - { - DIDestroyEntry(gpsTestMemLeakDIEntry); - } -#endif /* SUPPORT_VALIDATION */ if (gpsStatusDIEntry != NULL) { @@ -1496,6 +1643,7 @@ PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; PVRSRV_ERROR eError; IMG_CHAR pszDeviceId[sizeof("gpu4294967296")]; + __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; OSSNPrintf(pszDeviceId, sizeof(pszDeviceId), "gpu%02d", psDeviceNode->sDevId.ui32InternalID); @@ -1516,7 +1664,7 @@ PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) } #ifdef SUPPORT_RGX - if (! PVRSRV_VZ_MODE_IS(GUEST)) + if (! PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { { DI_ITERATOR_CB sIterator = {.pfnShow = _DebugFWTraceDIShow}; @@ -1542,6 +1690,7 @@ PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) } #endif /* SUPPORT_FIRMWARE_GCOV */ +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) { DI_ITERATOR_CB sIterator = {.pfnShow = _FirmwareMappingsDIShow}; eError = DICreateEntry("firmware_mappings", psDebugInfo->psGroup, &sIterator, @@ -1549,8 +1698,10 @@ PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) &psDebugInfo->psFWMappingsEntry); PVR_GOTO_IF_ERROR(eError, return_error_); } +#endif -#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) +#if defined(SUPPORT_RISCV_GDB) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { DI_ITERATOR_CB sIterator = { .pfnRead = _RiscvDmiRead, @@ -1563,34 +1714,98 @@ PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDebugInfo->ui64RiscvDmi = 0ULL; } #endif /* SUPPORT_VALIDATION || SUPPORT_RISCV_GDB */ - } -#ifdef SUPPORT_VALIDATION - { - DI_ITERATOR_CB sIterator = { - .pfnSeek = _RgxRegsSeek, - .pfnRead = _RgxRegsRead, - .pfnWrite = _RgxRegsWrite, - //Max size of input binary data is 4 bytes (UINT32) or 8 bytes (UINT64) - .ui32WriteLenMax = ((8U)+1U) - }; - eError = DICreateEntry("rgxregs", psDebugInfo->psGroup, &sIterator, psDeviceNode, - DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRGXRegsEntry); - PVR_GOTO_IF_ERROR(eError, return_error_); - } -#endif /* SUPPORT_VALIDATION */ +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode)) + { + eError = DICreateGroup("vz", psDebugInfo->psGroup, &psDebugInfo->psVZGroup); + PVR_GOTO_IF_ERROR(eError, return_error_); -#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS - if (! 
PVRSRV_VZ_MODE_IS(GUEST)) - { - DI_ITERATOR_CB sIterator = { - .pfnShow = _PowMonTraceDIShow - }; - eError = DICreateEntry("power_mon", psDebugInfo->psGroup, &sIterator, psDeviceNode, - DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowMonEntry); - PVR_GOTO_IF_ERROR(eError, return_error_); + { + IMG_UINT32 ui32DriverID; + + DI_ITERATOR_CB sPriorityIterator = { + .pfnShow = VZPriorityDIShow, + .pfnWrite = VZPrioritySet, + //Max value of UINT_MAX (10 chars) + Null terminator + .ui32WriteLenMax = sizeof("4294967295") + }; + + DI_ITERATOR_CB sTimeSliceIntervalIterator = { + .pfnShow = VZTimeSliceIntervalDIShow, + .pfnWrite = VZTimeSliceIntervalSet, + //Max value of UINT_MAX (10 chars) + Null terminator + .ui32WriteLenMax = sizeof("4294967295") + }; + + DI_ITERATOR_CB sTimeSliceIterator = { + .pfnShow = VZTimeSliceDIShow, + .pfnWrite = VZTimeSliceSet, + //Max value of UINT_MAX (10 chars) + Null terminator + .ui32WriteLenMax = sizeof("4294967295") + }; + + DI_ITERATOR_CB sIsolationGroupIterator = { + .pfnShow = VZIsolationGroupDIShow, + .pfnWrite = VZIsolationGroupSet, + //Max value of UINT_MAX (10 chars) + Null terminator + .ui32WriteLenMax = sizeof("4294967295") + }; + + DI_ITERATOR_CB sVzConnectionCooldownPeriodIterator = { + .pfnShow = VZConnectionCooldownPeriodDIShow, + .pfnWrite = VZConnectionCooldownPeriodSet, + //Max value of UINT_MAX (10 chars) + Null terminator + .ui32WriteLenMax = sizeof("4294967295") + }; + + FOREACH_SUPPORTED_DRIVER(ui32DriverID) + { + IMG_CHAR szDriverID[4]; + OSSNPrintf(szDriverID, 4, "%u", ui32DriverID); + + eError = DICreateGroup(szDriverID, psDebugInfo->psVZGroup, &psDebugInfo->apsVZDriverGroups[ui32DriverID]); + PVR_GOTO_IF_ERROR(eError, return_error_); + + psDebugInfo->apsVZDriverData[ui32DriverID] = OSAllocMem(sizeof(PVRSRV_DEVICE_DEBUG_INFO)); + PVR_GOTO_IF_NOMEM(psDebugInfo->apsVZDriverData[ui32DriverID], eError, return_error_); + + psDebugInfo->apsVZDriverData[ui32DriverID]->psDevNode = psDeviceNode; + psDebugInfo->apsVZDriverData[ui32DriverID]->ui32DriverID = ui32DriverID; + + eError = DICreateEntry("priority", psDebugInfo->apsVZDriverGroups[ui32DriverID], + &sPriorityIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, + &psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID]); + PVR_GOTO_IF_ERROR(eError, return_error_); + + eError = DICreateEntry("time_slice", psDebugInfo->apsVZDriverGroups[ui32DriverID], + &sTimeSliceIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, + &psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID]); + PVR_GOTO_IF_ERROR(eError, return_error_); + + if (ui32DriverID == RGXFW_HOST_DRIVER_ID) + { + eError = DICreateEntry("time_slice_interval", psDebugInfo->apsVZDriverGroups[ui32DriverID], + &sTimeSliceIntervalIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, + &psDebugInfo->psVZDriverTimeSliceIntervalDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + + eError = DICreateEntry("vz_connection_cooldown_period", psDebugInfo->apsVZDriverGroups[ui32DriverID], + &sVzConnectionCooldownPeriodIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, + &psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } + + eError = DICreateEntry("isolation_group", psDebugInfo->apsVZDriverGroups[ui32DriverID], + &sIsolationGroupIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, + &psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID]); + PVR_GOTO_IF_ERROR(eError, 
return_error_); + } + } + } +#endif /* defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1 */ } -#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ + #ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS { DI_ITERATOR_CB sIterator = { @@ -1627,6 +1842,7 @@ PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; + __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; #if defined(PVRSRV_ENABLE_PROCESS_STATS) if (psDebugInfo->psPowerTimingStatsEntry != NULL) @@ -1644,23 +1860,69 @@ void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) } #endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ -#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS - if (psDebugInfo->psPowMonEntry != NULL) - { - DIDestroyEntry(psDebugInfo->psPowMonEntry); - psDebugInfo->psPowMonEntry = NULL; - } -#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ -#ifdef SUPPORT_VALIDATION - if (psDebugInfo->psRGXRegsEntry != NULL) +#ifdef SUPPORT_RGX +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode)) { - DIDestroyEntry(psDebugInfo->psRGXRegsEntry); - psDebugInfo->psRGXRegsEntry = NULL; + IMG_UINT32 ui32DriverID; + + FOREACH_SUPPORTED_DRIVER(ui32DriverID) + { + if (psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID] != NULL) + { + DIDestroyEntry(psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID]); + psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID] = NULL; + } + + if (psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID] != NULL) + { + DIDestroyEntry(psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID]); + psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID] = NULL; + } + + if (psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID] != NULL) + { + DIDestroyEntry(psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID]); + psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID] = NULL; + } + + if (ui32DriverID == RGXFW_HOST_DRIVER_ID) + { + if (psDebugInfo->psVZDriverTimeSliceIntervalDIEntry != NULL) + { + DIDestroyEntry(psDebugInfo->psVZDriverTimeSliceIntervalDIEntry); + psDebugInfo->psVZDriverTimeSliceIntervalDIEntry = NULL; + } + + if (psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry != NULL) + { + DIDestroyEntry(psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry); + psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry = NULL; + } + } + + if (psDebugInfo->apsVZDriverData[ui32DriverID] != NULL) + { + OSFreeMem(psDebugInfo->apsVZDriverData[ui32DriverID]); + psDebugInfo->apsVZDriverData[ui32DriverID] = NULL; + } + + if (psDebugInfo->apsVZDriverGroups[ui32DriverID] != NULL) + { + DIDestroyGroup(psDebugInfo->apsVZDriverGroups[ui32DriverID]); + psDebugInfo->apsVZDriverGroups[ui32DriverID] = NULL; + } + } + + if (psDebugInfo->psVZGroup != NULL) + { + DIDestroyGroup(psDebugInfo->psVZGroup); + psDebugInfo->psVZGroup = NULL; + } } -#endif /* SUPPORT_VALIDATION */ +#endif -#ifdef SUPPORT_RGX if (psDebugInfo->psFWTraceEntry != NULL) { DIDestroyEntry(psDebugInfo->psFWTraceEntry); @@ -1681,8 +1943,9 @@ void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDebugInfo->psFWMappingsEntry = NULL; } -#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) - if (psDebugInfo->psRiscvDmiDIEntry != NULL) +#if defined(SUPPORT_RISCV_GDB) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) && + (psDebugInfo->psRiscvDmiDIEntry != NULL)) { 
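The iterator tables above size ui32WriteLenMax with sizeof("4294967295"), which is the ten digits of UINT32_MAX plus the terminating NUL, and print each per-driver group name into a four-byte buffer that covers IDs 0 to 999. A small standalone sketch of both sizing idioms, assuming a C11 compiler for _Static_assert:

/* Standalone sketch of the buffer-sizing idioms used when the vz/ group is
 * created: sizeof on a string literal counts the terminating NUL, and a
 * char[4] id buffer covers driver IDs 0..999. */
#include <stdio.h>

_Static_assert(sizeof("4294967295") == 11,
               "10 digits of UINT32_MAX plus the NUL terminator");

int main(void)
{
    char szDriverID[4];                  /* "0".."999" plus NUL */
    unsigned int uiDriverID = 2;         /* hypothetical driver index */

    /* snprintf never writes past the buffer and always NUL terminates,
     * matching the OSSNPrintf(szDriverID, 4, "%u", ...) call above. */
    snprintf(szDriverID, sizeof(szDriverID), "%u", uiDriverID);
    printf("group name: %s, write length max: %zu\n",
           szDriverID, sizeof("4294967295"));
    return 0;
}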
DIDestroyEntry(psDebugInfo->psRiscvDmiDIEntry); psDebugInfo->psRiscvDmiDIEntry = NULL; @@ -1710,3 +1973,23 @@ void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDebugInfo->psGroup = NULL; } } + +/* + Appends flags strings to a null-terminated string buffer +*/ +void DebugCommonFlagStrings(IMG_CHAR *psDesc, + IMG_UINT32 ui32DescSize, + const IMG_FLAGS2DESC *psConvTable, + IMG_UINT32 ui32TableSize, + IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) + { + if (BITMASK_HAS(ui32Flags, psConvTable[ui32Idx].uiFlag)) + { + OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + } + } +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_heapcfg.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_heapcfg.c index af2b83343e39..9b53d026bb5d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_heapcfg.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_heapcfg.c @@ -56,7 +56,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint) { - IMG_UINT32 ui32OSPageSize = OSGetPageShift(); + IMG_UINT32 ui32OSPageSize = OSGetPageSize(); /* Any heap length should at least match OS page size at the minimum or * a multiple of OS page size */ @@ -72,7 +72,7 @@ static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlu psHeapBlueprint->uiHeapLength, psHeapBlueprint->uiHeapLength)); PVR_DPF((PVR_DBG_ERROR, - "Heap Size should always be a non-zero value and a " + "Heap Size should always be at least the DevMem minimum size and a " "multiple of OS Page Size:%u(0x%x)", ui32OSPageSize, ui32OSPageSize)); PVR_ASSERT(psHeapBlueprint->uiHeapLength >= ui32OSPageSize); @@ -123,6 +123,8 @@ HeapCfgHeapCount(CONNECTION_DATA * psConnection, IMG_UINT32 uiHeapConfigIndex, IMG_UINT32 *puiNumHeapsOut) { + PVR_UNREFERENCED_PARAMETER(psConnection); + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; @@ -140,6 +142,8 @@ HeapCfgHeapConfigName(CONNECTION_DATA * psConnection, IMG_UINT32 uiHeapConfigNameBufSz, IMG_CHAR *pszHeapConfigNameOut) { + PVR_UNREFERENCED_PARAMETER(psConnection); + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; @@ -196,6 +200,8 @@ HeapCfgHeapDetails(CONNECTION_DATA * psConnection, { DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; + PVR_UNREFERENCED_PARAMETER(psConnection); + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) { return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_history_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_history_server.c index a9dc32d0e81e..f28e96fa55e1 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_history_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_history_server.c @@ -53,7 +53,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "devicemem_history_server.h" #include "pdump_km.h" -#define ALLOCATION_LIST_NUM_ENTRIES 10000 + +#if (PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES < 5000) +#error PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES is too low. 
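DebugCommonFlagStrings above walks a flag/label table and appends the label of every set flag to a bounded, NUL-terminated description buffer. A standalone sketch of the same loop follows; the FLAG2DESC table and flag values are hypothetical, and libc strncat stands in for the driver's OSStringLCat and BITMASK_HAS.

/* Standalone sketch of the flag-to-label append performed by
 * DebugCommonFlagStrings. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint32_t uiFlag; const char *pszLabel; } FLAG2DESC;

static const FLAG2DESC asExample[] = {
    { 0x1, "MAPPED "   },
    { 0x2, "SPARSE "   },
    { 0x4, "READONLY " },
};

/* Appends the label of every flag present in uiFlags, never overflowing the
 * destination and always leaving it NUL terminated. */
static void FlagStrings(char *pszDesc, size_t uiDescSize,
                        const FLAG2DESC *psTable, size_t uiTableSize,
                        uint32_t uiFlags)
{
    size_t uiIdx;

    for (uiIdx = 0; uiIdx < uiTableSize; uiIdx++)
    {
        if ((uiFlags & psTable[uiIdx].uiFlag) == psTable[uiIdx].uiFlag)
        {
            size_t uiUsed = strlen(pszDesc);

            if (uiUsed + 1 < uiDescSize)
            {
                strncat(pszDesc, psTable[uiIdx].pszLabel,
                        uiDescSize - uiUsed - 1);
            }
        }
    }
}

int main(void)
{
    char acDesc[32] = "";

    FlagStrings(acDesc, sizeof(acDesc), asExample,
                sizeof(asExample) / sizeof(asExample[0]), 0x5);
    printf("%s\n", acDesc);              /* prints "MAPPED READONLY " */
    return 0;
}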
+#elif (PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES > 250000) +#error PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES is too high. +#else +#define ALLOCATION_LIST_NUM_ENTRIES PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES +#endif + /* data type to hold an allocation index. * we make it 16 bits wide if possible @@ -108,9 +116,9 @@ typedef enum _COMMAND_TYPE_ * This command is inserted into the circular buffer to provide an updated * timestamp. * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order - * for the whole command to fit into 8 bytes. + * for the whole command to fit into 10 bytes. */ -typedef struct _COMMAND_TIMESTAMP_ +typedef struct __attribute__((__packed__))_COMMAND_TIMESTAMP_ { IMG_UINT8 aui8TimeNs[7]; } COMMAND_TIMESTAMP; @@ -119,7 +127,7 @@ typedef struct _COMMAND_TIMESTAMP_ * This command denotes the allocation at the given index was wholly mapped * in to the GPU MMU */ -typedef struct _COMMAND_MAP_ALL_ +typedef struct __attribute__((__packed__))_COMMAND_MAP_ALL_ { ALLOC_INDEX_T uiAllocIndex; } COMMAND_MAP_ALL; @@ -131,34 +139,36 @@ typedef struct _COMMAND_MAP_ALL_ */ typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL; +// This shift allows room for 512GiB virtual memory regions at 4Kb pages. +#define VM_RANGE_SHIFT 27 + /* packing attributes for the MAP_RANGE command */ -#define MAP_RANGE_MAX_START ((1 << 18) - 1) -#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1) +#define MAP_RANGE_MAX_START ((1 << VM_RANGE_SHIFT) - 1) +#define MAP_RANGE_MAX_RANGE ((1 << VM_RANGE_SHIFT) - 1) /* MAP_RANGE command: * Denotes a range of pages within the given allocation being mapped. * The range is expressed as [Page Index] + [Page Count] - * This information is packed into a 40-bit integer, in order to make - * the command size 8 bytes. + * This information is packed into a 72/88-bit struct, in order to make + * the command size 9/11 bytes. */ -typedef struct _COMMAND_MAP_RANGE_ +typedef struct __attribute__((__packed__))_COMMAND_MAP_RANGE_ { - IMG_UINT8 aui8Data[5]; + IMG_UINT8 aui8Data[7]; ALLOC_INDEX_T uiAllocIndex; } COMMAND_MAP_RANGE; /* UNMAP_RANGE command: - * Denotes a range of pages within the given allocation being mapped. + * Denotes a range of pages within the given allocation being unmapped. * The range is expressed as [Page Index] + [Page Count] - * This information is packed into a 40-bit integer, in order to make - * the command size 8 bytes. - * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout. + * This information is packed into a 72/88-bit struct, in order to make + * the command size 9/11 bytes. */ typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE; /* wrapper structure for a command */ -typedef struct _COMMAND_WRAPPER_ +typedef struct __attribute__((__packed__))_COMMAND_WRAPPER_ { IMG_UINT8 ui8Type; union { @@ -171,7 +181,15 @@ typedef struct _COMMAND_WRAPPER_ } COMMAND_WRAPPER; /* target size for the circular buffer of commands */ -#define CIRCULAR_BUFFER_SIZE_KB 2048 +#if (PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 < 5) +#error PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 is too low. +#elif (PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 > 18) +#error PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 is too high. 
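The history command structures above are declared __attribute__((__packed__)) so that, with the 16-bit allocation index the earlier comment says is used where possible, the command wrapper stays at 10 bytes (one type byte, seven data bytes, two index bytes); the natural layout would typically be padded to 12 bytes. A standalone sketch with illustrative field names and a compile-time size check:

/* Standalone sketch of why the history command structs are packed: with a
 * 16-bit allocation index the wrapper is exactly 10 bytes. Field and type
 * names are illustrative; the driver's types are COMMAND_TIMESTAMP,
 * COMMAND_MAP_RANGE and COMMAND_WRAPPER. */
#include <stdint.h>

typedef uint16_t ALLOC_INDEX_EXAMPLE_T;          /* "16 bits wide if possible" */

typedef struct __attribute__((__packed__)) {
    uint8_t aui8TimeNs[7];                       /* 56-bit timestamp */
} TS_EXAMPLE;

typedef struct __attribute__((__packed__)) {
    uint8_t aui8Data[7];                         /* packed start page + count */
    ALLOC_INDEX_EXAMPLE_T uiAllocIndex;
} MAP_RANGE_EXAMPLE;

typedef struct __attribute__((__packed__)) {
    uint8_t ui8Type;
    union {
        TS_EXAMPLE sTimeStamp;
        MAP_RANGE_EXAMPLE sMapRange;
    } u;
} WRAPPER_EXAMPLE;

_Static_assert(sizeof(WRAPPER_EXAMPLE) == 10,
               "1 type byte + 7 data bytes + 2 index bytes");

int main(void) { return 0; }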
+#else +#define CIRCULAR_BUFFER_SIZE_KB (1 << PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2) +#endif + + /* turn the circular buffer target size into a number of commands */ #define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER)) @@ -189,6 +207,14 @@ typedef struct _RECORDS_ IMG_UINT32 ui32Head; IMG_UINT32 ui32Tail; COMMAND_WRAPPER *pasCircularBuffer; + /* Times the CB has wrapped back to start */ + IMG_UINT64 ui64CBWrapCount; + /* Records of CB commands sent */ + IMG_UINT64 ui64MapAllCount;//Incremented by InsertMapAllCommand() + IMG_UINT64 ui64UnMapAllCount;//Incremented by InsertUnmapAllCommand() + IMG_UINT64 ui64MapRangeCount;//Incremented by InsertMapRangeCommand() + IMG_UINT64 ui64UnMapRangeCount;//Incremented by InsertUnmapRangeCommand() + IMG_UINT64 ui64TimeStampCount;//Incremented by InsertTimeStampCommand() } RECORDS; typedef struct _DEVICEMEM_HISTORY_DATA_ @@ -272,6 +298,11 @@ static COMMAND_WRAPPER *AcquireCBSlot(DEVICEMEM_HISTORY_DATA *psDevHData) (psDevHData->sRecords.ui32Head + 1) % CIRCULAR_BUFFER_NUM_COMMANDS; + if (psDevHData->sRecords.ui32Head == 0) + { + psDevHData->sRecords.ui64CBWrapCount++; + } + return psSlot; } @@ -413,7 +444,7 @@ static void InsertTimeStampCommand(IMG_UINT64 ui64Now, PVRSRV_DEVICE_NODE *psDev psCommand = AcquireCBSlot(psDevHData); psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP; - + psDevHData->sRecords.ui64TimeStampCount++; TimeStampPack(&psCommand->u.sTimeStamp, ui64Now); } @@ -435,6 +466,7 @@ static void InsertMapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode, psCommand->ui8Type = COMMAND_TYPE_MAP_ALL; psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex; + psDevHData->sRecords.ui64MapAllCount++; #if defined(PDUMP) EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_MAP_ALL, ui32AllocIndex); @@ -459,6 +491,7 @@ static void InsertUnmapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode, psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL; psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex; + psDevHData->sRecords.ui64UnMapAllCount++; #if defined(PDUMP) EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex); @@ -466,24 +499,29 @@ static void InsertUnmapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode, } /* MapRangePack: - * Pack the given StartPage and Count values into the 40-bit representation + * Pack the given StartPage and Count values into the 75-bit representation * in the MAP_RANGE command. */ static void MapRangePack(COMMAND_MAP_RANGE *psMapRange, IMG_UINT32 ui32StartPage, IMG_UINT32 ui32Count) + { IMG_UINT64 ui64Data; IMG_UINT32 i; - /* we must encode the data into 40 bits: - * 18 bits for the start page index - * 12 bits for the range + /* we must encode the data into 54 bits: + * 27 bits for the start page index + * 27 bits for the range */ + PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START); PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE); - ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count; + ui32StartPage &= MAP_RANGE_MAX_START; + ui32Count &= MAP_RANGE_MAX_RANGE; + + ui64Data = (((IMG_UINT64) ui32StartPage) << VM_RANGE_SHIFT) | ui32Count; for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++) { @@ -492,8 +530,8 @@ static void MapRangePack(COMMAND_MAP_RANGE *psMapRange, } } -/* MapRangePack: - * Unpack the StartPage and Count values from the 40-bit representation +/* MapRangeUnpack: + * Unpack the StartPage and Count values from the 75-bit representation * in the MAP_RANGE command. 
*/ static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange, @@ -509,8 +547,8 @@ static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange, ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1]; } - *pui32StartPage = (ui64Data >> 12); - *pui32Count = ui64Data & ((1 << 12) - 1); + *pui32StartPage = (IMG_UINT32)(ui64Data >> VM_RANGE_SHIFT); + *pui32Count = (IMG_UINT32) ui64Data & (MAP_RANGE_MAX_RANGE); } /* InsertMapRangeCommand: @@ -534,6 +572,7 @@ static void InsertMapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode, psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE; psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; + psDevHData->sRecords.ui64MapRangeCount++; MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); @@ -567,6 +606,7 @@ static void InsertUnmapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode, psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE; psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; + psDevHData->sRecords.ui64UnMapRangeCount++; MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); @@ -732,7 +772,7 @@ static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32Log2PageSize) { - OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName)); + OSStringSafeCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName)); psAlloc->ui64Serial = ui64Serial; psAlloc->uiPID = uiPID; psAlloc->sDevVAddr = sDevVAddr; @@ -752,7 +792,6 @@ static PVRSRV_ERROR CreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_DEV_VIRTADDR sDevVAddr, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32Log2PageSize, - IMG_BOOL bAutoPurge, IMG_UINT32 *puiAllocationIndex) { IMG_UINT32 ui32Alloc; @@ -801,8 +840,7 @@ static IMG_BOOL MatchAllocation(DEVICEMEM_HISTORY_DATA *psDevHData, IMG_DEV_VIRTADDR sDevVAddr, IMG_DEVMEM_SIZE_T uiSize, const IMG_CHAR *pszName, - IMG_UINT32 ui32Log2PageSize, - IMG_PID uiPID) + IMG_UINT32 ui32Log2PageSize) { RECORD_ALLOCATION *psAlloc; @@ -834,7 +872,6 @@ static PVRSRV_ERROR FindOrCreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, const char *pszName, IMG_UINT32 ui32Log2PageSize, IMG_PID uiPID, - IMG_BOOL bSparse, IMG_UINT32 *pui32AllocationIndexOut, IMG_BOOL *pbCreated) { @@ -855,8 +892,7 @@ static PVRSRV_ERROR FindOrCreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, sDevVAddr, uiSize, pszName, - ui32Log2PageSize, - uiPID); + ui32Log2PageSize); if (bHaveAllocation) { *pbCreated = IMG_FALSE; @@ -875,7 +911,6 @@ static PVRSRV_ERROR FindOrCreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, sDevVAddr, uiSize, ui32Log2PageSize, - IMG_TRUE, &ui32AllocationIndex); if (eError == PVRSRV_OK) @@ -926,7 +961,7 @@ static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR, return; } - for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++) + for (i = 0; i < psMappingTable->ui32NumLogicalChunks; i++) { if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID) { @@ -951,7 +986,7 @@ static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR, */ if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) || (ui32RunCount == MAP_RANGE_MAX_RANGE) || - (i == (psMappingTable->ui32NumVirtChunks - 1))) + (i == (psMappingTable->ui32NumLogicalChunks - 1))) { if (bMap) { @@ -1070,12 +1105,14 @@ PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, IMG_UINT32 *pui32AllocationIndexOut) { IMG_BOOL bSparse = PMR_IsSparse(psPMR); - IMG_UINT64 ui64Serial; + IMG_UINT64 ui64Serial = PMRInternalGetUID(psPMR); IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); PVRSRV_ERROR eError; IMG_BOOL bCreated; DEVICEMEM_HISTORY_DATA *psDevHData; + 
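With VM_RANGE_SHIFT set to 27, MapRangePack/MapRangeUnpack above limit the start page index and the page count to (1 << 27) - 1 each and pack them as (start << 27) | count, a 54-bit value held in the command's seven data bytes. A standalone round-trip sketch of that arithmetic follows; the little-endian byte store here is illustrative rather than a copy of the driver's byte order.

/* Standalone round-trip sketch of the 27+27-bit MAP_RANGE packing. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VM_RANGE_SHIFT_EXAMPLE 27
#define RANGE_MAX ((1u << VM_RANGE_SHIFT_EXAMPLE) - 1u)  /* 0x7FFFFFF */

static void pack_range(uint8_t aui8Data[7], uint32_t start, uint32_t count)
{
    uint64_t ui64Data = ((uint64_t)(start & RANGE_MAX) << VM_RANGE_SHIFT_EXAMPLE)
                      | (count & RANGE_MAX);
    unsigned int i;

    for (i = 0; i < 7; i++)
    {
        aui8Data[i] = (uint8_t)(ui64Data >> (i * 8));
    }
}

static void unpack_range(const uint8_t aui8Data[7], uint32_t *start, uint32_t *count)
{
    uint64_t ui64Data = 0;
    unsigned int i;

    for (i = 0; i < 7; i++)
    {
        ui64Data |= (uint64_t)aui8Data[i] << (i * 8);
    }

    *start = (uint32_t)(ui64Data >> VM_RANGE_SHIFT_EXAMPLE);
    *count = (uint32_t)(ui64Data & RANGE_MAX);
}

int main(void)
{
    uint8_t aui8Data[7];
    uint32_t start, count;

    pack_range(aui8Data, 0x12345u, 4096u);
    unpack_range(aui8Data, &start, &count);
    assert(start == 0x12345u && count == 4096u);
    printf("start=0x%x count=%u\n", start, count);
    return 0;
}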
PVR_UNREFERENCED_PARAMETER(ui32Offset); + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && !CHECK_ALLOC_INDEX(ui32AllocationIndex)) { @@ -1085,8 +1122,6 @@ PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, return PVRSRV_ERROR_INVALID_PARAMS; } - PMRGetUID(psPMR, &ui64Serial); - psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR)); if (psDevHData == NULL) @@ -1104,7 +1139,6 @@ PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, szName, ui32Log2PageSize, uiPID, - bSparse, &ui32AllocationIndex, &bCreated); @@ -1151,6 +1185,8 @@ static void VRangeInsertMapUnmapCommands(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32NumPages, const IMG_CHAR *pszName) { + PVR_UNREFERENCED_PARAMETER(pszName); + while (ui32NumPages > 0) { IMG_UINT32 ui32PagesToAdd; @@ -1231,7 +1267,6 @@ PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection, szName, ui32Log2PageSize, uiPID, - IMG_FALSE, &ui32AllocationIndex, &bCreated); @@ -1310,7 +1345,6 @@ PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection, szName, ui32Log2PageSize, uiPID, - IMG_FALSE, &ui32AllocationIndex, &bCreated); @@ -1371,12 +1405,14 @@ PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, IMG_UINT32 *pui32AllocationIndexOut) { IMG_BOOL bSparse = PMR_IsSparse(psPMR); - IMG_UINT64 ui64Serial; + IMG_UINT64 ui64Serial = PMRInternalGetUID(psPMR); IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); PVRSRV_ERROR eError; IMG_BOOL bCreated; DEVICEMEM_HISTORY_DATA *psDevHData; + PVR_UNREFERENCED_PARAMETER(ui32Offset); + if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && !CHECK_ALLOC_INDEX(ui32AllocationIndex)) { @@ -1386,8 +1422,6 @@ PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, return PVRSRV_ERROR_INVALID_PARAMS; } - PMRGetUID(psPMR, &ui64Serial); - psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR)); if (psDevHData == NULL) @@ -1405,7 +1439,6 @@ PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, szName, ui32Log2PageSize, uiPID, - bSparse, &ui32AllocationIndex, &bCreated); @@ -1477,12 +1510,14 @@ PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, IMG_UINT32 ui32AllocationIndex, IMG_UINT32 *pui32AllocationIndexOut) { - IMG_UINT64 ui64Serial; + IMG_UINT64 ui64Serial = PMRInternalGetUID(psPMR); IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); PVRSRV_ERROR eError; IMG_BOOL bCreated; DEVICEMEM_HISTORY_DATA *psDevHData; + PVR_UNREFERENCED_PARAMETER(ui32Offset); + if (!PMRValidateSize((IMG_UINT64) ui32AllocPageCount << ui32Log2PageSize)) { PVR_LOG_VA(PVR_DBG_ERROR, @@ -1501,8 +1536,6 @@ PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, return PVRSRV_ERROR_INVALID_PARAMS; } - PMRGetUID(psPMR, &ui64Serial); - psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR)); if (psDevHData == NULL) @@ -1520,7 +1553,6 @@ PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, szName, ui32Log2PageSize, uiPID, - IMG_TRUE /* bSparse */, &ui32AllocationIndex, &bCreated); @@ -1686,6 +1718,39 @@ static void MapUnmapCommandGetInfo(DEVICEMEM_HISTORY_DATA *psHData, } } +void DevicememHistoryDumpRecordStats(PVRSRV_DEVICE_NODE *psDevNode, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + DEVICEMEM_HISTORY_DATA *psDevHData; + psDevHData = DevmemFindDataFromDev(psDevNode); + + if (psDevHData) + { + PVR_DUMPDEBUG_LOG(" DevmemHistoryRecordStats -" + " CBWC:%"IMG_UINT64_FMTSPEC + " MAC:%"IMG_UINT64_FMTSPEC + " UMAC:%"IMG_UINT64_FMTSPEC + " MRC:%"IMG_UINT64_FMTSPEC + " UMRC:%"IMG_UINT64_FMTSPEC + " TSC:%"IMG_UINT64_FMTSPEC + " MAX:%"IMG_UINT64_FMTSPEC + " CHD:%u", + 
psDevHData->sRecords.ui64CBWrapCount, + psDevHData->sRecords.ui64MapAllCount, + psDevHData->sRecords.ui64UnMapAllCount, + psDevHData->sRecords.ui64MapRangeCount, + psDevHData->sRecords.ui64UnMapRangeCount, + psDevHData->sRecords.ui64TimeStampCount, + (IMG_UINT64)CIRCULAR_BUFFER_NUM_COMMANDS, + psDevHData->sRecords.ui32Head); + } + else + { + PVR_DUMPDEBUG_LOG(" DevmemHistoryRecordStats - None"); + } +} + /* DevicememHistoryQuery: * Entry point for rgxdebug to look up addresses relating to a page fault */ @@ -1704,6 +1769,7 @@ IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, /* initialise the results count for the caller */ psQueryOut->ui32NumResults = 0; + psQueryOut->ui64SearchCount = 0; psDevHData = DevmemFindDataFromDev(psQueryIn->psDevNode); @@ -1754,6 +1820,13 @@ IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, while (!bLast) { psCommand = CircularBufferIteratePrevious(psDevHData, ui32Head, &ui32Iter, &eType, &bLast); + if (psCommand == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CircularBufferIteratePrevious returned NULL psCommand", + __func__)); + return IMG_FALSE; + } if (eType == COMMAND_TYPE_TIMESTAMP) { @@ -1803,6 +1876,8 @@ IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, continue; } + psQueryOut->ui64SearchCount++; + /* if the caller wants us to match any allocation in the * same page as the allocation then tweak the real start/end * addresses of the allocation here @@ -1810,7 +1885,7 @@ IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, if (bMatchAnyAllocInPage) { sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1); - sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1); + sAllocEndAddr.uiAddr = PVR_ALIGN(sAllocEndAddr.uiAddr, (IMG_UINT64)ui32PageSizeBytes); } if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) && @@ -1818,7 +1893,7 @@ IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, { DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults]; - OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString)); + OSStringSafeCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString)); psResult->sBaseDevVAddr = psAlloc->sDevVAddr; psResult->uiSize = psAlloc->uiSize; psResult->bMap = bMap; @@ -1947,6 +2022,13 @@ static void DevicememHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry) psCommand = CircularBufferIteratePrevious(psDevHData, ui32Head, &ui32Iter, &eType, &bLast); + if (psCommand == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CircularBufferIteratePrevious returned NULL psCommand", + __func__)); + return; + } if (eType == COMMAND_TYPE_TIMESTAMP) { @@ -2028,7 +2110,7 @@ static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry, static PVRSRV_ERROR CreateRecords(DEVICEMEM_HISTORY_DATA *psDevHData) { psDevHData->sRecords.pasAllocations = - OSAllocMemNoStats(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES); + OSAllocZMemNoStats(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES); PVR_RETURN_IF_NOMEM(psDevHData->sRecords.pasAllocations); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_server.c index 993fe5251b30..e26a7ab3b814 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/devicemem_server.c @@ 
-63,9 +63,18 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv.h" /* for PVRSRVGetPVRSRVData() */ + #define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0) #define DEVMEMHEAP_REFCOUNT_MIN 1 #define DEVMEMHEAP_REFCOUNT_MAX IMG_INT32_MAX +#define DEVMEMRESERVATION_ACQUISITION_MIN 0 +#define DEVMEMRESERVATION_ACQUISITION_MAX IMG_INT32_MAX +#define DEVMEMRESERVATION_REFCOUNT_MAX IMG_INT32_MAX +#define DEVMEM_PMR_RSRV_NODE_REFCOUNT_MAX 2 +#define DEVMEMCTX_REFCOUNT_MIN 1 +#define DEVMEMCTX_REFCOUNT_MAX IMG_INT32_MAX + +#define DEVMEMX_SPLIT_MAPPING_PREALLOC_COUNT 2 struct _DEVMEMINT_CTX_ { @@ -78,25 +87,30 @@ struct _DEVMEMINT_CTX_ know about us at all. */ MMU_CONTEXT *psMMUContext; - ATOMIC_T hRefCount; - /* This handle is for devices that require notification when a new memory context is created and they need to store private data that is associated with the context. */ IMG_HANDLE hPrivData; - /* Protects access to sProcessNotifyListHead */ - POSWR_LOCK hListLock; - /* The following tracks UM applications that need to be notified of a - * page fault */ + * page fault. + * Access to this list is protected by lock defined on a device node: + * PVRSRV_DEVICE_NODE::hPageFaultNotifyLock. */ DLLIST_NODE sProcessNotifyListHead; - /* The following is a node for the list of registered devmem contexts */ + /* The following is a node for the list of registered devmem contexts. + * Access to this list is protected by lock defined on a device node: + * PVRSRV_DEVICE_NODE::hPageFaultNotifyLock. */ DLLIST_NODE sPageFaultNotifyListElem; /* Device virtual address of a page fault on this context */ IMG_DEV_VIRTADDR sFaultAddress; + /* Bitfield stating which heaps were created on this context. */ + IMG_UINT64 uiCreatedHeaps; + + /* Context's reference count */ + ATOMIC_T hRefCount; + /* General purpose flags */ IMG_UINT32 ui32Flags; }; @@ -112,9 +126,8 @@ struct _DEVMEMINT_CTX_EXPORT_ struct _DEVMEMINT_HEAP_ { struct _DEVMEMINT_CTX_ *psDevmemCtx; - IMG_UINT32 uiLog2PageSize; IMG_DEV_VIRTADDR sBaseAddr; - ATOMIC_T uiRefCount; + IMG_DEV_VIRTADDR sLastAddr; /* Private data for callback functions */ IMG_HANDLE hPrivData; @@ -124,20 +137,184 @@ struct _DEVMEMINT_HEAP_ /* Callback function deinit */ PFN_HEAP_DEINIT pfnDeInit; + + /* Heap's reference count */ + ATOMIC_T uiRefCount; + + /* Page shift of the heap */ + IMG_UINT32 uiLog2PageSize; + + /* Copy of the heap index from Device Heap Configuration module */ + IMG_UINT32 uiHeapIndex; }; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +typedef enum _DEVMEM_RESERVATION_TYPE_ +{ + DEVMEM_RES_TYPE_NONE, + DEVMEM_RES_TYPE_STD, /* Devmem Reservation */ + DEVMEM_RES_TYPE_X, /* DevmemX Reservation */ + DEVMEM_RES_TYPE_ZOMBIE /* Reservation Freed but still has references */ +} DEVMEM_RESERVATION_TYPE; + +/* Forward declare for callback definitions */ +typedef struct _DEVMEM_PMR_RSRV_NODE_ DEVMEM_PMR_RSRV_NODE; + +/* Forward declare for Remap functions */ +static PVRSRV_ERROR _RefPMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode); +static void _UnrefAndMaybeDestroyPMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode); + +/* Callback definition for remaps */ +typedef PVRSRV_ERROR (*PFN_REMAP_PAGE_FN)(PMR *psPMR, + DLLIST_NODE *psMappingListHead, + DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, + IMG_UINT32 ui32LogicalPgOffset); +/* Callback definition for checking the page is in the mapping */ +typedef IMG_BOOL (*PFN_IS_PAGE_IN_MAP_FN)(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, IMG_UINT32 ui32LogicalPgOffset); +/* Callback definition for free */ 
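The new DEVMEMRESERVATION_ACQUISITION_* and *_REFCOUNT_MAX limits above bound the reservation and context counters to IMG_INT32_MAX; presumably a counter is only bumped after checking it is still below its ceiling so it cannot wrap. A minimal sketch of such a guarded count follows, assuming a plain int32_t in place of ATOMIC_T and hypothetical function names, since the actual check sites are outside this hunk.

/* Minimal sketch of a bounded acquisition count, mirroring the intent of the
 * DEVMEMRESERVATION_ACQUISITION_MIN/MAX limits above. */
#include <stdint.h>
#include <stdio.h>

#define ACQUISITION_MIN 0
#define ACQUISITION_MAX INT32_MAX

static int acquire(int32_t *pi32Count)
{
    if (*pi32Count >= ACQUISITION_MAX)
    {
        return -1;                       /* refuse rather than overflow */
    }
    (*pi32Count)++;
    return 0;
}

static int release(int32_t *pi32Count)
{
    if (*pi32Count <= ACQUISITION_MIN)
    {
        return -1;                       /* unbalanced release */
    }
    (*pi32Count)--;
    return 0;
}

int main(void)
{
    int32_t i32DevResAcquisitionCount = 0;

    acquire(&i32DevResAcquisitionCount);
    release(&i32DevResAcquisitionCount);
    printf("count back to %d\n", i32DevResAcquisitionCount);
    return 0;
}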
+typedef void (*PFN_FREE_PARENT_STRUCT_FN)(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode); + +/* Node linked to PMR for remap. + * Also used to control lifetime of struct in which it is embedded. + */ +typedef struct _DEVMEM_PMR_RSRV_NODE_ +{ + /* List node given to the PMR to create the reservation link.*/ + DLLIST_NODE sLinkNode; + /* Type of reservation, determines re-map path, should be + * protected by reservation lock + */ + DEVMEM_RESERVATION_TYPE eType; + + ATOMIC_T iRefCount; + PFN_REMAP_PAGE_FN pfnRemapPage; + PFN_IS_PAGE_IN_MAP_FN pfnIsPageInMap; + PFN_FREE_PARENT_STRUCT_FN pfnFreeParent; + +} DEVMEM_PMR_RSRV_NODE; + +/* DevmemX representation of a mapping on the reservation */ +typedef struct _DEVMEMX_MAPPING_ +{ + /* Link node given to the PMR to create the link for remap */ + DEVMEM_PMR_RSRV_NODE sPMRRsrvLinkNode; + DEVMEMXINT_RESERVATION *psRsrv; + PMR *psPMR; + + /* Details of the mapped range */ + PVRSRV_MEMALLOCFLAGS_T uiFlags; + IMG_UINT32 uiPhysPageOffset; + IMG_UINT32 uiVirtPageOffset; + IMG_UINT32 uiPageCount; +} DEVMEMX_MAPPING; + +typedef struct _DEVMEM_MAPPING_ +{ + DEVMEM_PMR_RSRV_NODE sPMRRsrvLinkNode; + DEVMEMINT_RESERVATION *psRsrv; +} DEVMEM_MAPPING; +#endif + +/** + * Indicates where the reservation will be used. + * + * Used as locking classes for DevMem reservations to resolve lockdep warnings. + */ +typedef enum _RESERVATION_USAGE_ +{ + RESERVATION_USAGE_SERVER_FW = 0, // Reservations used by the server / FW (excludes dma_bufs) + RESERVATION_USAGE_CLIENT // Reservations used by client apps (includes dma_bufs) +} RESERVATION_USAGE; + struct _DEVMEMINT_RESERVATION_ { struct _DEVMEMINT_HEAP_ *psDevmemHeap; IMG_DEV_VIRTADDR sBase; IMG_DEVMEM_SIZE_T uiLength; + + /* lock used to guard against potential race when freeing reservation, also protects + * parallel operations occurring on the same reservation such as a (un)mapping operation + * from UM and a remap operation from the Linux Kernel + */ + POS_LOCK hLock; + RESERVATION_USAGE eLockClass; + + /* Indicator for ensuring range has not been externally acquired whilst attempting release. + * Protects freeing the reservation while other device resources still have reference such as + * FreeLists and ZSBuffer. + */ + IMG_INT32 i32DevResAcquisitionCount; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /* Mapping node representing the mapping on this reservation */ + DEVMEM_MAPPING *psDevmemMapping; +#endif + /* KM refcount used to protect the lifetime of the reservation itself. This ensures + * synchronisation of use between UM usage and KM usage such as remap paths. We may need + * to keep the reservation alive for these KM paths even once the UM has released it since + * it is used to synchronise operations originating from these KM paths and between UM and KM. + */ + ATOMIC_T iLifetimeRefCount; + + /* We keep a reference to the single PMR associated with this reservation + * once mapped. When creating the reservation this is null. Used in + * ChangeSparse to validate parameters. We could have a sparse PMR with + * no backing and have it mapped to a reservation. + */ + PMR *psMappedPMR; + + /* Array of bitfields of size `uiNumPages / MAP_MASK_SHIFT`. + * This array represents the mapping between a PMR (psMappedPMR) and + * the reservation. Each bit represents an index of a physical page - a value + * 1 means the page is mapped and vice versa. + */ + IMG_UINT8 *pui8Map; + #define MAP_MASK_SHIFT 3 }; -struct _DEVMEMINT_MAPPING_ +/*! 
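A mapping node can outlive its user-mode owner because kernel remap paths still hold a lifetime reference; when user mode frees the mapping it is only switched to DEVMEM_RES_TYPE_ZOMBIE, and a later remap request against it is rejected instead of acted on. A minimal standalone sketch of that lifecycle, with hypothetical names and a plain int standing in for ATOMIC_T and the reservation locks:

/* Minimal standalone sketch of the zombie-state lifecycle used by the remap
 * nodes above: UM releases the mapping while a KM remap path still holds a
 * reference, so the node is only marked ZOMBIE and a later remap request is
 * rejected. */
#include <stdio.h>

typedef enum { RES_TYPE_STD, RES_TYPE_X, RES_TYPE_ZOMBIE } res_type_t;

typedef struct {
    res_type_t eType;
    int iRefCount;
} rsrv_node_t;

static void node_unref(rsrv_node_t *psNode)
{
    if (--psNode->iRefCount == 0)
    {
        printf("node destroyed\n");      /* pfnFreeParent() in the driver */
    }
}

/* User-mode teardown: drop the UM reference but keep the node alive for any
 * in-flight kernel user, flagging it so remaps are refused from now on. */
static void um_free_mapping(rsrv_node_t *psNode)
{
    psNode->eType = RES_TYPE_ZOMBIE;
    node_unref(psNode);
}

/* Kernel remap path: refuse to touch a mapping that UM has already freed. */
static int try_remap(rsrv_node_t *psNode)
{
    if (psNode->eType == RES_TYPE_ZOMBIE)
    {
        return -1;                       /* remap rejected */
    }
    printf("remap performed\n");
    return 0;
}

int main(void)
{
    rsrv_node_t sNode = { RES_TYPE_STD, 2 };   /* one UM ref, one KM ref */

    um_free_mapping(&sNode);             /* UM frees first */
    if (try_remap(&sNode) != 0)
    {
        printf("remap rejected, mapping is a zombie\n");
    }
    node_unref(&sNode);                  /* KM drops its reference, node freed */
    return 0;
}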
Object representing a virtual range reservation and mapping between + * the virtual range and a set of PMRs. + * + * The physical allocations may be mapped entirely or partially to the entire + * or partial virtual range. */ +struct _DEVMEMXINT_RESERVATION_ { - struct _DEVMEMINT_RESERVATION_ *psReservation; - PMR *psPMR; - IMG_UINT32 uiNumPages; + /*! Pointer to a device memory heap this reservation is made on. */ + struct _DEVMEMINT_HEAP_ *psDevmemHeap; + /*! Base device virtual address of this reservation. */ + IMG_DEV_VIRTADDR sBase; + /*! Size of this reservation (in bytes). */ + IMG_DEVMEM_SIZE_T uiLength; + /*! Lock for protecting concurrent operations on the mapping. Also protects + * parallel operations occurring on the same reservation such as a (un)mapping operation + * from UM and a remap operation from the Linux Kernel. As such also protects mapping records + * kept for remap operations. + */ + POS_LOCK hLock; + + /* KM refcount used to protect the lifetime of the reservation itself. This ensures + * synchronisation of use between UM usage and KM usage such as remap paths. We may need + * to keep the reservation alive for these KM paths even once the UM has released it since + * it is used to synchronise operations originating from these KM paths and between UM and KM. + */ + ATOMIC_T iLifetimeRefCount; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /*! Array of DevmemX Map Ranges of size `uiNumPages`. This array represents how the + * physical memory is mapped to the virtual range. Each entry in the array + * represents to one device page which means that one PMR may be spread + * across many indices and related to many ranges */ + DEVMEMX_MAPPING *ppsDevmemXMapping[IMG_FLEX_ARRAY_MEMBER]; +#else + + /*! Array of PMRs of size `uiNumPages`. This array represents how the + * physical memory is mapped to the virtual range. Each entry in the array + * represents to one device page which means that one PMR may be spread + * across many indices. */ + PMR **ppsPMR; +#endif }; struct _DEVMEMINT_PF_NOTIFY_ @@ -146,14 +323,917 @@ struct _DEVMEMINT_PF_NOTIFY_ DLLIST_NODE sProcessNotifyListElem; }; +/** Computes division using log2 of divisor. */ +#define LOG2_DIV(x, log2) ((x) >> (log2)) + +/** Computes modulo of a power of 2. */ +#define LOG2_MOD(x, log2) ((x) & ((1 << (log2)) - 1)) + +static INLINE IMG_UINT32 _DevmemReservationPageCount(DEVMEMINT_RESERVATION *psRsrv); + +/*************************************************************************/ /*! +@Function DevmemIntReservationIsIndexMapped +@Description Checks whether a particular index in the reservation has been + mapped to a page in psMappedPMR. + +@Return IMG_TRUE if mapped or IMG_FALSE if not. +*/ /**************************************************************************/ +static INLINE IMG_BOOL DevmemIntReservationIsIndexMapped(DEVMEMINT_RESERVATION *psReservation, + IMG_UINT32 ui32Index) +{ + IMG_UINT32 ui32MapIndex = LOG2_DIV(ui32Index, MAP_MASK_SHIFT); + + PVR_ASSERT(psReservation != NULL); + PVR_ASSERT(ui32Index < _DevmemReservationPageCount(psReservation)); + + return BIT_ISSET(psReservation->pui8Map[ui32MapIndex], LOG2_MOD(ui32Index, MAP_MASK_SHIFT)); +} + +/*************************************************************************/ /*! +@Function DevmemIntReservationSetMappingIndex +@Description Sets an index of the reservation map to indicate a mapped or + unmapped PMR page. + +@Note The reservations hLock must be acquired before calling this + function. 
+ +@Return None +*/ /**************************************************************************/ +static void DevmemIntReservationSetMappingIndex(DEVMEMINT_RESERVATION *psReservation, + IMG_UINT32 ui32Index, + IMG_BOOL bMap) +{ + IMG_UINT32 ui32MapIndex = LOG2_DIV(ui32Index, MAP_MASK_SHIFT); + + PVR_ASSERT(psReservation != NULL); + PVR_ASSERT(ui32Index < _DevmemReservationPageCount(psReservation)); + + if (bMap) + { + BIT_SET(psReservation->pui8Map[ui32MapIndex], LOG2_MOD(ui32Index, MAP_MASK_SHIFT)); + } + else + { + BIT_UNSET(psReservation->pui8Map[ui32MapIndex], LOG2_MOD(ui32Index, MAP_MASK_SHIFT)); + } +} + +static INLINE IMG_UINT32 +_DevmemXReservationPageCount(DEVMEMXINT_RESERVATION *psRsrv) +{ + return psRsrv->uiLength >> psRsrv->psDevmemHeap->uiLog2PageSize; +} + +static INLINE IMG_DEV_VIRTADDR +_DevmemXReservationPageAddress(DEVMEMXINT_RESERVATION *psRsrv, IMG_UINT32 uiVirtPageOffset) +{ + IMG_DEV_VIRTADDR sAddr = { + .uiAddr = psRsrv->sBase.uiAddr + ((IMG_UINT64)uiVirtPageOffset << psRsrv->psDevmemHeap->uiLog2PageSize) + }; + + return sAddr; +} + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +/**************************************** + * Map & Remap synchronisation helpers * + ****************************************/ + +static void +_AcquireReservationPMRLocks(POS_LOCK hRsrvLock, PMR* psPMR) +{ + OSLockAcquire(hRsrvLock); + PMRLockPMR(psPMR); +} + +static void +_AcquireReservationPMRLocksNested(POS_LOCK hRsrvLock, RESERVATION_USAGE eLockClass, PMR* psPMR) +{ + OSLockAcquireNested(hRsrvLock, eLockClass); + PMRLockPMR(psPMR); +} + +static void +_ReleaseReservationPMRLocks(POS_LOCK hRsrvLock, PMR* psPMR) +{ + PMRUnlockPMR(psPMR); + OSLockRelease(hRsrvLock); +} + +static void +_DeschedAllowRemapToComplete(POS_LOCK hRsrvLock, PMR* psPMR) +{ + PMRUnlockPMR(psPMR); + OSLockRelease(hRsrvLock); + OSReleaseThreadQuanta(); + OSLockAcquire(hRsrvLock); + PMRLockPMR(psPMR); +} + +static void +_DeschedAllowRemapToCompleteNested(POS_LOCK hRsrvLock, RESERVATION_USAGE eLockClass, PMR* psPMR) +{ + PMRUnlockPMR(psPMR); + OSLockRelease(hRsrvLock); + OSReleaseThreadQuanta(); + OSLockAcquireNested(hRsrvLock, eLockClass); + PMRLockPMR(psPMR); +} + +/******************* + * Remap functions * + *******************/ + +static IMG_BOOL +_CheckMultipleRelatedMappingsExist(DLLIST_NODE *psMappingListHead, + IMG_UINT32 ui32LogicalPgOffset) +{ + PDLLIST_NODE pNext, pNode; + DEVMEM_PMR_RSRV_NODE *psFoundPMRResLinkNode = NULL; + IMG_BOOL bMultipleFound = IMG_FALSE; + + dllist_foreach_node(psMappingListHead, pNode, pNext) + { + DEVMEM_PMR_RSRV_NODE *psPMRResLinkNode = IMG_CONTAINER_OF(pNode, DEVMEM_PMR_RSRV_NODE, sLinkNode); + + if (psPMRResLinkNode->pfnIsPageInMap(psPMRResLinkNode, ui32LogicalPgOffset)) + { + if (psFoundPMRResLinkNode == NULL) + { + psFoundPMRResLinkNode = psPMRResLinkNode; + } + else + { + /* We have found multiple mappings associated with this page, + * we will reject the remap request + */ + bMultipleFound = IMG_TRUE; + break; + } + } + } + + return bMultipleFound; +} + +static PVRSRV_ERROR +_pfnDevmemRemapPageInPMR(PMR *psPMR, + DLLIST_NODE *psMappingListHead, + DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, + IMG_UINT32 ui32LogicalPgOffset) +{ + PVRSRV_ERROR eError; + DEVMEM_MAPPING* psMapping = IMG_CONTAINER_OF(psPMRRsrvNode, DEVMEM_MAPPING, sPMRRsrvLinkNode); + DEVMEMINT_RESERVATION *psRsrv = psMapping->psRsrv; + IMG_DEV_VIRTADDR sDevVAddr; + + _AcquireReservationPMRLocksNested(psRsrv->hLock, + psRsrv->eLockClass, + psPMR); + + PVR_ASSERT(psMapping->sPMRRsrvLinkNode.eType != DEVMEM_RES_TYPE_X); + + 
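pui8Map above is a byte-array bitmap with one bit per PMR page, addressed through LOG2_DIV and LOG2_MOD with MAP_MASK_SHIFT = 3, i.e. byte index >> 3 and bit index & 7. A standalone sketch of the same set/clear/test arithmetic:

/* Standalone sketch of the reservation mapping bitmap: one bit per page in a
 * uint8_t array, addressed exactly as DevmemIntReservationIsIndexMapped and
 * DevmemIntReservationSetMappingIndex do above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAP_MASK_SHIFT_EXAMPLE 3
#define LOG2_DIV(x, log2) ((x) >> (log2))
#define LOG2_MOD(x, log2) ((x) & ((1u << (log2)) - 1u))

static void map_set(uint8_t *pui8Map, uint32_t ui32Index, int bMap)
{
    uint32_t ui32Byte = LOG2_DIV(ui32Index, MAP_MASK_SHIFT_EXAMPLE);
    uint8_t  ui8Bit   = (uint8_t)(1u << LOG2_MOD(ui32Index, MAP_MASK_SHIFT_EXAMPLE));

    if (bMap)
        pui8Map[ui32Byte] |= ui8Bit;
    else
        pui8Map[ui32Byte] &= (uint8_t)~ui8Bit;
}

static int map_is_set(const uint8_t *pui8Map, uint32_t ui32Index)
{
    return (pui8Map[LOG2_DIV(ui32Index, MAP_MASK_SHIFT_EXAMPLE)]
            >> LOG2_MOD(ui32Index, MAP_MASK_SHIFT_EXAMPLE)) & 1u;
}

int main(void)
{
    /* 32 pages need 32 >> 3 = 4 bytes of map. */
    uint8_t aui8Map[4];

    memset(aui8Map, 0, sizeof(aui8Map));
    map_set(aui8Map, 13, 1);
    assert(map_is_set(aui8Map, 13) && !map_is_set(aui8Map, 12));
    map_set(aui8Map, 13, 0);
    assert(!map_is_set(aui8Map, 13));
    printf("bitmap round trip ok\n");
    return 0;
}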
/* First check if the Mapping is in Zombie state. This means UM has freed it between us + * finding the related mapping on the PMR list and us preparing to action a remap here. + */ + if (psMapping->sPMRRsrvLinkNode.eType == DEVMEM_RES_TYPE_ZOMBIE) + { + /* This mapping has been deleted by UM */ + eError = PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED; + goto _Return; + } + + /* Now we have locked the reservation and the PMR, check if multiple mapping entries exist + * on the list related to the page to remap. If there are multiple, we must reject the + * request to avoid inconsistency in event of remap failure. + */ + if (_CheckMultipleRelatedMappingsExist(psMappingListHead, ui32LogicalPgOffset)) + { + /* This PMR page has multiple associated mappings, reject migrate */ + eError = PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED; + goto _Return; + } + + sDevVAddr.uiAddr = psRsrv->sBase.uiAddr + ((IMG_UINT64)ui32LogicalPgOffset << psRsrv->psDevmemHeap->uiLog2PageSize); + + eError = MMU_RemapPage(psRsrv->psDevmemHeap->psDevmemCtx->psMMUContext, + psRsrv->uiFlags, + sDevVAddr, + psRsrv->psDevmemHeap->uiLog2PageSize, + psPMR, + ui32LogicalPgOffset); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Devmem mapped PMR Remap page failed")); + } + +_Return: + _ReleaseReservationPMRLocks(psRsrv->hLock, psPMR); + return eError; +} + +static IMG_BOOL +_pfnDevmemCheckPMRPageInMappingRange(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, IMG_UINT32 ui32LogicalPgOffset) +{ + /* Devmem reservations can have 0 or 1 mappings, hence always return true if called for when mapping present. + * We also don't support sparse mappings for movable pages, so the offset will always be valid for the + * mapping should it exist. + */ + return IMG_TRUE; +} + +static PVRSRV_ERROR +_pfnDevmemXRemapPageInPMR(PMR *psPMR, + DLLIST_NODE *psMappingListHead, + DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, + IMG_UINT32 ui32LogicalPgOffset) +{ + PVRSRV_ERROR eError; + DEVMEMX_MAPPING *psDevmemXMapping = IMG_CONTAINER_OF(psPMRRsrvNode, + DEVMEMX_MAPPING, + sPMRRsrvLinkNode); + DEVMEMXINT_RESERVATION *psRsrv = psDevmemXMapping->psRsrv; + IMG_UINT32 uiVirtPageOffset; + IMG_DEV_VIRTADDR sDevVAddr; + + _AcquireReservationPMRLocks(psRsrv->hLock, psPMR); + + PVR_ASSERT(psDevmemXMapping->sPMRRsrvLinkNode.eType != DEVMEM_RES_TYPE_STD); + + /* First check if the Mapping is in Zombie state. This means UM has freed it between us + * finding the related mapping on the PMR list and us preparing to action a remap here. + */ + if (psDevmemXMapping->sPMRRsrvLinkNode.eType == DEVMEM_RES_TYPE_ZOMBIE) + { + /* This mapping has been deleted by UM */ + eError = PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED; + goto _Return; + } + + /* Next check if the mapping range is still valid for the PMR page, this could be changed + * by a UM map overwriting part of an old PMR page range if we were already in the process + * of mapping when remap occurred and we checked for the existence of related mappings. + */ + if (!psPMRRsrvNode->pfnIsPageInMap(psPMRRsrvNode, ui32LogicalPgOffset)) + { + /* This range is no longer related to the page we wish to migrate. */ + eError = PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED; + goto _Return; + } + + /* Finally, check if multiple mapping entries exist on the list related to the page to remap. + * If there are multiple, we must reject the request to avoid inconsistency in event of remap failure. 
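+	 * A rejected request is reported as PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED so
+	 * the caller leaves the page where it is instead of risking a partial update.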
+ */ + if (_CheckMultipleRelatedMappingsExist(psMappingListHead, ui32LogicalPgOffset)) + { + /* This PMR page has multiple associated mappings, reject migrate */ + eError = PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED; + goto _Return; + } + + uiVirtPageOffset = psDevmemXMapping->uiVirtPageOffset + + (ui32LogicalPgOffset - psDevmemXMapping->uiPhysPageOffset); + + sDevVAddr = _DevmemXReservationPageAddress(psDevmemXMapping->psRsrv, + uiVirtPageOffset); + + eError = + MMU_RemapPage(psDevmemXMapping->psRsrv->psDevmemHeap->psDevmemCtx->psMMUContext, + psDevmemXMapping->uiFlags, + sDevVAddr, + psDevmemXMapping->psRsrv->psDevmemHeap->uiLog2PageSize, + psPMR, + ui32LogicalPgOffset); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "DevmemX mapped PMR Remap page failed")); + } + + +_Return: + _ReleaseReservationPMRLocks(psRsrv->hLock, psPMR); + return eError; +} + +static IMG_BOOL +_pfnDevmemXCheckPMRPageInMappingRange(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, IMG_UINT32 ui32LogicalPgOffset) +{ + DEVMEMX_MAPPING *psDevmemXMapping = IMG_CONTAINER_OF(psPMRRsrvNode, + DEVMEMX_MAPPING, + sPMRRsrvLinkNode); + + if (ui32LogicalPgOffset >= psDevmemXMapping->uiPhysPageOffset + psDevmemXMapping->uiPageCount || + ui32LogicalPgOffset < psDevmemXMapping->uiPhysPageOffset) + { + /* This range is not related to the page we wish to migrate. We return IMG_FALSE + * because there is no operation to perform on this mapping. + */ + return IMG_FALSE; + } + + return IMG_TRUE; +} + + +PVRSRV_ERROR +DevmemIntRemapPageInPMR(PMR *psPMR, DLLIST_NODE *psMappingListHead, IMG_UINT32 ui32LogicalPgOffset) +{ + DEVMEM_PMR_RSRV_NODE *psFoundPMRResLinkNode = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + PDLLIST_NODE pNext, pNode; + + /* Lock the PMR whilst obtaining nodes from the list but not performing remap actions on them, + * this allows us to protect lifetimes of the attachments */ + PMRLockPMR(psPMR); + + /* The PMR caller of this function has no perspective as to if a mapping + * on its list is related to the page it wishes to remap and so we must iterate + * over the list checking for related mappings. If we find a related mapping, pass + * it onto the remap function for processing. 
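+	 * Only the first matching node is taken here; the per-type pfnRemapPage()
+	 * callback re-validates the list under the reservation and PMR locks and
+	 * rejects the remap if more than one related mapping is found.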
+ */ + dllist_foreach_node(psMappingListHead, pNode, pNext) + { + DEVMEM_PMR_RSRV_NODE *psPMRResLinkNode = IMG_CONTAINER_OF(pNode, DEVMEM_PMR_RSRV_NODE, sLinkNode); + + if (psPMRResLinkNode->pfnIsPageInMap(psPMRResLinkNode, ui32LogicalPgOffset)) + { + psFoundPMRResLinkNode = psPMRResLinkNode; + break; + } + } + + if (psFoundPMRResLinkNode) + { + eError = _RefPMRRsrvNode(psFoundPMRResLinkNode); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntRemapPageInPMR.", ErrUnlockPMRExit); + + PMRUnlockPMR(psPMR); + + eError = psFoundPMRResLinkNode->pfnRemapPage(psPMR, + psMappingListHead, + psFoundPMRResLinkNode, + ui32LogicalPgOffset); + PVR_GOTO_IF_ERROR(eError, ErrUnrefNodeExit); + + _UnrefAndMaybeDestroyPMRRsrvNode(psFoundPMRResLinkNode); + } + else + { + PMRUnlockPMR(psPMR); + } + + return eError; + +ErrUnlockPMRExit: + PMRUnlockPMR(psPMR); + return eError; + +ErrUnrefNodeExit: + _UnrefAndMaybeDestroyPMRRsrvNode(psFoundPMRResLinkNode); + return eError; +} + +/********************************************************** + * DEVMEM_PMR_RSRV_NODE functions and lifetime management * + **********************************************************/ + +static void _InitialisePMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, + DEVMEM_RESERVATION_TYPE eType, + PFN_REMAP_PAGE_FN pfnRemapPage, + PFN_IS_PAGE_IN_MAP_FN pfnIsPageInMap, + PFN_FREE_PARENT_STRUCT_FN pfnFreeParent) +{ + /* Automatically start with a ref */ + OSAtomicWrite(&psPMRRsrvNode->iRefCount, 1); + + psPMRRsrvNode->eType = eType; + psPMRRsrvNode->pfnRemapPage = pfnRemapPage; + psPMRRsrvNode->pfnIsPageInMap = pfnIsPageInMap; + psPMRRsrvNode->pfnFreeParent = pfnFreeParent; +} + +static +PVRSRV_ERROR _RefPMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode) +{ + IMG_INT32 iOldValue = OSAtomicAddUnless(&psPMRRsrvNode->iRefCount, 1, + DEVMEM_PMR_RSRV_NODE_REFCOUNT_MAX); + + if (iOldValue == DEVMEM_PMR_RSRV_NODE_REFCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the PMRRsrvNode, " + "reference count has overflowed.", __func__)); + return PVRSRV_ERROR_ATOMIC_OVERFLOW; + } + + return PVRSRV_OK; +} + +static +void _UnrefAndMaybeDestroyPMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode) +{ + if (OSAtomicDecrement(&psPMRRsrvNode->iRefCount) == 0) + { + psPMRRsrvNode->pfnFreeParent(psPMRRsrvNode); + } +} + +static +void _LinkPMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, PMR *psPMR) +{ + PMRLockHeldAssert(psPMR); + PMRLinkGPUMapping(psPMR, &psPMRRsrvNode->sLinkNode); +} + +static +void _UnlinkPMRRsrvNode(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode, PMR *psPMR) +{ + PMRLockHeldAssert(psPMR); + + psPMRRsrvNode->eType = DEVMEM_RES_TYPE_ZOMBIE; + PMRUnlinkGPUMapping(psPMR, &psPMRRsrvNode->sLinkNode); +} + +/*********************************** + * Reservation lifetime management * + ***********************************/ + +static PVRSRV_ERROR +_DevmemReservationRef(DEVMEMINT_RESERVATION *psRsrv) +{ + IMG_INT32 iOldValue = OSAtomicAddUnless(&psRsrv->iLifetimeRefCount, 1, + DEVMEMRESERVATION_REFCOUNT_MAX); + + if (iOldValue == DEVMEMCTX_REFCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the reservation, " + "reference count has overflowed.", __func__)); + return PVRSRV_ERROR_ATOMIC_OVERFLOW; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_DevmemXReservationRef(DEVMEMXINT_RESERVATION *psRsrv) +{ + IMG_INT32 iOldValue = OSAtomicAddUnless(&psRsrv->iLifetimeRefCount, 1, + DEVMEMRESERVATION_REFCOUNT_MAX); + + if (iOldValue == DEVMEMCTX_REFCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the reservation, " + "reference count has 
overflowed.", __func__)); + return PVRSRV_ERROR_ATOMIC_OVERFLOW; + } + + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_LINUX_OSPAGE_MIGRATION) */ + +static void +_DevmemReservationUnrefAndMaybeDestroy(DEVMEMINT_RESERVATION *psRsrv) +{ + if (OSAtomicDecrement(&psRsrv->iLifetimeRefCount) == 0) + { + OSLockDestroy(psRsrv->hLock); + OSFreeMem(psRsrv); + } +} + +static void +_DevmemXReservationUnrefAndMaybeDestroy(DEVMEMXINT_RESERVATION *psRsrv) +{ + if (OSAtomicDecrement(&psRsrv->iLifetimeRefCount) == 0) + { + OSLockDestroy(psRsrv->hLock); + OSFreeMem(psRsrv); + } +} + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +/***************************** + * Devmem mapping management * + *****************************/ + +static void _pfnDevmemMappingFreeCallback(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode) +{ + DEVMEM_MAPPING *psDevmemMapping; + PVR_LOG_RETURN_VOID_IF_FALSE(psPMRRsrvNode != NULL, "psPMRRsrvNode"); + + psDevmemMapping = IMG_CONTAINER_OF(psPMRRsrvNode, DEVMEM_MAPPING, sPMRRsrvLinkNode); + + _DevmemReservationUnrefAndMaybeDestroy(psDevmemMapping->psRsrv); + OSFreeMem(psDevmemMapping); +} + +static DEVMEM_MAPPING* _AllocateDevmemMapping(DEVMEMINT_RESERVATION *psRsrv, + PMR *psPMR) +{ + PVRSRV_ERROR eError; + + DEVMEM_MAPPING *psDevmemMapping = OSAllocZMem(sizeof(*psDevmemMapping)); + if (!psDevmemMapping) + { + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", + "psDevmemMapping", + __func__)); + + return NULL; + } + + eError = _DevmemReservationRef(psRsrv); + if (eError != PVRSRV_OK) + { + OSFreeMem(psDevmemMapping); + return NULL; + } + + _InitialisePMRRsrvNode(&psDevmemMapping->sPMRRsrvLinkNode, + DEVMEM_RES_TYPE_STD, + _pfnDevmemRemapPageInPMR, + _pfnDevmemCheckPMRPageInMappingRange, + _pfnDevmemMappingFreeCallback); + + psDevmemMapping->psRsrv = psRsrv; + + return psDevmemMapping; +} + +/***************************** + * DevmemX mapping management * + *****************************/ + +static void _FreePreAllocatedDevmemXMapping(DEVMEMX_MAPPING *psDevmemXMapping) +{ + PVR_LOG_RETURN_VOID_IF_FALSE(psDevmemXMapping != NULL, "psDevmemXMapping"); + + _DevmemXReservationUnrefAndMaybeDestroy(psDevmemXMapping->psRsrv); + OSFreeMem(psDevmemXMapping); +} + +static void _pfnDevmemXMappingFreeCallback(DEVMEM_PMR_RSRV_NODE *psPMRRsrvNode) +{ + DEVMEMX_MAPPING *psDevmemXMapping; + PVR_LOG_RETURN_VOID_IF_FALSE(psPMRRsrvNode != NULL, "psPMRRsrvNode"); + + psDevmemXMapping = IMG_CONTAINER_OF(psPMRRsrvNode, + DEVMEMX_MAPPING, + sPMRRsrvLinkNode); + + /* This pfn is called from either DestroyDevmemXMapping or the Remap path + * dropping its final reference. At this point in both cases the mapping has + * been removed from the list and by doing so de-initialised. Meaning we + * can treat it as though it is in a pre-allocated state. 
+ */ + _FreePreAllocatedDevmemXMapping(psDevmemXMapping); +} + +static void _DestroyDevmemXMapping(DEVMEMX_MAPPING *psDevmemXMapping) +{ + PMRLockHeldAssert(psDevmemXMapping->psPMR); + + PVR_LOG_RETURN_VOID_IF_FALSE(psDevmemXMapping != NULL, "psDevmemXMapping"); + + _UnlinkPMRRsrvNode(&psDevmemXMapping->sPMRRsrvLinkNode, psDevmemXMapping->psPMR); + _UnrefAndMaybeDestroyPMRRsrvNode(&psDevmemXMapping->sPMRRsrvLinkNode); +} + +static DEVMEMX_MAPPING* _PreAllocateDevmemXMapping(DEVMEMXINT_RESERVATION *psRsrv) +{ + PVRSRV_ERROR eError; + DEVMEMX_MAPPING *psDevmemXMapping = OSAllocZMem(sizeof(*psDevmemXMapping)); + if (!psDevmemXMapping) + { + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", + "psDevmemXMapping", + __func__)); + + return NULL; + } + + /* Take ref and set the reservation as we need to fail early */ + eError = _DevmemXReservationRef(psRsrv); + if (eError != PVRSRV_OK) + { + OSFreeMem(psDevmemXMapping); + return NULL; + } + psDevmemXMapping->psRsrv = psRsrv; + + _InitialisePMRRsrvNode(&psDevmemXMapping->sPMRRsrvLinkNode, + DEVMEM_RES_TYPE_X, + _pfnDevmemXRemapPageInPMR, + _pfnDevmemXCheckPMRPageInMappingRange, + _pfnDevmemXMappingFreeCallback); + + return psDevmemXMapping; +} + +static void _InitialiseDevmemXMapping(DEVMEMX_MAPPING *psDevmemXMapping, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiPhysPageOffset, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount) +{ + PMRLockHeldAssert(psPMR); + + psDevmemXMapping->uiFlags = uiFlags; + psDevmemXMapping->uiPhysPageOffset = uiPhysPageOffset; + psDevmemXMapping->uiVirtPageOffset = uiVirtPageOffset; + psDevmemXMapping->uiPageCount = uiPageCount; + psDevmemXMapping->psPMR = psPMR; + + _LinkPMRRsrvNode(&psDevmemXMapping->sPMRRsrvLinkNode, psPMR); +} + +static DEVMEMX_MAPPING* _AllocateDevmemXMapping(DEVMEMXINT_RESERVATION *psRsrv, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiPhysPageOffset, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount) +{ + DEVMEMX_MAPPING* psDevmemXMapping; + + PMRLockHeldAssert(psPMR); + + psDevmemXMapping = _PreAllocateDevmemXMapping(psRsrv); + if (!psDevmemXMapping) + { + return NULL; + } + + _InitialiseDevmemXMapping(psDevmemXMapping, + psPMR, + uiFlags, + uiPhysPageOffset, + uiVirtPageOffset, + uiPageCount); + + return psDevmemXMapping; +} + +/* Return new range */ +static void _SplitDevmemXMapping(DEVMEMX_MAPPING *psSrcDevmemXMapping, + DEVMEMX_MAPPING *psSplitDevmemXMapping, + IMG_UINT32 uiNewMappingOffset) +{ + IMG_UINT32 uiCorrectedPageCount; + IMG_UINT32 uiNewMappingPageCount; + IMG_UINT32 uiNewMappingPhysOffset; + + PVR_ASSERT(psSrcDevmemXMapping != NULL); + PVR_ASSERT(psSplitDevmemXMapping != NULL); + + uiCorrectedPageCount = uiNewMappingOffset - psSrcDevmemXMapping->uiVirtPageOffset; + uiNewMappingPageCount = (psSrcDevmemXMapping->uiVirtPageOffset + psSrcDevmemXMapping->uiPageCount) - + uiNewMappingOffset; + uiNewMappingPhysOffset = psSrcDevmemXMapping->uiPhysPageOffset + uiCorrectedPageCount; + + PVR_LOG_GOTO_IF_FALSE(uiNewMappingOffset > psSrcDevmemXMapping->uiVirtPageOffset, + "New range offset precedes source mapping.", + ErrorExit); + PVR_LOG_GOTO_IF_FALSE(uiNewMappingOffset <= (psSrcDevmemXMapping->uiVirtPageOffset + + psSrcDevmemXMapping->uiPageCount), + "New range offset exceeds source mapping.", + ErrorExit); + + _InitialiseDevmemXMapping(psSplitDevmemXMapping, + psSrcDevmemXMapping->psPMR, + psSrcDevmemXMapping->uiFlags, + uiNewMappingPhysOffset, + uiNewMappingOffset, + uiNewMappingPageCount); + + 
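+	/* Shrink the source mapping so it now ends where the new split range begins. */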
psSrcDevmemXMapping->uiPageCount = uiCorrectedPageCount; + + return; + +ErrorExit: + /* This should never happen and indicates logic error */ + PVR_DPF((PVR_DBG_FATAL, "Unable to split range for devmemx accounting!")); + OSWarnOn(1); +} + +static void _AdjustDevmemXMappingStart(DEVMEMX_MAPPING *psSrcDevmemXMapping, + IMG_UINT32 uiNewMappingOffsetStart) +{ + IMG_UINT32 uiVirtualPagesDiff = (uiNewMappingOffsetStart - psSrcDevmemXMapping->uiVirtPageOffset); + IMG_UINT32 uiCorrectedPageCount = psSrcDevmemXMapping->uiPageCount - uiVirtualPagesDiff; + IMG_UINT32 uiCorrectedPhysOffset = psSrcDevmemXMapping->uiPhysPageOffset + uiVirtualPagesDiff; + + psSrcDevmemXMapping->uiVirtPageOffset = uiNewMappingOffsetStart; + psSrcDevmemXMapping->uiPhysPageOffset = uiCorrectedPhysOffset; + psSrcDevmemXMapping->uiPageCount = uiCorrectedPageCount; +} + +static void _AdjustDevmemXMappingEnd(DEVMEMX_MAPPING *psSrcDevmemXMapping, + IMG_UINT32 uiNewMappingOffsetEnd) +{ + IMG_UINT32 uiCorrectedPageCount = uiNewMappingOffsetEnd - psSrcDevmemXMapping->uiVirtPageOffset; + + psSrcDevmemXMapping->uiPageCount = uiCorrectedPageCount; +} + +static void _ReplaceCentreSplitExistingMapping(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount, + DEVMEMX_MAPPING *psReplaceMapping, + DEVMEMX_MAPPING *psSplitMappings[2], + IMG_UINT32 *uiDeferPMRUnlockPhyCount) +{ + IMG_UINT32 i; + DEVMEMX_MAPPING *psCurrentMapping = psRsrv->ppsDevmemXMapping[uiVirtPageOffset]; + PMR *psCurrentPMR = psCurrentMapping->psPMR; + + /* If the psCurrentMapping references the same PMR we are replacing it with we + * already have the PMR locked above, else we need to lock the PMR we are adjusting. + */ + if (psReplaceMapping && + psCurrentPMR == psReplaceMapping->psPMR) + { + _SplitDevmemXMapping(psCurrentMapping, psSplitMappings[0], uiVirtPageOffset); + + _SplitDevmemXMapping(psSplitMappings[0], psSplitMappings[1], uiVirtPageOffset + uiPageCount); + + _DestroyDevmemXMapping(psSplitMappings[0]); + + *uiDeferPMRUnlockPhyCount = uiPageCount; + } + else + { + PMRLockPMR(psCurrentPMR); + + _SplitDevmemXMapping(psCurrentMapping, psSplitMappings[0], uiVirtPageOffset); + + _SplitDevmemXMapping(psSplitMappings[0], psSplitMappings[1], uiVirtPageOffset + uiPageCount); + + _DestroyDevmemXMapping(psSplitMappings[0]); + + PMRUnlockPMR(psCurrentPMR); + + /* Drop references on PMR we have replaced */ + PMRUnlockSysPhysAddressesN(psCurrentPMR, uiPageCount); + } + + /* Update replaced centre region */ + for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) + { + psRsrv->ppsDevmemXMapping[i] = psReplaceMapping; + } + + /* Update split after mappings with new entry */ + for (i = psSplitMappings[1]->uiVirtPageOffset; + i < psSplitMappings[1]->uiVirtPageOffset + psSplitMappings[1]->uiPageCount; i++) + { + psRsrv->ppsDevmemXMapping[i] = psSplitMappings[1]; + } +} + +static void _AdjustOrDestroyReplacedMapping(DEVMEMX_MAPPING *psCurrentMapping, + IMG_UINT32 uiIterStart, + IMG_UINT32 uiIterEnd) +{ + __maybe_unused PMR *psCurrentPMR = psCurrentMapping->psPMR; + + PMRLockHeldAssert(psCurrentPMR); + + /* Adjust or free the map range we just replaced. + * + * 3 Cases: + * New range replaces all existing range + * New range replaces from end of exist range. (This can only happen on first iter) + * New range replaces from start of existing range. 
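+	 * Any other overlap pattern is unexpected here and is reported as a fatal
+	 * logic error in the final else branch below.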
+ * */ + if (psCurrentMapping->uiVirtPageOffset == uiIterStart && + psCurrentMapping->uiVirtPageOffset + psCurrentMapping->uiPageCount == uiIterEnd) + { + _DestroyDevmemXMapping(psCurrentMapping); + } + else if (psCurrentMapping->uiVirtPageOffset < uiIterStart) + { + _AdjustDevmemXMappingEnd(psCurrentMapping, uiIterStart); + } + else if (uiIterEnd < psCurrentMapping->uiVirtPageOffset + (psCurrentMapping->uiPageCount)) + { + _AdjustDevmemXMappingStart(psCurrentMapping, uiIterEnd); + } + else + { + PVR_DPF((PVR_DBG_FATAL, "Unable to perform range record adjustment")); + } +} + +static void _ReplaceReservationMappingRecords(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount, + DEVMEMX_MAPPING *psReplaceMapping, + IMG_UINT32 *uiDeferPMRUnlockPhyCount) +{ + IMG_UINT32 i; + + for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount);) + { + IMG_UINT32 j = i; + IMG_UINT32 uiConsolidate = 0; + DEVMEMX_MAPPING *psCurrentMapping = psRsrv->ppsDevmemXMapping[i]; + PMR *psCurrentPMR = psCurrentMapping->psPMR; + + /* If we have attempted to delete a virtual record from an empty page entry */ + if (psReplaceMapping == NULL && + psCurrentMapping == NULL) + { + i++; + continue; + } + /* If we are adding a new record to the current range reference */ + else if (psCurrentMapping == NULL) + { + psRsrv->ppsDevmemXMapping[i] = psReplaceMapping; + i++; + continue; + } + + /* Iterate over records with the same CurrentMapRange and replace them + * in the Map Range array + */ + do + { + psRsrv->ppsDevmemXMapping[j] = psReplaceMapping; + uiConsolidate++; + j++; + } + while (j < uiVirtPageOffset + uiPageCount && + psCurrentMapping == psRsrv->ppsDevmemXMapping[j] + ); + + /* If the psCurrentMapping references the same PMR we are replacing it with we + * already have the PMR locked above, else we need to lock the PMR we are adjusting. + */ + if (psReplaceMapping && + psCurrentPMR == psReplaceMapping->psPMR) + { + _AdjustOrDestroyReplacedMapping(psCurrentMapping, i, j); + + /* We can't UnlockPhyAddrs for this mapping since we have locked it + * in the layer above, defer until we are done with all adjustments + * to this PMR. + */ + *uiDeferPMRUnlockPhyCount += uiConsolidate; + } + else + { + PMRLockPMR(psCurrentPMR); + _AdjustOrDestroyReplacedMapping(psCurrentMapping, i, j); + PMRUnlockPMR(psCurrentPMR); + + /* Drop references on PMR we have replaced */ + PMRUnlockSysPhysAddressesN(psCurrentPMR, uiConsolidate); + } + + i += uiConsolidate; + } +} + +static void _DeleteReservationMappingRecords(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount) +{ + __maybe_unused IMG_UINT32 uiDeferUnlockPhyCount = 0; + _ReplaceReservationMappingRecords(psRsrv, + uiVirtPageOffset, + uiPageCount, + NULL, + &uiDeferUnlockPhyCount); + + /* We don't have a PMR locked when calling this function so there will be + * no UnlockPhys to defer. + */ + PVR_ASSERT(uiDeferUnlockPhyCount == 0); +} +#endif /* defined(SUPPORT_LINUX_OSPAGE_MIGRATION) */ + /*************************************************************************/ /*! @Function DevmemIntCtxAcquire @Description Acquire a reference to the provided device memory context. 
-@Return None +@Return IMG_TRUE on success, IMG_FALSE on overflow */ /**************************************************************************/ -static INLINE void DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx) +static INLINE IMG_BOOL DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx) { - OSAtomicIncrement(&psDevmemCtx->hRefCount); + IMG_INT32 iOldValue = OSAtomicAddUnless(&psDevmemCtx->hRefCount, 1, + DEVMEMCTX_REFCOUNT_MAX); + + if (iOldValue == DEVMEMCTX_REFCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " + "context, reference count has overflowed.", __func__)); + return IMG_FALSE; + } + + return IMG_TRUE; } /*************************************************************************/ /*! @@ -171,6 +1251,19 @@ static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx) PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode; DLLIST_NODE *psNode, *psNodeNext; + /* Protect removing the node from the list in case it's being accessed + * by DevmemIntPFNotify(). */ + OSWRLockAcquireWrite(psDevNode->hPageFaultNotifyLock); + /* If this context is in the list registered for a debugger, remove + * from that list */ + if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem)) + { + dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); + } + /* It should be safe to release the lock here (as long as + * DevmemIntPFNotify() protects accessing memory contexts as well). */ + OSWRLockReleaseWrite(psDevNode->hPageFaultNotifyLock); + /* If there are any PIDs registered for page fault notification. * Loop through the registered PIDs and free each one */ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) @@ -181,21 +1274,12 @@ static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx) OSFreeMem(psNotifyNode); } - /* If this context is in the list registered for a debugger, remove - * from that list */ - if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem)) - { - dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); - } - if (psDevNode->pfnUnregisterMemoryContext) { psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData); } MMU_ContextDestroy(psDevmemCtx->psMMUContext); - OSWRLockDestroy(psDevmemCtx->hListLock); - PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", __func__, psDevmemCtx)); OSFreeMem(psDevmemCtx); @@ -209,10 +1293,10 @@ static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx) */ /**************************************************************************/ static INLINE IMG_BOOL DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap) { - IMG_BOOL bSuccess = OSAtomicAddUnless(&psDevmemHeap->uiRefCount, 1, - DEVMEMHEAP_REFCOUNT_MAX); + IMG_INT32 iOldValue = OSAtomicAddUnless(&psDevmemHeap->uiRefCount, 1, + DEVMEMHEAP_REFCOUNT_MAX); - if (!bSuccess) + if (iOldValue == DEVMEMHEAP_REFCOUNT_MAX) { PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " "heap, reference count has overflowed.", __func__)); @@ -231,43 +1315,71 @@ static INLINE IMG_BOOL DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap) */ /**************************************************************************/ static INLINE void DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap) { - IMG_BOOL bSuccess = OSAtomicSubtractUnless(&psDevmemHeap->uiRefCount, 1, - DEVMEMHEAP_REFCOUNT_MIN); + IMG_INT32 iOldValue = OSAtomicSubtractUnless(&psDevmemHeap->uiRefCount, 1, + DEVMEMHEAP_REFCOUNT_MIN); + + if (iOldValue == DEVMEMHEAP_REFCOUNT_MIN) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the 
device memory " + "heap, reference count has underflowed.", __func__)); + } +} + +/*************************************************************************/ /*! +@Function DevmemIntReservationAcquire +@Description Acquire a reference to the provided device memory reservation. + Prevents releasing of the reservation if external device + resource components still require it. +@Return IMG_TRUE if referenced and IMG_FALSE in case of error +*/ /**************************************************************************/ +IMG_BOOL DevmemIntReservationAcquire(DEVMEMINT_RESERVATION *psDevmemReservation) +{ + IMG_BOOL bSuccess = IMG_TRUE; + + OSLockAcquireNested(psDevmemReservation->hLock, psDevmemReservation->eLockClass); + + if (psDevmemReservation->i32DevResAcquisitionCount == DEVMEMRESERVATION_ACQUISITION_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " + "reservation, acquisition count has overflowed.", __func__)); - if (!bSuccess) + bSuccess = IMG_FALSE; + } + else { - PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " - "heap, reference count has underflowed.", __func__)); + psDevmemReservation->i32DevResAcquisitionCount++; } -} -PVRSRV_ERROR -DevmemCompatReserve2(PMR *psPMR) -{ - PVR_UNREFERENCED_PARAMETER(psPMR); - return PVRSRV_ERROR_NOT_IMPLEMENTED; -} + OSLockRelease(psDevmemReservation->hLock); -PVRSRV_ERROR -DevmemCompatReserve4(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR) -{ - PVR_UNREFERENCED_PARAMETER(psDevmemMapping); - PVR_UNREFERENCED_PARAMETER(psPMR); - return PVRSRV_ERROR_NOT_IMPLEMENTED; -} -PVRSRV_ERROR -DevmemCompatReserve1(PMR *psPMR) -{ - PVR_UNREFERENCED_PARAMETER(psPMR); - return PVRSRV_ERROR_NOT_IMPLEMENTED; + return bSuccess; } -PVRSRV_ERROR -DevmemCompatReserve3(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR) +/*************************************************************************/ /*! +@Function DevmemIntReservationRelease +@Description Release the reference to the provided device memory reservation. + Once these references have been released the + reservation is allowed to be released from UM. +@Return None. +*/ /**************************************************************************/ +void DevmemIntReservationRelease(DEVMEMINT_RESERVATION *psDevmemReservation) { - PVR_UNREFERENCED_PARAMETER(psDevmemMapping); - PVR_UNREFERENCED_PARAMETER(psPMR); - return PVRSRV_ERROR_NOT_IMPLEMENTED; + OSLockAcquireNested(psDevmemReservation->hLock, psDevmemReservation->eLockClass); + + if (psDevmemReservation->i32DevResAcquisitionCount == DEVMEMRESERVATION_ACQUISITION_MIN) + { + PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to release the device memory " + "reservation, acquisition count has underflowed.", __func__)); + + /* for better debugging */ + PVR_ASSERT(psDevmemReservation->i32DevResAcquisitionCount == DEVMEMRESERVATION_ACQUISITION_MIN); + } + else + { + psDevmemReservation->i32DevResAcquisitionCount--; + } + + OSLockRelease(psDevmemReservation->hLock); } /*************************************************************************/ /*! 
@@ -422,7 +1534,6 @@ DevmemIntCtxCreate(CONNECTION_DATA *psConnection, *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); /* Initialise the PID notify list */ - OSWRLockCreate(&psDevmemCtx->hListLock); dllist_init(&(psDevmemCtx->sProcessNotifyListHead)); psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL; psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL; @@ -433,6 +1544,8 @@ DevmemIntCtxCreate(CONNECTION_DATA *psConnection, /* Initialise flags */ psDevmemCtx->ui32Flags = 0; + psDevmemCtx->uiCreatedHeaps = 0; + return PVRSRV_OK; fail_register: @@ -454,30 +1567,70 @@ PVRSRV_ERROR DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, IMG_UINT32 uiHeapConfigIndex, IMG_UINT32 uiHeapIndex, - IMG_DEV_VIRTADDR sHeapBaseAddr, - IMG_DEVMEM_SIZE_T uiHeapLength, - IMG_UINT32 uiLog2DataPageSize, DEVMEMINT_HEAP **ppsDevmemHeapPtr) { DEVMEMINT_HEAP *psDevmemHeap; PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sBlueprintHeapBaseAddr; + IMG_DEVMEM_SIZE_T uiBlueprintHeapLength; + IMG_DEVMEM_SIZE_T uiBlueprintResRgnLength; + IMG_UINT32 ui32BlueprintLog2DataPageSize; + IMG_UINT32 ui32BlueprintLog2ImportAlignment; PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); - /* allocate a Devmem context */ + if (!DevmemIntCtxAcquire(psDevmemCtx)) + { + return PVRSRV_ERROR_REFCOUNT_OVERFLOW; + } + + /* Retrieve page size and base addr match the heap blueprint */ + eError = HeapCfgHeapDetails(NULL, + psDevmemCtx->psDevNode, + uiHeapConfigIndex, + uiHeapIndex, + 0, NULL, + &sBlueprintHeapBaseAddr, + &uiBlueprintHeapLength, + &uiBlueprintResRgnLength, + &ui32BlueprintLog2DataPageSize, + &ui32BlueprintLog2ImportAlignment); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get details for HeapConfig:%d HeapIndex:%d.", + __func__, uiHeapConfigIndex, uiHeapIndex)); + goto ErrorCtxRelease; + } + + /* uiHeapConfigIndex and uiHeapIndex are validated in HeapCfgHeapDetails() + * so it should be safe to use here without additional checks. We must assert + * though that the index is less than the number of bits in uiCreatedHeaps + * bitfield (we assume 8 bits in a byte and bitfield width of 64). */ + PVR_ASSERT(uiHeapIndex < sizeof(psDevmemCtx->uiCreatedHeaps) * 8); + + if (BIT_ISSET(psDevmemCtx->uiCreatedHeaps, uiHeapIndex)) + { + eError = PVRSRV_ERROR_ALREADY_EXISTS; + goto ErrorCtxRelease; + } + + /* allocate the heap object */ psDevmemHeap = OSAllocMem(sizeof(*psDevmemHeap)); - PVR_LOG_RETURN_IF_NOMEM(psDevmemHeap, "psDevmemHeap"); + PVR_LOG_GOTO_IF_NOMEM(psDevmemHeap, eError, ErrorCtxRelease); psDevmemHeap->psDevmemCtx = psDevmemCtx; - - DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx); + psDevmemHeap->uiLog2PageSize = ui32BlueprintLog2DataPageSize; + psDevmemHeap->sBaseAddr = sBlueprintHeapBaseAddr; + /* Store the last accessible address as our LastAddr. 
We can access + * every address between sBlueprintHeapBaseAddr and + * sBlueprintHeapBaseAddr + uiBlueprintHeapLength - 1 + */ + psDevmemHeap->sLastAddr.uiAddr = sBlueprintHeapBaseAddr.uiAddr + uiBlueprintHeapLength - 1; + psDevmemHeap->uiHeapIndex = uiHeapIndex; OSAtomicWrite(&psDevmemHeap->uiRefCount, 1); - psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize; - psDevmemHeap->sBaseAddr = sHeapBaseAddr; - - eError = HeapCfgGetCallbacks(psDevmemHeap->psDevmemCtx->psDevNode, + eError = HeapCfgGetCallbacks(psDevmemCtx->psDevNode, uiHeapConfigIndex, uiHeapIndex, &psDevmemHeap->pfnInit, @@ -486,195 +1639,724 @@ DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get callbacks for HeapConfig:%d HeapIndex:%d.", __func__, uiHeapConfigIndex, uiHeapIndex)); - goto ErrorCtxRelease; + goto ErrorFreeDevmemHeap; } if (psDevmemHeap->pfnInit != NULL) { - eError = psDevmemHeap->pfnInit(psDevmemHeap->psDevmemCtx->psDevNode, + eError = psDevmemHeap->pfnInit(psDevmemCtx->psDevNode, psDevmemHeap, &psDevmemHeap->hPrivData); - PVR_GOTO_IF_ERROR(eError, ErrorCtxRelease); + PVR_GOTO_IF_ERROR(eError, ErrorFreeDevmemHeap); } + PVR_DPF((PVR_DBG_VERBOSE, "%s: sBaseAddr = %" IMG_UINT64_FMTSPECX ", " + "sLastAddr = %" IMG_UINT64_FMTSPECX, __func__, + psDevmemHeap->sBaseAddr.uiAddr, psDevmemHeap->sLastAddr.uiAddr)); + + BIT_SET(psDevmemCtx->uiCreatedHeaps, uiHeapIndex); + *ppsDevmemHeapPtr = psDevmemHeap; return PVRSRV_OK; -ErrorCtxRelease: - DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); +ErrorFreeDevmemHeap: OSFreeMem(psDevmemHeap); +ErrorCtxRelease: + DevmemIntCtxRelease(psDevmemCtx); return eError; } -PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_DEF_PAGE *psDefPage, - IMG_INT uiInitValue, - IMG_CHAR *pcDefPageName, - IMG_BOOL bInitPage) +static INLINE PVRSRV_ERROR ReserveRangeParamValidation(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize) { - IMG_UINT32 ui32RefCnt; - PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEV_VIRTADDR sLastReserveAddr; + IMG_UINT64 ui64InvalidSizeMask = (1 << psDevmemHeap->uiLog2PageSize) - 1; - OSLockAcquire(psDefPage->psPgLock); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemHeap != NULL, "psDevmemHeap"); - /* We know there will not be 4G number of sparse PMR's */ - ui32RefCnt = OSAtomicIncrement(&psDefPage->atRefCounter); + sLastReserveAddr.uiAddr = sReservationVAddr.uiAddr + uiVirtualSize - 1; - if (1 == ui32RefCnt) + /* Check that the requested address is not less than the base address of the heap. */ + if (sReservationVAddr.uiAddr < psDevmemHeap->sBaseAddr.uiAddr) { - IMG_DEV_PHYADDR sDevPhysAddr = {0}; + PVR_LOG_VA(PVR_DBG_ERROR, + "sReservationVAddr ("IMG_DEV_VIRTADDR_FMTSPEC") is invalid! " + "Must be greater or equal to "IMG_DEV_VIRTADDR_FMTSPEC, + sReservationVAddr.uiAddr, + psDevmemHeap->sBaseAddr.uiAddr); + return PVRSRV_ERROR_INVALID_PARAMS; + } -#if defined(PDUMP) - PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName); + /* Check the allocation size is valid (must be page granular). */ + if ((uiVirtualSize & ui64InvalidSizeMask) != 0 || uiVirtualSize == 0) + { + PVR_LOG_VA(PVR_DBG_ERROR, + "uiVirtualSize ("IMG_DEVMEM_SIZE_FMTSPEC") is invalid! 
Must a multiple of %u and greater than 0", + uiVirtualSize, + 1 << psDevmemHeap->uiLog2PageSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (uiVirtualSize > PMR_MAX_SUPPORTED_SIZE) + { + PVR_LOG_VA(PVR_DBG_ERROR, + "uiVirtualSize must be less than or equal to the max PMR size (" + IMG_DEVMEM_SIZE_FMTSPEC")", + PMR_MAX_SUPPORTED_SIZE); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Check that requested address + size fits in our heap. */ + if (sLastReserveAddr.uiAddr > psDevmemHeap->sLastAddr.uiAddr) + { + PVR_LOG_VA(PVR_DBG_ERROR, + "sReservationVAddr ("IMG_DEV_VIRTADDR_FMTSPEC") is invalid! " + "Must be lower than "IMG_DEV_VIRTADDR_FMTSPEC, + sReservationVAddr.uiAddr, + psDevmemHeap->sLastAddr.uiAddr - uiVirtualSize + 1); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemXIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize, + DEVMEMXINT_RESERVATION **ppsRsrv) +{ + DEVMEMXINT_RESERVATION *psRsrv; + IMG_UINT32 uiNumPages; + PVRSRV_ERROR eError; + + PVR_ASSERT(ppsRsrv != NULL); + + eError = ReserveRangeParamValidation(psDevmemHeap, + sReservationVAddr, + uiVirtualSize); + PVR_LOG_RETURN_IF_ERROR(eError, "ReserveRangeParamValidation"); + + + if (!DevmemIntHeapAcquire(psDevmemHeap)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError); + } + + uiNumPages = uiVirtualSize >> psDevmemHeap->uiLog2PageSize; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + psRsrv = OSAllocZMem(sizeof(*psRsrv) + sizeof(*psRsrv->ppsDevmemXMapping) * uiNumPages); + PVR_LOG_GOTO_IF_NOMEM(psRsrv, eError, ErrorUnreferenceHeap); +#else + psRsrv = OSAllocZMem(sizeof(*psRsrv->ppsPMR) * uiNumPages + sizeof(*psRsrv)); + PVR_LOG_GOTO_IF_NOMEM(psRsrv, eError, ErrorUnreferenceHeap); + + psRsrv->ppsPMR = IMG_OFFSET_ADDR(psRsrv, sizeof(*psRsrv)); #endif - /* Allocate the dummy page required for sparse backing */ - eError = DevPhysMemAlloc(psDevNode, - (1 << psDefPage->ui32Log2PgSize), - 0, - uiInitValue, - bInitPage, -#if defined(PDUMP) - psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName, - pcDefPageName, - &psDefPage->hPdumpPg, + OSAtomicWrite(&psRsrv->iLifetimeRefCount, 1); + + eError = OSLockCreate(&psRsrv->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeReservation); + + psRsrv->sBase = sReservationVAddr; + psRsrv->uiLength = uiVirtualSize; + + eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, + uiVirtualSize, + 0, /* IMG_UINT32 uiProtFlags */ + 0, /* alignment is n/a since we supply devvaddr */ + &sReservationVAddr, + psDevmemHeap->uiLog2PageSize); + PVR_GOTO_IF_ERROR(eError, ErrorDestroyLock); + + /* since we supplied the virt addr, MMU_Alloc shouldn't have + chosen a new one for us */ + PVR_ASSERT(sReservationVAddr.uiAddr == psRsrv->sBase.uiAddr); + + psRsrv->psDevmemHeap = psDevmemHeap; + *ppsRsrv = psRsrv; + + return PVRSRV_OK; + +ErrorDestroyLock: + OSLockDestroy(psRsrv->hLock); +ErrorFreeReservation: + OSFreeMem(psRsrv); +ErrorUnreferenceHeap: + DevmemIntHeapRelease(psDevmemHeap); +ErrorReturnError: + return eError; +} + +PVRSRV_ERROR +DevmemXIntUnreserveRange(DEVMEMXINT_RESERVATION *psRsrv) +{ + IMG_UINT32 ui32FirstMappedIdx = IMG_UINT32_MAX; // Initialise with invalid value. + IMG_UINT32 ui32ContigPageCount = 0; + IMG_UINT32 ui32PageIdx; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* No need to lock the mapping here since this is a handle destruction path which can not be + * executed while there are outstanding handle lookups, i.e. 
other operations are performed + * on the mapping. Bridge and handle framework also make sure this path can also not be executed + * concurrently. */ + + for (ui32PageIdx = 0; ui32PageIdx < _DevmemXReservationPageCount(psRsrv); ui32PageIdx++) + { +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + if (psRsrv->ppsDevmemXMapping[ui32PageIdx] != NULL) +#else + if (psRsrv->ppsPMR[ui32PageIdx] != NULL) #endif - &psDefPage->sPageHandle, - &sDevPhysAddr); - if (PVRSRV_OK != eError) { - OSAtomicDecrement(&psDefPage->atRefCounter); + if (ui32ContigPageCount == 0) + { +#if defined(DEBUG) + if (ui32FirstMappedIdx == IMG_UINT32_MAX) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Reservation was not unmapped! The reservation will " + "be unmapped before proceeding.", + __func__)); + } +#endif + ui32FirstMappedIdx = ui32PageIdx; + } + + ui32ContigPageCount++; + } + else + { + if (ui32ContigPageCount != 0) + { + eError = DevmemXIntUnmapPages(psRsrv, + ui32FirstMappedIdx, + ui32ContigPageCount); + PVR_RETURN_IF_ERROR(eError); + } + + ui32ContigPageCount = 0; + } + } + + if (ui32ContigPageCount != 0) + { + eError = DevmemXIntUnmapPages(psRsrv, + ui32FirstMappedIdx, + ui32ContigPageCount); + PVR_RETURN_IF_ERROR(eError); + } + + MMU_Free(psRsrv->psDevmemHeap->psDevmemCtx->psMMUContext, + psRsrv->sBase, + psRsrv->uiLength, + psRsrv->psDevmemHeap->uiLog2PageSize); + + /* Don't bother with refcount on reservation, as a reservation only ever + * holds one mapping, so we directly decrement the refcount on the heap + * instead. + * Function will print an error if the heap could not be unreferenced. */ + DevmemIntHeapRelease(psRsrv->psDevmemHeap); + + /* We have partially destroyed the object but we may need to keep the wrapper object + * around since it may be referenced in KM threads where we need the lock to avoid + * race conditions. UM threads will no longer have a handle to this object when this + * bridge call exits. 
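+	 * The lock and the wrapper memory are only destroyed once the last reference
+	 * is dropped in _DevmemXReservationUnrefAndMaybeDestroy().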
+ */ + _DevmemXReservationUnrefAndMaybeDestroy(psRsrv); + + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR +DevmemValidateFlags(PMR *psPMR, PVRSRV_MEMALLOCFLAGS_T uiMapFlags) +{ + PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR); + PVRSRV_ERROR eError = PVRSRV_OK; + + if (PVRSRV_CHECK_GPU_READABLE(uiMapFlags) && !PVRSRV_CHECK_GPU_READABLE(uiPMRFlags)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR is not GPU readable.", __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_FLAGS, ErrorReturnError); + } + + if (PVRSRV_CHECK_GPU_WRITEABLE(uiMapFlags) && !PVRSRV_CHECK_GPU_WRITEABLE(uiPMRFlags)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR is not GPU writeable.", __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_FLAGS, ErrorReturnError); + } + + if ((uiMapFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) != + (uiPMRFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR's device specific flags don't match mapping flags.", __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_FLAGS, ErrorReturnError); + } + +ErrorReturnError: + return eError; +} + +PVRSRV_ERROR +DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv, + PMR *psPMR, + IMG_UINT32 uiPageCount, + IMG_UINT32 uiPhysPageOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiVirtPageOffset) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiPMRMaxChunkCount = PMR_LogicalSize(psPMR) >> PMR_GetLog2Contiguity(psPMR); + DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap; + IMG_UINT32 uiLog2PageSize = psDevmemHeap->uiLog2PageSize; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + DEVMEMX_MAPPING *psNewMapping; + DEVMEMX_MAPPING *apsPotentialSplitMapping[DEVMEMX_SPLIT_MAPPING_PREALLOC_COUNT] = {0}; + DEVMEMX_MAPPING *psCurrentMapping; + IMG_UINT32 uiLockedPMRDeferUnlockPhyCount = 0; +#endif + + /* Test uiPageCount+uiPhysPageOffset will not exceed IMG_UINT32_MAX (and thereby wrap) */ + PVR_LOG_RETURN_IF_INVALID_PARAM(((IMG_UINT64)uiPageCount + (IMG_UINT64)uiPhysPageOffset) <= (IMG_UINT64)IMG_UINT32_MAX, "uiPageCount+uiPhysPageOffset exceeds IMG_UINT32_MAX"); + /* Test we do not exceed the PMR's maximum physical extent (in pages) */ + PVR_LOG_RETURN_IF_INVALID_PARAM((uiPageCount + uiPhysPageOffset) <= uiPMRMaxChunkCount, "uiPageCount+uiPhysPageOffset"); + + /* Test uiVirtPageOffset+uiPageCount will not exceed IMG_UINT32_MAX (and thereby wrap) */ + PVR_LOG_RETURN_IF_INVALID_PARAM(((IMG_UINT64)uiVirtPageOffset + (IMG_UINT64)uiPageCount) <= (IMG_UINT64)IMG_UINT32_MAX, "uiVirtPageOffset+uiPageCount exceeds IMG_UINT32_MAX"); + /* The range is not valid for the given virtual descriptor */ + PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _DevmemXReservationPageCount(psRsrv), + "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); + PVR_LOG_RETURN_IF_FALSE((uiFlags & ~PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK) == 0, + "invalid flags", PVRSRV_ERROR_INVALID_FLAGS); + PVR_LOG_RETURN_IF_FALSE(!PMR_IsSparse(psPMR), + "PMR is Sparse, devmemx PMRs should be non-sparse", PVRSRV_ERROR_INVALID_FLAGS); + PVR_LOG_RETURN_IF_FALSE(!(PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC), + "PMR allocation is deferred, devmemx PMRs can not be deferred", PVRSRV_ERROR_INVALID_FLAGS); + + if (uiLog2PageSize > PMR_GetLog2Contiguity(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device heap and PMR have incompatible " + "Log2Contiguity (%u - %u). 
PMR contiguity must be a multiple " + "of the heap contiguity!", __func__, uiLog2PageSize, + PMR_GetLog2Contiguity(psPMR))); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = DevmemValidateFlags(psPMR, uiFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "DevmemValidateFlags"); + + OSLockAcquire(psRsrv->hLock); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + psCurrentMapping = psRsrv->ppsDevmemXMapping[uiVirtPageOffset]; + + if (psCurrentMapping != NULL && /* Does a mapping currently exist in this entry */ + /* Does the start of the current mapping precede the requested offset */ + psCurrentMapping->uiVirtPageOffset < uiVirtPageOffset && + /* Does the end of the mapping precede the requested offset + page count */ + uiVirtPageOffset + uiPageCount < psCurrentMapping->uiVirtPageOffset + (psCurrentMapping->uiPageCount - 1)) + { + apsPotentialSplitMapping[0] = _PreAllocateDevmemXMapping(psRsrv); + if (apsPotentialSplitMapping[0] == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("_PreAllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrUnlockRsrv); + } + + apsPotentialSplitMapping[1] = _PreAllocateDevmemXMapping(psRsrv); + if (apsPotentialSplitMapping[1] == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("_PreAllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrFreeFirstSplitMapping); + } + } + /* Lock address `uiPageCount` times. This will also take N references on + * the PMR. */ + eError = PMRLockSysPhysAddressesN(psPMR, uiPageCount); + PVR_GOTO_IF_ERROR(eError, ErrFreeSecondSplitMapping); + + PMRLockPMR(psPMR); + + psNewMapping = _AllocateDevmemXMapping(psRsrv, + psPMR, + uiFlags, + uiPhysPageOffset, + uiVirtPageOffset, + uiPageCount); + if (!psNewMapping) + { + PVR_LOG_GOTO_WITH_ERROR("_AllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrUnlockPMR); + } + + do + { + eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + _DevmemXReservationPageAddress(psRsrv, uiVirtPageOffset), + psPMR, + uiPhysPageOffset, + uiPageCount, + NULL, + psDevmemHeap->uiLog2PageSize); + if (eError == PVRSRV_OK) + { + break; + } + else if (eError == PVRSRV_ERROR_RETRY) + { + /* This path may request this call to retry mapping at a later time, if this happens we + * need to relinquish the synchronisation resources to allow migrate to complete and + * try again + */ + _DeschedAllowRemapToComplete(psRsrv->hLock, psPMR); } else { - psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr; + /* Unexpected Error code path */ + goto ErrFreeNewMapping; + } + } while (eError == PVRSRV_ERROR_RETRY); + + /* If this has overwritten the middle of an existing mapping */ + if (apsPotentialSplitMapping[0] != NULL) + { + _ReplaceCentreSplitExistingMapping(psRsrv, + uiVirtPageOffset, + uiPageCount, + psNewMapping, + apsPotentialSplitMapping, + &uiLockedPMRDeferUnlockPhyCount); + } + else + { + _ReplaceReservationMappingRecords(psRsrv, + uiVirtPageOffset, + uiPageCount, + psNewMapping, + &uiLockedPMRDeferUnlockPhyCount); + } + PMRUnlockPMR(psPMR); + + /* Drop any deferred references on PMR we had locked */ + if (uiLockedPMRDeferUnlockPhyCount != 0) + { + PMRUnlockSysPhysAddressesN(psPMR, uiLockedPMRDeferUnlockPhyCount); + } + +#else + + /* Lock address `uiPageCount` times. This will also take N references on + * the PMR. 
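+	 * Each page slot that this call overwrites later releases one such reference
+	 * on the PMR it previously pointed at (see the bookkeeping loop below).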
*/ + eError = PMRLockSysPhysAddressesN(psPMR, uiPageCount); + PVR_GOTO_IF_ERROR(eError, ErrUnlockRsrv); + + PMRLockPMR(psPMR); + + eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + _DevmemXReservationPageAddress(psRsrv, uiVirtPageOffset), + psPMR, + uiPhysPageOffset, + uiPageCount, + NULL, + psDevmemHeap->uiLog2PageSize); + PVR_GOTO_IF_ERROR(eError, ErrUnlockPMR); + + PMRUnlockPMR(psPMR); + + { + IMG_UINT32 i; + for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) + { + if (psRsrv->ppsPMR[i] != NULL) + { + { + PVRSRV_ERROR eError2 = PMRUnlockSysPhysAddresses(psRsrv->ppsPMR[i]); + PVR_LOG_IF_ERROR(eError2, "PMRUnlockSysPhysAddresses"); + } + } + + psRsrv->ppsPMR[i] = psPMR; + } + } +#endif + + OSLockRelease(psRsrv->hLock); + + return PVRSRV_OK; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +ErrFreeNewMapping: + _DestroyDevmemXMapping(psNewMapping); +ErrUnlockPMR: + PMRUnlockPMR(psPMR); + { + PVRSRV_ERROR eError2 = PMRUnlockSysPhysAddressesN(psPMR, uiPageCount); + PVR_ASSERT(eError2 == PVRSRV_OK); + } +ErrFreeSecondSplitMapping: + _FreePreAllocatedDevmemXMapping(apsPotentialSplitMapping[1]); +ErrFreeFirstSplitMapping: + _FreePreAllocatedDevmemXMapping(apsPotentialSplitMapping[0]); +#else +ErrUnlockPMR: + PMRUnlockPMR(psPMR); + { + PVRSRV_ERROR eError2 = PMRUnlockSysPhysAddressesN(psPMR, uiPageCount); + PVR_LOG_IF_ERROR(eError2, "PMRUnlockSysPhysAddresses"); + } +#endif +ErrUnlockRsrv: + OSLockRelease(psRsrv->hLock); + + return eError; +} + +PVRSRV_ERROR +DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount) +{ + DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap; + PVRSRV_ERROR eError; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + DEVMEMX_MAPPING *apsPotentialSplitMapping[DEVMEMX_SPLIT_MAPPING_PREALLOC_COUNT] = {0}; + DEVMEMX_MAPPING *psCurrentMapping; +#endif + + /* Test uiVirtPageOffset+uiPageCount will not exceed IMG_UINT32_MAX (and thereby wrap) */ + PVR_LOG_RETURN_IF_INVALID_PARAM(((IMG_UINT64)uiVirtPageOffset + (IMG_UINT64)uiPageCount) <= (IMG_UINT64)IMG_UINT32_MAX, "uiVirtPageOffset+uiPageCount exceeds IMG_UINT32_MAX"); + PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _DevmemXReservationPageCount(psRsrv), + "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); + + OSLockAcquire(psRsrv->hLock); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + psCurrentMapping = psRsrv->ppsDevmemXMapping[uiVirtPageOffset]; + if (psCurrentMapping != NULL &&/* Does a mapping currently exist in this entry */ + /* Does the start of the current mapping precede the requested offset */ + psCurrentMapping->uiVirtPageOffset < uiVirtPageOffset && + /* Does the end of the mapping precede the requested offset + page count */ + uiVirtPageOffset + uiPageCount < psCurrentMapping->uiVirtPageOffset + (psCurrentMapping->uiPageCount - 1)) + { + apsPotentialSplitMapping[0] = _PreAllocateDevmemXMapping(psRsrv); + if (apsPotentialSplitMapping[0] == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("_PreAllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrUnlock); + } + + apsPotentialSplitMapping[1] = _PreAllocateDevmemXMapping(psRsrv); + if (apsPotentialSplitMapping[1] == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("_PreAllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrFreeFirstSplitMapping); + } + } +#endif + + /* Unmap the pages and mark them invalid in the MMU PTE */ + eError = MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + _DevmemXReservationPageAddress(psRsrv, 
uiVirtPageOffset), + uiPageCount, + NULL, + psDevmemHeap->uiLog2PageSize, + 0); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", ErrFreeSecondSplitMapping); + + /* If this has overwritten the middle of an existing mapping */ + if (apsPotentialSplitMapping[0] != NULL) + { + __maybe_unused IMG_UINT32 uiDeferUnlockPhyCount = 0; + + _ReplaceCentreSplitExistingMapping(psRsrv, + uiVirtPageOffset, + uiPageCount, + NULL, + apsPotentialSplitMapping, + &uiDeferUnlockPhyCount); + /* We don't have a PMR locked when calling this function so there will be + * no UnlockPhys to defer. + */ + PVR_ASSERT(uiDeferUnlockPhyCount == 0); + } + else + { + _DeleteReservationMappingRecords(psRsrv, + uiVirtPageOffset, + uiPageCount); + } +#else + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", ErrUnlock); + { + IMG_UINT32 i; + for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) + { + if (psRsrv->ppsPMR[i] != NULL) + { + PVRSRV_ERROR eError2 = PMRUnlockSysPhysAddresses(psRsrv->ppsPMR[i]); + PVR_LOG_IF_ERROR(eError2, "PMRUnlockSysPhysAddresses"); + + psRsrv->ppsPMR[i] = NULL; + } } } +#endif + + OSLockRelease(psRsrv->hLock); + + return PVRSRV_OK; - OSLockRelease(psDefPage->psPgLock); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +ErrFreeSecondSplitMapping: + _FreePreAllocatedDevmemXMapping(apsPotentialSplitMapping[1]); +ErrFreeFirstSplitMapping: + _FreePreAllocatedDevmemXMapping(apsPotentialSplitMapping[0]); +#endif +ErrUnlock: + OSLockRelease(psRsrv->hLock); return eError; } -void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_DEF_PAGE *psDefPage, - IMG_CHAR *pcDefPageName) +PVRSRV_ERROR +DevmemXIntMapVRangeToBackingPage(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiPageCount, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiVirtPageOffset) { - IMG_UINT32 ui32RefCnt; + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap; - ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + DEVMEMX_MAPPING *apsPotentialSplitMapping[DEVMEMX_SPLIT_MAPPING_PREALLOC_COUNT] = {0}; + DEVMEMX_MAPPING *psCurrentMapping; +#endif - /* For the cases where the dummy page allocation fails due to lack of memory - * The refcount can still be 0 even for a sparse allocation */ - if (0 != ui32RefCnt) + /* Test uiVirtPageOffset+uiPageCount will not exceed IMG_UINT32_MAX (and thereby wrap) */ + PVR_LOG_RETURN_IF_INVALID_PARAM(((IMG_UINT64)uiVirtPageOffset + (IMG_UINT64)uiPageCount) <= (IMG_UINT64)IMG_UINT32_MAX, "uiVirtPageOffset+uiPageCount exceeds IMG_UINT32_MAX"); + /* The range is not valid for the given virtual descriptor */ + PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _DevmemXReservationPageCount(psRsrv), + "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); + PVR_LOG_RETURN_IF_FALSE((uiFlags & ~(PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK | + PVRSRV_MEMALLOCFLAG_ZERO_BACKING)) == 0, + "invalid flags", PVRSRV_ERROR_INVALID_FLAGS); + + OSLockAcquire(psRsrv->hLock); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + psCurrentMapping = psRsrv->ppsDevmemXMapping[uiVirtPageOffset]; + if (psCurrentMapping != NULL &&/* Does a mapping currently exist in this entry */ + /* Does the start of the current mapping precede the requested offset */ + psCurrentMapping->uiVirtPageOffset < uiVirtPageOffset && + /* Does the end of the mapping precede the requested offset + page count*/ + uiVirtPageOffset + uiPageCount < psCurrentMapping->uiVirtPageOffset + 
(psCurrentMapping->uiPageCount - 1)) { - OSLockAcquire(psDefPage->psPgLock); - - /* We know there will not be 4G number of sparse PMR's */ - ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter); - - if (0 == ui32RefCnt) + apsPotentialSplitMapping[0] = _PreAllocateDevmemXMapping(psRsrv); + if (apsPotentialSplitMapping[0] == NULL) { - PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName); - - /* Free the dummy page when refcount reaches zero */ - DevPhysMemFree(psDevNode, -#if defined(PDUMP) - psDefPage->hPdumpPg, -#endif - &psDefPage->sPageHandle); - -#if defined(PDUMP) - psDefPage->hPdumpPg = NULL; -#endif - psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + PVR_LOG_GOTO_WITH_ERROR("_PreAllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrUnlock); } - OSLockRelease(psDefPage->psPgLock); + apsPotentialSplitMapping[1] = _PreAllocateDevmemXMapping(psRsrv); + if (apsPotentialSplitMapping[1] == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("_PreAllocateDevmemXMapping", eError, PVRSRV_ERROR_OUT_OF_MEMORY, ErrFreeFirstSplitMapping); + } } +#endif -} - -PVRSRV_ERROR -DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation, - PMR *psPMR, - IMG_UINT32 ui32PageCount, - IMG_UINT32 ui32PhysicalPgOffset, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddrBase) -{ - PVRSRV_ERROR eError; - IMG_UINT32 uiPMRMaxChunkCount = PMRGetMaxChunkCount(psPMR); + eError = MMUX_MapVRangeToBackingPage(psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + _DevmemXReservationPageAddress(psRsrv, uiVirtPageOffset), + uiPageCount, + psDevmemHeap->uiLog2PageSize); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", ErrFreeSecondSplitMapping); - PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < uiPMRMaxChunkCount), "ui32PageCount"); - PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PhysicalPgOffset < uiPMRMaxChunkCount), "ui32PhysicalPgOffset"); + /* If this has overwritten the middle of an existing mapping */ + if (apsPotentialSplitMapping[0] != NULL) + { + IMG_UINT32 uiDeferUnlockPhyCount = 0; + + _ReplaceCentreSplitExistingMapping(psRsrv, + uiVirtPageOffset, + uiPageCount, + NULL, + apsPotentialSplitMapping, + &uiDeferUnlockPhyCount); + /* We don't have a PMR locked when calling this function so there will be + * no UnlockPhys to defer. + */ + PVR_ASSERT(uiDeferUnlockPhyCount == 0); + } + else + { + _DeleteReservationMappingRecords(psRsrv, + uiVirtPageOffset, + uiPageCount); + } +#else + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", ErrUnlock); - if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). 
" - "PMR contiguity must be a multiple of the heap contiguity!", - __func__, - psReservation->psDevmemHeap->uiLog2PageSize, - PMR_GetLog2Contiguity(psPMR))); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + IMG_UINT32 i; + for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) + { + if (psRsrv->ppsPMR[i] != NULL) + { + eError = PMRUnlockSysPhysAddresses(psRsrv->ppsPMR[i]); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + psRsrv->ppsPMR[i] = NULL; + } + } } +#endif - eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, - uiFlags, - sDevVAddrBase, - psPMR, - ui32PhysicalPgOffset, - ui32PageCount, - NULL, - psReservation->psDevmemHeap->uiLog2PageSize); + OSLockRelease(psRsrv->hLock); -e0: + return eError; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +ErrFreeSecondSplitMapping: + _FreePreAllocatedDevmemXMapping(apsPotentialSplitMapping[1]); +ErrFreeFirstSplitMapping: + _FreePreAllocatedDevmemXMapping(apsPotentialSplitMapping[0]); +#endif +ErrUnlock: + OSLockRelease(psRsrv->hLock); return eError; } -PVRSRV_ERROR -DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation, - IMG_DEV_VIRTADDR sDevVAddrBase, - IMG_UINT32 ui32PageCount) +static INLINE IMG_UINT32 +_DevmemReservationPageCount(DEVMEMINT_RESERVATION *psRsrv) { - PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_4K_PAGE_COUNT), "ui32PageCount"); - - /* Unmap the pages and mark them invalid in the MMU PTE */ - MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, - 0, - sDevVAddrBase, - ui32PageCount, - NULL, - psReservation->psDevmemHeap->uiLog2PageSize, - 0); - - return PVRSRV_OK; + return psRsrv->uiLength >> psRsrv->psDevmemHeap->uiLog2PageSize; } PVRSRV_ERROR -DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, - DEVMEMINT_RESERVATION *psReservation, - PMR *psPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - DEVMEMINT_MAPPING **ppsMappingPtr) +DevmemIntMapPMR(DEVMEMINT_RESERVATION *psReservation, PMR *psPMR) { PVRSRV_ERROR eError; - DEVMEMINT_MAPPING *psMapping; /* number of pages (device pages) that allocation spans */ IMG_UINT32 ui32NumDevPages; /* device virtual address of start of allocation */ - IMG_DEV_VIRTADDR sAllocationDevVAddr; + IMG_DEV_VIRTADDR sReservationVAddr; /* and its length */ - IMG_DEVMEM_SIZE_T uiAllocationSize; - IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; - IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; - PVRSRV_DEVICE_NODE *psDevNode; - PMR_FLAGS_T uiPMRFlags; - PVRSRV_DEF_PAGE *psDefPage; - IMG_CHAR *pszPageName; + IMG_DEVMEM_SIZE_T uiVirtualSize; + IMG_UINT32 uiLog2HeapContiguity = psReservation->psDevmemHeap->uiLog2PageSize; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags = psReservation->uiFlags; + IMG_BOOL bIsSparse = IMG_FALSE; + void *pvTmpBuf = NULL; + IMG_UINT32 i; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psReservation->psMappedPMR == NULL, "psReservation"); + PVR_LOG_RETURN_IF_INVALID_PARAM(PMR_LogicalSize(psPMR) == psReservation->uiLength, "psPMR logical size"); if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR)) { @@ -686,214 +2368,336 @@ DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, PMR_GetLog2Contiguity(psPMR) )); PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, ErrorReturnError); } - psDevNode = psDevmemHeap->psDevmemCtx->psDevNode; - /* Don't bother with refcount on reservation, as a reservation - only ever holds one mapping, so we directly increment the - refcount on the heap instead */ - if (!DevmemIntHeapAcquire(psDevmemHeap)) - { - PVR_GOTO_WITH_ERROR(eError, 
PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError); - } + eError = DevmemValidateFlags(psPMR, uiMapFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemValidateFlags", ErrorReturnError); - /* allocate memory to record the mapping info */ - psMapping = OSAllocMem(sizeof(*psMapping)); - PVR_LOG_GOTO_IF_NOMEM(psMapping, eError, ErrorUnreference); + OSLockAcquireNested(psReservation->hLock, psReservation->eLockClass); - uiAllocationSize = psReservation->uiLength; + uiVirtualSize = psReservation->uiLength; - ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1); - PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize); + ui32NumDevPages = 0xffffffffU & ( ( (uiVirtualSize - 1) >> uiLog2HeapContiguity) + 1); + PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiVirtualSize); eError = PMRLockSysPhysAddresses(psPMR); - PVR_GOTO_IF_ERROR(eError, ErrorFreeMapping); + PVR_GOTO_IF_ERROR(eError, ErrorReleaseResLock); + + PMRLockPMR(psPMR); - sAllocationDevVAddr = psReservation->sBase; + sReservationVAddr = psReservation->sBase; /*Check if the PMR that needs to be mapped is sparse */ bIsSparse = PMR_IsSparse(psPMR); if (bIsSparse) { - /*Get the flags*/ - uiPMRFlags = PMR_Flags(psPMR); - bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); - - if (bNeedBacking) - { - IMG_INT uiInitValue; + IMG_DEV_PHYADDR *psDevPAddr; + IMG_BOOL *pbValid; - if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) - { - psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage; - uiInitValue = PVR_ZERO_PAGE_INIT_VALUE; - pszPageName = DEV_ZERO_PAGE; - } - else - { - psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage; - uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE; - pszPageName = DUMMY_PAGE; - } + pvTmpBuf = OSAllocMem((sizeof(IMG_DEV_PHYADDR) + sizeof(IMG_BOOL)) * + ui32NumDevPages); + PVR_LOG_GOTO_IF_NOMEM(pvTmpBuf, eError, ErrorUnlockPhysAddr); - /* Error is logged with in the function if any failures. - * As the allocation fails we need to fail the map request and - * return appropriate error - * - * Allocation of dummy/zero page is done after locking the pages for PMR physically - * By implementing this way, the best case path of dummy/zero page being most likely to be - * allocated after physically locking down pages, is considered. - * If the dummy/zero page allocation fails, we do unlock the physical address and the impact - * is a bit more in on demand mode of operation */ - eError = DevmemIntAllocDefBackingPage(psDevNode, - psDefPage, - uiInitValue, - pszPageName, - IMG_TRUE); - PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr); - } + psDevPAddr = IMG_OFFSET_ADDR(pvTmpBuf, 0); + pbValid = IMG_OFFSET_ADDR(pvTmpBuf, ui32NumDevPages * sizeof(IMG_DEV_PHYADDR)); /* N.B. We pass mapping permission flags to MMU_MapPages and let * it reject the mapping if the permissions on the PMR are not compatible. 
*/ - eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, uiMapFlags, - sAllocationDevVAddr, + sReservationVAddr, psPMR, 0, ui32NumDevPages, NULL, uiLog2HeapContiguity); - PVR_GOTO_IF_ERROR(eError, ErrorFreeDefBackingPage); + PVR_GOTO_IF_ERROR(eError, ErrorFreeSparseTmpBuf); + + /* Determine which entries of the PMR are valid */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapContiguity, + ui32NumDevPages, + 0, + psDevPAddr, + pbValid, + DEVICE_USE | MAPPING_USE); + /* PVRSRV_ERROR_RETRY with SUPPORT_LINUX_OSPAGE_MIGRATION feature + * enabled will not be returned here even with the use of + * DEVICE_USE | MAPPING_USE because the feature does not + * support migratable sparse PMRs. + */ + PVR_GOTO_IF_ERROR(eError, ErrorUnmap); + + for (i = 0; i < ui32NumDevPages; i++) + { + if (DevmemIntReservationIsIndexMapped(psReservation, i)) + { + eError = PMRUnrefPMR(psReservation->psMappedPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); + + DevmemIntReservationSetMappingIndex(psReservation, i, IMG_FALSE); + } + + if (pbValid[i]) + { + eError = PMRRefPMR(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRRefPMR"); + + DevmemIntReservationSetMappingIndex(psReservation, i, IMG_TRUE); + } + } + + OSFreeMem(pvTmpBuf); + } +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + else + { + do + { + eError = MMU_MapPMRFast(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + sReservationVAddr, + psPMR, + (IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity, + uiMapFlags, + uiLog2HeapContiguity); + if (eError == PVRSRV_OK) + { + break; + } + else if (eError == PVRSRV_ERROR_RETRY) + { + /* This path may request this call to retry mapping at a later time, if this happens we + * need to relinquish the synchronisation resources to allow migrate to complete and + * try again + */ + _DeschedAllowRemapToCompleteNested(psReservation->hLock, + psReservation->eLockClass, + psPMR); + } + else + { + /* Unexpected Error code path */ + goto ErrorUnlockPhysAddr; + } + } while (eError == PVRSRV_ERROR_RETRY); } +#else else { - eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, - sAllocationDevVAddr, + eError = MMU_MapPMRFast(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + sReservationVAddr, psPMR, (IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity, uiMapFlags, uiLog2HeapContiguity); PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr); } +#endif + + psReservation->psMappedPMR = psPMR; - psMapping->psReservation = psReservation; - psMapping->uiNumPages = ui32NumDevPages; - psMapping->psPMR = psPMR; + /* Increase reservation association count so we know if multiple mappings have been created + * on the PMR, also link the mapping to the PMR if required. 
+ */ +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + psReservation->psDevmemMapping = _AllocateDevmemMapping(psReservation, + psPMR); + PVR_LOG_GOTO_IF_NOMEM(psReservation->psDevmemMapping, eError, ErrorUnsetMappedPMR); + _LinkPMRRsrvNode(&psReservation->psDevmemMapping->sPMRRsrvLinkNode, psPMR); +#else + PMRLinkGPUMapping(psPMR); +#endif - *ppsMappingPtr = psMapping; + PMRUnlockPMR(psPMR); + OSLockRelease(psReservation->hLock); return PVRSRV_OK; -ErrorFreeDefBackingPage: - if (bNeedBacking) +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +ErrorUnsetMappedPMR: + psReservation->psMappedPMR = NULL; +#endif +ErrorUnmap: + (void) MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + sReservationVAddr, + ui32NumDevPages, + NULL, + uiLog2HeapContiguity, + 0); +ErrorFreeSparseTmpBuf: + if (pvTmpBuf) { - /*if the mapping failed, the allocated dummy ref count need - * to be handled accordingly */ - DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, - psDefPage, - pszPageName); + OSFreeMem(pvTmpBuf); } ErrorUnlockPhysAddr: + PMRUnlockPMR(psPMR); { PVRSRV_ERROR eError1 = PVRSRV_OK; eError1 = PMRUnlockSysPhysAddresses(psPMR); PVR_LOG_IF_ERROR(eError1, "PMRUnlockSysPhysAddresses"); - - *ppsMappingPtr = NULL; } -ErrorFreeMapping: - OSFreeMem(psMapping); - -ErrorUnreference: - /* if fails there's not much to do (the function will print an error) */ - DevmemIntHeapRelease(psDevmemHeap); +ErrorReleaseResLock: + OSLockRelease(psReservation->hLock); ErrorReturnError: PVR_ASSERT (eError != PVRSRV_OK); return eError; } +PVRSRV_ERROR +DevmemIntGetReservationData(DEVMEMINT_RESERVATION* psReservation, PMR** ppsPMR, IMG_DEV_VIRTADDR* psDevVAddr) +{ + /* Reservation might not have a PMR if a mapping was not yet performed */ + if (psReservation->psMappedPMR == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevVAddr->uiAddr = psReservation->sBase.uiAddr; + *ppsPMR = psReservation->psMappedPMR; + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntReserveRangeAndMapPMR(CONNECTION_DATA *psConnectionData, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEMINT_RESERVATION **ppsReservation) +{ + PVRSRV_ERROR eError, eUnreserveError; + + eError = DevmemIntReserveRange(psConnectionData, + psDeviceNode, + psDevmemHeap, + sReservationVAddr, + uiVirtualSize, + uiFlags, + ppsReservation); + PVR_GOTO_IF_ERROR(eError, ErrorReturnError); + + eError = DevmemIntMapPMR(*ppsReservation, psPMR); + PVR_GOTO_IF_ERROR(eError, ErrorUnreserve); + + return PVRSRV_OK; + +ErrorUnreserve: + eUnreserveError = DevmemIntUnreserveRange(*ppsReservation); + *ppsReservation = NULL; + PVR_LOG_IF_ERROR(eUnreserveError, "DevmemIntUnreserveRange"); +ErrorReturnError: + return eError; +} PVRSRV_ERROR -DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping) +DevmemIntUnmapPMR(DEVMEMINT_RESERVATION *psReservation) { PVRSRV_ERROR eError; - DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap; + DEVMEMINT_HEAP *psDevmemHeap = psReservation->psDevmemHeap; /* device virtual address of start of allocation */ - IMG_DEV_VIRTADDR sAllocationDevVAddr; + IMG_DEV_VIRTADDR sReservationVAddr; /* number of pages (device pages) that allocation spans */ IMG_UINT32 ui32NumDevPages; - IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; - PMR_FLAGS_T uiPMRFlags; + IMG_BOOL bIsSparse = IMG_FALSE; + IMG_UINT32 i; - ui32NumDevPages = psMapping->uiNumPages; - sAllocationDevVAddr = 
psMapping->psReservation->sBase; + PVR_RETURN_IF_INVALID_PARAM(psReservation->psMappedPMR != NULL); - /*Check if the PMR that needs to be mapped is sparse */ - bIsSparse = PMR_IsSparse(psMapping->psPMR); + ui32NumDevPages = _DevmemReservationPageCount(psReservation); + sReservationVAddr = psReservation->sBase; + + OSLockAcquireNested(psReservation->hLock, psReservation->eLockClass); + PMRLockPMR(psReservation->psMappedPMR); + + bIsSparse = PMR_IsSparse(psReservation->psMappedPMR); if (bIsSparse) { - /*Get the flags*/ - uiPMRFlags = PMR_Flags(psMapping->psPMR); - bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); - - if (bNeedBacking) + eError = MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + sReservationVAddr, + ui32NumDevPages, + NULL, + psDevmemHeap->uiLog2PageSize, + 0); + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", ErrUnlock); + + /* We are unmapping the whole PMR */ + for (i = 0; i < ui32NumDevPages; i++) { - if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) - { - DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, - &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage, - DEV_ZERO_PAGE); - } - else + if (DevmemIntReservationIsIndexMapped(psReservation, i)) { - DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, - &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage, - DUMMY_PAGE); + /* All PMRs in the range should be the same, set local PMR + * for Unlocking phys addrs later */ + eError = PMRUnrefPMR(psReservation->psMappedPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); + + DevmemIntReservationSetMappingIndex(psReservation, i, IMG_FALSE); } } - - MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, - 0, - sAllocationDevVAddr, - ui32NumDevPages, - NULL, - psMapping->psReservation->psDevmemHeap->uiLog2PageSize, - 0); } else { - MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, - sAllocationDevVAddr, - ui32NumDevPages, - psMapping->psReservation->psDevmemHeap->uiLog2PageSize); + eError = MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, + sReservationVAddr, + ui32NumDevPages, + psDevmemHeap->uiLog2PageSize); + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPMRFast", ErrUnlock); } - eError = PMRUnlockSysPhysAddresses(psMapping->psPMR); - PVR_ASSERT(eError == PVRSRV_OK); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + _UnlinkPMRRsrvNode(&psReservation->psDevmemMapping->sPMRRsrvLinkNode, psReservation->psMappedPMR); + PMRUnlockPMR(psReservation->psMappedPMR); - /* Don't bother with refcount on reservation, as a reservation only ever - * holds one mapping, so we directly decrement the refcount on the heap - * instead. - * Function will print an error if the heap could not be unreferenced. 
*/ - DevmemIntHeapRelease(psDevmemHeap); + _UnrefAndMaybeDestroyPMRRsrvNode(&psReservation->psDevmemMapping->sPMRRsrvLinkNode); +#else + PMRUnlinkGPUMapping(psReservation->psMappedPMR); + PMRUnlockPMR(psReservation->psMappedPMR); +#endif + + eError = PMRUnlockSysPhysAddresses(psReservation->psMappedPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); - OSFreeMem(psMapping); + psReservation->psMappedPMR = NULL; + + OSLockRelease(psReservation->hLock); return PVRSRV_OK; -} +ErrUnlock: + PMRUnlockPMR(psReservation->psMappedPMR); + OSLockRelease(psReservation->hLock); + + return eError; +} PVRSRV_ERROR -DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, - IMG_DEV_VIRTADDR sAllocationDevVAddr, - IMG_DEVMEM_SIZE_T uiAllocationSize, +DevmemIntReserveRange(CONNECTION_DATA *psConnectionData, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, DEVMEMINT_RESERVATION **ppsReservationPtr) { PVRSRV_ERROR eError; DEVMEMINT_RESERVATION *psReservation; + IMG_UINT32 uiNumPages; + IMG_UINT64 ui64MapSize; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + PVR_ASSERT(ppsReservationPtr != NULL); + + eError = ReserveRangeParamValidation(psDevmemHeap, + sReservationVAddr, + uiVirtualSize); + PVR_LOG_RETURN_IF_ERROR(eError, "ReserveRangeParamValidation"); if (!DevmemIntHeapAcquire(psDevmemHeap)) { @@ -901,25 +2705,55 @@ DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, ErrorReturnError); } + uiNumPages = uiVirtualSize >> psDevmemHeap->uiLog2PageSize; + /* allocate memory to record the reservation info */ - psReservation = OSAllocMem(sizeof(*psReservation)); + ui64MapSize = sizeof(*psReservation->pui8Map) * LOG2_DIV(uiNumPages, MAP_MASK_SHIFT); + if (LOG2_MOD(uiNumPages, MAP_MASK_SHIFT) != 0) + { + ui64MapSize += 1; + } + + psReservation = OSAllocZMem(sizeof(*psReservation) + ui64MapSize); PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, ErrorUnreference); - psReservation->sBase = sAllocationDevVAddr; - psReservation->uiLength = uiAllocationSize; + /* Create lock */ + eError = OSLockCreate(&psReservation->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeReservation); + + /* Determine where the caller originated from. This is to prevent lockdep + * warnings between reservations that originate from different sides of the + * bridge. 
*/ + if (psConnectionData == NULL) + { + psReservation->eLockClass = RESERVATION_USAGE_SERVER_FW; + } + else + { + psReservation->eLockClass = RESERVATION_USAGE_CLIENT; + } + + /* Initialise external device resource use indicator */ + psReservation->i32DevResAcquisitionCount = DEVMEMRESERVATION_ACQUISITION_MIN; + /* Initialise UM-KM lifetime refcount */ + OSAtomicWrite(&psReservation->iLifetimeRefCount, 1); + + psReservation->uiFlags = uiFlags; + psReservation->sBase = sReservationVAddr; + psReservation->uiLength = uiVirtualSize; + psReservation->pui8Map = IMG_OFFSET_ADDR(psReservation, sizeof(*psReservation)); eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, - uiAllocationSize, - &uiAllocationSize, + uiVirtualSize, 0, /* IMG_UINT32 uiProtFlags */ 0, /* alignment is n/a since we supply devvaddr */ - &sAllocationDevVAddr, + &sReservationVAddr, psDevmemHeap->uiLog2PageSize); - PVR_GOTO_IF_ERROR(eError, ErrorFreeReservation); + PVR_GOTO_IF_ERROR(eError, ErrorDestroyLock); /* since we supplied the virt addr, MMU_Alloc shouldn't have chosen a new one for us */ - PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr); + PVR_ASSERT(sReservationVAddr.uiAddr == psReservation->sBase.uiAddr); psReservation->psDevmemHeap = psDevmemHeap; *ppsReservationPtr = psReservation; @@ -930,6 +2764,9 @@ DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, * error exit paths follow */ +ErrorDestroyLock: + OSLockDestroy(psReservation->hLock); + ErrorFreeReservation: OSFreeMem(psReservation); @@ -945,22 +2782,57 @@ DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, PVRSRV_ERROR DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation) { - IMG_DEV_VIRTADDR sBase = psReservation->sBase; - IMG_UINT32 uiLength = psReservation->uiLength; - IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize; + IMG_UINT32 i; + DEVMEMINT_HEAP *psDevmemHeap = psReservation->psDevmemHeap; + PVRSRV_ERROR eError; - MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, - sBase, - uiLength, - uiLog2DataPageSize); + if (psReservation->psMappedPMR != NULL) + { + /* No warning to be emitted as this is expected behaviour for the + * Devmem interface. */ + eError = DevmemIntUnmapPMR(psReservation); + PVR_LOG_RETURN_IF_ERROR(eError, "DevmemIntUnmapPMR"); + } - /* Don't bother with refcount on reservation, as a reservation only ever - * holds one mapping, so we directly decrement the refcount on the heap - * instead. - * Function will print an error if the heap could not be unreferenced. 
*/ - DevmemIntHeapRelease(psReservation->psDevmemHeap); + OSLockAcquireNested(psReservation->hLock, psReservation->eLockClass); - OSFreeMem(psReservation); + if (psReservation->i32DevResAcquisitionCount != DEVMEMRESERVATION_ACQUISITION_MIN) + { + PVR_DPF((PVR_DBG_ERROR, "%s called but still has existing dev res acquisitions " + "(%d), free existing acquisitions first.", __func__, + psReservation->i32DevResAcquisitionCount)); + + OSLockRelease(psReservation->hLock); + + return PVRSRV_ERROR_RETRY; + } + + OSLockRelease(psReservation->hLock); + + MMU_Free(psDevmemHeap->psDevmemCtx->psMMUContext, + psReservation->sBase, + psReservation->uiLength, + psDevmemHeap->uiLog2PageSize); + + for (i = 0; i < _DevmemReservationPageCount(psReservation); i++) + { + if (DevmemIntReservationIsIndexMapped(psReservation, i)) + { + eError = PMRUnrefPMR(psReservation->psMappedPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); + + DevmemIntReservationSetMappingIndex(psReservation, i, IMG_FALSE); + } + } + + DevmemIntHeapRelease(psDevmemHeap); + + /* We have partially destroyed the object but we may need to keep the wrapper object + * around since it may be referenced in KM threads where we need the lock to avoid + * race conditions. UM threads will no longer have a handle to this object when this + * bridge call exits. + */ + _DevmemReservationUnrefAndMaybeDestroy(psReservation); return PVRSRV_OK; } @@ -977,9 +2849,8 @@ DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap) if (OSAtomicRead(&psDevmemHeap->uiRefCount) != DEVMEMHEAP_REFCOUNT_MIN) { - PVR_DPF((PVR_DBG_ERROR, "BUG! %s called but has too many references (%d) " - "which probably means reservations & mappings have been made from " - "the heap and not freed", __func__, + PVR_DPF((PVR_DBG_ERROR, "%s called but still has existing references " + "(%d), free existing reservations & mappings first.", __func__, OSAtomicRead(&psDevmemHeap->uiRefCount))); /* @@ -996,211 +2867,394 @@ DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap) return PVRSRV_ERROR_RETRY; } - PVR_ASSERT(OSAtomicRead(&psDevmemHeap->uiRefCount) == DEVMEMHEAP_REFCOUNT_MIN); + PVR_ASSERT(OSAtomicRead(&psDevmemHeap->uiRefCount) == DEVMEMHEAP_REFCOUNT_MIN); + + BIT_UNSET(psDevmemHeap->psDevmemCtx->uiCreatedHeaps, psDevmemHeap->uiHeapIndex); + + DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap)); + OSFreeMem(psDevmemHeap); + + return PVRSRV_OK; +} + +IMG_DEV_VIRTADDR +DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap) +{ + PVR_ASSERT(psDevmemHeap != NULL); + + return psDevmemHeap->sBaseAddr; +} + +static PVRSRV_ERROR +DevmemIntChangeSparseValidateParams(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 ui32LogicalChunkCount, + SPARSE_MEM_RESIZE_FLAGS uiSparseFlags) +{ + PMRLockHeldAssert(psPMR); + + /* Ensure a PMR has been mapped to this reservation. */ + PVR_LOG_RETURN_IF_INVALID_PARAM(uiSparseFlags & SPARSE_RESIZE_BOTH, "uiSparseFlags"); + + if (!PMR_IsSparse(psPMR) || PMR_IsMemLayoutFixed(psPMR) || + PMR_IsClientCpuMapped(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR cannot be changed because one or more of the following" + " were true: !PMR_IsSparse() = %s, PMR_IsMemLayoutFixed() = %s," + " PMR_IsClientCpuMapped() = %s", + __func__, + !PMR_IsSparse(psPMR) ? "true" : "false", + PMR_IsMemLayoutFixed(psPMR) ? "true" : "false", + PMR_IsClientCpuMapped(psPMR) ? 
"true" : "false")); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + if (PMR_IsGpuMultiMapped(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR cannot be changed because PMR_IsGpuMultiMapped() = true", + __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + if (uiSparseFlags & SPARSE_RESIZE_ALLOC) + { + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32AllocPageCount != 0, "ui32AllocPageCount"); + PVR_LOG_RETURN_IF_FALSE(ui32AllocPageCount <= ui32LogicalChunkCount, + "ui32AllocPageCount is invalid", + PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE); + } + + if (uiSparseFlags & SPARSE_RESIZE_FREE) + { + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32FreePageCount != 0, "ui32FreePageCount"); + PVR_LOG_RETURN_IF_FALSE(ui32FreePageCount <= ui32LogicalChunkCount, + "ui32FreePageCount is invalid", + PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE); + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +DevmemIntValidateSparsePMRIndices(IMG_UINT32 ui32PMRLogicalChunkCount, + IMG_UINT32 *paui32LogicalIndices, + IMG_UINT32 ui32LogicalIndexCount) +{ + IMG_UINT32 i; + IMG_UINT8 *paui8TrackedIndices; + IMG_UINT32 ui32AllocSize; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(ui32PMRLogicalChunkCount != 0); + PVR_ASSERT(paui32LogicalIndices != NULL); + PVR_ASSERT(ui32LogicalIndexCount != 0 && ui32LogicalIndexCount <= ui32PMRLogicalChunkCount); + + ui32AllocSize = LOG2_DIV(ui32PMRLogicalChunkCount, 3); + if (LOG2_MOD(ui32PMRLogicalChunkCount, 3) != 0) + { + ++ui32AllocSize; + } + + paui8TrackedIndices = OSAllocZMem(ui32AllocSize); + if (paui8TrackedIndices == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + for (i = 0; i < ui32LogicalIndexCount; i++) + { + IMG_UINT32 ui32LogicalIndex = paui32LogicalIndices[i]; + + if (ui32LogicalIndex >= ui32PMRLogicalChunkCount) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Reservation index %u is OOB", + __func__, + ui32LogicalIndex)); + + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + break; + } + + if (BIT_ISSET(paui8TrackedIndices[LOG2_DIV(ui32LogicalIndex, 3)], LOG2_MOD(ui32LogicalIndex, 3))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Duplicate index found: %u", + __func__, + ui32LogicalIndex)); + + eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY; + break; + } + + BIT_SET(paui8TrackedIndices[LOG2_DIV(ui32LogicalIndex, 3)], LOG2_MOD(ui32LogicalIndex, 3)); + } + + OSFreeMem(paui8TrackedIndices); + return eError; +} - DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); +static PVRSRV_ERROR +DevmemIntComputeVirtualIndicesFromLogical(DEVMEMINT_RESERVATION *psReservation, + IMG_UINT32 ui32LogicalIndexCount, + IMG_UINT32 *paui32LogicalIndices, + IMG_UINT32 *pui32VirtualIndexCount, + IMG_UINT32 **ppaui32VirtualIndices) +{ + IMG_UINT32 ui32OrderDiff = PMR_GetLog2Contiguity(psReservation->psMappedPMR) - + psReservation->psDevmemHeap->uiLog2PageSize; - PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap)); - OSFreeMem(psDevmemHeap); + if (ui32OrderDiff == 0) + { + *pui32VirtualIndexCount = ui32LogicalIndexCount; + *ppaui32VirtualIndices = paui32LogicalIndices; + return PVRSRV_OK; + } + else + { + IMG_UINT32 ui32PagesPerOrder = 1 << ui32OrderDiff; + IMG_UINT32 *paui32VirtualIndices; + IMG_UINT32 i = 0; - return PVRSRV_OK; -} + paui32VirtualIndices = OSAllocMem(*pui32VirtualIndexCount * sizeof(IMG_UINT32)); + PVR_RETURN_IF_NOMEM(paui32VirtualIndices); -IMG_DEV_VIRTADDR -DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap) -{ - PVR_ASSERT(psDevmemHeap != NULL); + for (i = 0; i < ui32LogicalIndexCount; i++) + { + IMG_UINT32 ui32LogicalIndex = paui32LogicalIndices[i]; + IMG_UINT32 ui32PageOffset; - return 
psDevmemHeap->sBaseAddr; + for (ui32PageOffset = 0; ui32PageOffset < ui32PagesPerOrder; ui32PageOffset++) + { + IMG_UINT32 ui32VirtAddr = ui32LogicalIndex * ui32PagesPerOrder + ui32PageOffset; + paui32VirtualIndices[i * ui32PagesPerOrder + ui32PageOffset] = ui32VirtAddr; + } + } + + *ppaui32VirtualIndices = paui32VirtualIndices; + } + + return PVRSRV_OK; } PVRSRV_ERROR -DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, - PMR *psPMR, - IMG_UINT32 ui32AllocPageCount, +DevmemIntChangeSparse(IMG_UINT32 ui32AllocPageCount, IMG_UINT32 *pai32AllocIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 *pai32FreeIndices, SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddrBase, - IMG_UINT64 sCpuVAddrBase) + DEVMEMINT_RESERVATION *psReservation) { PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR); - IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; - IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity; - IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff; + IMG_UINT32 ui32Log2PMRContiguity; + IMG_UINT32 ui32Log2HeapContiguity; + PVRSRV_MEMALLOCFLAGS_T uiFlags; + + IMG_UINT32 *pai32MapIndices; + IMG_UINT32 *pai32UnmapIndices; + IMG_UINT32 uiMapPageCount; + IMG_UINT32 uiUnmapPageCount; + + IMG_UINT32 ui32LogicalChunkCount; + PMR *psPMR; + + OSLockAcquireNested(psReservation->hLock, psReservation->eLockClass); - IMG_UINT32 *pai32MapIndices = pai32AllocIndices; - IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices; - IMG_UINT32 uiMapPageCount = ui32AllocPageCount; - IMG_UINT32 uiUnmapPageCount = ui32FreePageCount; + uiFlags = psReservation->uiFlags; - /* Special case: - * Adjust indices if we map into a heap that uses smaller page sizes - * than the physical allocation itself. - * The incoming parameters are all based on the page size of the PMR - * but the mapping functions expects parameters to be in terms of heap page sizes. */ - if (uiOrderDiff != 0) + PVR_LOG_GOTO_IF_INVALID_PARAM(psReservation->psMappedPMR != NULL, eError, InvalidPMRErr); + psPMR = psReservation->psMappedPMR; + + PMRLockPMR(psPMR); + + ui32LogicalChunkCount = PMR_GetLogicalChunkCount(psPMR); + ui32Log2PMRContiguity = PMR_GetLog2Contiguity(psPMR); + ui32Log2HeapContiguity = psReservation->psDevmemHeap->uiLog2PageSize; + + eError = DevmemIntChangeSparseValidateParams(psPMR, + ui32AllocPageCount, + ui32FreePageCount, + ui32LogicalChunkCount, + uiSparseFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntChangeSparseValidateParams", e0); + + /* This is check is made in DevmemIntMapPMR - no need to do it again in release. 
*/ + PVR_ASSERT(ui32Log2HeapContiguity <= ui32Log2PMRContiguity); + + pai32MapIndices = pai32AllocIndices; + pai32UnmapIndices = pai32FreeIndices; + uiMapPageCount = ui32AllocPageCount; + uiUnmapPageCount = ui32FreePageCount; + + /* Pre check free indices against reservation given */ + if (uiSparseFlags & SPARSE_RESIZE_FREE) { - IMG_UINT32 uiPgIdx, uiPgOffset; + IMG_UINT32 i; - uiMapPageCount = (uiMapPageCount << uiOrderDiff); - uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff); + eError = DevmemIntValidateSparsePMRIndices(ui32LogicalChunkCount, + pai32FreeIndices, + ui32FreePageCount); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntValidateSparsePMRIndices", e0); - pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices)); - PVR_GOTO_IF_NOMEM(pai32MapIndices, eError, e0); + eError = DevmemIntComputeVirtualIndicesFromLogical(psReservation, + ui32FreePageCount, + pai32FreeIndices, + &uiUnmapPageCount, + &pai32UnmapIndices); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntComputeVirtualIndicesFromLogical", e1); - pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices)); - if (!pai32UnmapIndices) + for (i = 0; i < uiUnmapPageCount; i++) { - OSFreeMem(pai32MapIndices); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); - } + IMG_UINT32 ui32VirtIndex = pai32UnmapIndices[i]; + IMG_BOOL bIsMapped; - /* Every chunk index needs to be translated from physical indices - * into heap based indices. */ - for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++) - { - for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) + bIsMapped = DevmemIntReservationIsIndexMapped(psReservation, + ui32VirtIndex); + if (!bIsMapped) { - pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = - pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; + PVR_DPF((PVR_DBG_ERROR, + "%s: Reservation index %u is not mapped into the reservation", + __func__, + ui32VirtIndex)); + + PVR_GOTO_WITH_ERROR(eError, + PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, + e1); } } + } + + /* Pre check alloc indices against reservation given */ + if (uiSparseFlags & SPARSE_RESIZE_ALLOC) + { + IMG_UINT32 i; + + eError = DevmemIntValidateSparsePMRIndices(ui32LogicalChunkCount, + pai32AllocIndices, + ui32AllocPageCount); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntValidateSparsePMRIndices", e1); - for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++) + eError = DevmemIntComputeVirtualIndicesFromLogical(psReservation, + ui32AllocPageCount, + pai32AllocIndices, + &uiMapPageCount, + &pai32MapIndices); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntComputeVirtualIndicesFromLogical", e1); + + for (i = 0; i < uiMapPageCount; i++) { - for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) + IMG_UINT32 ui32VirtIndex = pai32MapIndices[i]; + IMG_BOOL bIsMapped; + + bIsMapped = DevmemIntReservationIsIndexMapped(psReservation, + ui32VirtIndex); + if (bIsMapped) { - pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = - pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; + PVR_DPF((PVR_DBG_ERROR, + "%s: Reservation index %u is mapped into the reservation", + __func__, + ui32VirtIndex)); + + PVR_GOTO_WITH_ERROR(eError, + PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, + e1); } } } - /* - * The order of steps in which this request is done is given below. The order of - * operations is very important in this case: - * - * 1. The parameters are validated in function PMR_ChangeSparseMem below. - * A successful response indicates all the parameters are correct. 
- * In failure case we bail out from here without processing further. - * 2. On success, get the PMR specific operations done. this includes page alloc, page free - * and the corresponding PMR status changes. - * when this call fails, it is ensured that the state of the PMR before is - * not disturbed. If it succeeds, then we can go ahead with the subsequent steps. - * 3. Invalidate the GPU page table entries for the pages to be freed. - * 4. Write the GPU page table entries for the pages that got allocated. - * 5. Change the corresponding CPU space map. - * - * The above steps can be selectively controlled using flags. - */ - if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH)) - { - /* Do the PMR specific changes first */ - eError = PMR_ChangeSparseMem(psPMR, - ui32AllocPageCount, - pai32AllocIndices, - ui32FreePageCount, - pai32FreeIndices, - uiSparseFlags); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Failed to do PMR specific changes.", - __func__)); - goto e1; - } + /* Invalidate the page table entries before freeing the physical pages. */ + if (uiSparseFlags & SPARSE_RESIZE_FREE) + { + PMR_FLAGS_T uiPMRFlags; + IMG_UINT32 i; - /* Invalidate the page table entries for the free pages. - * Optimisation later would be not to touch the ones that gets re-mapped */ - if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE)) - { - PMR_FLAGS_T uiPMRFlags; + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psPMR); - /*Get the flags*/ - uiPMRFlags = PMR_Flags(psPMR); + /* Unmap the pages and mark them invalid in the MMU PTE */ + eError = MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + psReservation->sBase, + uiUnmapPageCount, + pai32UnmapIndices, + ui32Log2HeapContiguity, + uiPMRFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", e1); + + for (i = 0; i < uiUnmapPageCount; i++) + { + IMG_UINT32 uiIndex = pai32UnmapIndices[i]; - if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM)) + if (DevmemIntReservationIsIndexMapped(psReservation, uiIndex)) { - /* Unmap the pages and mark them invalid in the MMU PTE */ - MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, - uiFlags, - sDevVAddrBase, - uiUnmapPageCount, - pai32UnmapIndices, - uiLog2HeapContiguity, - uiPMRFlags); - } - } + eError = PMRUnrefPMR(psReservation->psMappedPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); - /* Wire the pages tables that got allocated */ - if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC)) - { - /* Map the pages and mark them Valid in the MMU PTE */ - eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext, - uiFlags, - sDevVAddrBase, - psPMR, - 0, - uiMapPageCount, - pai32MapIndices, - uiLog2HeapContiguity); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Failed to map alloc indices.", - __func__)); - goto e1; + DevmemIntReservationSetMappingIndex(psReservation, + uiIndex, + IMG_FALSE); } } + } - /* Currently only used for debug */ - if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM)) - { - eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, - uiFlags, - sDevVAddrBase, - psPMR, - 0, - uiMapPageCount, - pai32UnmapIndices, - uiLog2HeapContiguity); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Failed to map Free indices.", - __func__)); - goto e1; - } - } + /* Do the PMR specific changes */ + eError = PMR_ChangeSparseMemUnlocked(psPMR, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + uiSparseFlags); + if 
(PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to do PMR specific changes.", + __func__)); + goto e1; } -#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE - /* Do the changes in sparse on to the CPU virtual map accordingly */ - if (uiSparseFlags & SPARSE_MAP_CPU_ADDR) + /* Wire the pages tables that got allocated */ + if (uiSparseFlags & SPARSE_RESIZE_ALLOC) { - if (sCpuVAddrBase != 0) + IMG_UINT32 i; + + /* Map the pages and mark them Valid in the MMU PTE */ + eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + psReservation->sBase, + psPMR, + 0, + uiMapPageCount, + pai32MapIndices, + ui32Log2HeapContiguity); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map alloc indices.", + __func__)); + goto e1; + } + + for (i = 0; i < uiMapPageCount; i++) { - eError = PMR_ChangeSparseMemCPUMap(psPMR, - sCpuVAddrBase, - ui32AllocPageCount, - pai32AllocIndices, - ui32FreePageCount, - pai32FreeIndices); - if (PVRSRV_OK != eError) + IMG_UINT32 uiIndex = pai32MapIndices[i]; + + if (!DevmemIntReservationIsIndexMapped(psReservation, uiIndex)) { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Failed to map to CPU addr space.", - __func__)); - goto e0; + PVRSRV_ERROR eError2 = PMRRefPMR(psReservation->psMappedPMR); + PVR_LOG_IF_ERROR(eError2, "PMRRefPMR"); + + DevmemIntReservationSetMappingIndex(psReservation, + uiIndex, + IMG_TRUE); } } } -#endif e1: if (pai32MapIndices != pai32AllocIndices) @@ -1212,6 +3266,9 @@ DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, OSFreeMem(pai32UnmapIndices); } e0: + PMRUnlockPMR(psPMR); +InvalidPMRErr: + OSLockRelease(psReservation->hLock); return eError; } @@ -1238,10 +3295,18 @@ DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx) return PVRSRV_OK; } -PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - DEVMEMINT_CTX *psDevMemContext, - IMG_DEV_VIRTADDR sDevAddr) +/*************************************************************************/ /*! +@Function DevmemIntGetVDevAddrPageSize +@Description Get the page size for a virtual address. +@Input psDevNode +@Input sDevAddr Get the page size for this virtual address. +@Output puiLog2HeapPageSize On success returns log2 of the page size. +@Return Failure code if the virtual address is outside any heap. +*/ /**************************************************************************/ +static +PVRSRV_ERROR DevmemIntGetVDevAddrPageSize(PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEV_VIRTADDR sDevAddr, + IMG_PUINT32 puiLog2HeapPageSize) { IMG_UINT32 i, j, uiLog2HeapPageSize = 0; DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo; @@ -1277,30 +3342,40 @@ PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, return PVRSRV_ERROR_INVALID_GPU_ADDR; } - return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext, - uiLog2HeapPageSize, - sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR; + *puiLog2HeapPageSize = uiLog2HeapPageSize; + return PVRSRV_OK; } -PVRSRV_ERROR -DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevMemContext, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate) +/*************************************************************************/ /*! +@Function DevmemIntIsVDevAddrValid +@Description Checks if a virtual address is valid for access. +@Input psConnection +@Input psDevNode +@Input psDevmemCtx Device Memory context +@Input sDevAddr Virtual address to check. +@Return Failure code if the virtual address is invalid. 
+*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevAddr) { - PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; - MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; + IMG_UINT32 uiLog2HeapPageSize = 0; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); - if (psDevNode->pfnDevSLCFlushRange) + eError = DevmemIntGetVDevAddrPageSize(psDevNode, + sDevAddr, + &uiLog2HeapPageSize); + if (eError != PVRSRV_OK) { - return psDevNode->pfnDevSLCFlushRange(psDevNode, - psMMUContext, - sDevVAddr, - uiSize, - bInvalidate); + return eError; } - return PVRSRV_ERROR_NOT_SUPPORTED; + return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext, + uiLog2HeapPageSize, + sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR; } PVRSRV_ERROR @@ -1325,6 +3400,9 @@ PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA *psConnection, DEVMEMINT_CTX *psDevMemContext, IMG_DEV_VIRTADDR *psFaultAddress) { + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) { return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; @@ -1367,12 +3445,19 @@ DevmemIntExportCtx(DEVMEMINT_CTX *psContext, DEVMEMINT_CTX_EXPORT **ppsContextExport) { DEVMEMINT_CTX_EXPORT *psCtxExport; + PVRSRV_ERROR eError; psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT)); PVR_LOG_RETURN_IF_NOMEM(psCtxExport, "psCtxExport"); - DevmemIntCtxAcquire(psContext); - PMRRefPMR(psPMR); + if (!DevmemIntCtxAcquire(psContext)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorFreeCtxExport); + } + + eError = PMRRefPMR(psPMR); + PVR_GOTO_IF_ERROR(eError, ErrorIntCtxRelease); + /* Now that the source PMR is exported, the layout * can't change as there could be outstanding importers * This is to make sure both exporter and importers view of @@ -1387,16 +3472,29 @@ DevmemIntExportCtx(DEVMEMINT_CTX *psContext, *ppsContextExport = psCtxExport; return PVRSRV_OK; + +ErrorIntCtxRelease: + DevmemIntCtxRelease(psContext); +ErrorFreeCtxExport: + OSFreeMem(psCtxExport); + + return eError; } PVRSRV_ERROR DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport) { - PMRUnrefPMR(psContextExport->psPMR); - DevmemIntCtxRelease(psContextExport->psDevmemCtx); + PVRSRV_ERROR eError; + OSWRLockAcquireWrite(g_hExportCtxListLock); dllist_remove_node(&psContextExport->sNode); OSWRLockReleaseWrite(g_hExportCtxListLock); + + eError = PMRUnrefPMR(psContextExport->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); + + DevmemIntCtxRelease(psContextExport->psDevmemCtx); + OSFreeMem(psContextExport); /* Unable to find exported context, return error */ @@ -1418,11 +3516,17 @@ DevmemIntAcquireRemoteCtx(PMR *psPMR, psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode); if (psCtxExport->psPMR == psPMR) { - OSWRLockReleaseRead(g_hExportCtxListLock); - DevmemIntCtxAcquire(psCtxExport->psDevmemCtx); + if (!DevmemIntCtxAcquire(psCtxExport->psDevmemCtx)) + { + OSWRLockReleaseRead(g_hExportCtxListLock); + + return PVRSRV_ERROR_REFCOUNT_OVERFLOW; + } *ppsContext = psCtxExport->psDevmemCtx; *phPrivData = psCtxExport->psDevmemCtx->hPrivData; + OSWRLockReleaseRead(g_hExportCtxListLock); + /* PMR should have been already exported to import it * If a PMR is exported, its immutable and the same is * checked here */ @@ -1445,13 +3549,10 @@ 
DevmemIntAcquireRemoteCtx(PMR *psPMR, @Description Registers a PID to be notified when a page fault occurs on a specific device memory context. @Input psDevmemCtx The context to be notified about. -@Input ui32PID The PID of the process that would like to be - notified. @Input bRegister If true, register. If false, de-register. @Return PVRSRV_ERROR. */ /**************************************************************************/ PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, - IMG_INT32 ui32PID, IMG_BOOL bRegister) { PVRSRV_DEVICE_NODE *psDevNode; @@ -1459,30 +3560,38 @@ PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, DEVMEMINT_PF_NOTIFY *psNotifyNode; IMG_BOOL bPresent = IMG_FALSE; PVRSRV_ERROR eError; + IMG_PID ui32PID; PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx"); - /* Acquire write lock for the duration, to avoid resource free - * while trying to read (no need to then also acquire the read lock - * as we have exclusive access while holding the write lock) + /* We can be certain that the memory context is valid and not freed during + * the call time of this function as it's assured by the handle framework + * (while the handle is looked up it cannot be freed). Therefore we can + * safely retrieve the pointer to the device node and acquire the + * hPageFaultNotifyLock lock. */ - OSWRLockAcquireWrite(psDevmemCtx->hListLock); - psDevNode = psDevmemCtx->psDevNode; + /* Acquire write lock to avoid resource free while + * sMemoryContextPageFaultNotifyListHead and sProcessNotifyListHead + * are being accessed. + */ + OSWRLockAcquireWrite(psDevNode->hPageFaultNotifyLock); + if (bRegister) { /* If this is the first PID in the list, the device memory context * needs to be registered for notification */ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) { - OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead, &psDevmemCtx->sPageFaultNotifyListElem); - OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); } } + /* Obtain current client PID */ + ui32PID = OSGetCurrentClientProcessIDKM(); + /* Loop through the registered PIDs and check whether this one is * present */ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) @@ -1539,9 +3648,7 @@ PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, * unregister the device memory context from the notify list. */ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) { - OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); - OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); } } eError = PVRSRV_OK; @@ -1549,8 +3656,7 @@ PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, err_already_registered: err_out_of_mem: err_not_registered: - - OSWRLockReleaseWrite(psDevmemCtx->hListLock); + OSWRLockReleaseWrite(psDevNode->hPageFaultNotifyLock); return eError; } @@ -1574,10 +3680,14 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, DEVMEMINT_CTX *psDevmemCtx = NULL; IMG_BOOL bFailed = IMG_FALSE; - OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + /* Protect access both to sMemoryContextPageFaultNotifyListHead and + * to sProcessNotifyListHead. Those lists must be accessed atomically + * in relation to each other, otherwise we risk accessing context that + * might have already been destroyed. 
*/ + OSWRLockAcquireRead(psDevNode->hPageFaultNotifyLock); if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead))) { - OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + OSWRLockReleaseRead(psDevNode->hPageFaultNotifyLock); return PVRSRV_OK; } @@ -1591,7 +3701,7 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, if (eError != PVRSRV_OK) { PVR_LOG_ERROR(eError, "MMU_AcquireBaseAddr"); - OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + OSWRLockReleaseRead(psDevNode->hPageFaultNotifyLock); return eError; } @@ -1601,14 +3711,13 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, break; } } - OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); if (psDevmemCtx == NULL) { /* Not found, just return */ + OSWRLockReleaseRead(psDevNode->hPageFaultNotifyLock); return PVRSRV_OK; } - OSWRLockAcquireRead(psDevmemCtx->hListLock); /* * Store the first occurrence of a page fault address, @@ -1633,12 +3742,10 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, __func__, psNotifyNode->ui32PID)); - PVR_ASSERT(!"Unable to signal process"); - bFailed = IMG_TRUE; } } - OSWRLockReleaseRead(psDevmemCtx->hListLock); + OSWRLockReleaseRead(psDevNode->hPageFaultNotifyLock); if (bFailed) { @@ -1648,7 +3755,15 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, return PVRSRV_OK; } + #if defined(PDUMP) +typedef struct _DEVMEMINT_PDUMP_VALID_REGION_ +{ + DLLIST_NODE sNode; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; +} DEVMEMINT_PDUMP_VALID_REGION; + IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext) { IMG_UINT32 ui32MMUContextID; @@ -1657,7 +3772,182 @@ IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext) } PVRSRV_ERROR -DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, +DevmemIntPDumpGetValidRegions(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + DLLIST_NODE *psValidRegionsList) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiLog2HeapPageSize; + IMG_UINT32 uiHeapPageSize; + IMG_DEV_VIRTADDR sValidStart, sValidEnd, sCurrent, sEnd, sStartPage; + + /* Get the page size for heap containing the start virtual address. */ + eError = DevmemIntGetVDevAddrPageSize(psDeviceNode, + sDevAddrStart, + &uiLog2HeapPageSize); + PVR_RETURN_IF_ERROR(eError); + + uiHeapPageSize = 1 << uiLog2HeapPageSize; + + /* Iterate every page in the region to dump... */ + sValidStart.uiAddr = sValidEnd.uiAddr = 0; /* Start/end of the current region which is valid to read. */ + sStartPage.uiAddr = sDevAddrStart.uiAddr & ~((IMG_UINT64) uiHeapPageSize - 1); /* Page aligned start of the region to dump. */ + sCurrent = sStartPage; + sEnd.uiAddr = sDevAddrStart.uiAddr + uiSize; /* End of the region to dump. */ + + while (IMG_TRUE) + { + IMG_BOOL bAtEnd = sCurrent.uiAddr >= sEnd.uiAddr; + IMG_BOOL bValid = IMG_FALSE; + + if (!bAtEnd) + { + /* Check if the page starting at the current address is valid for reading. */ + eError = DevmemIntIsVDevAddrValid(psConnection, + psDeviceNode, + psDevmemCtx, + sCurrent); + if (eError == PVRSRV_OK) + { + /* If the current valid region is empty then set the start + * to the current page. */ + if (sValidStart.uiAddr == 0) + { + if (sCurrent.uiAddr == sStartPage.uiAddr) + { + /* Use the start of the region to dump if it doesn't + * start page aligned. 
*/ + sValidStart = sDevAddrStart; + } + else + { + sValidStart = sCurrent; + } + } + /* Set the end of the valid region. */ + sValidEnd.uiAddr = sCurrent.uiAddr + uiHeapPageSize; + /* Restrict to the region to dump. */ + if (sValidEnd.uiAddr > sEnd.uiAddr) + { + sValidEnd = sEnd; + } + bValid = IMG_TRUE; + } + /* Move to the next page. */ + sCurrent.uiAddr += uiHeapPageSize; + } + + /* If the current page is invalid or we've reached the end of the region + * to dump then pdump the current valid region. */ + if (!bValid && sValidEnd.uiAddr > sValidStart.uiAddr) + { + DEVMEMINT_PDUMP_VALID_REGION *psRegion = OSAllocMem(sizeof(*psRegion)); + PVR_LOG_GOTO_IF_NOMEM(psRegion, eError, ErrFreeRegions); + + psRegion->sDevVAddr = sValidStart; + psRegion->uiSize = sValidEnd.uiAddr - sValidStart.uiAddr; + + dllist_add_to_tail(psValidRegionsList, &psRegion->sNode); + + sValidStart.uiAddr = sValidEnd.uiAddr = 0; + } + + if (bAtEnd) + { + break; + } + } + + return PVRSRV_OK; + +ErrFreeRegions: + DevmemIntPDumpFreeValidRegions(psValidRegionsList); + return eError; +} + +void +DevmemIntPDumpFreeValidRegions(DLLIST_NODE *psValidRegionsList) +{ + DLLIST_NODE *psThis, *psNext; + + dllist_foreach_node(psValidRegionsList, psThis, psNext) + { + DEVMEMINT_PDUMP_VALID_REGION *psRegion = + IMG_CONTAINER_OF(psThis, DEVMEMINT_PDUMP_VALID_REGION, sNode); + + dllist_remove_node(psThis); + OSFreeMem(psRegion); + } +} + +PVRSRV_ERROR +DevmemIntPDumpSaveFromRegionListToFileVirtual(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + DLLIST_NODE *psDevAddrRegions, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiPDumpMMUCtx; + DLLIST_NODE *psThis, *psNext; + + /* Confirm that the device node's ui32InternalID matches the bound + * PDump device stored in PVRSRV_DATA. + */ + if (psDevmemCtx->psDevNode->sDevId.ui32InternalID != + (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice) + { + return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; + } + + eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, + &uiPDumpMMUCtx, + ui32PDumpFlags); + + PVR_ASSERT(eError == PVRSRV_OK); + + /* The following SYSMEM refers to the 'MMU Context', hence it + * should be the MMU context, not the PMR, that says what the PDump + * MemSpace tag is? + * From a PDump P.O.V. 
it doesn't matter which name space we use as long + * as that MemSpace is used on the 'MMU Context' we're dumping from + */ + + dllist_foreach_node(psDevAddrRegions, psThis, psNext) + { + DEVMEMINT_PDUMP_VALID_REGION *psRegion = + IMG_CONTAINER_OF(psThis, DEVMEMINT_PDUMP_VALID_REGION, sNode); + + eError = PDumpMMUSAB(psDevmemCtx->psDevNode, + psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, + uiPDumpMMUCtx, + psRegion->sDevVAddr, + psRegion->uiSize, + pszFilename, + ui32FileOffset, + ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + ui32FileOffset += psRegion->uiSize; + + dllist_remove_node(psThis); + OSFreeMem(psRegion); + } + + MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, IMG_DEV_VIRTADDR sDevAddrStart, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32ArraySize, @@ -1667,6 +3957,10 @@ DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, { PVRSRV_ERROR eError; IMG_UINT32 uiPDumpMMUCtx; + IMG_UINT32 uiLog2HeapPageSize; + IMG_UINT32 uiHeapPageSize; + IMG_DEV_VIRTADDR sValidStart, sValidEnd, sCurrent, sEnd, sStartPage; + IMG_UINT64 ui64PageMask; PVR_UNREFERENCED_PARAMETER(ui32ArraySize); @@ -1686,12 +3980,128 @@ DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, PVR_ASSERT(eError == PVRSRV_OK); /* - The following SYSMEM refers to the 'MMU Context', hence it - should be the MMU context, not the PMR, that says what the PDump - MemSpace tag is? - From a PDump P.O.V. it doesn't matter which name space we use as long - as that MemSpace is used on the 'MMU Context' we're dumping from - */ + Get the page size for heap containing the start virtual address. + */ + eError = DevmemIntGetVDevAddrPageSize(psDeviceNode, + sDevAddrStart, + &uiLog2HeapPageSize); + if (eError != PVRSRV_OK) + { + return eError; + } + uiHeapPageSize = 1 << uiLog2HeapPageSize; + ui64PageMask = uiHeapPageSize - 1; + + /* + Iterate every page in the region to dump... + */ + sValidStart.uiAddr = sValidEnd.uiAddr = 0; /* Start/end of the current region which is valid to read. */ + sStartPage.uiAddr = sDevAddrStart.uiAddr & ~ui64PageMask; /* Page aligned start of the region to dump. */ + sCurrent = sStartPage; + sEnd.uiAddr = sDevAddrStart.uiAddr + uiSize; /* End of the region to dump. */ + for (;;) + { + IMG_BOOL bAtEnd = sCurrent.uiAddr >= sEnd.uiAddr; + IMG_BOOL bValid = IMG_FALSE; + + if (!bAtEnd) + { + /* Check if the page starting at the current address is valid for reading. */ + eError = DevmemIntIsVDevAddrValid(psConnection, + psDeviceNode, + psDevmemCtx, + sCurrent); + if (eError == PVRSRV_OK) + { + /* If the current valid region is empty then set the start to the current page. */ + if (sValidStart.uiAddr == 0) + { + if (sCurrent.uiAddr == sStartPage.uiAddr) + { + /* Use the start of the region to dump if it doesn't start page aligned. */ + sValidStart = sDevAddrStart; + } + else + { + sValidStart = sCurrent; + } + } + /* Set the end of the valid region. */ + sValidEnd.uiAddr = sCurrent.uiAddr + uiHeapPageSize; + /* Restrict to the region to dump. */ + if (sValidEnd.uiAddr > sEnd.uiAddr) + { + sValidEnd = sEnd; + } + bValid = IMG_TRUE; + } + /* Move to the next page. */ + sCurrent.uiAddr += uiHeapPageSize; + } + /* + If the current page is invalid or we've reached the end of the region to dump then pdump the current valid region. 
+ */ + if (!bValid && sValidEnd.uiAddr > sValidStart.uiAddr) + { + IMG_DEVMEM_SIZE_T uiValidSize = sValidEnd.uiAddr - sValidStart.uiAddr; + eError = PDumpMMUSAB(psDevmemCtx->psDevNode, + psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, + uiPDumpMMUCtx, + sValidStart, + uiValidSize, + pszFilename, + ui32FileOffset, + ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + ui32FileOffset += uiValidSize; + + sValidStart.uiAddr = sValidEnd.uiAddr = 0; + } + + if (bAtEnd) + { + break; + } + } + + MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtualNoValidate(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiPDumpMMUCtx; + + /* Confirm that the device node's ui32InternalID matches the bound + * PDump device stored in PVRSRV_DATA. + */ + if (psDevmemCtx->psDevNode->sDevId.ui32InternalID != + (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice) + { + return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; + } + + eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, + &uiPDumpMMUCtx, + ui32PDumpFlags); + + PVR_ASSERT(eError == PVRSRV_OK); + + /* The following SYSMEM refers to the 'MMU Context', hence it + * should be the MMU context, not the PMR, that says what the PDump + * MemSpace tag is? + * From a PDump P.O.V. it doesn't matter which name space we use as long + * as that MemSpace is used on the 'MMU Context' we're dumping from + */ eError = PDumpMMUSAB(psDevmemCtx->psDevNode, psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, uiPDumpMMUCtx, @@ -1703,6 +4113,7 @@ DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, PVR_ASSERT(eError == PVRSRV_OK); MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); + return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.c index 5670af07f6d4..9255e8622412 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.c @@ -44,6 +44,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "allocmem.h" #include "hash.h" +#include "hash_functions.h" #include "img_defs.h" #include "img_types.h" #include "lock.h" @@ -65,12 +66,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define STREAM_LINE_LENGTH 512 #if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -#define WRITER_THREAD_SLEEP_TIMEOUT 0ull +#define WRITER_THREAD_SLEEP_TIMEOUT 0ULL #else -#define WRITER_THREAD_SLEEP_TIMEOUT 28800000000ull +#define WRITER_THREAD_SLEEP_TIMEOUT 28800000000ULL #endif -#define WRITER_THREAD_DESTROY_TIMEOUT 100000ull -#define WRITER_THREAD_DESTROY_RETRIES 10u +#define WRITER_THREAD_DESTROY_TIMEOUT 100000ULL +#define WRITER_THREAD_DESTROY_RETRIES 10U #define WRITE_RETRY_COUNT 10 /* retry a write to a TL buffer 10 times */ #define WRITE_RETRY_WAIT_TIME 100 /* wait 10ms between write retries */ @@ -85,7 +86,7 @@ typedef enum THREAD_STATE static struct DIIB_IMPL { HASH_TABLE *psEntriesTable; /*!< Table of entries. */ - POS_LOCK psEntriesLock; /*!< Protects psEntriesTable. */ + POSWR_LOCK psEntriesLock; /*!< Protects psEntriesTable. 
*/ IMG_HANDLE hWriterThread; IMG_HANDLE hWriterEventObject; ATOMIC_T eThreadState; @@ -130,35 +131,6 @@ struct DIIB_WORK_ITEM DLLIST_NODE sQueueElement; }; -/* Declaring function here to avoid dependencies that are introduced by - * including osfunc.h. */ -IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, - size_t uiSize); - -/* djb2 hash function is public domain */ -static IMG_UINT32 _Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) -{ - IMG_CHAR *pszStr = pKey; - IMG_UINT32 ui32Hash = 5381, ui32Char; - - PVR_UNREFERENCED_PARAMETER(uKeySize); - PVR_UNREFERENCED_PARAMETER(uHashTabLen); - - while ((ui32Char = *pszStr++) != '\0') - { - ui32Hash = ((ui32Hash << 5) + ui32Hash) + ui32Char; /* hash * 33 + c */ - } - - return ui32Hash; -} - -static IMG_BOOL _Compare(size_t uKeySize, void *pKey1, void *pKey2) -{ - IMG_CHAR *pszKey1 = pKey1, *pszKey2 = pKey2; - - return OSStringNCompare(pszKey1, pszKey2, uKeySize) == 0; -} - /* ----- native callbacks interface ----------------------------------------- */ static void _WriteWithRetires(void *pvNativeHandle, const IMG_CHAR *pszStr, @@ -318,6 +290,8 @@ static void _WriterThread(void *pvArg) IMG_HANDLE hEvent; DLLIST_NODE *psNode; + PVR_UNREFERENCED_PARAMETER(pvArg); + eError = OSEventObjectOpen(_g_psImpl->hWriterEventObject, &hEvent); PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); @@ -425,10 +399,10 @@ DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath) { DIIB_ENTRY *psEntry; - OSLockAcquire(_g_psImpl->psEntriesLock); + OSWRLockAcquireRead(_g_psImpl->psEntriesLock); psEntry = (void *) HASH_Retrieve_Extended(_g_psImpl->psEntriesTable, (IMG_CHAR *) pszPath); - OSLockRelease(_g_psImpl->psEntriesLock); + OSWRLockReleaseRead(_g_psImpl->psEntriesLock); return psEntry; } @@ -455,7 +429,8 @@ static PVRSRV_ERROR _CreateStream(IMG_CHAR *pszStreamName, IMG_HANDLE *phStream) } eError = TLStreamCreate(&hStream, pszStreamName, STREAM_BUFFER_SIZE, - TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL); + TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL, + NULL, NULL); PVR_RETURN_IF_ERROR(eError); *phStream = hStream; @@ -545,7 +520,11 @@ PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, /* increment ref count on the context so that it doesn't get freed * before it gets processed by the writer thread. */ - OSAtomicIncrement(&psContext->iRefCnt); + if (OSAtomicAddUnless(&psContext->iRefCnt, 1, IMG_INT32_MAX) == IMG_INT32_MAX) + { + /* Job is not scheduled to the writer queue; there are too many waiting. 
*/ + PVR_LOG_GOTO_WITH_ERROR("OSAtomicAddUnless", eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, overflow_); + } OSLockAcquire(_g_psImpl->psWriterLock); dllist_add_to_head(&_g_psImpl->sWriterQueue, &psItem->sQueueElement); @@ -558,6 +537,7 @@ PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, free_item_: eError = PVRSRV_ERROR_NOT_FOUND; +overflow_: OSFreeMemNoStats(psItem); return_: return eError; @@ -621,7 +601,9 @@ PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext) PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); + OSWRLockAcquireRead(_g_psImpl->psEntriesLock); eError = HASH_Iterate(_g_psImpl->psEntriesTable, _listName, psContext->hStream); + OSWRLockReleaseRead(_g_psImpl->psEntriesLock); PVR_LOG_IF_ERROR(eError, "HASH_Iterate_Extended"); eError = TLStreamMarkEOS(psContext->hStream, IMG_FALSE); @@ -639,10 +621,11 @@ static PVRSRV_ERROR _Init(void) _g_psImpl->psEntriesTable = HASH_Create_Extended(ENTRIES_TABLE_INIT_SIZE, DI_IMPL_BRG_PATH_LEN, - _Hash, _Compare); + HASH_Djb2_Hash, + HASH_Djb2_Compare); PVR_LOG_GOTO_IF_NOMEM(_g_psImpl->psEntriesTable, eError, free_impl_); - eError = OSLockCreate(&_g_psImpl->psEntriesLock); + eError = OSWRLockCreate(&_g_psImpl->psEntriesLock); PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_table_); eError = OSLockCreate(&_g_psImpl->psWriterLock); @@ -662,7 +645,7 @@ static PVRSRV_ERROR _Init(void) free_writer_lock_: OSLockDestroy(_g_psImpl->psWriterLock); free_entries_lock_: - OSLockDestroy(_g_psImpl->psEntriesLock); + OSWRLockDestroy(_g_psImpl->psEntriesLock); free_table_: HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); free_impl_: @@ -689,7 +672,7 @@ static void _DeInit(void) PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); } - LOOP_UNTIL_TIMEOUT(WRITER_THREAD_DESTROY_TIMEOUT) + LOOP_UNTIL_TIMEOUT_US(WRITER_THREAD_DESTROY_TIMEOUT) { eError = OSThreadDestroy(_g_psImpl->hWriterThread); if (eError == PVRSRV_OK) @@ -697,7 +680,7 @@ static void _DeInit(void) break; } OSWaitus(WRITER_THREAD_DESTROY_TIMEOUT/WRITER_THREAD_DESTROY_RETRIES); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); } @@ -710,7 +693,7 @@ static void _DeInit(void) HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); OSLockDestroy(_g_psImpl->psWriterLock); - OSLockDestroy(_g_psImpl->psEntriesLock); + OSWRLockDestroy(_g_psImpl->psEntriesLock); OSFreeMem(_g_psImpl); _g_psImpl = NULL; } @@ -721,27 +704,28 @@ static void _DeInit(void) * Returns current offset in the path (the current path length without the * NUL character). If there is no more space in the path returns -1 * to indicate an error (the path is too long to fit into the buffer). 
*/ -static IMG_INT _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup) +static ssize_t _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup) { - IMG_INT iOff; + ssize_t iOff; + ssize_t iCopiedCnt; + size_t uFreeCnt; - if (psGroup == NULL) - { - return 0; - } + PVR_RETURN_IF_FALSE(psGroup != NULL, 0); PVR_ASSERT(pszPath != NULL); iOff = _BuildGroupPath(pszPath, psGroup->psParentGroup); PVR_RETURN_IF_FALSE(iOff != -1, -1); - iOff += OSStringLCopy(pszPath + iOff, "/", - DI_IMPL_BRG_PATH_LEN - iOff); - PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); + uFreeCnt = DI_IMPL_BRG_PATH_LEN - iOff; + PVR_RETURN_IF_FALSE(uFreeCnt > 1, -1); + + pszPath[iOff++] = '/'; + --uFreeCnt; - iOff += OSStringLCopy(pszPath + iOff, psGroup->pszName, - DI_IMPL_BRG_PATH_LEN - iOff); - PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); + iCopiedCnt = OSStringSafeCopy(&pszPath[iOff], psGroup->pszName, uFreeCnt); + PVR_RETURN_IF_FALSE(iCopiedCnt >= 0, -1); + iOff += iCopiedCnt; return iOff; } @@ -749,16 +733,20 @@ static IMG_INT _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup) static PVRSRV_ERROR _BuildEntryPath(IMG_CHAR *pszPath, const IMG_CHAR *pszName, const DIIB_GROUP *psGroup) { - IMG_INT iOff = _BuildGroupPath(pszPath, psGroup); + ssize_t iOff = _BuildGroupPath(pszPath, psGroup); + ssize_t iCopiedCnt; + size_t uFreeCnt; + PVR_RETURN_IF_FALSE(iOff != -1, PVRSRV_ERROR_INVALID_OFFSET); - iOff += OSStringLCopy(pszPath + iOff, "/", DI_IMPL_BRG_PATH_LEN - iOff); - PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, - PVRSRV_ERROR_INVALID_OFFSET); + uFreeCnt = DI_IMPL_BRG_PATH_LEN - iOff; + PVR_RETURN_IF_FALSE(uFreeCnt > 1, PVRSRV_ERROR_OUT_OF_MEMORY); - iOff += OSStringLCopy(pszPath + iOff, pszName, DI_IMPL_BRG_PATH_LEN - iOff); - PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, - PVRSRV_ERROR_INVALID_OFFSET); + pszPath[iOff++] = '/'; + --uFreeCnt; + + iCopiedCnt = OSStringSafeCopy(&pszPath[iOff], pszName, uFreeCnt); + PVR_RETURN_IF_FALSE(iCopiedCnt >= 0, PVRSRV_ERROR_OUT_OF_MEMORY); return PVRSRV_OK; } @@ -813,12 +801,12 @@ static PVRSRV_ERROR _CreateEntry(const IMG_CHAR *pszName, goto destroy_lock_; } - OSLockAcquire(_g_psImpl->psEntriesLock); + OSWRLockAcquireWrite(_g_psImpl->psEntriesLock); eError = HASH_Insert_Extended(_g_psImpl->psEntriesTable, psEntry->pszFullPath, (uintptr_t) psEntry) ? 
PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; - OSLockRelease(_g_psImpl->psEntriesLock); + OSWRLockReleaseWrite(_g_psImpl->psEntriesLock); PVR_LOG_GOTO_IF_ERROR(eError, "HASH_Insert_Extended failed", destroy_lock_); *pvEntry = psEntry; @@ -838,9 +826,9 @@ static void _DestroyEntry(void *pvEntry) DIIB_ENTRY *psEntry = pvEntry; PVR_ASSERT(psEntry != NULL); - OSLockAcquire(_g_psImpl->psEntriesLock); + OSWRLockAcquireWrite(_g_psImpl->psEntriesLock); HASH_Remove_Extended(_g_psImpl->psEntriesTable, psEntry->pszFullPath); - OSLockRelease(_g_psImpl->psEntriesLock); + OSWRLockReleaseWrite(_g_psImpl->psEntriesLock); OSLockDestroy(psEntry->hLock); OSFreeMem(psEntry); @@ -887,3 +875,7 @@ PVRSRV_ERROR PVRDIImplBrgRegister(void) return DIRegisterImplementation("impl_brg", &sImplCb); } + +/****************************************************************************** + End of file (di_impl_brg.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.h b/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.h index 7d5a6ca757e8..b8cff25d94ec 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/di_impl_brg.h @@ -44,6 +44,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVR_IMPL_BRG_H #include "pvrsrv_error.h" +#include "lock_types.h" typedef struct DI_CONTEXT_TAG DI_CONTEXT; typedef struct DI_ENTRY_DESC DI_ENTRY_DESC; @@ -89,4 +90,5 @@ PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext); + #endif /* PVR_IMPL_BRG_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/di_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/di_server.c index 330d35792819..47046d7ec3a2 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/di_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/di_server.c @@ -135,7 +135,7 @@ PVRSRV_ERROR DIInit(void) _g_psRootGroup->pszName = OSAllocMemNoStats(sizeof(ROOT_GROUP_NAME)); PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup->pszName, eError, cleanup_name_); - OSStringLCopy(_g_psRootGroup->pszName, ROOT_GROUP_NAME, + OSStringSafeCopy(_g_psRootGroup->pszName, ROOT_GROUP_NAME, sizeof(ROOT_GROUP_NAME)); dllist_init(&_g_psRootGroup->sListNode); @@ -456,7 +456,7 @@ PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, uSize = OSStringLength(pszName) + 1; psGroup->pszName = OSAllocMem(uSize * sizeof(*psGroup->pszName)); PVR_LOG_GOTO_IF_NOMEM(psGroup->pszName, eError, cleanup_name_); - OSStringLCopy(psGroup->pszName, pszName, uSize); + OSStringSafeCopy(psGroup->pszName, pszName, uSize); psGroup->psParent = psParent; dllist_init(&psGroup->sGroupList); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/dma_km.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/dma_km.c index a2c504da4fc9..6e5ae4915847 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/dma_km.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/dma_km.c @@ -130,49 +130,163 @@ static void Cleanup(void* pvCleanupData, IMG_BOOL bAdvanceTimeline) #endif /* !defined(NO_HARDWARE) */ IMG_EXPORT PVRSRV_ERROR -PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode) +PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode, CONNECTION_DATA *psConnectionData) { PVRSRV_DEVICE_CONFIG *psDevConfig = 
psDeviceNode->psDevConfig; + PVRSRV_ERROR eError; - if (psDevConfig->bHasDma) + if (!psDevConfig->bHasDma) { + return PVRSRV_OK; + } + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevConfig->pfnSlaveDMAGetChan, "pfnSlaveDMAGetChan"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevConfig->pfnSlaveDMAFreeChan, "pfnSlaveDMAFreeChan"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevConfig->pszDmaTxChanName, "pszDmaTxChanName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevConfig->pszDmaRxChanName, "pszDmaRxChanName"); + + eError = OSEventObjectCreate("Dma transfer cleanup event object", + &psConnectionData->hDmaEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", dma_init_error1); - PVR_ASSERT(psDevConfig->pfnSlaveDMAGetChan != NULL); - PVR_ASSERT(psDevConfig->pfnSlaveDMAFreeChan != NULL); - PVR_ASSERT(psDevConfig->pszDmaTxChanName != NULL); - PVR_ASSERT(psDevConfig->pszDmaRxChanName != NULL); + OSLockAcquire(psDeviceNode->hConnectionsLock); + if (psDeviceNode->ui32RefCountDMA == 0) + { psDeviceNode->hDmaTxChan = psDevConfig->pfnSlaveDMAGetChan(psDevConfig, - psDevConfig->pszDmaTxChanName); + psDevConfig->pszDmaTxChanName); if (!psDeviceNode->hDmaTxChan) { - return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_RESOURCE_UNAVAILABLE, dma_init_error2); } psDeviceNode->hDmaRxChan = psDevConfig->pfnSlaveDMAGetChan(psDevConfig, - psDevConfig->pszDmaRxChanName); + psDevConfig->pszDmaRxChanName); if (!psDeviceNode->hDmaRxChan) { - psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan); - return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_RESOURCE_UNAVAILABLE, dma_init_error3); } - psDeviceNode->bHasSystemDMA = true; + + eError = OSLockCreate(&psDeviceNode->hDmaTxLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(hDmaTxLock)", dma_init_error_txlock); + + eError = OSLockCreate(&psDeviceNode->hDmaRxLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(hDmaRxLock)", dma_init_error_rxlock); } + psDeviceNode->ui32RefCountDMA++; + + OSLockRelease(psDeviceNode->hConnectionsLock); + + OSAtomicWrite(&psConnectionData->ui32NumDmaTransfersInFlight, 0); + psConnectionData->bAcceptDmaRequests = IMG_TRUE; + return PVRSRV_OK; + +dma_init_error_rxlock: + OSLockDestroy(psDeviceNode->hDmaTxLock); +dma_init_error_txlock: + psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaRxChan); +dma_init_error3: + psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan); +dma_init_error2: + OSLockRelease(psDeviceNode->hConnectionsLock); + OSEventObjectDestroy(psConnectionData->hDmaEventObject); +dma_init_error1: + return eError; +} + +static void WaitForOutstandingDma(CONNECTION_DATA *psConnectionData) +{ + + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + IMG_UINT32 ui32Tries = 100; + +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, + "Waiting on %d DMA transfers in flight...", OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight))); +#endif + + eError = OSEventObjectOpen(psConnectionData->hDmaEventObject, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); + return; + } + + while (OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight) != 0) + { + /* + #define DMA_TRANSFER_TIMEOUT_US (5000000ULL) + + This currently doesn't work properly. Wait time is not as requested. 
+ Using OSSleepms instead + + OSEventObjectWaitKernel(hEvent, DMA_TRANSFER_TIMEOUT_US); + */ + OSSleepms(50); + if (!ui32Tries) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout while waiting on outstanding DMA transfers!", __func__)); + break; + } + + ui32Tries--; + } + + OSEventObjectClose(hEvent); } IMG_EXPORT void -PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode) +PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode, CONNECTION_DATA *psConnectionData) { - if (psDeviceNode->bHasSystemDMA) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + if (!psDevConfig->bHasDma) { - PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + return; + } + + OSLockAcquire(psDeviceNode->hDmaTxLock); + OSLockAcquire(psDeviceNode->hDmaRxLock); + + psConnectionData->bAcceptDmaRequests = IMG_FALSE; + + OSLockRelease(psDeviceNode->hDmaRxLock); + OSLockRelease(psDeviceNode->hDmaTxLock); + + WaitForOutstandingDma(psConnectionData); + + OSLockAcquire(psDeviceNode->hConnectionsLock); + + if (psDeviceNode->ui32RefCountDMA == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid ref count (%d)", __func__, psDeviceNode->ui32RefCountDMA)); + OSLockRelease(psDeviceNode->hConnectionsLock); + return; + } + + if (--psDeviceNode->ui32RefCountDMA == 0) + { + if (psDeviceNode->hDmaRxLock != NULL) + { + OSLockDestroy(psDeviceNode->hDmaRxLock); + } + + if (psDeviceNode->hDmaTxLock != NULL) + { + OSLockDestroy(psDeviceNode->hDmaTxLock); + } psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaRxChan); psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan); } + + OSLockRelease(psDeviceNode->hConnectionsLock); + + OSEventObjectDestroy(psConnectionData->hDmaEventObject); } IMG_EXPORT PVRSRV_ERROR @@ -186,7 +300,7 @@ DmaDeviceParams(CONNECTION_DATA *psConnection, *ui32DmaBuffAlign = psDevConfig->ui32DmaAlignment; *ui32DmaTransferMult = psDevConfig->ui32DmaTransferUnit; - return PVRSRV_OK; + return PVRSRV_OK; } IMG_EXPORT PVRSRV_ERROR @@ -213,7 +327,8 @@ DmaSparseMappingTable(PMR *psPMR, ui32SizeInPages, uiOffset, psDevPhyAddr, - pbValid); + pbValid, + CPU_USE); PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", err3); PMRUnlockSysPhysAddresses(psPMR); @@ -269,12 +384,46 @@ DmaTransfer(CONNECTION_DATA *psConnection, void* pvChan = NULL; SERVER_CLEANUP_DATA* psServerData; void* pvOSData; + POS_LOCK hChanLock = uiFlags & (DMA_FLAG_MEM_TO_DEV) ? 
psDevNode->hDmaTxLock : psDevNode->hDmaRxLock; + + for (i=0; ihDmaReqLock); + OSLockAcquire(hChanLock); if (!psConnection->bAcceptDmaRequests) { - OSLockRelease(psConnection->hDmaReqLock); + OSLockRelease(hChanLock); return PVRSRV_OK; } @@ -333,7 +482,8 @@ DmaTransfer(CONNECTION_DATA *psConnection, ui32SizeInPages, puiOffset[i], psDevPhyAddr, - pbValid); + pbValid, + CPU_USE); PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", loop_e4); psDevConfig->pfnDevPhysAddr2DmaAddr(psDevConfig, @@ -394,7 +544,7 @@ DmaTransfer(CONNECTION_DATA *psConnection, /* One of the transfers could not be programmed, roll back */ OSDmaForceCleanup(psDevNode, pvChan, pvOSData, psServerData, Cleanup); } - OSLockRelease(psConnection->hDmaReqLock); + OSLockRelease(hChanLock); return eError; e3: @@ -407,7 +557,8 @@ DmaTransfer(CONNECTION_DATA *psConnection, e1: OSFreeMem(psServerData); e0: - OSLockRelease(psConnection->hDmaReqLock); + OSLockRelease(hChanLock); +error_return: return eError; #endif } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/handle.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/handle.c index aa2ac034c3a5..7ebd38861963 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/handle.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/handle.c @@ -1120,7 +1120,6 @@ PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, /* If we were able to allocate the handle then there should be no reason why we * can't also get it's handle structure. Otherwise something has gone badly wrong. */ - PVR_ASSERT(eError == PVRSRV_OK); PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "Failed to get parent handle structure", ExitFreeHandle); @@ -1412,8 +1411,6 @@ void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, (void*) hHandle, HandleTypeToString(eType), psBase, HandleBaseTypeToString(psBase->eType))); - PVR_ASSERT(eError == PVRSRV_OK); - return; } @@ -1493,6 +1490,12 @@ static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, /* Prepare children for destruction */ eError = IterateOverChildren(psBase, psHandleData, HandleUnrefAndMaybeMarkForFreeWrapper); + if (eError != PVRSRV_OK) + { + /* If there was a problem destroying any of the child handles, + * the parent handle still needs to be able to be looked-up */ + psHandleData->bCanLookup = IMG_TRUE; + } PVR_LOG_RETURN_IF_ERROR(eError, "HandleUnrefAndMaybeMarkForFreeWrapper"); return PVRSRV_OK; @@ -1764,6 +1767,7 @@ PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, return PVRSRV_OK; ErrorDestroyHandleBase: + eError = PVRSRV_ERROR_INVALID_PARAMS; (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); ErrorUnlock: @@ -1944,7 +1948,7 @@ static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData) if (psHandleData->pfnReleaseData != NULL) { eError = psHandleData->pfnReleaseData(psHandleData->pvData); - if (eError == PVRSRV_ERROR_RETRY) + if (PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release " "data callback for handle %p of type = %s", __func__, @@ -1954,7 +1958,9 @@ static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData) } else if (eError != PVRSRV_OK) { - return eError; + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected error from pfnReleaseData (%s)", + __func__, + PVRSRVGetErrorString(eError))); } } @@ -2044,9 +2050,6 @@ static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] = PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT, 
PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, -#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION) - PVRSRV_HANDLE_TYPE_RGX_SERVER_GPUMAP_CONTEXT, -#endif PVRSRV_HANDLE_TYPE_RI_HANDLE, PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, @@ -2055,6 +2058,7 @@ static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] = PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, @@ -2069,6 +2073,7 @@ static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] = PVRSRV_HANDLE_TYPE_DC_BUFFER, PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT, PVRSRV_HANDLE_TYPE_DC_DEVICE, + PVRSRV_HANDLE_TYPE_PVR_HWPERF_SD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, PVRSRV_HANDLE_TYPE_DI_CONTEXT, PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP @@ -2105,6 +2110,8 @@ PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase) return eError; } +/* Only called from sync_fallback_server.c */ +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) /*! ******************************************************************************* @Function PVRSRVRetrieveProcessHandleBase @@ -2144,8 +2151,13 @@ PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void) /* Not being called from the cleanup thread, so return the process * handle base for the current process. */ + uintptr_t uiHashKey; + + uiHashKey = OSAcquireCurrentPPIDResourceRefKM(); + OSReleasePPIDResourceRefKM(uiHashKey); + psProcHandleBase = (PROCESS_HANDLE_BASE *) - HASH_Retrieve(g_psProcessHandleBaseTable, OSGetCurrentClientProcessIDKM()); + HASH_Retrieve(g_psProcessHandleBaseTable, uiHashKey); } OSLockRelease(g_hProcessHandleBaseLock); @@ -2156,26 +2168,30 @@ PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void) } return psHandleBase; } +#endif /*! ******************************************************************************* @Function PVRSRVAcquireProcessHandleBase - @Description Increments reference count on a process handle base identified - by uiPid and returns pointer to the base. If the handle base - does not exist it will be allocated. - @Inout uiPid - PID of a process - @Output ppsBase - pointer to a handle base for the process identified by - uiPid + @Description Increments reference count on the process handle base for the + current process and returns pointer to the base. If the handle + base does not exist it will be allocated. 
+ @Output ppsBase - pointer to a handle base for the current process @Return Error code or PVRSRV_OK ******************************************************************************/ -PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase) +PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(PROCESS_HANDLE_BASE **ppsBase) { PROCESS_HANDLE_BASE *psBase; PVRSRV_ERROR eError; + uintptr_t uiHashKey; OSLockAcquire(g_hProcessHandleBaseLock); - psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiPid); + /* Acquire the process resource hash key (and take ref) */ + uiHashKey = OSAcquireCurrentPPIDResourceRefKM(); + PVR_ASSERT(uiHashKey != 0); + + psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiHashKey); /* In case there is none we are going to allocate one */ if (psBase == NULL) @@ -2190,7 +2206,8 @@ PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE * PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorFreeProcessHandleBase); /* Insert the handle base into the global hash table */ - bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiPid, (uintptr_t) psBase); + psBase->uiHashKey = uiHashKey; + bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiHashKey, (uintptr_t)psBase); PVR_LOG_GOTO_IF_FALSE(bSuccess, "HASH_Insert failed", ErrorFreeHandleBase); } @@ -2203,10 +2220,12 @@ PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE * return PVRSRV_OK; ErrorFreeHandleBase: + eError = PVRSRV_ERROR_INVALID_PARAMS; PVRSRVFreeHandleBase(psBase->psHandleBase, 0); ErrorFreeProcessHandleBase: OSFreeMem(psBase); ErrorUnlock: + OSReleasePPIDResourceRefKM(uiHashKey); OSLockRelease(g_hProcessHandleBaseLock); return eError; @@ -2216,22 +2235,21 @@ PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE * ******************************************************************************* @Function PVRSRVReleaseProcessHandleBase @Description Decrements reference count on a process handle base psBase - for a process identified by uiPid. If the reference count - reaches 0 the handle base will be freed.. + for the current process. If the reference count reaches 0 the + process handle base will be freed. 
@Input psBase - pointer to a process handle base - @Inout uiPid - PID of a process @Inout ui64MaxBridgeTime - maximum time a handle destroy operation can hold the handle base lock (after that time a lock will be release and reacquired for another time slice) @Return Error code or PVRSRV_OK ******************************************************************************/ -PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, +PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime) { PVRSRV_ERROR eError; IMG_INT iRefCount; - uintptr_t uiHashValue; + uintptr_t uiHashValue = 0; OSLockAcquire(g_hProcessHandleBaseLock); @@ -2239,6 +2257,8 @@ PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID if (iRefCount != 0) { + /* Release the process resource hash key (drop ref) */ + OSReleasePPIDResourceRefKM(psBase->uiHashKey); OSLockRelease(g_hProcessHandleBaseLock); return PVRSRV_OK; } @@ -2246,7 +2266,10 @@ PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID /* in case the refcount becomes 0 we can remove the process handle base * and all related objects */ - uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, uiPid); + uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, psBase->uiHashKey); + /* Release the process resource hash key (drop ref) */ + OSReleasePPIDResourceRefKM(psBase->uiHashKey); + psBase->uiHashKey = 0; OSLockRelease(g_hProcessHandleBaseLock); PVR_LOG_RETURN_IF_FALSE(uiHashValue != 0, "HASH_Remove failed", @@ -2280,6 +2303,7 @@ PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64Max PVRSRV_ERROR eError; IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid(); uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid(); + IMG_UINT32 ui32ErrorCount = 0; PVR_ASSERT(gpsHandleFuncs); @@ -2347,9 +2371,24 @@ PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64Max eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, &FreeHandleDataWrapper, (void *)&sHandleData); - PVR_GOTO_IF_ERROR(eError, ExitUnlock); + + /* On retry error return without destroying handle base. Caller may retry. */ + if (PVRSRVIsRetryError(eError)) + { + PVR_GOTO_IF_ERROR(eError, ExitUnlock); + } + + /* Retry is not possible. Continue freeing remaining handles. */ + if (eError != PVRSRV_OK) + { + ui32ErrorCount++; + } } + if (ui32ErrorCount > 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error freeing %d handles.", __func__, ui32ErrorCount)); + } if (psBase->psHashTab != NULL) { @@ -2477,3 +2516,7 @@ PVRSRV_ERROR PVRSRVHandleDeInit(void) return eError; } + +/****************************************************************************** + End of file (handle.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/handle_generic.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/handle_generic.c deleted file mode 100644 index a587ed477c42..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/handle_generic.c +++ /dev/null @@ -1,1009 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title Resource Handle Manager - Generic Back-end -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Provide generic resource handle management back-end -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /***************************************************************************/ - -#include "handle_impl.h" -#include "allocmem.h" -#include "osfunc.h" -#include "pvr_debug.h" - - -/* Valid handles are never NULL. Therefore, this value should never be 0! 
*/ -#define HANDLE_OFFSET_FROM_INDEX 1 - -#if defined(DEBUG) -#define HANDLE_BLOCK_SHIFT 2 -#else -#define HANDLE_BLOCK_SHIFT 8 -#endif - -#define DIVIDE_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) >> HANDLE_BLOCK_SHIFT) -#define MULTIPLY_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) << HANDLE_BLOCK_SHIFT) - -#define HANDLE_BLOCK_SIZE MULTIPLY_BY_BLOCK_SIZE(1) -#define HANDLE_SUB_BLOCK_MASK (HANDLE_BLOCK_SIZE - 1) -#define HANDLE_BLOCK_MASK (~(HANDLE_SUB_BLOCK_MASK)) - -#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount) - -#define INDEX_TO_HANDLE(i) ((IMG_HANDLE)((uintptr_t)(i) + HANDLE_OFFSET_FROM_INDEX)) -#define HANDLE_TO_INDEX(h) ((IMG_UINT32)((uintptr_t)(h) - HANDLE_OFFSET_FROM_INDEX)) - -#define INDEX_TO_BLOCK_INDEX(i) DIVIDE_BY_BLOCK_SIZE(i) -#define BLOCK_INDEX_TO_INDEX(i) MULTIPLY_BY_BLOCK_SIZE(i) -#define INDEX_TO_SUB_BLOCK_INDEX(i) ((i) & HANDLE_SUB_BLOCK_MASK) - -#define BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psArray, i) (&((psArray)[INDEX_TO_BLOCK_INDEX(i)])) -#define BASE_AND_INDEX_TO_HANDLE_BLOCK(psBase, i) BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK((psBase)->psHandleBlockArray, i) -#define BASE_TO_TOTAL_INDICES(psBase) (HANDLE_TO_INDEX((psBase)->ui32MaxHandleValue) + 1) - -#define INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, i) (BASE_AND_INDEX_TO_HANDLE_BLOCK(psBase, i)->ui32FreeHandCount) -#define INDEX_TO_HANDLE_DATA(psBase, i) (BASE_AND_INDEX_TO_HANDLE_BLOCK(psBase, i)->psHandleDataArray + INDEX_TO_SUB_BLOCK_INDEX(i)) - -#define ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(a) (HANDLE_BLOCK_MASK & (a)) -#define ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(a) ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE((a) + HANDLE_BLOCK_SIZE - 1) - -#define INDEX_MIN 0x0u -#define INDEX_MAX (ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(0x7fffffffu) - 1) - -#define HANDLE_VALUE_MIN ((IMG_UINT32)(uintptr_t)INDEX_TO_HANDLE(INDEX_MIN)) -#define HANDLE_VALUE_MAX ((IMG_UINT32)(uintptr_t)INDEX_TO_HANDLE(INDEX_MAX)) - -#define HANDLE_BLOCK_ARRAY_SIZE(uiNumHandles) DIVIDE_BY_BLOCK_SIZE(ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(uiNumHandles)) - -#if defined(MIN) -#undef MIN -#endif - -#define MIN(x, y) (((x) < (y)) ? (x) : (y)) - -typedef struct _HANDLE_IMPL_DATA_ -{ - /* Pointer to the data that the handle represents */ - void *pvData; - - /* - * When handles are on the free list, the value of the "next index - * plus one field" has the following meaning: - * zero - next handle is the one that follows this one, - * nonzero - the index of the next handle is the value minus one. - * This scheme means handle space can be initialised to all zeros. - * - * When this field is used to link together handles on a list - * other than the free list, zero indicates the end of the - * list, with nonzero the same as above. - */ - IMG_UINT32 ui32NextIndexPlusOne; -} HANDLE_IMPL_DATA; - -typedef struct _HANDLE_BLOCK_ -{ - /* Pointer to an array of handle data structures */ - HANDLE_IMPL_DATA *psHandleDataArray; - - /* Number of free handle data structures in block */ - IMG_UINT32 ui32FreeHandCount; -} HANDLE_BLOCK; - -struct _HANDLE_IMPL_BASE_ -{ - /* Pointer to array of handle block structures */ - HANDLE_BLOCK *psHandleBlockArray; - - /* Maximum handle value */ - IMG_UINT32 ui32MaxHandleValue; - - /* Total number of handles (this may include allocated but unused handles) */ - IMG_UINT32 ui32TotalHandCount; - - /* Number of free handles */ - IMG_UINT32 ui32TotalFreeHandCount; - - /* Purging enabled. - * If purging is enabled, the size of the table can be reduced - * by removing free space at the end of the table. 
To make - * purging more likely to succeed, handles are allocated as - * far to the front of the table as possible. The first free - * handle is found by a linear search from the start of the table, - * and so no free handle list management is done. - */ - IMG_BOOL bPurgingEnabled; - - /* - * If purging is not enabled, this is the array index of first free - * handle. - * If purging is enabled, this is the index to start searching for - * a free handle from. In this case it is usually zero, unless - * the handle array size has been increased due to lack of - * handles. - */ - IMG_UINT32 ui32FirstFreeIndex; - - /* - * Index of the last free index, plus one. Not used if purging - * is enabled. - */ - IMG_UINT32 ui32LastFreeIndexPlusOne; -}; - - -/*! -****************************************************************************** - - @Function ReallocHandleBlockArray - - @Description Reallocate the handle block array - - @Input psBase - Pointer to handle base structure - ui32NewCount - The new total number of handles - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR ReallocHandleBlockArray(HANDLE_IMPL_BASE *psBase, - IMG_UINT32 ui32NewCount) -{ - HANDLE_BLOCK *psOldArray = psBase->psHandleBlockArray; - IMG_UINT32 ui32OldCount = psBase->ui32TotalHandCount; - HANDLE_BLOCK *psNewArray = NULL; - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32Index; - - if (ui32NewCount == ui32OldCount) - { - return PVRSRV_OK; - } - - if (ui32NewCount != 0 && - !psBase->bPurgingEnabled && - ui32NewCount < ui32OldCount) - { - return PVRSRV_ERROR_INVALID_PARAMS; - } - - if (((ui32OldCount % HANDLE_BLOCK_SIZE) != 0) || - ((ui32NewCount % HANDLE_BLOCK_SIZE) != 0)) - { - PVR_ASSERT((ui32OldCount % HANDLE_BLOCK_SIZE) == 0); - PVR_ASSERT((ui32NewCount % HANDLE_BLOCK_SIZE) == 0); - - return PVRSRV_ERROR_INVALID_PARAMS; - } - - if (ui32NewCount != 0) - { - /* Allocate new handle array */ - psNewArray = OSAllocMem(HANDLE_BLOCK_ARRAY_SIZE(ui32NewCount) * sizeof(HANDLE_BLOCK)); - PVR_LOG_GOTO_IF_NOMEM(psNewArray, eError, error); - - if (ui32OldCount != 0) - { - OSCachedMemCopy(psNewArray, psOldArray, HANDLE_BLOCK_ARRAY_SIZE(MIN(ui32NewCount, ui32OldCount)) * sizeof(HANDLE_BLOCK)); - } - } - - /* - * If the new handle array is smaller than the old one, free - * unused handle data structure arrays - */ - for (ui32Index = ui32NewCount; ui32Index < ui32OldCount; ui32Index += HANDLE_BLOCK_SIZE) - { - HANDLE_BLOCK *psHandleBlock = BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psOldArray, ui32Index); - - OSFreeMem(psHandleBlock->psHandleDataArray); - } - - /* - * If the new handle array is bigger than the old one, allocate - * new handle data structure arrays - */ - for (ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE) - { - /* PRQA S 0505 1 */ /* psNewArray is never NULL, see assert earlier */ - HANDLE_BLOCK *psHandleBlock = BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psNewArray, ui32Index); - - psHandleBlock->psHandleDataArray = OSAllocZMem(sizeof(HANDLE_IMPL_DATA) * HANDLE_BLOCK_SIZE); - if (psHandleBlock->psHandleDataArray != NULL) - { - psHandleBlock->ui32FreeHandCount = HANDLE_BLOCK_SIZE; - } - else - { - PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate handle structures", - __func__)); - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - } - } - - PVR_GOTO_IF_ERROR(eError, error); - -#if defined(DEBUG_MAX_HANDLE_COUNT) - /* Force handle failure to test error exit code */ - if (ui32NewCount > DEBUG_MAX_HANDLE_COUNT) - { - 
PVR_DPF((PVR_DBG_ERROR, "%s: Max handle count (%u) reached", - __func__, DEBUG_MAX_HANDLE_COUNT)); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, error); - } -#endif /* defined(DEBUG_MAX_HANDLE_COUNT) */ - - if (psOldArray != NULL) - { - /* Free old handle array */ - OSFreeMem(psOldArray); - } - - psBase->psHandleBlockArray = psNewArray; - psBase->ui32TotalHandCount = ui32NewCount; - - if (ui32NewCount > ui32OldCount) - { - /* Check for wraparound */ - PVR_ASSERT(psBase->ui32TotalFreeHandCount + (ui32NewCount - ui32OldCount) > psBase->ui32TotalFreeHandCount); - - /* PRQA S 3382 1 */ /* ui32NewCount always > ui32OldCount */ - psBase->ui32TotalFreeHandCount += (ui32NewCount - ui32OldCount); - - /* - * If purging is enabled, there is no free handle list - * management, but as an optimisation, when allocating - * new handles, we use ui32FirstFreeIndex to point to - * the first handle in a newly allocated block. - */ - if (psBase->ui32FirstFreeIndex == 0) - { - PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0); - - psBase->ui32FirstFreeIndex = ui32OldCount; - } - else - { - if (!psBase->bPurgingEnabled) - { - PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0); - PVR_ASSERT(INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0); - - INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32OldCount + 1; - } - } - - if (!psBase->bPurgingEnabled) - { - psBase->ui32LastFreeIndexPlusOne = ui32NewCount; - } - } - else - { - if (ui32NewCount == 0) - { - psBase->ui32TotalFreeHandCount = 0; - psBase->ui32FirstFreeIndex = 0; - psBase->ui32LastFreeIndexPlusOne = 0; - } - else - { - PVR_ASSERT(psBase->bPurgingEnabled); - PVR_ASSERT(psBase->ui32FirstFreeIndex <= ui32NewCount); - PVR_ASSERT(psBase->ui32TotalFreeHandCount - (ui32OldCount - ui32NewCount) < psBase->ui32TotalFreeHandCount); - - /* PRQA S 3382 1 */ /* ui32OldCount always >= ui32NewCount */ - psBase->ui32TotalFreeHandCount -= (ui32OldCount - ui32NewCount); - } - } - - PVR_ASSERT(psBase->ui32FirstFreeIndex <= psBase->ui32TotalHandCount); - - return PVRSRV_OK; - -error: - PVR_ASSERT(eError != PVRSRV_OK); - - if (psNewArray != NULL) - { - /* Free any new handle structures that were allocated */ - for (ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE) - { - HANDLE_BLOCK *psHandleBlock = BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psNewArray, ui32Index); - if (psHandleBlock->psHandleDataArray != NULL) - { - OSFreeMem(psHandleBlock->psHandleDataArray); - } - } - - /* Free new handle array */ - OSFreeMem(psNewArray); - } - - return eError; -} - -/*! 
-****************************************************************************** - - @Function IncreaseHandleArraySize - - @Description Allocate some more free handles - - @Input psBase - pointer to handle base structure - ui32Delta - number of new handles required - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR IncreaseHandleArraySize(HANDLE_IMPL_BASE *psBase, - IMG_UINT32 ui32Delta) -{ - IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(ui32Delta); - IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted; - IMG_UINT32 ui32TotalIndices = BASE_TO_TOTAL_INDICES(psBase); - - PVR_ASSERT(ui32Delta != 0); - - /* Check new count against max handle array size and check for wrap around */ - if (ui32NewTotalHandCount > ui32TotalIndices || ui32NewTotalHandCount <= psBase->ui32TotalHandCount) - { - ui32NewTotalHandCount = ui32TotalIndices; - - ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount; - - if (ui32DeltaAdjusted < ui32Delta) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Maximum handle limit reached (%u)", - __func__, psBase->ui32MaxHandleValue)); - return PVRSRV_ERROR_OUT_OF_MEMORY; - } - } - - PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta); - - /* Realloc handle pointer array */ - return ReallocHandleBlockArray(psBase, ui32NewTotalHandCount); -} - -/*! -****************************************************************************** - - @Function EnsureFreeHandles - - @Description Ensure there are enough free handles - - @Input psBase - Pointer to handle base structure - ui32Free - Number of free handles required - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR EnsureFreeHandles(HANDLE_IMPL_BASE *psBase, - IMG_UINT32 ui32Free) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - - if (ui32Free > psBase->ui32TotalFreeHandCount) - { - IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32TotalFreeHandCount; - - eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Couldn't create %u handles to ensure %u free handles (%s)", - __func__, ui32FreeHandDelta, ui32Free, - PVRSRVGetErrorString(eError))); - } - } - - return eError; -} - -/*! 
-****************************************************************************** - - @Function AcquireHandle - - @Description Acquire a new handle - - @Input psBase - Pointer to handle base structure - phHandle - Points to a handle pointer - pvData - Pointer to resource to be associated with the handle - - @Output phHandle - Points to a handle pointer - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, - IMG_HANDLE *phHandle, - void *pvData) -{ - IMG_UINT32 ui32NewIndex = BASE_TO_TOTAL_INDICES(psBase); - HANDLE_IMPL_DATA *psNewHandleData = NULL; - PVRSRV_ERROR eError; - - PVR_ASSERT(psBase != NULL); - PVR_ASSERT(phHandle != NULL); - PVR_ASSERT(pvData != NULL); - - /* Ensure there is a free handle */ - eError = EnsureFreeHandles(psBase, 1); - if (eError != PVRSRV_OK) - { - PVR_LOG_ERROR(eError, "EnsureFreeHandles"); - return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; - } - PVR_ASSERT(psBase->ui32TotalFreeHandCount != 0); - - if (!psBase->bPurgingEnabled) - { - /* Array index of first free handle */ - ui32NewIndex = psBase->ui32FirstFreeIndex; - - /* Get handle array entry */ - psNewHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32NewIndex); - } - else - { - IMG_UINT32 ui32BlockedIndex; - - /* - * If purging is enabled, we always try to allocate handles - * at the front of the array, to increase the chances that - * the size of the handle array can be reduced by a purge. - * No linked list of free handles is kept; we search for - * free handles as required. - */ - - /* - * ui32FirstFreeIndex should only be set when a new batch of - * handle structures is allocated, and should always be a - * multiple of the block size. - */ - PVR_ASSERT((psBase->ui32FirstFreeIndex % HANDLE_BLOCK_SIZE) == 0); - - for (ui32BlockedIndex = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(psBase->ui32FirstFreeIndex); ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE) - { - HANDLE_BLOCK *psHandleBlock = BASE_AND_INDEX_TO_HANDLE_BLOCK(psBase, ui32BlockedIndex); - - if (psHandleBlock->ui32FreeHandCount == 0) - { - continue; - } - - for (ui32NewIndex = ui32BlockedIndex; ui32NewIndex < ui32BlockedIndex + HANDLE_BLOCK_SIZE; ui32NewIndex++) - { - psNewHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32NewIndex); - if (psNewHandleData->pvData == NULL) - { - break; - } - } - } - psBase->ui32FirstFreeIndex = 0; - PVR_ASSERT(INDEX_IS_VALID(psBase, ui32NewIndex)); - } - PVR_ASSERT(psNewHandleData != NULL); - - psBase->ui32TotalFreeHandCount--; - - PVR_ASSERT(INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32NewIndex) <= HANDLE_BLOCK_SIZE); - PVR_ASSERT(INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32NewIndex) > 0); - - INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32NewIndex)--; - - /* No free list management if purging is enabled */ - if (!psBase->bPurgingEnabled) - { - /* Check whether the last free handle has been allocated */ - if (psBase->ui32TotalFreeHandCount == 0) - { - PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex); - PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1)); - - psBase->ui32LastFreeIndexPlusOne = 0; - psBase->ui32FirstFreeIndex = 0; - } - else - { - /* - * Update the first free handle index. - * If the "next free index plus one" field in the new - * handle structure is zero, the next free index is - * the index of the new handle plus one. This - * convention has been adopted to simplify the - * initialisation of freshly allocated handle - * space. 
- */ - if (psNewHandleData->ui32NextIndexPlusOne == 0) - { - psBase->ui32FirstFreeIndex = ui32NewIndex + 1; - } - else - { - psBase->ui32FirstFreeIndex = psNewHandleData->ui32NextIndexPlusOne - 1; - } - } - } - - psNewHandleData->pvData = pvData; - psNewHandleData->ui32NextIndexPlusOne = 0; - - /* Return the new handle to the client */ - *phHandle = INDEX_TO_HANDLE(ui32NewIndex); - - PVR_DPF((PVR_DBG_MESSAGE, "Handle acquire base %p hdl %p", psBase, *phHandle)); - - return PVRSRV_OK; -} - -/*! -****************************************************************************** - - @Function ReleaseHandle - - @Description Release a handle that is no longer needed. - - @Input psBase - Pointer to handle base structure - hHandle - Handle to release - ppvData - Points to a void data pointer - - @Output ppvData - Points to a void data pointer - - @Return PVRSRV_OK or PVRSRV_ERROR - -******************************************************************************/ -static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, - IMG_HANDLE hHandle, - void **ppvData) -{ - IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle); - HANDLE_IMPL_DATA *psHandleData; - IMG_UINT32 ui32ValidatedHandleIndex = OSConfineArrayIndexNoSpeculation(ui32Index, psBase->ui32TotalHandCount); - void *pvData; - - PVR_ASSERT(psBase); - - /* Check handle index is in range */ - if (unlikely(!INDEX_IS_VALID(psBase, ui32Index))) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Handle index out of range (%u >= %u)", - __func__, ui32Index, psBase->ui32TotalHandCount)); - return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; - } - - psHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32ValidatedHandleIndex); - - pvData = psHandleData->pvData; - psHandleData->pvData = NULL; - - /* No free list management if purging is enabled */ - if (!psBase->bPurgingEnabled) - { - if (psBase->ui32TotalFreeHandCount == 0) - { - PVR_ASSERT(psBase->ui32FirstFreeIndex == 0); - PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0); - - psBase->ui32FirstFreeIndex = ui32ValidatedHandleIndex; - } - else - { - /* - * Put the handle pointer on the end of the free - * handle pointer linked list. - */ - PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0); - PVR_ASSERT(INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0); - INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32ValidatedHandleIndex + 1; - } - - PVR_ASSERT(psHandleData->ui32NextIndexPlusOne == 0); - - /* Update the end of the free handle linked list */ - psBase->ui32LastFreeIndexPlusOne = ui32ValidatedHandleIndex + 1; - } - - psBase->ui32TotalFreeHandCount++; - INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32ValidatedHandleIndex)++; - - PVR_ASSERT(INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32ValidatedHandleIndex)<= HANDLE_BLOCK_SIZE); - -#if defined(DEBUG) - { - IMG_UINT32 ui32BlockedIndex; - IMG_UINT32 ui32TotalFreeHandCount = 0; - - for (ui32BlockedIndex = 0; ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE) - { - ui32TotalFreeHandCount += INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32BlockedIndex); - } - - PVR_ASSERT(ui32TotalFreeHandCount == psBase->ui32TotalFreeHandCount); - } -#endif /* defined(DEBUG) */ - - if (ppvData) - { - *ppvData = pvData; - } - - PVR_DPF((PVR_DBG_MESSAGE, "Handle release base %p hdl %p", psBase, hHandle)); - - return PVRSRV_OK; -} - -/*! 
-****************************************************************************** - - @Function GetHandleData - - @Description Get the data associated with the given handle - - @Input psBase - Pointer to handle base structure - hHandle - Handle from which data should be retrieved - ppvData - Points to a void data pointer - - @Output ppvData - Points to a void data pointer - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, - IMG_HANDLE hHandle, - void **ppvData) -{ - IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle); - HANDLE_IMPL_DATA *psHandleData; - IMG_UINT32 ui32ValidatedHandleIndex = OSConfineArrayIndexNoSpeculation(ui32Index, psBase->ui32TotalHandCount); - PVR_ASSERT(psBase); - PVR_ASSERT(ppvData); - - /* Check handle index is in range */ - if (unlikely(!INDEX_IS_VALID(psBase, ui32Index))) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Handle index out of range (%u >= %u)", - __func__, ui32Index, psBase->ui32TotalHandCount)); - OSDumpStack(); - return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; - } - - psHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32ValidatedHandleIndex); - if (unlikely(psHandleData == NULL || psHandleData->pvData == NULL)) - { - return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; - } - - *ppvData = psHandleData->pvData; - - return PVRSRV_OK; -} - -/*! -****************************************************************************** - - @Function SetHandleData - - @Description Set the data associated with the given handle - - @Input psBase - Pointer to handle base structure - hHandle - Handle for which data should be changed - pvData - Pointer to new data to be associated with the handle - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, - IMG_HANDLE hHandle, - void *pvData) -{ - IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle); - HANDLE_IMPL_DATA *psHandleData; - IMG_UINT32 ui32ValidatedHandleIndex = OSConfineArrayIndexNoSpeculation(ui32Index, psBase->ui32TotalHandCount); - - PVR_ASSERT(psBase); - - /* Check handle index is in range */ - if (!INDEX_IS_VALID(psBase, ui32Index)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Handle index out of range (%u >= %u)", - __func__, ui32Index, psBase->ui32TotalHandCount)); - OSDumpStack(); - return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; - } - - psHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32ValidatedHandleIndex); - if (psHandleData == NULL || psHandleData->pvData == NULL) - { - return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; - } - - psHandleData->pvData = pvData; - - return PVRSRV_OK; -} - -static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, - PFN_HANDLE_ITER pfnHandleIter, - void *pvHandleIterData) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 i; - - PVR_ASSERT(psBase); - PVR_ASSERT(pfnHandleIter); - - if (psBase->ui32TotalFreeHandCount == psBase->ui32TotalHandCount) - { - return PVRSRV_OK; - } - - for (i = 0; i < psBase->ui32TotalHandCount; i++) - { - HANDLE_IMPL_DATA *psHandleData = INDEX_TO_HANDLE_DATA(psBase, i); - - if (psHandleData->pvData != NULL) - { - eError = pfnHandleIter(INDEX_TO_HANDLE(i), pvHandleIterData); - if (eError != PVRSRV_OK) - { - break; - } - - if (psBase->ui32TotalFreeHandCount == psBase->ui32TotalHandCount) - { - break; - } - } - } - - return eError; -} - -/*! 
-****************************************************************************** - - @Function EnableHandlePurging - - @Description Enable purging for a given handle base - - @Input psBase - pointer to handle base structure - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase) -{ - PVR_ASSERT(psBase); - - if (psBase->bPurgingEnabled) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Purging already enabled", __func__)); - return PVRSRV_OK; - } - - /* Purging can only be enabled if no handles have been allocated */ - if (psBase->ui32TotalHandCount != 0) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Handles have already been allocated", - __func__)); - return PVRSRV_ERROR_INVALID_PARAMS; - } - - psBase->bPurgingEnabled = IMG_TRUE; - - return PVRSRV_OK; -} - -/*! -****************************************************************************** - - @Function PurgeHandles - - @Description Purge handles for a given handle base - - @Input psBase - Pointer to handle base structure - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase) -{ - IMG_UINT32 ui32BlockIndex; - IMG_UINT32 ui32NewHandCount; - - PVR_ASSERT(psBase); - - if (!psBase->bPurgingEnabled) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Purging not enabled for this handle base", - __func__)); - return PVRSRV_ERROR_NOT_SUPPORTED; - } - - PVR_ASSERT((psBase->ui32TotalHandCount % HANDLE_BLOCK_SIZE) == 0); - - for (ui32BlockIndex = INDEX_TO_BLOCK_INDEX(psBase->ui32TotalHandCount); ui32BlockIndex != 0; ui32BlockIndex--) - { - if (psBase->psHandleBlockArray[ui32BlockIndex - 1].ui32FreeHandCount != HANDLE_BLOCK_SIZE) - { - break; - } - } - ui32NewHandCount = BLOCK_INDEX_TO_INDEX(ui32BlockIndex); - - /* Check for a suitable decrease in the handle count */ - if (ui32NewHandCount <= (psBase->ui32TotalHandCount / 2)) - { - return ReallocHandleBlockArray(psBase, ui32NewHandCount); - } - - return PVRSRV_OK; -} - -/*! -****************************************************************************** - - @Function CreateHandleBase - - @Description Create a handle base structure - - @Input ppsBase - pointer to handle base structure pointer - - @Output ppsBase - points to handle base structure pointer - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase) -{ - HANDLE_IMPL_BASE *psBase; - - PVR_ASSERT(ppsBase); - - psBase = OSAllocZMem(sizeof(*psBase)); - PVR_LOG_RETURN_IF_NOMEM(psBase, "psBase"); - - psBase->psHandleBlockArray = NULL; - psBase->ui32MaxHandleValue = HANDLE_VALUE_MAX; - psBase->bPurgingEnabled = IMG_FALSE; - - *ppsBase = psBase; - - return PVRSRV_OK; -} - -/*! 
-****************************************************************************** - - @Function DestroyHandleBase - - @Description Destroy a handle base structure - - @Input psBase - pointer to handle base structure - - @Return Error code or PVRSRV_OK - -******************************************************************************/ -static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) -{ - PVRSRV_ERROR eError; - - PVR_ASSERT(psBase); - - if (psBase->ui32TotalHandCount != psBase->ui32TotalFreeHandCount) - { -#if defined(DEBUG_HANDLEALLOC_INFO_KM) - { - IMG_UINT32 i; - - for (i = 0; i < psBase->ui32TotalHandCount; i++) - { - HANDLE_IMPL_DATA *psHandleData = INDEX_TO_HANDLE_DATA(psBase, i); - - if (psHandleData->pvData != NULL) - { - PVR_DPF((PVR_DBG_WARNING, "%d: handle[%p] data[%p] still allocated", - i, INDEX_TO_HANDLE(i), psHandleData->pvData)); - - } - } - } -#endif /* DEBUG_HANDLEALLOC_INFO_KM */ - - } - - eError = ReallocHandleBlockArray(psBase, 0); - PVR_LOG_RETURN_IF_ERROR(eError, "ReallocHandleBlockArray"); - - OSFreeMem(psBase); - - return PVRSRV_OK; -} - - -static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = -{ - /* pfnAcquireHandle */ - &AcquireHandle, - - /* pfnReleaseHandle */ - &ReleaseHandle, - - /* pfnGetHandleData */ - &GetHandleData, - - /* pfnSetHandleData */ - &SetHandleData, - - /* pfnIterateOverHandles */ - &IterateOverHandles, - - /* pfnEnableHandlePurging */ - &EnableHandlePurging, - - /* pfnPurgeHandles */ - &PurgeHandles, - - /* pfnCreateHandleBase */ - &CreateHandleBase, - - /* pfnDestroyHandleBase */ - &DestroyHandleBase -}; - -PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs) -{ - static IMG_BOOL bAcquired = IMG_FALSE; - - if (bAcquired) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", - __func__)); - return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; - } - - PVR_RETURN_IF_INVALID_PARAM(ppsFuncs); - - *ppsFuncs = &g_sHandleFuncTab; - - bAcquired = IMG_TRUE; - - return PVRSRV_OK; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.c index 4a41031fd371..b16592ae5ebb 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.c @@ -42,7 +42,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #include "rgxdevice.h" #include "htbserver.h" -#include "htbuffer.h" #include "htbuffer_types.h" #include "tlstream.h" #include "tlclient.h" @@ -170,7 +169,12 @@ static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel) if (pNext >= pLast) { eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); - PVR_ASSERT(eError == PVRSRV_OK); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s FAILED '%s'", __func__, + "TLClientReleaseData", PVRSRVGETERRORSTRING(eError))); + return NULL; + } eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); @@ -648,10 +652,11 @@ typedef struct _HTB_TRACEBUF_LOG_ { IMG_CHAR *pszName; IMG_CHAR *pszFmt; IMG_UINT32 ui32ArgNum; + IMG_UINT32 ui32StrArgIdx; /* This is 1 indexed, if 0, there is not a string arg. 
*/ } HTB_TRACEBUF_LOG; static const HTB_TRACEBUF_LOG aLogs[] = { -#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e}, +#define X(a, b, c, d, e, f) {HTB_LOG_CREATESFID(a,b,e,f), #c, d, e, f}, HTB_LOG_SFIDLIST #undef X }; @@ -821,9 +826,9 @@ DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, if (ppHdr == NULL) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Unexpected NULL packet in Host Trace buffer", __func__)); - return -1; + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected NULL packet in Host Trace buffer", __func__)); + return -1; } uiHdrType = GET_PACKET_TYPE(ppHdr); @@ -958,13 +963,21 @@ DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, { if (pszFmt) { - nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); - if (nPrinted >= uBufBytesAvailable) + const ssize_t iCopiedCnt = + OSStringSafeCopy(pszBuffer, pszFmt, uBufBytesAvailable); + if (iCopiedCnt < 0) { - PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," - " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, - uBufBytesAvailable); - nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + PVR_DUMPDEBUG_LOG("Buffer overrun - %zu required," + " max space %zu\n", + OSStringLength(pszFmt), + uBufBytesAvailable); + + /* Ensure we don't overflow buffer */ + nPrinted = uBufBytesAvailable; + } + else + { + nPrinted = iCopiedCnt; } PVR_DUMPDEBUG_LOG("%s", pszBuffer); pszBuffer += nPrinted; @@ -1039,8 +1052,20 @@ DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, break; case TRACEBUF_ARG_TYPE_NONE: - nPrinted = OSStringLCopy(pszBuffer, pszFmt, - uBufBytesAvailable); + { + const ssize_t iCopiedCnt = + OSStringSafeCopy(pszBuffer, + pszFmt, + uBufBytesAvailable); + if (iCopiedCnt < 0) + { + nPrinted = OSStringLength(pszFmt); + } + else + { + nPrinted = iCopiedCnt; + } + } break; default: @@ -1063,13 +1088,21 @@ DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, /* Display any remaining text in pszFmt string */ if (pszFmt) { - nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); - if (nPrinted >= uBufBytesAvailable) + const ssize_t iCopiedCnt = + OSStringSafeCopy(pszBuffer, pszFmt, uBufBytesAvailable); + if (iCopiedCnt < 0) { - PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," - " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, - uBufBytesAvailable); - nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + PVR_DUMPDEBUG_LOG("Buffer overrun - %zu required," + " max space %zu\n", + OSStringLength(pszFmt), + uBufBytesAvailable); + + /* Ensure we don't overflow buffer */ + nPrinted = uBufBytesAvailable; + } + else + { + nPrinted = iCopiedCnt; } PVR_DUMPDEBUG_LOG("%s", pszBuffer); pszBuffer += nPrinted; @@ -1151,7 +1184,7 @@ static int HTBDumpBuffer(DI_PRINTF pfnPrintf, OSDI_IMPL_ENTRY *psEntry, @Returns eError internal error code, PVRSRV_OK on success */ /*************************************************************************/ -PVRSRV_ERROR HTB_CreateDIEntry(void) +PVRSRV_ERROR HTB_CreateDIEntry_Impl(void) { PVRSRV_ERROR eError; @@ -1178,7 +1211,7 @@ PVRSRV_ERROR HTB_CreateDIEntry(void) @Description Destroy the debugFS entry-point created by earlier HTB_CreateDIEntry() call. 
*/ /**************************************************************************/ -void HTB_DestroyDIEntry(void) +void HTB_DestroyDIEntry_Impl(void) { if (g_sHTBData.psDumpHostDiEntry != NULL) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.h b/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.h index 04132e13c302..cf18bbb2ac5d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/htb_debug.h @@ -51,7 +51,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. @Returns eError internal error code, PVRSRV_OK on success */ /**************************************************************************/ -PVRSRV_ERROR HTB_CreateDIEntry(void); +PVRSRV_ERROR HTB_CreateDIEntry_Impl(void); /**************************************************************************/ /*! @Function HTB_DestroyFSEntry @@ -59,6 +59,14 @@ PVRSRV_ERROR HTB_CreateDIEntry(void); @Description Destroy the debugFS entry-point created by earlier HTB_CreateDIEntry() call. */ /**************************************************************************/ -void HTB_DestroyDIEntry(void); +void HTB_DestroyDIEntry_Impl(void); + +#if defined(PVRSRV_ENABLE_HTB) +#define HTB_CreateDIEntry() HTB_CreateDIEntry_Impl() +#define HTB_DestroyDIEntry() HTB_DestroyDIEntry_Impl() +#else /* !PVRSRV_ENABLE_HTB */ +#define HTB_CreateDIEntry() PVRSRV_OK +#define HTB_DestroyDIEntry() +#endif /* PVRSRV_ENABLE_HTB */ #endif /* HTB_DEBUG_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/htbserver.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/htbserver.c index 2ada5ab98f10..9ed3283f52b2 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/htbserver.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/htbserver.c @@ -46,7 +46,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #include "htbserver.h" -#include "htbuffer.h" #include "htbuffer_types.h" #include "tlstream.h" #include "pvrsrv_tlcommon.h" @@ -58,7 +57,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_notifier.h" #include "pvrsrv.h" #include "pvrsrv_apphint.h" -#include "oskm_apphint.h" +#include "os_apphint.h" /* size of circular buffer controlling the maximum number of concurrent PIDs logged */ #define HTB_MAX_NUM_PID 8 @@ -66,6 +65,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* number of times to try rewriting a log entry */ #define HTB_LOG_RETRY_COUNT 5 +#if defined(__linux__) + #include <linux/version.h> + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + #include <linux/stdarg.h> + #else + #include <stdarg.h> + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ +#else + #include <stdarg.h> +#endif /* __linux__ */ + /*************************************************************************/ /*! Host Trace Buffer control information structure */ /**************************************************************************/ @@ -159,7 +169,6 @@ static IMG_HANDLE g_hTLStream; static IMG_HANDLE hHtbDbgReqNotify; - /************************************************************************/ /*!
@Function _LookupFlags @Description Convert HTBuffer Operation mode to TLStream flags @@ -251,7 +260,7 @@ static void _OnTLReaderOpenCallback(void *); number */ /**************************************************************************/ PVRSRV_ERROR -HTBInit(void) +HTBInit_Impl(void) { void *pvAppHintState = NULL; IMG_UINT32 ui32AppHintDefault; @@ -289,11 +298,11 @@ HTBInit(void) /* * Now get whatever values have been configured for our AppHints */ - OSCreateKMAppHintState(&pvAppHintState); + OSCreateAppHintState(&pvAppHintState); ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, &ui32AppHintDefault, &g_ui32HTBufferSize); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); ui32BufBytes = g_ui32HTBufferSize * 1024; @@ -340,7 +349,7 @@ HTBInit(void) number */ /**************************************************************************/ PVRSRV_ERROR -HTBDeInit( void ) +HTBDeInit_Impl( void ) { if (!g_sCtrl.bInitDone) return PVRSRV_OK; @@ -419,6 +428,273 @@ PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_OK; } +#if defined(PVRSRV_ENABLE_HTB) +static IMG_BOOL +_ValidPID( IMG_UINT32 PID ) +{ + IMG_UINT32 i; + for (i = 0; i < g_sCtrl.ui32PIDCount; i++) + { + if ( g_sCtrl.aui32EnablePID[i] == PID ) + { + return IMG_TRUE; + } + } + return IMG_FALSE; +} +#endif /* PVRSRV_ENABLE_HTB */ + +/*************************************************************************/ /*! + @Function HTBLogKM + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input ui64TimeStamp The timestamp to be associated with this log event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. 
+ +*/ /**************************************************************************/ +static PVRSRV_ERROR +HTBLogKM(IMG_UINT32 PID, + IMG_UINT32 TID, + IMG_UINT64 ui64TimeStamp, + HTB_LOG_SFids SF, + va_list args +) +{ +#if defined(PVRSRV_ENABLE_HTB) + + OS_SPINLOCK_FLAGS uiSpinLockFlags; + IMG_UINT32 ui32ReturnFlags = 0; + IMG_UINT32 ui32CurrentArg = 0; + + /* Local snapshot variables of global counters */ + IMG_UINT64 ui64OSTSSnap; + IMG_UINT64 ui64CRTSSnap; + IMG_UINT32 ui32ClkSpeedSnap; + + /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* + * Buffer is on the stack so we don't need a semaphore to guard it + */ + IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS]; + IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1] = {0}; + + /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/ + * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes, + * hence with these constraints this design is unlikely to get + * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error + */ + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED; + IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; + IMG_UINT32 * pui32Message = aui32MessageBuffer; + IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF); + IMG_UINT32 ui32StrArg = HTB_SF_STRNUM(SF); + IMG_UINT32 ui32CurrentStringArg; + + IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs); + + if (ui32StrArg > 0) + { + ui32MessageSize += HTB_LOG_STR_ARG_SIZE - sizeof(IMG_UINT32); + } + + PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS); + ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ? + HTB_LOG_MAX_PARAMS : ui32NumArgs; + + PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError); + PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError); + + /* Needs to be set up here because it's accessed from both `if` blocks below + * and it needs to be pre-populated for both of them (pui32Message case and + * HTB_SF_CTRL_FWSYNC_MARK_SCALE case). */ + + while (ui32CurrentArg < ui32NumArgs) + { + if (ui32StrArg != 0 && ui32CurrentArg == ui32StrArg - 1) + { + IMG_CHAR* strArg; + strArg = va_arg(args, IMG_CHAR*); + + /* if a string is present, it will need more than one UINT32 words. */ + ui32NumArgs += HTB_LOG_STR_ARG_NUM_WORDS - 1; + + /* Ignore if filename is not supported. */ + if (strcmp(strArg, "n/a") == 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "N/A\n")); + return PVRSRV_OK; + } + + /* looping through the created string to get the substrings to encode. 
*/ + for (ui32CurrentStringArg = 0; ui32CurrentStringArg < HTB_LOG_STR_ARG_NUM_WORDS; ui32CurrentStringArg++) + { + IMG_UINT32 encodedString = 0; + + if (*strArg == '\0') + { + aui32Args[ui32CurrentArg] = encodedString; + ui32CurrentArg++; + } + else + { + IMG_UINT32 currentSubstring; + + for (currentSubstring = 0; currentSubstring < sizeof(IMG_UINT32); currentSubstring++) + { + IMG_UINT32 bitPos = currentSubstring * 8; + + if (*strArg == '\0') + { + break; + } + else + { + encodedString |= (IMG_UINT32) *strArg << bitPos; + strArg++; + } + } + + aui32Args[ui32CurrentArg] = encodedString; + ui32CurrentArg++; + } + } + } + else + { + aui32Args[ui32CurrentArg] = va_arg(args, IMG_UINT32); + ui32CurrentArg++; + } + } + + if ( g_hTLStream + && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) ) +/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */ +/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */ + ) + { + *pui32Message++ = SF; + *pui32Message++ = PID; + *pui32Message++ = TID; + *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff)); + *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff)); + for (ui32CurrentArg = 0; ui32CurrentArg < ui32NumArgs; ui32CurrentArg++) + { + pui32Message[ui32CurrentArg] = aui32Args[ui32CurrentArg]; + } + + eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); + + while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) + { + OSReleaseThreadQuanta(); + eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); + } + + if ( PVRSRV_OK == eError ) + { + g_sCtrl.bLogDropSignalled = IMG_FALSE; + } + else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled ) + { + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__)); + } + if ( PVRSRV_ERROR_STREAM_FULL == eError ) + { + g_sCtrl.bLogDropSignalled = IMG_TRUE; + } + + } + + if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE) + { + OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + + /* If a marker is being placed reset byte count from last marker */ + g_sCtrl.ui32ByteCount = 0; + g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2]; + g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2]; + g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD]; + + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + } + else + { + OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + /* Increase global count */ + g_sCtrl.ui32ByteCount += ui32MessageSize; + + /* Check if packet has overwritten last marker/rpt && + If the packet count is over half the size of the buffer */ + if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED && + g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize)) + { + /* Take snapshot of global variables */ + ui64OSTSSnap = g_sCtrl.ui64OSTS; + ui64CRTSSnap = g_sCtrl.ui64CRTS; + ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed; + /* Reset global variable counter */ + g_sCtrl.ui32ByteCount = 0; + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + + /* Produce a repeat marker */ + HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap); + } + else + { + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + } + } + +ReturnError: + return eError; + +#else + /* HTB support is disabled. 
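/* Illustrative sketch (hypothetical code, not from the driver sources): the
 * string-argument path above packs an IMG_CHAR string into
 * HTB_LOG_STR_ARG_NUM_WORDS 32-bit words, four characters per word with the
 * lowest byte first, zero-padding the remainder. A standalone approximation
 * using plain C types, with NUM_WORDS assumed to be 4:
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_WORDS 4u /* stand-in for HTB_LOG_STR_ARG_NUM_WORDS (value assumed) */

/* Pack a C string into NUM_WORDS 32-bit words, four characters per word,
 * lowest byte first, zero-padding any remaining words. */
static void pack_string(const char *s, uint32_t words[NUM_WORDS])
{
    for (unsigned w = 0; w < NUM_WORDS; w++)
    {
        uint32_t enc = 0;
        for (unsigned b = 0; b < sizeof(uint32_t) && *s != '\0'; b++)
        {
            enc |= (uint32_t)(unsigned char)*s++ << (b * 8u);
        }
        words[w] = enc;
    }
}

int main(void)
{
    uint32_t w[NUM_WORDS];
    unsigned i;

    pack_string("mmu.c", w);
    /* Prints 2e756d6d 00000063 00000000 00000000: "mmu." then "c", padded,
     * mirroring how the loop above fills aui32Args for a string parameter. */
    for (i = 0; i < NUM_WORDS; i++)
        printf("%08x ", w[i]);
    printf("\n");
    return 0;
}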
Just return PVRSRV_OK and do nothing. */ + PVR_UNREFERENCED_PARAMETER(PID); + PVR_UNREFERENCED_PARAMETER(TID); + PVR_UNREFERENCED_PARAMETER(ui64TimeStamp); + PVR_UNREFERENCED_PARAMETER(SF); + PVR_UNREFERENCED_PARAMETER(args); + return PVRSRV_OK; +#endif +} + +/*************************************************************************/ /*! + @Function HTBLog + @Description Record a Host Trace Buffer log event + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events + associated with a particular process, but + performed by another can be logged correctly. + @Input ui64TimeStamp The timestamp to be associated with this + log event + @Input SF The log event ID + @Input ... Log parameters + @Return PVRSRV_OK Success. +*/ /**************************************************************************/ +static PVRSRV_ERROR +HTBLog(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, + IMG_UINT32 SF, ...) +{ + PVRSRV_ERROR eError; + + va_list args; + va_start(args, SF); + eError = HTBLogKM(PID, TID, ui64TimeStamp, SF, args); + va_end(args); + return eError; +} static void _OnTLReaderOpenCallback( void *pvArg ) @@ -427,7 +703,7 @@ _OnTLReaderOpenCallback( void *pvArg ) { IMG_UINT64 ui64Time; OSClockMonotonicns64(&ui64Time); - (void) HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + (void) HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, g_sCtrl.ui32SyncMarker, ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), @@ -460,7 +736,7 @@ _OnTLReaderOpenCallback( void *pvArg ) number */ /**************************************************************************/ PVRSRV_ERROR -HTBControlKM( +HTBControlKM_Impl( const IMG_UINT32 ui32NumFlagGroups, const IMG_UINT32 * aui32GroupEnable, const IMG_UINT32 ui32LogLevel, @@ -482,7 +758,7 @@ HTBControlKM( HTB_STREAM_NAME, g_sCtrl.ui32BufferSize, _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags, - _OnTLReaderOpenCallback, NULL, NULL, NULL); + _OnTLReaderOpenCallback, NULL, NULL, NULL, NULL, NULL); PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); g_bConfigured = IMG_TRUE; } @@ -541,23 +817,23 @@ HTBControlKM( } /* Dump the current configuration state */ - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode); + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode); PVR_LOG_IF_ERROR(eError, "HTBLog"); - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]); + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]); PVR_LOG_IF_ERROR(eError, "HTBLog"); - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel); + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel); PVR_LOG_IF_ERROR(eError, "HTBLog"); - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode); + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode); PVR_LOG_IF_ERROR(eError, "HTBLog"); for (i = 0; i < g_sCtrl.ui32PIDCount; i++) { - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]); + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]); PVR_LOG_IF_ERROR(eError, "HTBLog"); } /* Else should never be hit as we set the spd when the power state is updated */ if (0 != g_sCtrl.ui32SyncMarker && 0 != 
g_sCtrl.ui32SyncCalcClkSpd) { - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, g_sCtrl.ui32SyncMarker, ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), @@ -568,23 +844,6 @@ HTBControlKM( return eError; } -/*************************************************************************/ /*! -*/ /**************************************************************************/ -static IMG_BOOL -_ValidPID( IMG_UINT32 PID ) -{ - IMG_UINT32 i; - - for (i = 0; i < g_sCtrl.ui32PIDCount; i++) - { - if ( g_sCtrl.aui32EnablePID[i] == PID ) - { - return IMG_TRUE; - } - } - return IMG_FALSE; -} - /*************************************************************************/ /*! @Function HTBSyncPartitionMarker @@ -594,7 +853,7 @@ _ValidPID( IMG_UINT32 PID ) */ /**************************************************************************/ void -HTBSyncPartitionMarker( +HTBSyncPartitionMarker_Impl( const IMG_UINT32 ui32Marker ) { @@ -608,7 +867,7 @@ HTBSyncPartitionMarker( /* Else should never be hit as we set the spd when the power state is updated */ if (0 != g_sCtrl.ui32SyncCalcClkSpd) { - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, ui32Marker, ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), @@ -630,7 +889,7 @@ HTBSyncPartitionMarker( */ /**************************************************************************/ void -HTBSyncPartitionMarkerRepeat( +HTBSyncPartitionMarkerRepeat_Impl( const IMG_UINT32 ui32Marker, const IMG_UINT64 ui64SyncOSTS, const IMG_UINT64 ui64SyncCRTS, @@ -646,7 +905,7 @@ HTBSyncPartitionMarkerRepeat( /* Else should never be hit as we set the spd when the power state is updated */ if (0 != ui32ClkSpeed) { - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, ui32Marker, ((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)), ((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)), @@ -672,7 +931,7 @@ HTBSyncPartitionMarkerRepeat( */ /**************************************************************************/ void -HTBSyncScale( +HTBSyncScale_Impl( const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, const IMG_UINT64 ui64CRTS, @@ -687,7 +946,7 @@ HTBSyncScale( PVRSRV_ERROR eError; IMG_UINT64 ui64Time; OSClockMonotonicns64(&ui64Time); - eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, g_sCtrl.ui32SyncMarker, ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)), ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)), @@ -699,143 +958,25 @@ HTBSyncScale( } } - /*************************************************************************/ /*! - @Function HTBLogKM - @Description Record a Host Trace Buffer log event - - @Input PID The PID of the process the event is associated - with. 
This is provided as an argument rather - than querying internally so that events associated - with a particular process, but performed by - another can be logged correctly. - - @Input ui64TimeStamp The timestamp to be associated with this log event - + @Function HTBLogSimple + @Description Record a Host Trace Buffer log event with implicit PID and + Timestamp @Input SF The log event ID - @Input ... Log parameters - @Return PVRSRV_OK Success. - */ /**************************************************************************/ -PVRSRV_ERROR -HTBLogKM( - IMG_UINT32 PID, - IMG_UINT32 TID, - IMG_UINT64 ui64TimeStamp, - HTB_LOG_SFids SF, - IMG_UINT32 ui32NumArgs, - IMG_UINT32 * aui32Args -) +IMG_INTERNAL PVRSRV_ERROR +HTBLogSimple_Impl(IMG_UINT32 SF, ...) { - OS_SPINLOCK_FLAGS uiSpinLockFlags; - IMG_UINT32 ui32ReturnFlags = 0; - - /* Local snapshot variables of global counters */ - IMG_UINT64 ui64OSTSSnap; - IMG_UINT64 ui64CRTSSnap; - IMG_UINT32 ui32ClkSpeedSnap; - - /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* - * Buffer is on the stack so we don't need a semaphore to guard it - */ - IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS]; - - /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/ - * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes, - * hence with these constraints this design is unlikely to get - * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error - */ - PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED; - IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; - IMG_UINT32 * pui32Message = aui32MessageBuffer; - IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs); - - PVR_LOG_GOTO_IF_INVALID_PARAM(aui32Args != NULL, eError, ReturnError); - PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError); - PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError); - - if ( g_hTLStream - && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) ) -/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */ -/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */ - ) - { - *pui32Message++ = SF; - *pui32Message++ = PID; - *pui32Message++ = TID; - *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff)); - *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff)); - while ( ui32NumArgs ) - { - ui32NumArgs--; - pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs]; - } - - eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); - while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) - { - OSReleaseThreadQuanta(); - eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); - } - - if ( PVRSRV_OK == eError ) - { - g_sCtrl.bLogDropSignalled = IMG_FALSE; - } - else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled ) - { - PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__)); - } - if ( PVRSRV_ERROR_STREAM_FULL == eError ) - { - g_sCtrl.bLogDropSignalled = IMG_TRUE; - } - - } - - if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE) - { - OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); - - /* If a marker is being placed reset byte count from last marker */ - g_sCtrl.ui32ByteCount = 0; - g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2]; - g_sCtrl.ui64CRTS = 
(IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2]; - g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD]; - - OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); - } - else - { - OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); - /* Increase global count */ - g_sCtrl.ui32ByteCount += ui32MessageSize; - - /* Check if packet has overwritten last marker/rpt && - If the packet count is over half the size of the buffer */ - if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED && - g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize)) - { - /* Take snapshot of global variables */ - ui64OSTSSnap = g_sCtrl.ui64OSTS; - ui64CRTSSnap = g_sCtrl.ui64CRTS; - ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed; - /* Reset global variable counter */ - g_sCtrl.ui32ByteCount = 0; - OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); - - /* Produce a repeat marker */ - HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap); - } - else - { - OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); - } - } - -ReturnError: + PVRSRV_ERROR eError; + IMG_UINT64 ui64TimeStamp; + va_list args; + va_start(args, SF); + OSClockMonotonicns64(&ui64TimeStamp); + eError = HTBLogKM(OSGetCurrentProcessID(), OSGetCurrentThreadID(), ui64TimeStamp, + SF, args); + va_end(args); return eError; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/info_page_km.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/info_page_km.c index 19345bcad00b..79b03fe7e658 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/info_page_km.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/info_page_km.c @@ -60,7 +60,7 @@ PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData) /* Allocate single page of memory for driver information page */ eError = DevmemAllocateExportable(psData->psHostMemDeviceNode, - OSGetPageSize(), + PVR_ALIGN(INFO_PAGE_TOTAL_SIZE, OSGetPageSize()), OSGetPageSize(), OSGetPageShift(), uiMemFlags, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/mmu_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/mmu_common.c index 22dbea7c6a08..e9df944b8546 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/mmu_common.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/mmu_common.c @@ -49,14 +49,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "osfunc.h" #include "allocmem.h" -#if defined(SUPPORT_RGX) -# include "rgx_memallocflags.h" -# include "rgxmmudefs_km.h" -#endif - #include "pvr_notifier.h" #include "pvrsrv.h" -#include "htbuffer.h" +#include "htbserver.h" #include "pvr_ricommon.h" #if defined(PVRSRV_ENABLE_PROCESS_STATS) @@ -69,6 +64,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdump_physmem.h" #endif +#include "physmem.h" #if defined(SUPPORT_GPUVIRT_VALIDATION) #include "physmem_lma.h" #endif @@ -104,6 +100,15 @@ Let's keep this graph up-to-date: #define MMU_OBJ_DBG(x) #endif +#define SCRATCH_PAGE 1 +#define DEV_ZERO_PAGE 2 +#if defined(PDUMP) +#define SCRATCH_PAGE_STR "SCRATCH_PAGE" +#define DEV_ZERO_PAGE_STR "DEV_ZERO_PAGE" +#endif +#define PVR_SCRATCH_PAGE_INIT_VALUE 0 +#define PVR_ZERO_PAGE_INIT_VALUE 0 + /*! * Refcounted structure that is shared between the context and * the cleanup thread items. @@ -127,10 +132,10 @@ typedef struct _MMU_CTX_CLEANUP_DATA_ DLLIST_NODE sMMUCtxCleanupItemsHead; /*! 
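/* Illustrative sketch (hypothetical code, not from the driver sources): the
 * InfoPageCreate change above rounds the requested allocation size up to a
 * whole number of pages. The arithmetic a PVR_ALIGN-style macro is assumed to
 * perform, for a power-of-two alignment:
 */
#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of align, where align is a power of two. */
#define ALIGN_UP(x, align)  (((x) + ((size_t)(align) - 1)) & ~((size_t)(align) - 1))

int main(void)
{
    const size_t page = 4096;
    /* A 6000-byte info page needs two pages worth of backing memory. */
    assert(ALIGN_UP(6000, page) == 8192);
    /* An exact multiple is left unchanged. */
    assert(ALIGN_UP(4096, page) == 4096);
    return 0;
}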
Was the MMU context destroyed and should not be accessed any more? */ IMG_BOOL bMMUContextExists; -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) /*! Associated OSid for this context */ IMG_UINT32 ui32OSid; -#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ +#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ } MMU_CTX_CLEANUP_DATA; @@ -153,6 +158,8 @@ typedef struct _MMU_CLEANUP_ITEM_ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*! The update value of the sync to signal that the cache was flushed */ IMG_UINT32 uiRequiredSyncVal; + /*! The update value of the power off counter */ + IMG_UINT32 uiRequiredPowerOffCounter; /*! The device node needed to free the page tables */ PVRSRV_DEVICE_NODE *psDevNode; } MMU_CLEANUP_ITEM; @@ -188,7 +195,7 @@ typedef struct _MMU_PHYSMEM_CONTEXT_ /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */ DLLIST_NODE sTmpMMUMappingHead; -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) IMG_UINT32 ui32OSid; IMG_UINT32 ui32OSidReg; IMG_BOOL bOSidAxiProt; @@ -258,7 +265,7 @@ typedef struct _MMU_Levelx_INFO_ MMU_MEMORY_DESC sMemDesc; /*! Array of infos for the next level. Must be last member in structure */ - struct _MMU_Levelx_INFO_ *apsNextLevel[1]; + struct _MMU_Levelx_INFO_ *apsNextLevel[IMG_FLEX_ARRAY_MEMBER]; } MMU_Levelx_INFO; /*! @@ -269,6 +276,7 @@ struct _MMU_CONTEXT_ /*! Originating Connection */ CONNECTION_DATA *psConnection; + /*! Device MMU attribute descriptions */ MMU_DEVICEATTRIBS *psDevAttrs; /*! For allocation and deallocation of the physical memory where @@ -300,16 +308,40 @@ struct _MMU_CONTEXT_ /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */ }; +/* + * Only the kernel created (direct bridge) firmware memory context will + * have a NULL connection and all other application memory context get a + * valid connection object. + */ +#define _MMU_IS_FWKM_CTX(_ctx) ((_ctx)->psConnection == NULL) + +/* + * Used to determine if the MMU Ctx provided is the FWKM memory context + * and if it belongs to the VZ Guest. + */ +#define _MMU_IS_FWKM_CTX_VZGUEST(_ctx) (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, _ctx->psPhysMemCtx->psDevNode) && _MMU_IS_FWKM_CTX(_ctx)) + static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR}; #if defined(DEBUG) #include "log2.h" #endif -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) -static IMG_UINT32 g_ui32MMULeakCounter = 0; -static DEFINE_MUTEX(g_sMMULeakMutex); -#endif + +static PVRSRV_ERROR +MMU_UnmapPagesUnlocked(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags); + +static PVRSRV_ERROR +MMU_UnmapPMRFastUnlocked(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize); /***************************************************************************** * Utility functions * @@ -347,6 +379,7 @@ _FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode, } } +#if defined(SUPPORT_MMU_DEFERRED_FREE) /*************************************************************************/ /*! 
@Function _CleanupThread_FreeMMUMapping @@ -375,8 +408,6 @@ _CleanupThread_FreeMMUMapping(void* pvData) MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData; PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode; IMG_BOOL bFreeNow; - IMG_UINT32 uiSyncCurrent; - IMG_UINT32 uiSyncReq; OSLockAcquire(psMMUCtxCleanupData->hCleanupLock); @@ -388,11 +419,20 @@ _CleanupThread_FreeMMUMapping(void* pvData) PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, e0); } + /* Has there been a power off since the cleanup item was queued? */ + bFreeNow = PVRSRVHasCounter32Advanced(psDevNode->uiPowerOffCounter, + psCleanup->uiRequiredPowerOffCounter); + if (bFreeNow) + { + goto freeNow; + } + if (psCleanup->psSync == NULL) { + IMG_UINT32 uiSyncVal; /* Kick to invalidate the MMU caches and get sync info */ eError = psDevNode->pfnMMUCacheInvalidateKick(psDevNode, - &psCleanup->uiRequiredSyncVal); + &uiSyncVal); if (eError != PVRSRV_OK) { OSLockRelease(psMMUCtxCleanupData->hCleanupLock); @@ -411,16 +451,10 @@ _CleanupThread_FreeMMUMapping(void* pvData) } } - uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr); - uiSyncReq = psCleanup->uiRequiredSyncVal; - - /* Has the invalidate executed */ - bFreeNow = (uiSyncCurrent >= uiSyncReq) ? - /* ... with the counter wrapped around ... - * There can't be 3*1024*1024 transactions completed, so consider wrapped */ - (((uiSyncCurrent - uiSyncReq) > 0xF0000000UL)? IMG_FALSE : IMG_TRUE): - /* There can't be 3*1024*1024 transactions pending, so consider wrapped */ - (((uiSyncReq - uiSyncCurrent) > 0xF0000000UL)? IMG_TRUE : IMG_FALSE); + /* Has the invalidate executed (sync is updated when the Firmware performs + * the invalidate)? */ + bFreeNow = PVRSRVHasCounter32Advanced(OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr), + psCleanup->uiRequiredSyncVal); #if defined(NO_HARDWARE) /* In NOHW the syncs will never be updated so just free the tables */ @@ -429,10 +463,12 @@ _CleanupThread_FreeMMUMapping(void* pvData) /* If the Invalidate operation is not completed, check if the operation timed out */ if (!bFreeNow) { + IMG_UINT32 uiTimeStart = psCleanup->sCleanupThreadFn.ui32TimeStart; + IMG_UINT32 uiTimeEnd = psCleanup->sCleanupThreadFn.ui32TimeEnd; + /* If the time left for the completion of invalidate operation is * within 500ms of time-out, consider the operation as timed out */ - if ((psCleanup->sCleanupThreadFn.ui32TimeEnd - psCleanup->sCleanupThreadFn.ui32TimeStart - 500) <= - (OSClockms() - psCleanup->sCleanupThreadFn.ui32TimeStart)) + if ((uiTimeEnd - uiTimeStart - 500) <= (OSClockms() - uiTimeStart)) { /* Consider the operation is timed out */ bFreeNow = IMG_TRUE; @@ -446,6 +482,7 @@ _CleanupThread_FreeMMUMapping(void* pvData) _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); dllist_remove_node(&psCleanup->sMMUCtxCleanupItem); + OSFreeMem(psCleanup); eError = PVRSRV_OK; @@ -533,7 +570,8 @@ _SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx) * the FW yet. Kicking off an MMU cache invalidate should * be done in the cleanup thread to not waste time here. 
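/* Illustrative sketch (hypothetical code, not from the driver sources): the
 * hunk above replaces the hand-rolled wrap-around heuristic with
 * PVRSRVHasCounter32Advanced(). The assumed idea behind such a helper is that
 * unsigned 32-bit subtraction is well defined across a wrap, and the sign of
 * the difference says which value is ahead while the two counters are less
 * than 2^31 apart:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "has this 32-bit counter reached or passed the target" test. */
static bool counter32_has_advanced(uint32_t current, uint32_t required)
{
    return (int32_t)(current - required) >= 0;
}

int main(void)
{
    /* Counter wrapped from 0xFFFFFFF0 past zero to 5: the target has been
     * passed, the case the removed 0xF0000000UL heuristic was approximating. */
    assert(counter32_has_advanced(5u, 0xFFFFFFF0u));
    /* Target not yet reached: 10 is still behind 20. */
    assert(!counter32_has_advanced(10u, 20u));
    return 0;
}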
*/ psCleanupItem->psSync = NULL; - psCleanupItem->uiRequiredSyncVal = 0; + psCleanupItem->uiRequiredSyncVal = psDevNode->ui32NextMMUInvalidateUpdate; + psCleanupItem->uiRequiredPowerOffCounter = psDevNode->uiPowerOffCounterNext; psCleanupItem->psDevNode = psDevNode; psCleanupItem->psMMUCtxCleanupData = psCleanupData; @@ -551,10 +589,11 @@ _SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx) psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping; psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem; psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; + psCleanupItem->sCleanupThreadFn.eCleanupType = PVRSRV_CLEANUP_TYPE_MMU; CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn, CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); - PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn); + PVRSRVCleanupThreadAddWork(psDevNode, &psCleanupItem->sCleanupThreadFn); return; @@ -564,6 +603,7 @@ _SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx) e0: return; } +#endif /*************************************************************************/ /*! @Function _CalcPCEIdx @@ -681,73 +721,27 @@ static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr, return ui32RetVal; } -#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) -/* - * RGXMapBRN71422TargetPhysicalAddress - * - * Set-up a special MMU tree mapping with a single page that eventually points to - * RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR. - * - * PC entries are 32b, with the last 4 bits being 0 except for the LSB bit that should be 1 (Valid). Addr is 4KB aligned. - * PD entries are 64b, with addr in bits 39:5 and everything else 0 except for LSB bit that is Valid. Addr is byte aligned? - * PT entries are 64b, with phy addr in bits 39:12 and everything else 0 except for LSB bit that is Valid. Addr is 4KB aligned. - * So, we can construct the page tables in a single page like this: - * 0x00 : PCE (PCE index 0) - * 0x04 : 0x0 - * 0x08 : PDEa (PDE index 1) - * 0x0C : PDEb - * 0x10 : PTEa (PTE index 2) - * 0x14 : PTEb - * - * With the PCE and the PDE pointing to this same page. - * The VA address that we are mapping is therefore: - * VA = PCE_idx*PCE_size + PDE_idx*PDE_size + PTE_idx*PTE_size = - * = 0 * 1GB + 1 * 2MB + 2 * 4KB = - * = 0 + 0x20_0000 + 0x2000 = - * = 0x00_0020_2000 - */ -void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext) -{ - MMU_MEMORY_DESC *psMemDesc = &psMMUContext->sBaseLevelInfo.sMemDesc; - IMG_DEV_PHYADDR sPhysAddrPC = psMemDesc->sDevPAddr; - IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; - IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; - IMG_UINT64 ui64Entry; - - /* PCE points to PC */ - ui64Entry = sPhysAddrPC.uiAddr; - ui64Entry = ui64Entry >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; - ui64Entry = ui64Entry << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT; - ui64Entry = ui64Entry & ~RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK; - ui64Entry = ui64Entry | RGX_MMUCTRL_PC_DATA_VALID_EN; - pui32Px[0] = (IMG_UINT32) ui64Entry; - - /* PDE points to PC */ - ui64Entry = sPhysAddrPC.uiAddr; - ui64Entry = ui64Entry & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK; - ui64Entry = ui64Entry | RGX_MMUCTRL_PD_DATA_VALID_EN; - pui64Px[1] = ui64Entry; - - /* PTE points to PAddr */ - ui64Entry = RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR; - ui64Entry = ui64Entry & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK; - ui64Entry = ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN; - pui64Px[2] = ui64Entry; +/*************************************************************************/ /*! 
+@Function _GetParityBit - { - PVRSRV_ERROR eError; - PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode; - eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, - &psMemDesc->psMapping->sMemHandle, - psMemDesc->uiOffset, - psMemDesc->uiSize); - PVR_LOG_IF_ERROR(eError, "pfnDevPxClean"); - } +@Description Calculate parity bit - PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapping the BRN71422 workaround to target physical address 0x%" IMG_UINT64_FMTSPECx ".", - __func__, RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)); +@Input uiSrc Value to calculate the parity bit from. + +@Return The parity bit + */ +/*****************************************************************************/ +static inline IMG_UINT64 _GetParityBit(IMG_UINT64 uiSrc) +{ + uiSrc ^= uiSrc >> 32U; + uiSrc ^= uiSrc >> 16U; + uiSrc ^= uiSrc >> 8U; + uiSrc ^= uiSrc >> 4U; + uiSrc ^= uiSrc >> 2U; + uiSrc ^= uiSrc >> 1U; + + return (uiSrc & 1U); } -#endif /***************************************************************************** * MMU memory allocation/management functions (mem desc) * @@ -784,9 +778,7 @@ static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle, RA_FLAGS_T uiFlags, RA_LENGTH_T uBaseAlignment, const IMG_CHAR *pszAnnotation, - RA_BASE_T *puiBase, - RA_LENGTH_T *puiActualSize, - RA_PERISPAN_HANDLE *phPriv) + RA_IMPORT *psImport) { MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle; PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psPhysMemCtx->psDevNode; @@ -805,7 +797,7 @@ static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle, PVR_GOTO_IF_NOMEM(psMapping, eError, e0); #if defined(PVRSRV_ENABLE_PROCESS_STATS) - uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ? + uiPid = psDevNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE ? 
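/* Illustrative sketch (hypothetical code, not from the driver sources): a
 * worked check of the XOR-fold used by _GetParityBit above. Each shift-and-XOR
 * halves the number of bits whose parity still matters, so after folding 64
 * bits down to 1 the low bit holds the parity of the whole word:
 */
#include <assert.h>
#include <stdint.h>

/* Same fold as _GetParityBit: returns 1 for an odd number of set bits. */
static uint64_t parity64(uint64_t v)
{
    v ^= v >> 32; v ^= v >> 16; v ^= v >> 8;
    v ^= v >> 4;  v ^= v >> 2;  v ^= v >> 1;
    return v & 1u;
}

int main(void)
{
    assert(parity64(0x0ULL) == 0);                    /* no bits set       */
    assert(parity64(0x1ULL) == 1);                    /* one bit set       */
    assert(parity64(0xF0F0F0F0F0F0F0F1ULL) == 1);     /* 33 bits set       */
    assert(parity64((uint64_t)(0xAA ^ 0x55)) == 0);   /* 0xFF: 8 bits set  */
    return 0;
}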
PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); #endif @@ -842,11 +834,11 @@ static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle, psMapping->uiCpuVAddrRefCount = 0; - *phPriv = (RA_PERISPAN_HANDLE) psMapping; + psImport->hPriv = (RA_PERISPAN_HANDLE) psMapping; /* Note: This assumes this memory never gets paged out */ - *puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr; - *puiActualSize = uiSize; + psImport->base = (RA_BASE_T)psMapping->sDevPAddr.uiAddr; + psImport->uSize = uiSize; return PVRSRV_OK; @@ -997,7 +989,7 @@ static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate, PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, - MMU_PROTFLAGS_T *uiMMUProtFlags, + MMU_PROTFLAGS_T *puiMMUProtFlags, MMU_CONTEXT *psMMUContext) { PVRSRV_ERROR eError = PVRSRV_OK; @@ -1005,73 +997,52 @@ static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate, PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; /* Do flag conversion between devmem flags and MMU generic flags */ - if (bInvalidate == IMG_FALSE) + if (bInvalidate == IMG_TRUE) { - *uiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) - >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) - << MMU_PROTFLAGS_DEVICE_OFFSET; + *puiMMUProtFlags |= MMU_PROTFLAGS_INVALID; + return eError; /* OK */ + } - if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags)) - { - *uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; - } - if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags)) - { - *uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE; - } + /* Convert to valid flags for valid mapping */ + *puiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) + >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) + << MMU_PROTFLAGS_DEVICE_OFFSET; - eError = DevmemDeviceCacheMode(psDevNode, uiMappingFlags, &uiGPUCacheMode); - PVR_RETURN_IF_ERROR(eError); + if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags)) + { + *puiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + } + if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags)) + { + *puiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE; + } - switch (uiGPUCacheMode) - { - case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: - case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: - break; - case PVRSRV_MEMALLOCFLAG_GPU_CACHED: - *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED; - break; - default: - PVR_DPF((PVR_DBG_ERROR, - "%s: Wrong parameters", - __func__)); - return PVRSRV_ERROR_INVALID_PARAMS; - } + eError = DevmemDeviceCacheMode(uiMappingFlags, &uiGPUCacheMode); + PVR_RETURN_IF_ERROR(eError); - if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags)) - { - *uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT; - } - /* Only compile if RGX_FEATURE_MIPS_BIT_MASK is defined to avoid compilation - * errors on volcanic cores. - */ - #if defined(SUPPORT_RGX) && defined(RGX_FEATURE_MIPS_BIT_MASK) - if ((psDevNode->pfnCheckDeviceFeature) && - PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) - { - /* If we are allocating on the MMU of the firmware processor, the - * cached/uncached attributes must depend on the FIRMWARE_CACHED - * allocation flag. 
- */ - if (psMMUContext->psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) - { - if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) - { - *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED; - } - else - { - *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED; + switch (uiGPUCacheMode) + { + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: + break; + case PVRSRV_MEMALLOCFLAG_GPU_CACHED: + *puiMMUProtFlags |= MMU_PROTFLAGS_CACHED; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Wrong parameters", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } - } - *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT; - } - } -#endif + if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags)) + { + *puiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT; } - else + + if (psDevNode->pfnMMUTweakProtFlags) { - *uiMMUProtFlags |= MMU_PROTFLAGS_INVALID; + psDevNode->pfnMMUTweakProtFlags(psDevNode, psMMUContext->psDevAttrs, uiMappingFlags, puiMMUProtFlags); } return PVRSRV_OK; @@ -1089,7 +1060,7 @@ static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate, @Input psConfig MMU Px config -@Input eMMULevel MMU level that that allocation is for +@Input eMMULevel MMU level that allocation is for @Output psMemDesc Description of allocation @@ -1101,6 +1072,10 @@ static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext, const MMU_PxE_CONFIG *psConfig, MMU_LEVEL eMMULevel, MMU_MEMORY_DESC *psMemDesc, +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + IMG_DEV_VIRTADDR* psRunningDevVAddr, + IMG_UINT32 uiLog2DataPageSize, +#endif IMG_UINT32 uiLog2Align) { PVRSRV_ERROR eError; @@ -1136,14 +1111,31 @@ static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext, PVR_LOG_GOTO_WITH_ERROR("_MMU_PhysMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); } - /* - Clear the object - Note: if any MMUs are cleared with non-zero values then will need a - custom clear function - Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is - unlikely - */ - OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes); +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + /* If parity bit is needed, set it accordingly to avoid parity errors along with page-faults. */ + if (psConfig->uiParityBitMask && eMMULevel == MMU_LEVEL_1 && psRunningDevVAddr != NULL) + { + IMG_UINT32 uiParity = (IMG_UINT32)_GetParityBit(psRunningDevVAddr->uiAddr ^ 0ULL); + + /* Only calculate parity for first address in the PT. + This tells us which of the two patterns to use for the rest of the PT. 
*/ + OSCachedMemCopy(psMemDesc->pvCpuVAddr, psMMUContext->psDevAttrs->pui64PrecomputedAllocParity[uiParity], uiBytes); + + /* Increment running dev virtual address */ + psRunningDevVAddr->uiAddr += (1 << uiLog2DataPageSize) * uiNumEntries; + } + else +#endif + { + /* + Clear the object + Note: if any MMUs are cleared with non-zero values then will need a + custom clear function + Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is + unlikely + */ + OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes); + } eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, &psMemDesc->psMapping->sMemHandle, @@ -1177,7 +1169,12 @@ static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext, psConfig->uiProtMask, psConfig->uiValidEnMask, 0, + 0, /* Unused - Parity bit values are taken directly from PTE memory when PTE is 0-initialised or invalid */ + 0, /* Unused */ + 0, /* Unused */ psMMUContext->psDevAttrs->eMMUType); +#else + PVR_UNREFERENCED_PARAMETER(eMMULevel); #endif return PVRSRV_OK; @@ -1238,6 +1235,7 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, IMG_UINT32 uiIndex, const MMU_PxE_CONFIG *psConfig, const IMG_DEV_PHYADDR *psDevPAddr, + const IMG_DEV_VIRTADDR *psDevVAddr, IMG_BOOL bUnmap, #if defined(PDUMP) const IMG_CHAR *pszMemspaceName, @@ -1251,6 +1249,15 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, IMG_UINT64 uiAddr = psDevPAddr->uiAddr; PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + IMG_BOOL bParity = psConfig->uiParityBitMask; +#else + IMG_BOOL bParity = psConfig->uiParityBitMask && !bUnmap; +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(bParity); +#endif +#endif + if (psDevNode->pfnValidateOrTweakPhysAddrs) { PVRSRV_ERROR eErr = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode, @@ -1265,8 +1272,15 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ & psConfig->uiAddrMask; /* Delete unused bits */ + ui64PxE64 |= uiProtFlags; + /* Add parity */ + if (bParity) + { + ui64PxE64 |= _GetParityBit(psDevVAddr->uiAddr ^ psDevPAddr->uiAddr) << psConfig->uiParityBitShift; + } + /* Set the entry */ if (psConfig->uiBytesPerEntry == 8) { @@ -1281,6 +1295,7 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, /* assert that the result fits into 32 bits before writing it into the 32-bit array with a cast */ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); + PVR_ASSERT(!bParity); pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; } @@ -1289,7 +1304,6 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; } - /* Log modification */ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), @@ -1297,6 +1311,8 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), !bUnmap); + PVR_UNREFERENCED_PARAMETER(bUnmap); /* when HTBLOGK is disabled */ + #if defined(PDUMP) PDumpMMUDumpPxEntries(psDevNode, MMU_LEVEL_1, @@ -1315,6 +1331,9 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, psConfig->uiProtMask, psConfig->uiValidEnMask, 0, + bParity ? _GetParityBit(psDevVAddr->uiAddr ^ 0) : 0, + psConfig->uiParityBitShift, + bParity ? 
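/* Illustrative sketch (hypothetical code, not from the driver sources): in
 * _SetupPTE above the parity bit written into the entry is the combined parity
 * of the device virtual and device physical addresses, placed at
 * psConfig->uiParityBitShift. The field positions below are invented for the
 * example; the real layout comes from MMU_PxE_CONFIG:
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ADDR_MASK        0x000000FFFFFFF000ULL /* example PA bits 39:12  */
#define EX_VALID_EN         0x1ULL                /* example valid bit      */
#define EX_PARITY_BIT_SHIFT 62                    /* example parity position */

static uint64_t parity64(uint64_t v)
{
    v ^= v >> 32; v ^= v >> 16; v ^= v >> 8;
    v ^= v >> 4;  v ^= v >> 2;  v ^= v >> 1;
    return v & 1u;
}

/* Build a PTE the way _SetupPTE does: address bits, valid/protection bits,
 * then the parity of (VA ^ PA) in the configured bit position. */
static uint64_t build_pte(uint64_t dev_vaddr, uint64_t dev_paddr)
{
    uint64_t pte = dev_paddr & EX_ADDR_MASK;
    pte |= EX_VALID_EN;
    pte |= parity64(dev_vaddr ^ dev_paddr) << EX_PARITY_BIT_SHIFT;
    return pte;
}

int main(void)
{
    /* VA 0x8000_2000 mapped to PA 0x1234_5000: parity(VA ^ PA) decides bit 62. */
    printf("PTE = 0x%016llx\n",
           (unsigned long long)build_pte(0x80002000ULL, 0x12345000ULL));
    return 0;
}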
psConfig->uiParityBitMask : 0, psMMUContext->psDevAttrs->eMMUType); #endif /*PDUMP*/ @@ -1346,23 +1365,21 @@ static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, point to @Input uiProtFlags MMU protection flags - -@Return PVRSRV_OK if the setup was successful */ /*****************************************************************************/ -static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, - MMU_Levelx_INFO *psLevel, - IMG_UINT32 uiIndex, - const MMU_PxE_CONFIG *psConfig, - MMU_LEVEL eMMULevel, - const IMG_DEV_PHYADDR *psDevPAddr, +static void _SetupPxE(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 uiIndex, + const MMU_PxE_CONFIG *psConfig, + MMU_LEVEL eMMULevel, + const IMG_DEV_PHYADDR *psDevPAddr, #if defined(PDUMP) - const IMG_CHAR *pszMemspaceName, - const IMG_CHAR *pszSymbolicAddr, - IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, #endif - MMU_PROTFLAGS_T uiProtFlags, - IMG_UINT32 uiLog2DataPageSize) + MMU_PROTFLAGS_T uiProtFlags, + IMG_UINT32 uiLog2DataPageSize) { PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; @@ -1408,7 +1425,8 @@ static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, default: PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__)); - return PVRSRV_ERROR_INVALID_PARAMS; + PVR_ASSERT(0); + return; } /* How big is a PxE in bytes? */ @@ -1468,8 +1486,8 @@ static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, default: PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d", __func__, psConfig->uiBytesPerEntry, eMMULevel)); - - return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; + PVR_ASSERT(0); + return; } #if defined(PDUMP) @@ -1490,6 +1508,9 @@ static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, psConfig->uiProtMask, psConfig->uiValidEnMask, 0, + 0, /* Unused */ + 0, /* Unused */ + 0, /* Unused */ psMMUContext->psDevAttrs->eMMUType); #endif @@ -1497,7 +1518,7 @@ static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, eMMULevel, uiProtFlags & MMU_PROTFLAGS_INVALID); - return PVRSRV_OK; + return; } /***************************************************************************** @@ -1574,15 +1595,22 @@ static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext, IMG_BOOL bFreed = IMG_FALSE; PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + /* Call before parameter check for debugging purposes. */ + MMU_OBJ_DBG((PVR_DBG_ERROR, + "_MMU_FreeLevel: level = %u, range %u - %u, refcount = %u", + aeMMULevel[uiThisLevel], uiStartIndex, uiEndIndex, + (psLevel != NULL ? 
psLevel->ui32RefCount : IMG_UINT32_MAX))); + /* Parameter checks */ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); - PVR_ASSERT(psLevel != NULL); - MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d", - aeMMULevel[uiThisLevel], uiStartIndex, - uiEndIndex, psLevel->ui32RefCount)); + if (psLevel == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level data", __func__)); + goto ErrReturn; + } - for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++) + for (i = uiStartIndex; i < uiEndIndex; i++) { if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) { @@ -1624,24 +1652,20 @@ static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext, uiNextStartIndex, uiNextEndIndex, bNextFirst, bNextLast, uiLog2DataPageSize)) { - PVRSRV_ERROR eError; - /* Un-wire the entry */ - eError = _SetupPxE(psMMUContext, - psLevel, - i, - psConfig, - aeMMULevel[uiThisLevel], - NULL, + _SetupPxE(psMMUContext, + psLevel, + i, + psConfig, + aeMMULevel[uiThisLevel], + NULL, #if defined(PDUMP) - NULL, /* Only required for data page */ - NULL, /* Only required for data page */ - 0, /* Only required for data page */ + NULL, /* Only required for data page */ + NULL, /* Only required for data page */ + 0, /* Only required for data page */ #endif - MMU_PROTFLAGS_INVALID, - uiLog2DataPageSize); - - PVR_ASSERT(eError == PVRSRV_OK); + MMU_PROTFLAGS_INVALID, + uiLog2DataPageSize); /* Free table of the level below, pointed to by this table entry. * We don't destroy the table inside the above _MMU_FreeLevel call because we @@ -1675,16 +1699,20 @@ static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext, } /* Level one flushing is done when we actually write the table entries */ - if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL)) + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) { - PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + PVRSRV_ERROR eError; + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, &psLevel->sMemDesc.psMapping->sMemHandle, uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); + PVR_LOG_IF_ERROR(eError, "PhysHeapPagesClean"); } - MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d", - aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1)); +ErrReturn: + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %u, refcount = %u", + aeMMULevel[uiThisLevel], + bFreed ? 0 : (psLevel != NULL ? psLevel->ui32RefCount : IMG_UINT32_MAX))); return bFreed; } @@ -1748,6 +1776,9 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32CurrentLevel, IMG_UINT32 uiStartIndex, IMG_UINT32 uiEndIndex, +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + IMG_DEV_VIRTADDR* psRunningDevVAddr, +#endif IMG_BOOL bFirst, IMG_BOOL bLast, IMG_UINT32 uiLog2DataPageSize) @@ -1762,9 +1793,10 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, /* Parameter check */ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); - MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d", - aeMMULevel[uiThisLevel], uiStartIndex, - uiEndIndex, psLevel->ui32RefCount)); + MMU_OBJ_DBG((PVR_DBG_ERROR, + "_MMU_AllocLevel: level = %u, range %u - %u, refcount = %u", + aeMMULevel[uiThisLevel], uiStartIndex, uiEndIndex, + psLevel != NULL ? 
psLevel->ui32RefCount : IMG_UINT32_MAX)); /* Go from uiStartIndex to uiEndIndex through the Px */ for (i = uiStartIndex;i < uiEndIndex;i++) @@ -1790,7 +1822,7 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, ui32AllocSize = sizeof(MMU_Levelx_INFO); if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1) { - ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1); + ui32AllocSize += IMG_FLEX_ARRAY_SIZE(sizeof(MMU_Levelx_INFO *), uiNextEntries); } psNextLevel = OSAllocZMem(ui32AllocSize); if (psNextLevel == NULL) @@ -1808,7 +1840,12 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1], aeMMULevel[uiThisLevel + 1], &psNextLevel->sMemDesc, - psConfig->uiAddrLog2Align); +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + psRunningDevVAddr, + uiLog2DataPageSize, +#endif + psConfig->uiAddrLog2Align +); if (eError != PVRSRV_OK) { uiAllocState = 1; @@ -1816,28 +1853,33 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, } /* Wire up the entry */ - eError = _SetupPxE(psMMUContext, - psLevel, - i, - psConfig, - aeMMULevel[uiThisLevel], - &psNextLevel->sMemDesc.sDevPAddr, + _SetupPxE(psMMUContext, + psLevel, + i, + psConfig, + aeMMULevel[uiThisLevel], + &psNextLevel->sMemDesc.sDevPAddr, #if defined(PDUMP) - NULL, /* Only required for data page */ - NULL, /* Only required for data page */ - 0, /* Only required for data page */ + NULL, /* Only required for data page */ + NULL, /* Only required for data page */ + 0, /* Only required for data page */ #endif - 0, - uiLog2DataPageSize); + 0, + uiLog2DataPageSize); - if (eError != PVRSRV_OK) + psLevel->ui32RefCount++; + } +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + else + { + /* The level structure already exists, increment running device virtual address + This is necessary for correct parity bit calculation on further allocated page tables */ + if (apsConfig[aeMMULevel[MMU_LEVEL_1]]->uiParityBitMask && aeMMULevel[uiThisLevel+1] == MMU_LEVEL_1) { - uiAllocState = 2; - goto e0; + psRunningDevVAddr->uiAddr += psLevel->apsNextLevel[i]->ui32NumOfEntries * (1 << uiLog2DataPageSize); } - - psLevel->ui32RefCount++; } +#endif /* If we're crossing a Px then the start index changes */ if (bFirst && (i == uiStartIndex)) @@ -1874,13 +1916,17 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, pui32CurrentLevel, uiNextStartIndex, uiNextEndIndex, +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + + psRunningDevVAddr, +#endif bNextFirst, bNextLast, uiLog2DataPageSize); (*pui32CurrentLevel)--; if (eError != PVRSRV_OK) { - uiAllocState = 2; + uiAllocState = 3; goto e0; } } @@ -1889,7 +1935,17 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, /* All we need to do for level 1 is bump the refcount */ psLevel->ui32RefCount++; } - PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + + if (psLevel->ui32RefCount > psLevel->ui32NumOfEntries) + { + /* Given how the reference counting is implemented for MMU_LEVEL_2 + * and MMU_LEVEL_3 this should never happen for those levels. Only + * in case of MMU_LEVEL_1 this should be possible (e.g. when someone + * takes multiple reservations of the same range). In such case + * return error to prevent reference count to rollover. 
*/ + uiAllocState = 4; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BAD_MAPPING, e0); + } } /* Level one flushing is done when we actually write the table entries */ @@ -1899,11 +1955,11 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, &psLevel->sMemDesc.psMapping->sMemHandle, uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); - PVR_GOTO_IF_ERROR(eError, e0); + PVR_GOTO_IF_ERROR(eError, e1); } - MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d", - aeMMULevel[uiThisLevel], psLevel->ui32RefCount)); + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %u, refcount = %u", + aeMMULevel[uiThisLevel], psLevel->ui32RefCount)); return PVRSRV_OK; e0: @@ -1912,96 +1968,133 @@ static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d" ,eError, aeMMULevel[uiThisLevel], uiAllocState)); + /* In case of an error process current `i` in special way without + * recursively calling `_MMU_FreeLevel()`. */ + + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + /* If this is not a PT (so it's a PC or PD) it means that an error + * happened either during PT or PD allocation/setup. + * + * - If the error happened during PT allocation/setup it means that + * we're at the PD level now and we already run below `else` clause + * for the failing PT and the `for` loop below for the other PTs. This + * means that all of the PTs that have been referenced during this + * operation have been dereferenced. So we just need to free that + * failing PT here and the rest of them in the loop below. + * - If the error happened during PD allocation/setup it means that + * we're at the PC level now and we already run below `else` clause + * for the failing PD and the `for` loop below for the other PDs. This + * means that all of the PTs that have been allocated during this + * operation for the failing PD have been freed. So we just need to + * free that failing PD and the rest of them in a recursive manner + * in the loop below. */ + if (uiAllocState >= 3) + { + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + psLevel->ui32RefCount--; + } + } + if (uiAllocState >= 2) + { + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + _PxMemFree(psMMUContext, &psLevel->apsNextLevel[i]->sMemDesc, + aeMMULevel[uiThisLevel + 1]); + } + } + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + OSFreeMem(psLevel->apsNextLevel[i]); + psLevel->apsNextLevel[i] = NULL; + } + } + else + { + /* This is a PT which means that we just need to dereference it. It's + * going to be freed on the PD level in this error path in the `if` + * clause above. */ + psLevel->ui32RefCount--; + } + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + +e1: + i--; + /* The start value of index variable i is not initialised on purpose. * This clean-up loop deinitialises what was already initialised in * reverse order, so the i index already has the correct value. 
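The comment above describes the staged error handling that uiAllocState drives: the error path only unwinds the stages that actually completed, most recent first. The standalone example below shows that shape with made-up resources; it is an illustration of the pattern, not an excerpt of the driver.

    #include <stdio.h>
    #include <stdlib.h>

    struct resources {
        void *table;
        void *entry;
        FILE *log;
    };

    static int setup(struct resources *r)
    {
        int stage = 0;                 /* how far we got; drives the unwind below */

        r->table = malloc(64);
        if (r->table == NULL)
            goto err;
        stage = 1;

        r->entry = malloc(16);
        if (r->entry == NULL)
            goto err;
        stage = 2;

        r->log = fopen("/tmp/example.log", "w");   /* path is arbitrary */
        if (r->log == NULL)
            goto err;
        return 0;

    err:
        /* Unwind only the stages that completed, in reverse order. */
        if (stage >= 2)
            free(r->entry);
        if (stage >= 1)
            free(r->table);
        return -1;
    }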
*/ - for (/* i already set */; i>= uiStartIndex && i< uiEndIndex; i--) + for (/* i already set */; i >= uiStartIndex && i < uiEndIndex; i--) { - switch (uiAllocState) + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) { IMG_UINT32 uiNextStartIndex; IMG_UINT32 uiNextEndIndex; IMG_BOOL bNextFirst; IMG_BOOL bNextLast; - case 3: - /* If we're crossing a Px then the start index changes */ - if (bFirst && (i == uiStartIndex)) - { - uiNextStartIndex = auiStartArray[uiThisLevel + 1]; - bNextFirst = IMG_TRUE; - } - else - { - uiNextStartIndex = 0; - bNextFirst = IMG_FALSE; - } - - /* If we're crossing a Px then the end index changes */ - if (bLast && (i == (uiEndIndex - 1))) - { - uiNextEndIndex = auiEndArray[uiThisLevel + 1]; - bNextLast = IMG_TRUE; - } - else - { - uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; - bNextLast = IMG_FALSE; - } - - if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) - { - (*pui32CurrentLevel)++; - if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i], - auiStartArray, auiEndArray, - auiEntriesPerPxArray, apsConfig, - aeMMULevel, pui32CurrentLevel, - uiNextStartIndex, uiNextEndIndex, - bNextFirst, bNextLast, uiLog2DataPageSize)) - { - psLevel->ui32RefCount--; - psLevel->apsNextLevel[i] = NULL; + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } - /* Check we haven't wrapped around */ - PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); - } - (*pui32CurrentLevel)--; - } - else - { - /* We should never come down this path, but it's here - for completeness */ - psLevel->ui32RefCount--; + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } - /* Check we haven't wrapped around */ - PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); - } + (*pui32CurrentLevel)++; + if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i], + auiStartArray, auiEndArray, + auiEntriesPerPxArray, apsConfig, + aeMMULevel, pui32CurrentLevel, + uiNextStartIndex, uiNextEndIndex, + bNextFirst, bNextLast, uiLog2DataPageSize)) + { + _PxMemFree(psMMUContext, &psLevel->apsNextLevel[i]->sMemDesc, + aeMMULevel[uiThisLevel + 1]); + OSFreeMem(psLevel->apsNextLevel[i]); + psLevel->apsNextLevel[i] = NULL; - __fallthrough; - case 2: - if (psLevel->apsNextLevel[i] != NULL && - psLevel->apsNextLevel[i]->ui32RefCount == 0) - { - _PxMemFree(psMMUContext, &psLevel->sMemDesc, - aeMMULevel[uiThisLevel]); - } + psLevel->ui32RefCount--; - __fallthrough; - case 1: - if (psLevel->apsNextLevel[i] != NULL && - psLevel->apsNextLevel[i]->ui32RefCount == 0) - { - OSFreeMem(psLevel->apsNextLevel[i]); - psLevel->apsNextLevel[i] = NULL; - } + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + (*pui32CurrentLevel)--; + } + else + { + /* We should never come down this path, but it's here + for completeness */ + psLevel->ui32RefCount--; - __fallthrough; - case 0: - uiAllocState = 3; - break; + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); } } + return eError; } @@ -2065,7 +2158,7 @@ static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext, &psMMUPTEConfig, ppsMMUDevVAddrConfig, 
phPriv); - PVR_ASSERT(eError == PVRSRV_OK); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "GetPageSizeConfiguration"); psDevVAddrConfig = *ppsMMUDevVAddrConfig; @@ -2098,7 +2191,7 @@ static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext, /* There is always a PTE entry so we have a slightly different behaviour than above. - E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but still there + E.g. for 2 MB pages the uiPTIndexMask is 0x0000000000 but still there is a PT with one entry. */ @@ -2169,6 +2262,10 @@ _AllocPageTables(MMU_CONTEXT *psMMUContext, const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; IMG_HANDLE hPriv; IMG_UINT32 ui32CurrentLevel = 0; +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + IMG_DEV_VIRTADDR sRunningDevVAddrStart; + IMG_BOOL bSetParity; +#endif PVR_DPF((PVR_DBG_ALLOC, "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, @@ -2190,6 +2287,16 @@ _AllocPageTables(MMU_CONTEXT *psMMUContext, auiEntriesPerPx, apsConfig, aeMMULevel, &psDevVAddrConfig, &hPriv); +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + bSetParity = apsConfig[aeMMULevel[MMU_LEVEL_1]]->uiParityBitMask != 0; + if (bSetParity) + { + /* If parity bit needs to be written for PTEs save the first VA of the PT */ + sRunningDevVAddrStart.uiAddr = sDevVAddrStart.uiAddr & ~((IMG_UINT64)(auiEntriesPerPx[MMU_LEVEL_1] * (1 << uiLog2DataPageSize)) - 1); + } + +#endif + HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC, HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); @@ -2198,8 +2305,18 @@ _AllocPageTables(MMU_CONTEXT *psMMUContext, auiStartArray, auiEndArray, auiEntriesPerPx, apsConfig, aeMMULevel, &ui32CurrentLevel, auiStartArray[0], auiEndArray[0], +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + &sRunningDevVAddrStart, +#endif IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) && defined(DEBUG) + if (bSetParity) + { + PVR_ASSERT(sRunningDevVAddrStart.uiAddr >= sDevVAddrEnd.uiAddr); + } +#endif + _MMU_PutLevelData(psMMUContext, hPriv); return eError; @@ -2284,17 +2401,18 @@ static void _FreePageTables(MMU_CONTEXT *psMMUContext, @Output pui32PTEIndex Index into the PT the address corresponds to -@Return None +@Return IMG_TRUE if the operation was successful and IMG_FALSE otherwise */ /*****************************************************************************/ -static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, - IMG_DEV_VIRTADDR sDevVAddr, - const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, - MMU_Levelx_INFO **ppsLevel, - IMG_UINT32 *pui32PTEIndex) +static INLINE IMG_BOOL _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + MMU_Levelx_INFO **ppsLevel, + IMG_UINT32 *pui32PTEIndex) { MMU_Levelx_INFO *psLocalLevel = NULL; - MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel; + MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->psBaseConfig->ePxLevel; + const MMU_LEVEL eMMUBaseLevel = eMMULevel; IMG_UINT32 uiPCEIndex; IMG_UINT32 uiPDEIndex; @@ -2308,7 +2426,7 @@ static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, { if (eMMULevel == MMU_LEVEL_3) { - /* find the page directory containing the PCE */ + /* find the page directory pointed by the PCE */ uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig, IMG_FALSE); psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex]; @@ -2316,32 +2434,39 @@ static INLINE void 
_MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, if (eMMULevel == MMU_LEVEL_2) { - /* find the page table containing the PDE */ - uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig, - IMG_FALSE); - if (psLocalLevel != NULL) - { - psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex]; - } - else + /* find the page table pointed by the PDE */ + uiPDEIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + if (psLocalLevel == NULL) { - psLocalLevel = - psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex]; + return IMG_FALSE; } + + psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex]; } if (eMMULevel == MMU_LEVEL_1) { /* find PTE index into page table */ - *pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig, - IMG_FALSE); + *pui32PTEIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + if (psLocalLevel == NULL) { - psLocalLevel = &psMMUContext->sBaseLevelInfo; + if (eMMUBaseLevel == eMMULevel) + { + /* if the MMU only supports one level return the base level */ + psLocalLevel = &psMMUContext->sBaseLevelInfo; + } + else + { + return IMG_FALSE; + } } } } + *ppsLevel = psLocalLevel; + + return IMG_TRUE; } /*************************************************************************/ /*! @@ -2421,11 +2546,262 @@ static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext, } } +/* scratch / zero pages */ -/***************************************************************************** - * Public interface functions * +static INLINE PVRSRV_ERROR _MMU_GetDefPage(PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT uiDefPage, + PVRSRV_DEF_PAGE **ppsDefPage, + IMG_CHAR **ppcDefPageName) +{ +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ppcDefPageName); +#endif + + switch (uiDefPage) + { + case SCRATCH_PAGE: + { + *ppsDefPage = &psDevNode->sScratchPage; +#if defined(PDUMP) + *ppcDefPageName = SCRATCH_PAGE_STR; +#endif + break; + } + case DEV_ZERO_PAGE: + { + *ppsDefPage = &psDevNode->sDevZeroPage; +#if defined(PDUMP) + *ppcDefPageName = DEV_ZERO_PAGE_STR; +#endif + break; + } + default: + { + /* Invalid pcDefPageName */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR _MMU_AllocBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT uiInitValue, + IMG_INT uiDefPage) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEV_PHYADDR sDevPAddr = {0}; + PVRSRV_DEF_PAGE *psDefPage; + IMG_CHAR *pcDefPageName; + + eError = _MMU_GetDefPage(psDevNode, + uiDefPage, + &psDefPage, + &pcDefPageName); + PVR_RETURN_IF_ERROR(eError); + + OSLockAcquire(psDefPage->psPgLock); + + if (psDefPage->ui64PgPhysAddr != MMU_BAD_PHYS_ADDR) + { + goto UnlockAndReturn; + } + +#if defined(PDUMP) + PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName); +#endif + + /* Allocate the scratch / zero page required for physical backing + * of virtual ranges + */ + eError = DevPhysMemAlloc(psDevNode, + (1 << psDefPage->ui32Log2PgSize), + 0, + uiInitValue, + IMG_TRUE, +#if defined(PDUMP) + psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + pcDefPageName, + &psDefPage->hPdumpPg, +#endif + PVR_SYS_ALLOC_PID, + &psDefPage->sPageHandle, + &sDevPAddr); + + psDefPage->ui64PgPhysAddr = sDevPAddr.uiAddr; + +UnlockAndReturn: + OSLockRelease(psDefPage->psPgLock); + + return eError; +} + +static PVRSRV_ERROR _MMU_GetBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT64 *pui64PgPhysAddr, + IMG_INT uiDefPage) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEF_PAGE *psDefPage; + IMG_CHAR *pcDefPageName; + + eError = _MMU_GetDefPage(psDevNode, + uiDefPage, + &psDefPage, + &pcDefPageName); + 
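The _MMU_GetPTInfo rewrite above changes the walk from a void helper into one that returns IMG_FALSE when an intermediate level is missing, instead of silently falling back to the base level. A minimal standalone sketch of that behaviour is below; the three-level layout, 512-entry tables and 4 KiB pages are simplified stand-ins, not the driver's real configuration.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ENTRIES 512u

    struct pt { uint64_t pte[ENTRIES]; };
    struct pd { struct pt *pt[ENTRIES]; };
    struct pc { struct pd *pd[ENTRIES]; };

    /* Walk PC -> PD -> PT for a 4 KiB-page layout and return the PT plus the
     * PTE index. Returns false if an intermediate level has not been
     * allocated, so the caller can report a bad mapping instead of guessing. */
    static bool walk_to_pt(struct pc *pc, uint64_t vaddr,
                           struct pt **out_pt, uint32_t *out_idx)
    {
        struct pd *pd = pc->pd[(vaddr >> 30) & (ENTRIES - 1)];
        struct pt *pt;

        if (pd == NULL)
            return false;              /* PD not present */

        pt = pd->pt[(vaddr >> 21) & (ENTRIES - 1)];
        if (pt == NULL)
            return false;              /* PT not present */

        *out_pt  = pt;
        *out_idx = (uint32_t)((vaddr >> 12) & (ENTRIES - 1));
        return true;
    }

Callers in the patch turn that false result into PVRSRV_ERROR_MAPPING_NOT_FOUND, which is the design choice the new return value exists to enable.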
PVR_RETURN_IF_ERROR(eError); + + OSLockAcquire(psDefPage->psPgLock); + + if (psDefPage->ui64PgPhysAddr == MMU_BAD_PHYS_ADDR) + { + eError = PVRSRV_ERROR_NOT_FOUND; + PVR_DPF((PVR_DBG_ERROR,"%s: %s (uiDefPage=%d)", + __func__, PVRSRVGetErrorString(eError), uiDefPage)); + goto UnlockAndReturn; + } + + if (pui64PgPhysAddr) + *pui64PgPhysAddr = psDefPage->ui64PgPhysAddr; + +UnlockAndReturn: + OSLockRelease(psDefPage->psPgLock); + + return eError; +} + +static void _MMU_FreeBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT uiDefPage) +{ + PVRSRV_DEF_PAGE *psDefPage; + IMG_CHAR *pcDefPageName; + PVRSRV_ERROR eErr; + + eErr = _MMU_GetDefPage(psDevNode, + uiDefPage, + &psDefPage, + &pcDefPageName); + if (eErr != PVRSRV_OK) + return; + + OSLockAcquire(psDefPage->psPgLock); + + if (psDefPage->ui64PgPhysAddr == MMU_BAD_PHYS_ADDR) + { + goto UnlockAndReturn; + } + +#if defined(PDUMP) + PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName); +#endif + + DevPhysMemFree(psDevNode, +#if defined(PDUMP) + psDefPage->hPdumpPg, +#endif + &psDefPage->sPageHandle); + +#if defined(PDUMP) + psDefPage->hPdumpPg = NULL; +#endif + psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + +UnlockAndReturn: + OSLockRelease(psDefPage->psPgLock); +} + + +/***************************************************************************** + * Public interface functions * *****************************************************************************/ +/* + MMU_InitDevice +*/ +PVRSRV_ERROR MMU_InitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode) +{ + PVRSRV_ERROR eError; + + /* Set the order to 0 */ + psDevNode->sScratchPage.sPageHandle.uiOrder = 0; + psDevNode->sDevZeroPage.sPageHandle.uiOrder = 0; + + /* Set the size of the Scratch and Zero pages to largest page size */ + if (psDevNode->ui32Non4KPageSizeLog2 != 0) + { + psDevNode->sScratchPage.ui32Log2PgSize = psDevNode->ui32Non4KPageSizeLog2; + psDevNode->sDevZeroPage.ui32Log2PgSize = psDevNode->ui32Non4KPageSizeLog2; + } + else + { + psDevNode->sScratchPage.ui32Log2PgSize = OSGetPageShift(); + psDevNode->sDevZeroPage.ui32Log2PgSize = OSGetPageShift(); + } + + /* Set the Scratch page phys addr */ + psDevNode->sScratchPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* Set the Zero page phys addr */ + psDevNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* The lock can be acquired from MISR (Z-buffer) path */ + eError = OSLockCreate(&psDevNode->sScratchPage.psPgLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.Scratch", ErrReturnError); + + /* Create the lock for zero page */ + eError = OSLockCreate(&psDevNode->sDevZeroPage.psPgLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.Zero", ErrFreeScratchPageLock); + +#ifdef PDUMP + psDevNode->sScratchPage.hPdumpPg = NULL; + psDevNode->sDevZeroPage.hPdumpPg = NULL; +#endif /* PDUMP */ + + eError = _MMU_AllocBackingPage(psDevNode, + PVR_SCRATCH_PAGE_INIT_VALUE, + SCRATCH_PAGE); + PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_AllocBackingPage.Scratch", ErrFreeZeroPageLock); + + eError = _MMU_AllocBackingPage(psDevNode, + PVR_ZERO_PAGE_INIT_VALUE, + DEV_ZERO_PAGE); + PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_AllocBackingPage.Zero", ErrFreeScratchPage); + + return PVRSRV_OK; + +ErrFreeScratchPage: + _MMU_FreeBackingPage(psDevNode, SCRATCH_PAGE); +ErrFreeZeroPageLock: + OSLockDestroy(psDevNode->sDevZeroPage.psPgLock); + psDevNode->sDevZeroPage.psPgLock = NULL; +ErrFreeScratchPageLock: + OSLockDestroy(psDevNode->sScratchPage.psPgLock); + psDevNode->sScratchPage.psPgLock = NULL; +ErrReturnError: + return eError; +} + +/* + MMU_DeInitDevice +*/ 
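The scratch/zero backing-page helpers introduced above follow a lock-protected, allocate-on-first-use pattern: the physical address starts out as MMU_BAD_PHYS_ADDR and is filled in the first time a caller needs it, with the per-page lock serialising the check and the allocation. The sketch below collapses the allocate/get pair into one helper for brevity; the page type, the calloc stand-in for DevPhysMemAlloc and the fake "physical address" are placeholders, and the mutex is assumed to be initialised elsewhere.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define BAD_PHYS_ADDR UINT64_MAX

    struct def_page {
        pthread_mutex_t lock;       /* assumed initialised, e.g. PTHREAD_MUTEX_INITIALIZER */
        uint64_t phys_addr;         /* BAD_PHYS_ADDR until the page is first needed */
    };

    /* Return the backing page's address, allocating it on first use.
     * Returns 0 on success, -1 if the allocation failed. */
    static int get_backing_page(struct def_page *pg, uint64_t *out_addr)
    {
        int ret = 0;

        pthread_mutex_lock(&pg->lock);
        if (pg->phys_addr == BAD_PHYS_ADDR) {
            /* Placeholder for DevPhysMemAlloc(): grab one zeroed page. */
            void *page = calloc(1, 4096);
            if (page == NULL)
                ret = -1;
            else
                pg->phys_addr = (uint64_t)(uintptr_t)page;
        }
        if (ret == 0)
            *out_addr = pg->phys_addr;
        pthread_mutex_unlock(&pg->lock);

        return ret;
    }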
+void MMU_DeInitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode) +{ + if (psDevNode->sScratchPage.psPgLock != NULL) + { + _MMU_FreeBackingPage(psDevNode, SCRATCH_PAGE); + + OSLockDestroy(psDevNode->sScratchPage.psPgLock); + psDevNode->sScratchPage.psPgLock = NULL; + } + + if (psDevNode->sDevZeroPage.psPgLock) + { + _MMU_FreeBackingPage(psDevNode, DEV_ZERO_PAGE); + + + OSLockDestroy(psDevNode->sDevZeroPage.psPgLock); + psDevNode->sDevZeroPage.psPgLock = NULL; + } +} + /* MMU_ContextCreate */ @@ -2451,7 +2827,7 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, psConfig = psDevAttrs->psBaseConfig; psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig; - switch (psDevAttrs->eTopLevel) + switch (psDevAttrs->psBaseConfig->ePxLevel) { case MMU_LEVEL_3: ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC; @@ -2466,7 +2842,7 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, break; default: - PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->eTopLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->psBaseConfig->ePxLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); } /* Allocate the MMU context with the Level 1 Px info's */ @@ -2495,9 +2871,9 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, psPhysMemCtx->psDevNode = psDevNode; /* Needed for Direct Bridge case */ psPhysMemCtx->psMMUContext = psMMUContext; /* Back-link to self */ -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) /* Save the app-specific values for external reference via MMU_GetOSids. */ - if (psConnection != NULL) + if (!_MMU_IS_FWKM_CTX(psMMUContext)) { psPhysMemCtx->ui32OSid = psConnection->ui32OSid; psPhysMemCtx->ui32OSidReg = psConnection->ui32OSidReg; @@ -2517,12 +2893,12 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, psPhysMemCtx->pszPhysMemRAName = OSAllocMem(psPhysMemCtx->uiPhysMemRANameAllocSize); PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->pszPhysMemRAName, eError, e2); - OSStringLCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize); + OSStringSafeCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize); psPhysMemCtx->psPhysMemRA = RA_Create(psPhysMemCtx->pszPhysMemRAName, /* subsequent import */ PhysHeapGetPageShift(psDevNode->psMMUPhysHeap), - RA_LOCKCLASS_1, + RA_LOCKCLASS_2, _MMU_PhysMem_RAImportAlloc, _MMU_PhysMem_RAImportFree, psPhysMemCtx, /* priv */ @@ -2539,10 +2915,10 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData))); PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4); -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) /* Record the originating OSid for all allocation / free for this context */ psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid; -#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ +#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock); psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE; dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead); @@ -2554,15 +2930,31 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, the 1st allocation is made, a device specific callback might request the base object address so we allocate it up front. + In VZ we only need to allocate the FW/KM ctx on the + Host Driver as all Guest tables are allocated and + pre-mapped into the host FW Memory Context. 
+ Drivers with security support and premapped heaps + rely on the TEE to manage the Firmware mappings. */ - if (_PxMemAlloc(psMMUContext, - ui32BaseObjects, - psConfig, - psDevAttrs->eTopLevel, - &psMMUContext->sBaseLevelInfo.sMemDesc, - psDevAttrs->ui32BaseAlign)) +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + if (!_MMU_IS_FWKM_CTX(psMMUContext)) +#else + if (!_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) +#endif { - PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5); + if (_PxMemAlloc(psMMUContext, + ui32BaseObjects, + psConfig, + psDevAttrs->psBaseConfig->ePxLevel, + &psMMUContext->sBaseLevelInfo.sMemDesc, +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + NULL, + 0U, +#endif + psDevAttrs->ui32BaseAlign)) + { + PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5); + } } dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); @@ -2570,6 +2962,24 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects; psMMUContext->sBaseLevelInfo.ui32RefCount = 0; + /* Support cores that need to adjust the base level Px */ + if (psDevNode->pfnMMUTopLevelPxWorkarounds != NULL) + { + PVRSRV_ERROR eError; + MMU_MEMORY_DESC *psMemDesc = &psMMUContext->sBaseLevelInfo.sMemDesc; + + psDevNode->pfnMMUTopLevelPxWorkarounds(psConnection, + psDevNode, + psMemDesc->sDevPAddr, + psMemDesc->pvCpuVAddr); + + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->uiOffset, + psMemDesc->uiSize); + PVR_LOG_IF_ERROR(eError, "PhysHeapPagesClean"); + } + eError = OSLockCreate(&psMMUContext->hLock); PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e6); @@ -2579,7 +2989,7 @@ MMU_ContextCreate(CONNECTION_DATA *psConnection, return PVRSRV_OK; e6: - _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel); + _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->psBaseConfig->ePxLevel); e5: OSFreeMem(psPhysMemCtx->psCleanupData); e4: @@ -2623,9 +3033,17 @@ MMU_ContextDestroy (MMU_CONTEXT *psMMUContext) /* Free the top level MMU object - will be put on defer free list. * This has to be done before the step below that will empty the * defer-free list. 
*/ - _PxMemFree(psMMUContext, - &psMMUContext->sBaseLevelInfo.sMemDesc, - psMMUContext->psDevAttrs->eTopLevel); + +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + if (!_MMU_IS_FWKM_CTX(psMMUContext)) +#else + if (!_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) +#endif + { + _PxMemFree(psMMUContext, + &psMMUContext->sBaseLevelInfo.sMemDesc, + psMMUContext->psDevAttrs->psBaseConfig->ePxLevel); + } /* Empty the temporary defer-free list of Px */ _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); @@ -2682,7 +3100,6 @@ MMU_ContextDestroy (MMU_CONTEXT *psMMUContext) PVRSRV_ERROR MMU_Alloc (MMU_CONTEXT *psMMUContext, IMG_DEVMEM_SIZE_T uSize, - IMG_DEVMEM_SIZE_T *puActualSize, IMG_UINT32 uiProtFlags, IMG_DEVMEM_SIZE_T uDevVAddrAlignment, IMG_DEV_VIRTADDR *psDevVAddr, @@ -2699,6 +3116,7 @@ MMU_Alloc (MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv; #if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(uiProtFlags); PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment); #endif @@ -2710,7 +3128,6 @@ MMU_Alloc (MMU_CONTEXT *psMMUContext, /* check params */ PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext, "psMMUContext"); PVR_LOG_RETURN_IF_INVALID_PARAM(psDevVAddr, "psDevVAddr"); - PVR_LOG_RETURN_IF_INVALID_PARAM(puActualSize, "puActualSize"); psDevAttrs = psMMUContext->psDevAttrs; @@ -2735,7 +3152,18 @@ MMU_Alloc (MMU_CONTEXT *psMMUContext, sDevVAddrEnd.uiAddr += uSize; OSLockAcquire(psMMUContext->hLock); - eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize); + + +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + if (_MMU_IS_FWKM_CTX(psMMUContext)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Driver relying on firmware mappings created by the TEE.", __func__)); + } + else +#endif + { + eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize); + } OSLockRelease(psMMUContext->hLock); if (eError != PVRSRV_OK) @@ -2759,32 +3187,18 @@ MMU_Free (MMU_CONTEXT *psMMUContext, IMG_UINT32 uiLog2DataPageSize) { IMG_DEV_VIRTADDR sDevVAddrEnd; - -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - IMG_UINT32 ui32MMULeakMax = psPVRSRVData->sMemLeakIntervals.ui32MMU; - - mutex_lock(&g_sMMULeakMutex); - - g_ui32MMULeakCounter++; - if (ui32MMULeakMax && g_ui32MMULeakCounter >= ui32MMULeakMax) - { - g_ui32MMULeakCounter = 0; - mutex_unlock(&g_sMMULeakMutex); - - PVR_DPF((PVR_DBG_WARNING, - "%s: Skipped MMU free for address 0x%016" IMG_UINT64_FMTSPECx " to trigger memory leak.", - __func__, - sDevVAddr.uiAddr)); - return; - } - - mutex_unlock(&g_sMMULeakMutex); + MMU_PHYSMEM_CONTEXT *psPhysMemCtx; +#if defined(SUPPORT_MMU_DEFERRED_FREE) + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_ERROR eError; #endif + PVR_ASSERT(psMMUContext != NULL); PVR_LOG_RETURN_VOID_IF_FALSE(psMMUContext != NULL, "psMMUContext"); + psPhysMemCtx = psMMUContext->psPhysMemCtx; + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC, __func__, sDevVAddr.uiAddr)); @@ -2795,22 +3209,45 @@ MMU_Free (MMU_CONTEXT *psMMUContext, /* The Cleanup lock has to be taken before the MMUContext hLock to * prevent deadlock scenarios. 
It is necessary only for parts of * _SetupCleanup_FreeMMUMapping though.*/ - OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); + OSLockAcquire(psPhysMemCtx->psCleanupData->hCleanupLock); OSLockAcquire(psMMUContext->hLock); - _FreePageTables(psMMUContext, - sDevVAddr, - sDevVAddrEnd, - uiLog2DataPageSize); +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + if (!_MMU_IS_FWKM_CTX(psMMUContext)) +#endif + { + _FreePageTables(psMMUContext, + sDevVAddr, + sDevVAddrEnd, + uiLog2DataPageSize); + } - _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx); +#if defined(SUPPORT_MMU_DEFERRED_FREE) + eError = PVRSRVGetDevicePowerState(psPhysMemCtx->psDevNode, &ePowerState); + if (eError != PVRSRV_OK) + { + /* Treat unknown power state as ON. */ + ePowerState = PVRSRV_DEV_POWER_STATE_ON; + } + if (ePowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + _FreeMMUMapping(psPhysMemCtx->psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); + } + else + { + _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx); + } +#else + if (!dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead)) + { + _FreeMMUMapping(psPhysMemCtx->psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); + } +#endif OSLockRelease(psMMUContext->hLock); OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); - - return; } PVRSRV_ERROR @@ -2838,7 +3275,6 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0; - IMG_UINT64 uiDummyProtFlags = 0; MMU_PROTFLAGS_T uiMMUProtFlags = 0; const MMU_PxE_CONFIG *psConfig; @@ -2852,7 +3288,7 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR sDevPAddr; IMG_BOOL *pbValid; IMG_BOOL bValid; - IMG_BOOL bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; + IMG_BOOL bScratchBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; IMG_BOOL bNeedBacking = IMG_FALSE; PVRSRV_DEVICE_NODE *psDevNode; @@ -2866,8 +3302,8 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, #endif /*PDUMP*/ /* Validate the most essential parameters */ - PVR_LOG_GOTO_IF_INVALID_PARAM(psMMUContext, eError, e0); - PVR_LOG_GOTO_IF_INVALID_PARAM(psPMR, eError, e0); + PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext != NULL, "psMMUContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR != NULL, "psPMR"); psDevNode = psMMUContext->psPhysMemCtx->psDevNode; @@ -2876,15 +3312,10 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC) { psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR)); - PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, e0); + PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, ErrReturnError); pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL)); - if (pbValid == NULL) - { - /* Should allocation fail, clean-up here before exit */ - OSFreeMem(psDevPAddr); - PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); - } + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrFreePAddrMappingArray); } else { @@ -2894,6 +3325,8 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, /* Get the Device physical addresses of the pages we are trying to map * In the case of non indexed mapping we can get all addresses at once */ + + /* DevmemX callers */ if (NULL == paui32MapIndices) { eError = PMR_DevPhysAddr(psPMR, @@ -2901,8 +3334,14 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, ui32MapPageCount, ((IMG_DEVMEM_OFFSET_T) ui32PhysPgOffset << uiLog2HeapPageSize), psDevPAddr, - pbValid); - PVR_GOTO_IF_ERROR(eError, e1); + pbValid, + DEVICE_USE | MAPPING_USE); + /* Since 
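The SUPPORT_MMU_DEFERRED_FREE branch above frees page-table backing memory immediately when the device is powered off and otherwise hands it to the deferred-cleanup path, treating an unknown power state as ON. The fragment below restates that decision in isolation; the list type, the defer callback and the helper names are simplified stand-ins rather than driver API.

    #include <stdlib.h>

    struct mapping {
        struct mapping *next;
        void *pages;
    };

    enum power_state { POWER_ON, POWER_OFF, POWER_UNKNOWN };

    static void free_mapping_list(struct mapping **head)
    {
        while (*head != NULL) {
            struct mapping *m = *head;
            *head = m->next;
            free(m->pages);
            free(m);
        }
    }

    static void release_mappings(struct mapping **pending, enum power_state state,
                                 void (*defer)(struct mapping **))
    {
        /* Treat an unknown power state as ON, as the patch does, so memory the
         * (possibly running) MMU could still walk is never freed under it. */
        if (state == POWER_OFF)
            free_mapping_list(pending);   /* device cannot touch the tables: free now */
        else
            defer(pending);               /* hand the list to the deferred-free path */
    }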
offsets are always used and never indices, we are safe to + * receive retry here as this path won't have performed any mappings + * or overwrites thus far. Retry may be returned due to the combination of + * DEVICE_USE | MAPPING_USE + */ + PVR_GOTO_IF_ERROR(eError, ErrFreeValidArray); } /*Get the Page table level configuration */ @@ -2916,7 +3355,7 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, uiMappingFlags, &uiMMUProtFlags, psMMUContext); - PVR_GOTO_IF_ERROR(eError, e2); + PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig); /* Callback to get device specific protection flags */ if (psConfig->uiBytesPerEntry == 8) @@ -2934,44 +3373,23 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, } else { - PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, e2); + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, ErrPutPTConfig); } - uiDummyProtFlags = uiProtFlags; if (PMR_IsSparse(psPMR)) { /* We know there will not be 4G number of PMR's */ - bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(PMR_Flags(psPMR)); - if (bDummyBacking) - { - bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR)); - } - - if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags)) + bScratchBacking = PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(PMR_Flags(psPMR)); + if (bScratchBacking) { - /* Obtain non-coherent protection flags as we cannot have multiple coherent - virtual pages pointing to the same physical page so all dummy page - mappings have to be non-coherent even in a coherent allocation */ - eError = _MMU_ConvertDevMemFlags(IMG_FALSE, - uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT, - &uiMMUProtFlags, - psMMUContext); - PVR_GOTO_IF_ERROR(eError, e2); - - /* Callback to get device specific protection flags */ - if (psConfig->uiBytesPerEntry == 8) - { - uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); - } - else - { - /* We've already validated possible values of uiBytesPerEntry at the start of this function */ - PVR_ASSERT(psConfig->uiBytesPerEntry == 4); - uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); - } + bZeroBacking = PVRSRV_IS_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR)); } } +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PMRMarkForDeferFree(psPMR); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + OSLockAcquire(psMMUContext->hLock); for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) @@ -2981,6 +3399,7 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, IMG_DEVMEM_OFFSET_T uiNextSymName; #endif /*PDUMP*/ + /* Devmem Map/Change sparse callers */ if (NULL != paui32MapIndices) { uiPgOffset = paui32MapIndices[uiLoop]; @@ -2994,8 +3413,14 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, 1, uiPgOffset * uiPageSize, &sDevPAddr, - &bValid); - PVR_GOTO_IF_ERROR(eError, e3); + &bValid, + DEVICE_USE | MAPPING_USE); + /* PVRSRV_ERROR_RETRY with SUPPORT_LINUX_OSPAGE_MIGRATION feature + * enabled will not be returned here even with the use of + * DEVICE_USE | MAPPING_USE because the feature does not + * support migratable sparse PMRs. 
+ */ + PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); } else { @@ -3009,19 +3434,28 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, The default value of the entry is invalid so we don't need to mark it as such if the page wasn't valid, we just advance pass that address */ - if (bValid || bDummyBacking) + if (bValid || bScratchBacking) { if (!bValid) { if (bZeroBacking) { - sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; + eError = _MMU_GetBackingPage(psDevNode, + &sDevPAddr.uiAddr, + DEV_ZERO_PAGE); + PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", + ErrUnlockAndUnmapPages); + /* Ensure the zero back page PTE is read only */ uiDefProtFlags = uiProtFlagsReadOnly; } else { - sDevPAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; + eError = _MMU_GetBackingPage(psDevNode, + &sDevPAddr.uiAddr, + SCRATCH_PAGE); + PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", + ErrUnlockAndUnmapPages); } } else @@ -3042,16 +3476,16 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, if (0 > i32FeatureVal) break; - if (ui32BitLength > i32FeatureVal) + if (ui32BitLength > (IMG_UINT32) i32FeatureVal) { PVR_DPF((PVR_DBG_ERROR, - "%s Failed. The physical address bitlength (%d)" + "%s Failed. The physical address bitlength (%u)" " is greater than the chip can handle (%d).", __func__, ui32BitLength, i32FeatureVal)); - PVR_ASSERT(ui32BitLength <= i32FeatureVal); + PVR_ASSERT(ui32BitLength <= (IMG_UINT32) i32FeatureVal); eError = PVRSRV_ERROR_INVALID_PARAMS; - goto e3; + goto ErrUnlockAndUnmapPages; } } while (0); } @@ -3065,14 +3499,18 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], &uiSymbolicAddrOffset, &uiNextSymName); - PVR_ASSERT(eError == PVRSRV_OK); + PVR_LOG_IF_ERROR(eError, "PMR_PDumpSymbolicAddr"); } #endif /*PDUMP*/ psPrevLevel = psLevel; /* Calculate PT index and get new table descriptor */ - _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, - &psLevel, &uiPTEIndex); + if (!_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_MAPPING_NOT_FOUND, + ErrUnlockAndUnmapPages); + } if (psPrevLevel == psLevel) { @@ -3095,7 +3533,7 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, &psPrevLevel->sMemDesc.psMapping->sMemHandle, uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); - PVR_GOTO_IF_ERROR(eError, e3); + PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); } uiFlushStart = uiPTEIndex; @@ -3112,14 +3550,15 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, uiPTEIndex, psConfig, &sDevPAddr, + &sDevVAddr, IMG_FALSE, #if defined(PDUMP) (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName), - ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:DUMMY_PAGE)), + ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE_STR:SCRATCH_PAGE_STR)), (bValid)?uiSymbolicAddrOffset:0, #endif /*PDUMP*/ uiDefProtFlags); - PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", e3); + PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", ErrUnlockAndUnmapPages); if (bValid) { @@ -3145,7 +3584,7 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, &psLevel->sMemDesc.psMapping->sMemHandle, uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); - PVR_GOTO_IF_ERROR(eError, e3); + PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); } OSLockRelease(psMMUContext->hLock); @@ -3170,45 +3609,47 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, return PVRSRV_OK; -e3: 
- OSLockRelease(psMMUContext->hLock); - - if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags)) +ErrUnlockAndUnmapPages: + if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(uiMappingFlags)) { bNeedBacking = IMG_TRUE; } - MMU_UnmapPages(psMMUContext, - (bNeedBacking) ? uiMappingFlags : 0, - sDevVAddrBase, - uiLoop, - paui32MapIndices, - uiLog2HeapPageSize, - uiMappingFlags); -e2: + (void) MMU_UnmapPagesUnlocked(psMMUContext, + (bNeedBacking) ? uiMappingFlags : 0, + sDevVAddrBase, + uiLoop, + paui32MapIndices, + uiLog2HeapPageSize, + uiMappingFlags); + + OSLockRelease(psMMUContext->hLock); +ErrPutPTConfig: _MMU_PutPTConfig(psMMUContext, hPriv); -e1: +ErrFreeValidArray: if (psDevPAddr != asDevPAddr) { OSFreeMem(pbValid); + } +ErrFreePAddrMappingArray: + if (psDevPAddr != asDevPAddr) + { OSFreeMem(psDevPAddr); } -e0: +ErrReturnError: return eError; } -/* - MMU_UnmapPages - */ -void -MMU_UnmapPages(MMU_CONTEXT *psMMUContext, - PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, - IMG_DEV_VIRTADDR sDevVAddrBase, - IMG_UINT32 ui32PageCount, - IMG_UINT32 *pai32FreeIndices, - IMG_UINT32 uiLog2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) +static PVRSRV_ERROR +MMU_UnmapPagesUnlocked(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) { + PVRSRV_ERROR eError; IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; @@ -3221,44 +3662,60 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0; IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; IMG_DEV_PHYADDR sBackingPgDevPhysAddr; - IMG_BOOL bUnmap = IMG_TRUE, bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; - IMG_CHAR *pcBackingPageName = NULL; + IMG_BOOL bUnmap = IMG_TRUE, bScratchBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; #if defined(PDUMP) + const IMG_CHAR *pcBackingPageName = NULL; + PDUMPCOMMENT(psDevNode, "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, ui32PageCount, (IMG_UINT64)sDevVAddr.uiAddr, ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); #endif - bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMemAllocFlags); - bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiMemAllocFlags); + + PVR_ASSERT(OSLockIsLocked(psMMUContext->hLock)); + + bScratchBacking = PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(uiMemAllocFlags); + bZeroBacking = PVRSRV_IS_ZERO_BACKING_REQUIRED(uiMemAllocFlags); if (bZeroBacking) { - sBackingPgDevPhysAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; - pcBackingPageName = DEV_ZERO_PAGE; + /* Ensure the zero backing page has been created */ + eError = _MMU_GetBackingPage(psDevNode, + &sBackingPgDevPhysAddr.uiAddr, + DEV_ZERO_PAGE); + PVR_LOG_RETURN_IF_ERROR(eError, "_MMU_GetBackingPage (zero)"); } else { - sBackingPgDevPhysAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; - pcBackingPageName = DUMMY_PAGE; + /* Ensure the scratch backing page has been created */ + eError = _MMU_GetBackingPage(psDevNode, + &sBackingPgDevPhysAddr.uiAddr, + SCRATCH_PAGE); + PVR_LOG_RETURN_IF_ERROR(eError, "_MMU_GetBackingPage (scratch)"); + } + +#if defined(PDUMP) + if (bScratchBacking) + { + pcBackingPageName = bZeroBacking ? 
DEV_ZERO_PAGE_STR : SCRATCH_PAGE_STR; } +#endif + + bUnmap = (uiMappingFlags)? !bScratchBacking : IMG_TRUE; + + eError = _MMU_ConvertDevMemFlags(bUnmap, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext); + PVR_RETURN_IF_ERROR(eError); - bUnmap = (uiMappingFlags)? !bDummyBacking : IMG_TRUE; /* Get PT and address configs */ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig); - if (_MMU_ConvertDevMemFlags(bUnmap, - uiMappingFlags, - &uiMMUProtFlags, - psMMUContext) != PVRSRV_OK) - { - return; - } - uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE; /* Callback to get device specific protection flags */ @@ -3273,9 +3730,6 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize); } - - OSLockAcquire(psMMUContext->hLock); - /* Unmap page by page */ while (ui32Loop < ui32PageCount) { @@ -3288,8 +3742,11 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, psPrevLevel = psLevel; /* Calculate PT index and get new table descriptor */ - _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, - &psLevel, &uiPTEIndex); + if (!_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_MAPPING_NOT_FOUND, e0); + } if (psPrevLevel == psLevel) { @@ -3308,10 +3765,12 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, /* Flush if we moved to another psLevel, i.e. page table */ if (psPrevLevel != NULL) { - PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + PVRSRV_ERROR eError; + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, &psPrevLevel->sMemDesc.psMapping->sMemHandle, uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_LOG_IF_ERROR(eError, "PhysHeapPagesClean"); } uiFlushStart = uiPTEIndex; @@ -3322,21 +3781,20 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); /* Set the PT entry to invalid and poison it with a bad address */ - if (_SetupPTE(psMMUContext, - psLevel, - uiPTEIndex, - psConfig, - (bDummyBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr, - bUnmap, + eError = _SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + (bScratchBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr, + &sDevVAddr, + bUnmap, #if defined(PDUMP) - (bDummyBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL, - (bDummyBacking)? pcBackingPageName: NULL, - 0U, + (bScratchBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL, + pcBackingPageName, + 0U, #endif - (bZeroBacking)? uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK) - { - goto e0; - } + (bZeroBacking)? 
uiProtFlagsReadOnly: uiProtFlags); + PVR_GOTO_IF_ERROR(eError, e0); /* Check we haven't wrapped around */ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); @@ -3347,14 +3805,14 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, /* Flush the last level we touched */ if (psLevel != NULL) { - PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + PVRSRV_ERROR eError; + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, &psLevel->sMemDesc.psMapping->sMemHandle, uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_LOG_IF_ERROR(eError, "PhysHeapPagesClean"); } - OSLockRelease(psMMUContext->hLock); - _MMU_PutPTConfig(psMMUContext, hPriv); /* Flush TLB for PTs*/ @@ -3363,77 +3821,308 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, MMU_LEVEL_1, IMG_TRUE); - return; + return PVRSRV_OK; e0: _MMU_PutPTConfig(psMMUContext, hPriv); - PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table")); - PVR_ASSERT(0); - OSLockRelease(psMMUContext->hLock); - return; + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table " + "with error %u", __func__, eError)); + + return eError; } +/* + MMU_UnmapPages + */ PVRSRV_ERROR -MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, - IMG_DEV_VIRTADDR sDevVAddrBase, - const PMR *psPMR, - IMG_DEVMEM_SIZE_T uiSizeBytes, - PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, - IMG_UINT32 uiLog2HeapPageSize) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 uiCount, i; - IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize; +MMU_UnmapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) +{ + PVRSRV_ERROR eError; + + OSLockAcquire(psMMUContext->hLock); + + eError = MMU_UnmapPagesUnlocked(psMMUContext, + uiMappingFlags, + sDevVAddrBase, + ui32PageCount, + pai32FreeIndices, + uiLog2PageSize, + uiMemAllocFlags); + + OSLockRelease(psMMUContext->hLock); + + return eError; +} + +PVRSRV_ERROR +MMUX_MapVRangeToBackingPage(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32MapPageCount, + IMG_UINT32 uiLog2HeapPageSize) +{ + PVRSRV_ERROR eError; + + IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize); + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; IMG_UINT32 uiPTEIndex = 0; - IMG_UINT64 uiProtFlags; - MMU_PROTFLAGS_T uiMMUProtFlags = 0; + MMU_Levelx_INFO *psLevel = NULL; + MMU_Levelx_INFO *psPrevLevel = NULL; + + IMG_UINT32 uiLoop = 0; + + IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + IMG_BOOL bZeroBacking = PVRSRV_IS_ZERO_BACKING_REQUIRED(uiMappingFlags); + + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; IMG_HANDLE hPriv; + + IMG_DEV_PHYADDR sDevPAddr; + + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /*Get the Page table level configuration */ + _MMU_GetPTConfig(psMMUContext, + (IMG_UINT32) uiLog2HeapPageSize, + &psConfig, + &hPriv, + &psDevVAddrConfig); + + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig); + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + uiMMUProtFlags 
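MMU_UnmapPages is reduced above to a thin wrapper that takes the context lock and calls the new MMU_UnmapPagesUnlocked worker, so error paths that already hold the lock (for example inside MMU_MapPages) can unmap without re-acquiring it. A generic sketch of that split is shown here; the names and the counter are invented for illustration.

    #include <pthread.h>

    struct ctx {
        pthread_mutex_t lock;
        unsigned mapped;
    };

    /* Worker: caller must already hold ctx->lock. */
    static int unmap_range_unlocked(struct ctx *c, unsigned count)
    {
        if (count > c->mapped)
            return -1;
        c->mapped -= count;
        return 0;
    }

    /* Public entry point: takes the lock, then delegates to the worker. */
    static int unmap_range(struct ctx *c, unsigned count)
    {
        int ret;

        pthread_mutex_lock(&c->lock);
        ret = unmap_range_unlocked(c, count);
        pthread_mutex_unlock(&c->lock);
        return ret;
    }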
|= MMU_PROTFLAGS_READABLE; + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE), + uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE)); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, ErrPutPTConfig); + } + + if (bZeroBacking) + { + eError = _MMU_GetBackingPage(psDevNode, + &sDevPAddr.uiAddr, + DEV_ZERO_PAGE); + PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", + ErrPutPTConfig); + } + else + { + eError = _MMU_GetBackingPage(psDevNode, + &sDevPAddr.uiAddr, + SCRATCH_PAGE); + PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", + ErrPutPTConfig); + } + +#if defined(DEBUG) + { + IMG_INT32 i32FeatureVal = 0; + IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr); + + i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); + do { + /* i32FeatureVal can be negative for cases where this feature is undefined + * In that situation we need to bail out than go ahead with debug comparison */ + if (0 > i32FeatureVal) + break; + + if (ui32BitLength > (IMG_UINT32) i32FeatureVal) + { + PVR_DPF((PVR_DBG_ERROR, + "%s Failed. The physical address bitlength (%u)" + " is greater than the chip can handle (%d).", + __func__, ui32BitLength, i32FeatureVal)); + + PVR_ASSERT(ui32BitLength <= (IMG_UINT32) i32FeatureVal); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto ErrPutPTConfig; + } + } while (0); + } +#endif /*DEBUG*/ + + OSLockAcquire(psMMUContext->hLock); + + for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) + { + psPrevLevel = psLevel; + /* Calculate PT index and get new table descriptor */ + if (!_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_MAPPING_NOT_FOUND, + ErrUnlockAndUnmapPages); + } + + if (psPrevLevel == psLevel) + { + /* + * Sparse allocations may have page offsets which + * decrement as well as increment, so make sure we + * update the range we will flush correctly. + */ + if (uiPTEIndex > uiFlushEnd) + uiFlushEnd = uiPTEIndex; + else if (uiPTEIndex < uiFlushStart) + uiFlushStart = uiPTEIndex; + } + else + { + /* Flush if we moved to another psLevel, i.e. page table */ + if (psPrevLevel != NULL) + { + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + &psPrevLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); + } + + uiFlushStart = uiPTEIndex; + uiFlushEnd = uiFlushStart; + } + + HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr)); + + /* Set the PT entry with the specified address and protection flags */ + eError = _SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + &sDevPAddr, + &sDevVAddr, + IMG_FALSE, +#if defined(PDUMP) + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + bZeroBacking ? DEV_ZERO_PAGE_STR : SCRATCH_PAGE_STR, + 0, +#endif /*PDUMP*/ + bZeroBacking ? 
uiProtFlagsReadOnly : uiProtFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", ErrUnlockAndUnmapPages); + + sDevVAddr.uiAddr += uiPageSize; + } + + /* Flush the last level we touched */ + if (psLevel != NULL) + { + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_FALSE); + + return PVRSRV_OK; + +ErrUnlockAndUnmapPages: + (void) MMU_UnmapPagesUnlocked(psMMUContext, + 0, + sDevVAddrBase, + uiLoop, + NULL, + uiLog2HeapPageSize, + uiMappingFlags); + + OSLockRelease(psMMUContext->hLock); +ErrPutPTConfig: + _MMU_PutPTConfig(psMMUContext, hPriv); + return eError; +} + +PVRSRV_ERROR +MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSizeBytes, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_UINT32 uiLog2HeapPageSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; const MMU_PxE_CONFIG *psConfig; const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; - IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_HANDLE hPriv; + + IMG_UINT32 i, uiChunkStart, uiLastPTEIndex, uiNumEntriesToWrite; + IMG_UINT32 ui32PagesDone=0, uiPTEIndex=0; + + IMG_UINT8 uiAddrLog2Align, uiAddrShift, uiParityShift; + IMG_UINT64 uiAddrMask, uiProtFlags; + IMG_UINT32 uiBytesPerEntry; + IMG_UINT64 uiParityBit = 0; + IMG_BOOL bSetParity = IMG_FALSE; + IMG_DEV_VIRTADDR sDevVAddrRunning, sDevVAddrBaseCopy = sDevVAddrBase; + + IMG_UINT64* pui64LevelBase; + IMG_UINT32* pui32LevelBase; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + MMU_Levelx_INFO *psLevel = NULL; + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; - IMG_DEV_PHYADDR *psDevPAddr; - IMG_BOOL *pbValid; - IMG_UINT32 uiFlushStart = 0; - PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + IMG_UINT32 uiNumPages = uiSizeBytes >> uiLog2HeapPageSize; + + +#if defined(PVRSRV_ENABLE_HTB) + IMG_BOOL bHTBLog = + HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_PMRMAP) || + HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_TABLE); +#endif + + IMG_BOOL bValidateOrTweak = psDevNode->pfnValidateOrTweakPhysAddrs ? IMG_TRUE : IMG_FALSE; #if defined(PDUMP) IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; IMG_UINT32 ui32MappedCount = 0; - PDUMPCOMMENT(psDevNode, "Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes); + IMG_DEV_VIRTADDR sDevVAddrRunningPdump; + PDUMPCOMMENT(psDevNode, "Wire up Page Table entries to point to the Data Pages (%d bytes)", uiNumPages << uiLog2HeapPageSize); #endif /*PDUMP*/ - /* We should verify the size and contiguity when supporting variable page size */ - PVR_ASSERT (psMMUContext != NULL); PVR_ASSERT (psPMR != NULL); - - /* Allocate memory for page-frame-numbers and validity states, - N.B. 
assert could be triggered by an illegal uiSizeBytes */ - uiCount = uiSizeBytes >> uiLog2HeapPageSize; - PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes); - if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC) - { - psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR)); - PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, return_error); - - pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL)); - if (pbValid == NULL) - { - /* Should allocation fail, clean-up here before exit */ - OSFreeMem(psDevPAddr); - PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_paddr_array); - } - } - else - { - psDevPAddr = asDevPAddr; - pbValid = abValid; - } + PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiNumPages << uiLog2HeapPageSize == uiSizeBytes); /* Get general PT and address configs */ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize, @@ -3445,13 +4134,23 @@ MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, psMMUContext); PVR_GOTO_IF_ERROR(eError, put_mmu_context); - /* Callback to get device specific protection flags */ + uiAddrLog2Align = psConfig->uiAddrLog2Align; + uiAddrShift = psConfig->uiAddrShift; + uiAddrMask = psConfig->uiAddrMask; + uiBytesPerEntry = psConfig->uiBytesPerEntry; - if (psConfig->uiBytesPerEntry == 8) + bSetParity = psConfig->uiParityBitMask; + uiParityShift = psConfig->uiParityBitShift; + + sDevVAddrRunning.uiAddr = sDevVAddrBase.uiAddr; +#if defined(PDUMP) + sDevVAddrRunningPdump.uiAddr = sDevVAddrBase.uiAddr; +#endif + if (uiBytesPerEntry == 8) { uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); } - else if (psConfig->uiBytesPerEntry == 4) + else if (uiBytesPerEntry == 4) { uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); } @@ -3461,113 +4160,201 @@ MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, } - /* "uiSize" is the amount of contiguity in the underlying - page. 
Normally this would be constant for the system, but, - that constant needs to be communicated, in case it's ever - different; caller guarantees that PMRLockSysPhysAddr() has - already been called */ - eError = PMR_DevPhysAddr(psPMR, - uiLog2HeapPageSize, - uiCount, - 0, - psDevPAddr, - pbValid); - PVR_GOTO_IF_ERROR(eError, put_mmu_context); +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PMRMarkForDeferFree(psPMR); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ OSLockAcquire(psMMUContext->hLock); - _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, - &psLevel, &uiPTEIndex); - uiFlushStart = uiPTEIndex; - - /* Map in all pages of that PMR page by page*/ - for (i=0, uiCount=0; uiCount < uiSizeBytes; i++) + do { -#if defined(DEBUG) + if (!_MMU_GetPTInfo(psMMUContext, sDevVAddrBase, psDevVAddrConfig, + &psLevel, &uiPTEIndex)) { - IMG_INT32 i32FeatureVal = 0; - IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr); - i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); - do { - if (0 > i32FeatureVal) - break; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_MAPPING_NOT_FOUND, + unlock_mmu_context); + } + + pui64LevelBase = (IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr; + pui32LevelBase = (IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr; - if (ui32BitLength > i32FeatureVal) + uiLastPTEIndex = MIN(uiPTEIndex + uiNumPages - ui32PagesDone, psDevVAddrConfig->uiNumEntriesPT); + uiNumEntriesToWrite = uiLastPTEIndex - uiPTEIndex; + + for (uiChunkStart = 0; uiChunkStart < uiNumEntriesToWrite; uiChunkStart += PMR_MAX_TRANSLATION_STACK_ALLOC) + { + IMG_UINT32 uiNumPagesInBlock = MIN(uiNumEntriesToWrite - uiChunkStart, PMR_MAX_TRANSLATION_STACK_ALLOC); + + /* With feature SUPPORT_LINUX_OSPAGE_MIGRATION: + * This call is allowed to fail with retry part way through mapping. + * Since this path is only used in Devicemem for fully contiguous PMR mappings + * we know a PMR is always either fully mapped or not at all and cannot be + * overwritten. On failure due to migrate we can unmap our progress and retry later. + * Retry may be returned due to the combination of DEVICE_USE | MAPPING_USE. + */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + uiNumPagesInBlock, + (IMG_UINT64) (ui32PagesDone + uiChunkStart) << uiLog2HeapPageSize, + asDevPAddr, + abValid, + DEVICE_USE | MAPPING_USE); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + + if (bValidateOrTweak) + { + for (i=0; ihLock); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, put_mmu_context); + PVRSRV_ERROR eError = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode, + psMMUContext->psDevAttrs, + &asDevPAddr[i].uiAddr); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); } - } while (0); - } + } + +#if defined(DEBUG) + { + IMG_INT32 i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); + + if (i32FeatureVal >= 0) + { + for (i=0; i (IMG_UINT32) i32FeatureVal) + { + PVR_DPF((PVR_DBG_ERROR, + "%s Failed. 
The physical address bitlength (%u)" + " is greater than the chip can handle (%d).", + __func__, ui32BitLength, i32FeatureVal)); + + PVR_ASSERT(ui32BitLength <= (IMG_UINT32) i32FeatureVal); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_mmu_context); + } + } + } + } #endif /*DEBUG*/ -#if defined(PDUMP) - { - IMG_DEVMEM_OFFSET_T uiNextSymName; - - eError = PMR_PDumpSymbolicAddr(psPMR, uiCount, - sizeof(aszMemspaceName), &aszMemspaceName[0], - sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], - &uiSymbolicAddrOffset, - &uiNextSymName); - PVR_ASSERT(eError == PVRSRV_OK); - ui32MappedCount++; - } -#endif /*PDUMP*/ - HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP, - HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), - HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr)); + if (uiBytesPerEntry == 8) + { + for (i=0; i> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags | uiParityBit; + } + } + else if (uiBytesPerEntry == 4) + { + for (i=0; i> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags; + PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); + PVR_ASSERT(!bSetParity); +#endif + + pui32LevelBase[uiPTEIndex + uiChunkStart + i] = + (((asDevPAddr[i].uiAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags; + } + } - /* Set the PT entry with the specified address and protection flags */ - eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex, - psConfig, &psDevPAddr[i], IMG_FALSE, #if defined(PDUMP) - aszMemspaceName, - aszSymbolicAddress, - uiSymbolicAddrOffset, + for (i=0; iuiParityBitMask ? _GetParityBit(sDevVAddrRunningPdump.uiAddr ^ 0ULL) : 0; + + eError = PMR_PDumpSymbolicAddr(psPMR, (ui32PagesDone + uiChunkStart + i) << uiLog2HeapPageSize, + sizeof(aszMemspaceName), &aszMemspaceName[0], + sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], + &uiSymbolicAddrOffset, + &uiNextSymName); + PVR_LOG_IF_ERROR(eError, "PMR_PDumpSymbolicAddr"); + ui32MappedCount++; + + PDumpMMUDumpPxEntries(psDevNode, + MMU_LEVEL_1, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psLevel->sMemDesc.pvCpuVAddr, + psLevel->sMemDesc.sDevPAddr, + uiPTEIndex + uiChunkStart + i, + 1, + aszMemspaceName, + aszSymbolicAddress, + uiSymbolicAddrOffset, + uiBytesPerEntry, + uiAddrLog2Align, + uiAddrShift, + uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + uiParityBit, + psConfig->uiParityBitShift, + psConfig->uiParityBitMask, + psMMUContext->psDevAttrs->eMMUType); + + sDevVAddrRunningPdump.uiAddr += (1 << uiLog2HeapPageSize); + } #endif /*PDUMP*/ - uiProtFlags); - PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); - sDevVAddr.uiAddr += uiPageSize; - uiCount += uiPageSize; +#if defined(PVRSRV_ENABLE_HTB) + if (bHTBLog) + { + for (i=0; i> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags; + sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (ui32PagesDone + uiChunkStart + i) * (1 << uiLog2HeapPageSize); - /* Calculate PT index and get new table descriptor */ - if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes)) - { - uiPTEIndex++; - } - else - { - eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, - &psLevel->sMemDesc.psMapping->sMemHandle, - uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, - (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); - PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + if (bSetParity) + { + ui64PxE64 |= _GetParityBit(sDevVAddr.uiAddr ^ asDevPAddr[i].uiAddr) << uiParityShift; + } + 
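/*
 * A minimal sketch of the parity term used just above, assuming _GetParityBit()
 * reduces its 64-bit argument (here the XOR of the device virtual and physical
 * addresses) to a single 0/1 parity value; the real implementation is outside
 * this hunk and the helper name below is illustrative only.
 */
static inline IMG_UINT64 ExampleParityBit(IMG_UINT64 ui64Value)
{
	/* XOR-fold all 64 bits down to bit 0: 1 for an odd number of set bits,
	 * 0 for an even number. */
	ui64Value ^= ui64Value >> 32;
	ui64Value ^= ui64Value >> 16;
	ui64Value ^= ui64Value >> 8;
	ui64Value ^= ui64Value >> 4;
	ui64Value ^= ui64Value >> 2;
	ui64Value ^= ui64Value >> 1;
	return ui64Value & 1ULL;
}
/* Used in place of _GetParityBit() in the expression above (illustrative):
 *   ExampleParityBit(sDevVAddr.uiAddr ^ asDevPAddr[i].uiAddr) << uiParityShift
 */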
HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), + HTBLOG_U64_BITS_HIGH(asDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(asDevPAddr[i].uiAddr)); - _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, - &psLevel, &uiPTEIndex); - uiFlushStart = uiPTEIndex; + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiPTEIndex + uiChunkStart + i, MMU_LEVEL_1, + HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), + IMG_FALSE); + } + } +#endif } - } - OSLockRelease(psMMUContext->hLock); + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiPTEIndex * uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiNumEntriesToWrite) * uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + sDevVAddrBase.uiAddr += uiNumEntriesToWrite * (1 << uiLog2HeapPageSize); + ui32PagesDone += uiNumEntriesToWrite; - _MMU_PutPTConfig(psMMUContext, hPriv); + } while (ui32PagesDone < uiNumPages); - if (psDevPAddr != asDevPAddr) - { - OSFreeMem(pbValid); - OSFreeMem(psDevPAddr); - } + OSLockRelease(psMMUContext->hLock); + +#if defined(PDUMP) + PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, uiNumPages); +#endif /*PDUMP*/ /* Flush TLB for PTs*/ psDevNode->pfnMMUCacheInvalidate(psDevNode, @@ -3575,93 +4362,93 @@ MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, MMU_LEVEL_1, IMG_FALSE); -#if defined(PDUMP) - PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, i); -#endif /*PDUMP*/ + _MMU_PutPTConfig(psMMUContext, hPriv); return PVRSRV_OK; unlock_mmu_context: + /* Unmap starting from the address passed as an argument. */ + (void) MMU_UnmapPMRFastUnlocked(psMMUContext, + sDevVAddrBaseCopy, + uiNumPages, + uiLog2HeapPageSize); OSLockRelease(psMMUContext->hLock); - MMU_UnmapPMRFast(psMMUContext, - sDevVAddrBase, - uiSizeBytes >> uiLog2HeapPageSize, - uiLog2HeapPageSize); - put_mmu_context: _MMU_PutPTConfig(psMMUContext, hPriv); - if (pbValid != abValid) - { - OSFreeMem(pbValid); - } - -free_paddr_array: - if (psDevPAddr != asDevPAddr) - { - OSFreeMem(psDevPAddr); - } - -return_error: - PVR_ASSERT(eError == PVRSRV_OK); return eError; } -/* - MMU_UnmapPages - */ -void -MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, - IMG_DEV_VIRTADDR sDevVAddrBase, - IMG_UINT32 ui32PageCount, - IMG_UINT32 uiLog2PageSize) +static PVRSRV_ERROR +MMU_UnmapPMRFastUnlocked(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize) { - IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiPTEIndex = 0, uiLastPTEIndex = 0, ui32PagesDone=0, i, uiNumEntriesToWrite; IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; MMU_Levelx_INFO *psLevel = NULL; IMG_HANDLE hPriv; + void* pvPTStart; const MMU_PxE_CONFIG *psConfig; const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; IMG_UINT64 uiProtFlags = 0; MMU_PROTFLAGS_T uiMMUProtFlags = 0; IMG_UINT64 uiEntry = 0; - IMG_UINT32 uiFlushStart = 0; - PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -#if defined(PDUMP) - PDUMPCOMMENT(psDevNode, - "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, - ui32PageCount, - (IMG_UINT64)sDevVAddr.uiAddr, - ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); + IMG_UINT64 uiParityBit = 0; +#if 
defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + IMG_DEV_VIRTADDR sDevVAddrStartOfTable; + IMG_UINT32 uiParityPatternIdx; + IMG_BOOL bSetParity = IMG_FALSE; + IMG_DEV_VIRTADDR sDevVAddrRunning; + IMG_UINT64 ui64BadPhysAddrParity = 0; #endif + IMG_UINT64 ui64BadPhysAddr = 0; + +#if defined(PVRSRV_ENABLE_HTB) + IMG_BOOL bLog; +#endif + + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + PVR_ASSERT(OSLockIsLocked(psMMUContext->hLock)); + + eError = _MMU_ConvertDevMemFlags(IMG_TRUE, + 0, + &uiMMUProtFlags, + psMMUContext); + PVR_RETURN_IF_ERROR(eError); /* Get PT and address configs */ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig); - if (_MMU_ConvertDevMemFlags(IMG_TRUE, - 0, - &uiMMUProtFlags, - psMMUContext) != PVRSRV_OK) - { - return; - } +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + bSetParity = psConfig->uiParityBitMask != 0; + sDevVAddrRunning.uiAddr = sDevVAddrBase.uiAddr; +#endif /* Callback to get device specific protection flags */ if (psConfig->uiBytesPerEntry == 8) { uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); - - /* Fill the entry with a bad address but leave space for protection flags */ - uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags; + ui64BadPhysAddr = gsBadDevPhyAddr.uiAddr & ~(psConfig->uiProtMask | psConfig->uiParityBitMask); + /* Fill the entry with a bad address but leave space for protection flags and parity bit */ + uiEntry = ui64BadPhysAddr | uiProtFlags; +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + ui64BadPhysAddrParity = _GetParityBit(ui64BadPhysAddr) << psConfig->uiParityBitShift; +#endif } else if (psConfig->uiBytesPerEntry == 4) { uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); - +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + PVR_ASSERT(!bSetParity); +#endif /* Fill the entry with a bad address but leave space for protection flags */ uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags; } @@ -3673,37 +4460,77 @@ MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, goto e0; } - OSLockAcquire(psMMUContext->hLock); - - _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, - &psLevel, &uiPTEIndex); - uiFlushStart = uiPTEIndex; +#if defined(PVRSRV_ENABLE_HTB) + bLog = HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_UNMAP) || + HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_TABLE); +#endif - /* Unmap page by page and keep the loop as quick as possible. - * Only use parts of _SetupPTE that need to be executed. 
*/ - while (ui32Loop < ui32PageCount) + do { + if (!_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_MAPPING_NOT_FOUND, e0); + } + +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + sDevVAddrStartOfTable.uiAddr = sDevVAddr.uiAddr - uiPTEIndex * (1 << uiLog2PageSize); + uiParityPatternIdx = _GetParityBit(sDevVAddrStartOfTable.uiAddr); +#endif + pvPTStart = psLevel->sMemDesc.pvCpuVAddr; + + uiLastPTEIndex = MIN(uiPTEIndex + ui32PageCount - ui32PagesDone, psDevVAddrConfig->uiNumEntriesPT); + uiNumEntriesToWrite = uiLastPTEIndex - uiPTEIndex; - /* Set the PT entry to invalid and poison it with a bad address */ if (psConfig->uiBytesPerEntry == 8) { - ((IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry; + for (i=uiPTEIndex; ipsDevAttrs->pui64PrecomputedAllocParity[uiParityPatternIdx][i] ^ ui64BadPhysAddrParity; + sDevVAddrRunning.uiAddr += (1 << uiLog2PageSize); + } +#endif + ((IMG_UINT64*)pvPTStart)[i] = uiEntry | uiParityBit; + } } else { +#if defined(PVRSRV_NEED_PVR_ASSERT) PVR_ASSERT(psConfig->uiBytesPerEntry == 4); - ((IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry; +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + PVR_ASSERT(!bSetParity); +#endif +#endif + for (i=uiPTEIndex; isMemDesc.pvCpuVAddr, psLevel->sMemDesc.sDevPAddr, uiPTEIndex, - 1, + uiNumEntriesToWrite, NULL, NULL, 0, @@ -3723,31 +4550,22 @@ MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, psConfig->uiProtMask, psConfig->uiValidEnMask, 0, + 0, /* Unused - Parity bit values are taken directly from PTE memory when bad phys addr is used */ + 0, /* Unused */ + 0, /* Unused */ psMMUContext->psDevAttrs->eMMUType); #endif /*PDUMP*/ - sDevVAddr.uiAddr += uiPageSize; - ui32Loop++; - - /* Calculate PT index and get new table descriptor */ - if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount)) - { - uiPTEIndex++; - } - else - { - PhysHeapPagesClean(psDevNode->psMMUPhysHeap, - &psLevel->sMemDesc.psMapping->sMemHandle, - uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, - (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiPTEIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiNumEntriesToWrite) * psConfig->uiBytesPerEntry); + PVR_LOG_IF_ERROR(eError, "PhysHeapPagesClean"); - _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, - &psLevel, &uiPTEIndex); - uiFlushStart = uiPTEIndex; - } - } + sDevVAddr.uiAddr += uiNumEntriesToWrite * uiPageSize; + ui32PagesDone += uiNumEntriesToWrite; - OSLockRelease(psMMUContext->hLock); + } while (ui32PagesDone < ui32PageCount); _MMU_PutPTConfig(psMMUContext, hPriv); @@ -3757,13 +4575,178 @@ MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, MMU_LEVEL_1, IMG_TRUE); - return; + return PVRSRV_OK; e0: - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__)); - PVR_ASSERT(0); - return; + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table with error %u", + __func__, eError)); + + return eError; +} + +/* + MMU_UnmapPMRFast + */ +PVRSRV_ERROR +MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize) +{ + PVRSRV_ERROR eError; + + OSLockAcquire(psMMUContext->hLock); + + eError = MMU_UnmapPMRFastUnlocked(psMMUContext, + sDevVAddrBase, + ui32PageCount, + uiLog2PageSize); + + 
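/*
 * The error path of MMU_MapPMRFast() above calls MMU_UnmapPMRFastUnlocked()
 * while psMMUContext->hLock is already held, whereas the public
 * MMU_UnmapPMRFast() wrapper takes and releases the lock itself. A minimal
 * sketch of the same locked/unlocked pairing, using a POSIX mutex purely as a
 * stand-in for the OS lock (names are illustrative, not from the driver):
 */
#include <pthread.h>

static pthread_mutex_t sExampleLock = PTHREAD_MUTEX_INITIALIZER;

/* Helper: caller must already hold sExampleLock (the driver states the
 * equivalent requirement with PVR_ASSERT(OSLockIsLocked(...))). */
static int ExampleTeardownUnlocked(int iPageCount)
{
	return (iPageCount > 0) ? 0 : -1;
}

/* Public entry point: acquires the lock, then reuses the unlocked helper. */
static int ExampleTeardown(int iPageCount)
{
	int iRet;

	pthread_mutex_lock(&sExampleLock);
	iRet = ExampleTeardownUnlocked(iPageCount);
	pthread_mutex_unlock(&sExampleLock);

	return iRet;
}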
OSLockRelease(psMMUContext->hLock); + + return eError; +} + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +PVRSRV_ERROR +MMU_RemapPage(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 uiLog2HeapPageSize, + PMR *psOriginPMR, + IMG_UINT32 ui32LogicalPgOffset) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDevNode; + const MMU_PxE_CONFIG *psConfig; + IMG_HANDLE hPriv; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT32 uiPTEIndex = 0; + MMU_Levelx_INFO *psLevel = NULL; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + + IMG_UINT64 uiProtFlags = 0; + IMG_UINT64 ui64PrevPxE64; + IMG_UINT32 ui32PrevPxE32; + + IMG_DEV_PHYADDR sTargetDevPAddr; + IMG_BOOL bValid = IMG_FALSE; + + /* Validate the most essential parameters */ + PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext != NULL, "psMMUContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psOriginPMR != NULL, "psPMR"); + + psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext); + PVR_RETURN_IF_ERROR(eError); + + _MMU_GetPTConfig(psMMUContext, uiLog2HeapPageSize, &psConfig, &hPriv, &psDevVAddrConfig); + + if (!_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex)) + { + PVR_LOG_GOTO_WITH_ERROR("_MMU_GetPTInfo", eError, PVRSRV_ERROR_MAPPING_NOT_FOUND, + ErrPutPTConfig); + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Px = psLevel->sMemDesc.pvCpuVAddr; + ui32PrevPxE32 = pui32Px[uiPTEIndex]; + + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + } + else if (psConfig->uiBytesPerEntry == 8) + { + IMG_UINT64 *pui64Px = psLevel->sMemDesc.pvCpuVAddr; + ui64PrevPxE64 = pui64Px[uiPTEIndex]; + + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + } + + OSLockAcquire(psMMUContext->hLock); + + eError = PMR_DevPhysAddr(psOriginPMR, + uiLog2HeapPageSize, + 1, + ((IMG_DEVMEM_OFFSET_T) ui32LogicalPgOffset << uiLog2HeapPageSize), + &sTargetDevPAddr, + &bValid, + DEVICE_USE); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", ErrUnlockContext); + + if (!bValid) + { + PVR_LOG_GOTO_WITH_ERROR("PMR_DevPhysAddr", + eError, + PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE, + ErrUnlockContext); + } + + /* Set the PT entry with the specified address and protection flags */ + eError = _SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + &sTargetDevPAddr, + &sDevVAddr, + IMG_FALSE, +#if defined(PDUMP) + /*PDUMP Not supported for remap */ + NULL, + NULL, + 0, +#endif /*PDUMP*/ + uiProtFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", ErrUnlockContext); + + eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiPTEIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + psConfig->uiBytesPerEntry); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapPagesClean", ErrUnlockAndUnmapPages); + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs, this queues invalidation, actioning of this + * should be ensured complete by the caller before further GPU + * workloads continue. 
+ */ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_FALSE); + + return PVRSRV_OK; + +ErrUnlockAndUnmapPages: + /* Reset old PTE in place */ + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Px = psLevel->sMemDesc.pvCpuVAddr; + pui32Px[uiPTEIndex] = ui32PrevPxE32; + } + else if (psConfig->uiBytesPerEntry == 8) + { + IMG_UINT64 *pui64Px = psLevel->sMemDesc.pvCpuVAddr; + pui64Px[uiPTEIndex] = ui64PrevPxE64; + } + +ErrUnlockContext: + OSLockRelease(psMMUContext->hLock); + +ErrPutPTConfig: + _MMU_PutPTConfig(psMMUContext, hPriv); + + return eError; } +#endif /* MMU_AcquireBaseAddr @@ -3777,6 +4760,12 @@ MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr) return PVRSRV_ERROR_INVALID_PARAMS; } + if (_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) + { + PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED, + "MMU_AcquireBaseAddr"); + } + *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr; return PVRSRV_OK; @@ -3794,6 +4783,12 @@ MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr) return PVRSRV_ERROR_INVALID_PARAMS; } + if (_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) + { + PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED, + "MMU_AcquireCPUBaseAddr"); + } + *ppvCPUVAddr = psMMUContext->sBaseLevelInfo.sMemDesc.pvCpuVAddr; return PVRSRV_OK; @@ -3809,7 +4804,7 @@ MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext) } /* - MMU_AppendCacheFlags, MMU_ExchangeCacheFlags + MMU_AppendCacheFlags */ void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32AppendFlags) @@ -3824,19 +4819,34 @@ void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32AppendFlags) OSAtomicOr(&psMMUContext->sCacheFlags, (IMG_INT)ui32AppendFlags); } -IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags) +/* + MMU_GetAndResetCacheFlags +*/ +IMG_UINT32 MMU_GetAndResetCacheFlags(MMU_CONTEXT *psMMUContext) { - PVR_ASSERT(psMMUContext != NULL); + IMG_UINT32 uiFlags; + PVR_ASSERT(psMMUContext != NULL); if (psMMUContext == NULL) { return 0; } - return (IMG_UINT32)OSAtomicExchange(&psMMUContext->sCacheFlags, (IMG_INT)ui32NewCacheFlags); + uiFlags = (IMG_UINT32) OSAtomicExchange(&psMMUContext->sCacheFlags, 0); + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + /* kick cleanup thread to free all zombie PMRs residing in the device's + * zombie list */ + if (PMRQueueZombiesForCleanup(psMMUContext->psPhysMemCtx->psDevNode)) + { + BITMASK_SET(uiFlags, RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL | RGXFWIF_MMUCACHEDATA_FLAGS_PT); + } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + + return uiFlags; } -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) /* MMU_GetOSids */ @@ -3846,12 +4856,88 @@ void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 * *pui32OSid = psMMUContext->psPhysMemCtx->ui32OSid; *pui32OSidReg = psMMUContext->psPhysMemCtx->ui32OSidReg; *pbOSidAxiProt = psMMUContext->psPhysMemCtx->bOSidAxiProt; - - return; } #endif +static IMG_BOOL _MMUGetPxEFaultLevelData(const MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR *psDevVAddr, + const MMU_PxE_CONFIG *psConfig, + void *pvCpuVAddr, + IMG_UINT32 ui32PxIndex, + MMU_LEVEL_DATA *psMMULevelDataOut, + IMG_UINT32 *ui32Log2PageSizeOut) +{ + static const IMG_CHAR *apszMMUValidStr[4] = { + /*--*/ "not valid", + /*-V*/ "valid", + /*P-*/ "pending", + /*PV*/ "inconsistent (pending and valid)" }; + #define _MMU_VALID_STR(_cfg, _entry) \ + (apszMMUValidStr[((((_entry)&(_cfg->uiPendingEnMask))!=0) << 1)| \ + 
((((_entry)&(_cfg->uiValidEnMask))!=0) << 0)]) + #define _MMU_MASK_VALID_FOR_32BITS(_cfg) \ + ((_cfg->uiPendingEnMask | _cfg->uiValidEnMask) <= 0xFFFFFFFF) + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = pvCpuVAddr; + + psMMULevelDataOut->ui64Address = pui32Ptr[ui32PxIndex]; + + /* Check if we are dealing with a PxE entry where these bits are in the first 32 bits */ + /* But if we know it is a 4 byte entry, why check this? */ + if (_MMU_MASK_VALID_FOR_32BITS(psConfig)) + { + psMMULevelDataOut->psDebugStr = _MMU_VALID_STR(psConfig, pui32Ptr[ui32PxIndex] & psConfig->uiProtMask); + } + else + { + psMMULevelDataOut->psDebugStr = ""; + PVR_DPF((PVR_DBG_ERROR, "Invalid %sE masks for 32-bit entry", psConfig->pszPxLevelStr)); + return IMG_FALSE; + } + + if (ui32Log2PageSizeOut != NULL) + { + if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PxIndex], ui32Log2PageSizeOut) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to get the page size from the %sE", psConfig->pszPxLevelStr)); + return IMG_FALSE; + } + } + } + else + { + IMG_UINT64 *pui64Ptr = pvCpuVAddr; + + psMMULevelDataOut->ui64Address = pui64Ptr[ui32PxIndex]; + psMMULevelDataOut->psDebugStr = _MMU_VALID_STR(psConfig, pui64Ptr[ui32PxIndex] & psConfig->uiProtMask); + + if (ui32Log2PageSizeOut != NULL) + { + /* MMU_VERSION >= 4 */ + if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL) + { + if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, ui32Log2PageSizeOut) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to get the page size from device VA")); + return IMG_FALSE; + } + } + /* MMU_VERSION < 4 */ + else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PxIndex], ui32Log2PageSizeOut) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to get the page size from the %sE", psConfig->pszPxLevelStr)); + return IMG_FALSE; + } + } + } + + return IMG_TRUE; +} + /* MMU_CheckFaultAddress */ @@ -3859,24 +4945,8 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, IMG_DEV_VIRTADDR *psDevVAddr, MMU_FAULT_DATA *psOutFaultData) { - /* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. 
*/ -#if defined(SUPPORT_RGX) -# define MMU_MASK_VALID_FOR_32BITS(level) \ - ((RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN | \ - RGX_MMUCTRL_##level##_DATA_VALID_EN) <= 0xFFFFFFFF) -# define MMU_VALID_STR(entry,level) \ - (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \ - ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)]) - static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid", - /*-V*/ "valid", - /*P-*/ "pending", - /*PV*/ "inconsistent (pending and valid)"}; -#else -# define MMU_MASK_VALID_FOR_32BITS(level) 0 -# define MMU_VALID_STR(entry,level) ("??") -#endif MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; - MMU_LEVEL eMMULevel = psDevAttrs->eTopLevel; + MMU_LEVEL eMMULevel = psDevAttrs->psBaseConfig->ePxLevel; const MMU_PxE_CONFIG *psConfig; const MMU_PxE_CONFIG *psMMUPDEConfig; const MMU_PxE_CONFIG *psMMUPTEConfig; @@ -3912,7 +4982,7 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, psLevel = &psMMUContext->sBaseLevelInfo; psConfig = psDevAttrs->psBaseConfig; - sMMUFaultData.eTopLevel = psDevAttrs->eTopLevel; + sMMUFaultData.eTopLevel = psDevAttrs->psBaseConfig->ePxLevel; sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM; @@ -3936,29 +5006,10 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, break; } - if (psConfig->uiBytesPerEntry == 4) - { - IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; - - psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex]; - if (MMU_MASK_VALID_FOR_32BITS(PC)) - { - psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PCIndex] & psConfig->uiProtMask, PC); - } - else - { - psMMULevelData->psDebugStr = ""; - PVR_LOG(("Invalid RGX_MMUCTRL_PC_DATA_ENTRY mask for 32-bit entry")); - } - } - else - { - IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; - - psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex]; - psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PCIndex] & psConfig->uiProtMask, PC); - - } + (void) _MMUGetPxEFaultLevelData(psMMUContext, psDevVAddr, psConfig, + psLevel->sMemDesc.pvCpuVAddr, + ui32PCIndex, + psMMULevelData, NULL); psLevel = psLevel->apsNextLevel[ui32PCIndex]; if (!psLevel) @@ -3988,52 +5039,16 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, break; } - if (psConfig->uiBytesPerEntry == 4) - { - IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; - - psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex]; - if (MMU_MASK_VALID_FOR_32BITS(PD)) - { - psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD); - } - else - { - psMMULevelData->psDebugStr = ""; - PVR_LOG(("Invalid RGX_MMUCTRL_PD_DATA_ENTRY mask for 32-bit entry")); - } - - if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK) - { - PVR_LOG(("Failed to get the page size from the PDE")); - } - } - else - { - IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; - - psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex]; - psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD); - - if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL) - { - /* MMU_VERSION >= 4 */ - if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, &ui32Log2PageSize) != PVRSRV_OK) - { - PVR_LOG(("Failed to get the page size from the virtual address")); - } - } - else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK) - { - PVR_LOG(("Failed to get the page size from the PDE")); - } - } + (void) 
_MMUGetPxEFaultLevelData(psMMUContext, psDevVAddr, psConfig, + psLevel->sMemDesc.pvCpuVAddr, + ui32PDIndex, + psMMULevelData, &ui32Log2PageSize); /* - We assumed the page size was 4K, now we have the actual size - from the PDE we can confirm if our assumption was correct. - Until now it hasn't mattered as the PC and PD are the same - regardless of the page size + We assumed the page size was 4K, now we have the actual size + from the PDE we can confirm if our assumption was correct. + Until now it hasn't mattered as the PC and PD are the same + regardless of the page size */ if (ui32Log2PageSize != 12) { @@ -4052,6 +5067,7 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, break; } } + psLevel = psLevel->apsNextLevel[ui32PDIndex]; if (!psLevel) { @@ -4080,37 +5096,17 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, break; } - if (psConfig->uiBytesPerEntry == 4) - { - IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; - - psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex]; - if (MMU_MASK_VALID_FOR_32BITS(PT)) - { - psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT); - } - else - { - psMMULevelData->psDebugStr = ""; - PVR_LOG(("Invalid RGX_MMUCTRL_PT_DATA_ENTRY mask for 32-bit entry")); - } - } - else - { - IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; - - psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex]; - psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT); - - } - goto e1; + (void) _MMUGetPxEFaultLevelData(psMMUContext, psDevVAddr, psConfig, + psLevel->sMemDesc.pvCpuVAddr, + ui32PTIndex, + psMMULevelData, NULL); + break; } PVR_LOG(("Unsupported MMU setup: %d", eMMULevel)); break; } -e1: /* Put the page size data back */ psDevAttrs->pfnPutPageSizeConfiguration(hPriv); OSLockRelease(psMMUContext->hLock); @@ -4121,7 +5117,6 @@ void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext, const MMU_PxE_CONFIG *psConfig, const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, - IMG_UINT32 uiLog2PageSize, IMG_DEV_VIRTADDR sDevVAddr, IMG_BOOL *pbStatusOut) { @@ -4132,7 +5127,7 @@ static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext, OSLockAcquire(psMMUContext->hLock); - switch (psMMUContext->psDevAttrs->eTopLevel) + switch (psMMUContext->psDevAttrs->psBaseConfig->ePxLevel) { case MMU_LEVEL_3: uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); @@ -4189,7 +5184,6 @@ IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, MMU_GetVDevAddrPTE(psMMUContext, psConfig, psDevVAddrConfig, - uiLog2PageSize, sDevVAddr, &bStatus); @@ -4324,6 +5318,24 @@ PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, } #endif +#if defined(SUPPORT_PMR_DEFERRED_FREE) +PVRSRV_ERROR MMU_CacheInvalidateKick(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 *puiRequiredSyncValue) +{ + IMG_UINT32 uiRequiredSyncValue; + PVRSRV_ERROR eError; + + eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &uiRequiredSyncValue); + + if (puiRequiredSyncValue != NULL) + { + *puiRequiredSyncValue = uiRequiredSyncValue; + } + + return eError; +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + /****************************************************************************** End of file (mmu_common.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_mmu.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_mmu.c index 
5b195d6fa0cb..64a5740f52f5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_mmu.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_mmu.c @@ -373,6 +373,9 @@ PVRSRV_ERROR PDumpMMUDumpPxEntries(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 uiPxEProtMask, IMG_UINT64 uiDataValidEnable, IMG_UINT32 ui32Flags, + IMG_UINT32 ui32VAParity, + IMG_UINT32 ui32ParityShift, + IMG_UINT64 ui64ParityMask, PDUMP_MMU_TYPE eMMUType) { PVRSRV_ERROR eErr = PVRSRV_OK; @@ -434,6 +437,7 @@ PVRSRV_ERROR PDumpMMUDumpPxEntries(PVRSRV_DEVICE_NODE *psDeviceNode, uiPxEIdx < uiFirstEntry + uiNumEntries; uiPxEIdx++) { + IMG_BOOL bLastEntry = (uiPxEIdx == uiFirstEntry + uiNumEntries - 1); /* Calc the symbolic address offset of the PxE location This is what we have to add to the table address to get to a certain entry */ ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry); @@ -479,10 +483,14 @@ PVRSRV_ERROR PDumpMMUDumpPxEntries(PVRSRV_DEVICE_NODE *psDeviceNode, ui32SymAddrOffset, IMG_FALSE, uiBytesPerEntry, pvRawBytes, ui32Flags); - if (eErr == PVRSRV_OK) + if (bLastEntry && eErr == PVRSRV_OK) { goto done; } + else if (eErr == PVRSRV_OK) + { + continue; + } else { goto ErrUnlock; @@ -540,6 +548,35 @@ PVRSRV_ERROR PDumpMMUDumpPxEntries(PVRSRV_DEVICE_NODE *psDeviceNode, pszSymbolicAddr, uiSymbolicAddrOffset); } + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(psDeviceNode, hScript, ui32Flags); + + /* OR a parity bit */ + if (ui64ParityMask) + { + IMG_UINT32 uiStateWord = + ((0x1U << PDUMP_SET_PARITY_STATE_WORD_VERSION_SHIFT) & PDUMP_SET_PARITY_STATE_WORD_VERSION_MASK) | + ((ui32ParityShift << PDUMP_SET_PARITY_STATE_WORD_PARITY_SHIFT_SHIFT) & PDUMP_SET_PARITY_STATE_WORD_PARITY_SHIFT_MASK) | + ((ui32VAParity << PDUMP_SET_PARITY_STATE_WORD_VA_PARITY_SHIFT) & PDUMP_SET_PARITY_STATE_WORD_VA_PARITY_MASK); + + eErr = PDumpSNPrintf(hScript, + ui32MaxLen, + "CMD:SetParity :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:%s:0x%"IMG_UINT64_FMTSPECX" 0x%08X\n", + /* Dest PTE entry tag */ + pszPDumpDevName, + pszMMUPX, + ui64PxSymAddr, + ui32SymAddrOffset, + /* Source tag to calc PA parity from */ + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset, + /* State word */ + uiStateWord); + + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(psDeviceNode, hScript, ui32Flags); + } } else { @@ -568,10 +605,11 @@ PVRSRV_ERROR PDumpMMUDumpPxEntries(PVRSRV_DEVICE_NODE *psDeviceNode, { pszMMUPX = MMUPX_FMT(eMMULevel); } + + PVR_GOTO_IF_ERROR(eErr, ErrUnlock); + PDumpWriteScript(psDeviceNode, hScript, ui32Flags); } - PVR_GOTO_IF_ERROR(eErr, ErrUnlock); - PDumpWriteScript(psDeviceNode, hScript, ui32Flags); /* Now shift it to the right place, if necessary: */ /* Now shift that value down, by the "Align shift" diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_physmem.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_physmem.c index 322dd285a0e0..e25b1f40e069 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_physmem.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_physmem.c @@ -135,13 +135,23 @@ PVRSRV_ERROR PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, return PVRSRV_OK; } -/************************************************************************** - * Function Name : PDumpMalloc - * Inputs : - * Outputs : - * Returns : PVRSRV_ERROR - * Description : - **************************************************************************/ +/*************************************************************************/ 
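/*
 * For reference, the "CMD:SetParity" state word built in the pdump_mmu.c hunk
 * earlier in this patch packs three fields (a version, the PTE parity bit
 * position and the VA parity bit) into one 32-bit value with shift/mask pairs.
 * The real PDUMP_SET_PARITY_STATE_WORD_* constants are not visible in this
 * patch, so the field positions below are made up purely to show the packing
 * pattern:
 */
#include <stdint.h>

/* Hypothetical layout: [31:28] version, [15:8] parity shift, [0] VA parity. */
#define EX_STATE_WORD_VERSION_SHIFT       28
#define EX_STATE_WORD_VERSION_MASK        0xF0000000u
#define EX_STATE_WORD_PARITY_SHIFT_SHIFT  8
#define EX_STATE_WORD_PARITY_SHIFT_MASK   0x0000FF00u
#define EX_STATE_WORD_VA_PARITY_SHIFT     0
#define EX_STATE_WORD_VA_PARITY_MASK      0x00000001u

static uint32_t ExamplePackSetParityStateWord(uint32_t ui32Version,
                                              uint32_t ui32ParityShift,
                                              uint32_t ui32VAParity)
{
	return ((ui32Version     << EX_STATE_WORD_VERSION_SHIFT)      & EX_STATE_WORD_VERSION_MASK) |
	       ((ui32ParityShift << EX_STATE_WORD_PARITY_SHIFT_SHIFT) & EX_STATE_WORD_PARITY_SHIFT_MASK) |
	       ((ui32VAParity    << EX_STATE_WORD_VA_PARITY_SHIFT)    & EX_STATE_WORD_VA_PARITY_MASK);
}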
/*! +@Function PDumpMalloc +@Description Builds and writes an allocation command to pdump output. The + PDump write lock is held while writing. + +@Input psDeviceNode A pointer to a device node. +@Input pszDevSpace Device space string. +@Input pszSymbolicAddress Name of the allocation. +@Input ui64Size Size of the allocation. +@Input uiAlign Alignment of the allocation. +@Input bInitialise Should the command initialise the allocation. +@Input ui8InitValue The value memory is initialised to. +@Input phHandlePtr PDump allocation handle. +@Input ui32PDumpFlags PDump allocation flags. + +@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success. +*/ /**************************************************************************/ PVRSRV_ERROR PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszDevSpace, const IMG_CHAR *pszSymbolicAddress, @@ -154,6 +164,48 @@ PVRSRV_ERROR PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_ERROR eError = PVRSRV_OK; + PDUMP_LOCK(ui32PDumpFlags); + eError = PDumpMallocUnlocked(psDeviceNode, + pszDevSpace, + pszSymbolicAddress, + ui64Size, + uiAlign, + bInitialise, + ui8InitValue, + phHandlePtr, + ui32PDumpFlags); + PDUMP_UNLOCK(ui32PDumpFlags); + return eError; +} + +/*************************************************************************/ /*! +@Function PDumpMallocUnlocked +@Description Builds and writes an allocation command to pdump output. The + PDump write lock is not taken while writing. + +@Input psDeviceNode A pointer to a device node. +@Input pszDevSpace Device space string. +@Input pszSymbolicAddress Name of the allocation. +@Input ui64Size Size of the allocation. +@Input uiAlign Alignment of the allocation. +@Input bInitialise Should the command initialise the allocation. +@Input ui8InitValue The value memory is initialised to. +@Input phHandlePtr PDump allocation handle. +@Input ui32PDumpFlags PDump allocation flags. + +@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_BOOL bInitialise, + IMG_UINT8 ui8InitValue, + IMG_HANDLE *phHandlePtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo; PDUMP_GET_SCRIPT_STRING() @@ -210,9 +262,7 @@ PVRSRV_ERROR PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, goto _return; } - PDUMP_LOCK(ui32PDumpFlags); PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags); - PDUMP_UNLOCK(ui32PDumpFlags); psPDumpAllocationInfo->ui64Size = ui64Size; psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign); @@ -224,21 +274,45 @@ PVRSRV_ERROR PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, return eError; } +/*************************************************************************/ /*! +@Function PDumpFree +@Description Writes a FREE command for an allocation handle to the pdump out2 + stream. The PDump write lock is held while writing to the output stream. -/************************************************************************** - * Function Name : PDumpFree - * Inputs : - * Outputs : - * Returns : PVRSRV_ERROR - * Description : - **************************************************************************/ +@Input psDeviceNode A pointer to a device node. +@Input hPDumpAllocationInfoHandle A PDump allocation handle. + +@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success.
+*/ /**************************************************************************/ PVRSRV_ERROR PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_HANDLE hPDumpAllocationInfoHandle) { PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA; + PDUMP_LOCK(PDUMP_FLAGS_NONE); + eError = PDumpFreeUnlocked(psDeviceNode, hPDumpAllocationInfoHandle); + PDUMP_UNLOCK(PDUMP_FLAGS_NONE); + + return eError; +} + +/*************************************************************************/ /*! +@Function PDumpFreeUnlocked +@Description Writes a FREE command for an allocation handle to the pdump + out2 stream. When writing to the output stream the thread + remains unlocked. + +@Input psDeviceNode A pointer to a device node. +@Input hPDumpAllocationInfoHandle A PDump allocation handle. + +@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVRSRV_ERROR eError = PVRSRV_OK; PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA; PDUMP_GET_SCRIPT_STRING() @@ -251,10 +325,8 @@ PVRSRV_ERROR PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, psPDumpAllocationInfo->aszSymbolicAddress); PVR_GOTO_IF_ERROR(eError, _return); - PDUMP_LOCK(ui32Flags); PDumpWriteScript(psDeviceNode, hScript, ui32Flags); OSFreeMem(psPDumpAllocationInfo); - PDUMP_UNLOCK(ui32Flags); _return: PDUMP_RELEASE_SCRIPT_STRING(); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_server.c index f5f2f55a0f0b..4b42e9c207a0 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pdump_server.c @@ -64,7 +64,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "connection_server.h" #include "services_km.h" #include -#include "oskm_apphint.h" +#include "os_apphint.h" /* pdump headers */ #include "tlstream.h" @@ -73,8 +73,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdumpdesc.h" #include "rgxpdump.h" -#include "tutilsdefs.h" -#include "tutils_km.h" /* Allow temporary buffer size override */ #if !defined(PDUMP_TEMP_BUFFER_SIZE) #define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U) @@ -144,6 +142,12 @@ typedef struct IMG_CHAR szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*< PRM file name where the zero page was pdumped */ } PDUMP_PARAMETERS; +/* PDump global connection count - used to determine when/if the last + * connection (from a PDump generating app) has been closed. + * This is used to key the AUTO_TERMINATED behaviour if enabled. + */ +static ATOMIC_T gPDumpNumConnex; + /* PDump lock to keep pdump write atomic. * Which will protect g_PDumpScript & g_PDumpParameters pdump * specific shared variable. @@ -191,7 +195,7 @@ ATOMIC_T g_sEveryLineCounter; #endif /* Prototype for the test/debug state dump routine used in debugging */ -#if defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS) +#if defined(PDUMP_TRACE_STATE) void PDumpCommonDumpState(void); #endif @@ -249,6 +253,7 @@ typedef enum _PDUMP_SM_ #define FLAG_IS_DRIVER_IN_INIT_PHASE 0x1 /*! Control flag that keeps track of State of driver initialisation phase */ #define FLAG_IS_IN_CAPTURE_RANGE 0x2 /*! 
Control flag that keeps track of Current capture status, is current frame in range */ #define FLAG_IS_IN_CAPTURE_INTERVAL 0x4 /*! Control flag that keeps track of Current capture status, is current frame in an interval where no capture takes place. */ +#define FLAG_IS_AUTO_TERMINATED 0x8 /*! Control flag that indicates app has auto-terminated. */ #define CHECK_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_HAS(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG) #define SET_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_SET(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG) @@ -268,6 +273,7 @@ typedef struct _PDUMP_CTRL_STATE_ POS_LOCK hLock; /*!< Exclusive lock to this structure */ IMG_PID InPowerTransitionPID;/*!< pid of thread requesting power transition */ + IMG_UINT32 ui32TimeoutFrequency;/*!< Timer frequency for checking process existence */ } PDUMP_CTRL_STATE; static PDUMP_CTRL_STATE g_PDumpCtrl = @@ -290,9 +296,12 @@ static PDUMP_CTRL_STATE g_PDumpCtrl = }, NULL, + 0, 0 }; +static IMG_HANDLE g_PDumpTimerID; + static void PDumpAssertWriteLockHeld(void); #if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS) @@ -325,7 +334,7 @@ static INLINE IMG_CHAR* PDumpCreateIncVarNameStr(const IMG_CHAR* pszInternalVar) return NULL; } - OSStringLCopy(pszPDumpVarName, pszInternalVar, ui32Size); + OSStringSafeCopy(pszPDumpVarName, pszInternalVar, ui32Size); /* Increase the number on the second variable */ pszPDumpVarName[ui32Size-2] += 1; return pszPDumpVarName; @@ -384,6 +393,12 @@ static INLINE PDUMP_SM PDumpCtrlGetModuleState(void) return g_PDumpCtrl.eServiceState; } +PVRSRV_ERROR PDumpValidateUMFlags(PDUMP_FLAGS_T uiFlags) +{ + /* If these flags are or'd together, they are invalid */ + return ((uiFlags & (~(PDUMP_BLKDATA | PDUMP_CONT))) != 0) ? PVRSRV_ERROR_INVALID_PARAMS : PVRSRV_OK; +} + PVRSRV_ERROR PDumpReady(void) { switch (PDumpCtrlGetModuleState()) @@ -520,7 +535,7 @@ static void PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame) #endif } -static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval) +static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, IMG_UINT32 ui32AutoTermTimeout) { /* Set the capture range to that supplied by the PDump client tool */ @@ -529,6 +544,26 @@ static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui3 g_PDumpCtrl.sCaptureRange.ui32End = ui32End; g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval; + /* Disable / Enable AUTO_TERMINATE behaviour if AutoTermTimeout is set */ + if (ui32AutoTermTimeout == 0U) + { + if (g_PDumpTimerID != NULL) + { + PVRSRV_ERROR eError; + + eError = OSDisableTimer(g_PDumpTimerID); + PVR_LOG_IF_ERROR(eError, "OSDisableTimer"); + + /* Now destroy it too */ + eError = OSRemoveTimer(g_PDumpTimerID); + PVR_LOG_IF_ERROR(eError, "OSRemoveTimer"); + + g_PDumpTimerID = NULL; + } + g_PDumpCtrl.ui32TimeoutFrequency = 0U; + UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED); + } + /* Set pdump block mode ctrl variables */ g_PDumpCtrl.sBlockCtrl.ui32BlockLength = (ui32Mode == PDUMP_CAPMODE_BLOCKED)? 
ui32Interval : 0; /* ui32Interval is interpreted as block length */ g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock = PDUMP_BLOCKNUM_INVALID; @@ -667,6 +702,40 @@ static PVRSRV_ERROR PDumpCtrlGetState(IMG_UINT64 *ui64State) *ui64State |= PDUMP_STATE_SUSPENDED; } + if (CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED)) + { + *ui64State |= PDUMP_STATE_APP_TERMINATED; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PDumpSetAutoTerminate(IMG_UINT32 ui32TimeoutFrequency) +{ + PVRSRV_ERROR eError; + IMG_BOOL bEnable = (ui32TimeoutFrequency != 0U) ? IMG_TRUE : IMG_FALSE; + + eError = PDumpReady(); + PVR_LOG_RETURN_IF_ERROR(eError, "PDumpReady"); + + g_PDumpCtrl.ui32TimeoutFrequency = ui32TimeoutFrequency; + + if (bEnable) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: ENABLING Auto Termination - Timeout %u", + __func__, ui32TimeoutFrequency)); + PDUMP_REFCOUNT_PRINT(("%s: gPDumpNumConnex (%p) = %d", __func__, + &gPDumpNumConnex, OSAtomicRead(&gPDumpNumConnex))); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: DISABLING Auto Termination", + __func__)); + PDUMP_REFCOUNT_PRINT(("%s: gPDumpNumConnex (%p) = %d", __func__, + &gPDumpNumConnex, + OSAtomicRead(&gPDumpNumConnex))); + } + return PVRSRV_OK; } @@ -776,6 +845,13 @@ IMG_BOOL PDumpIsDevicePermitted(PVRSRV_DEVICE_NODE *psDeviceNode) if (psDeviceNode) { + if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_CREATED) + { + PVR_DPF((PVR_DBG_FATAL,"%s: PDump output requested for Device %d " + "before device created. Not permitted - please fix driver.", + __func__, psDeviceNode->sDevId.ui32InternalID)); + return IMG_FALSE; + } if ((psDeviceNode->sDevId.ui32InternalID > PVRSRV_MAX_DEVICES) || ((psPVRSRVData->ui32PDumpBoundDevice < PVRSRV_MAX_DEVICES) && (psDeviceNode->sDevId.ui32InternalID != psPVRSRVData->ui32PDumpBoundDevice))) @@ -954,7 +1030,6 @@ static IMG_UINT32 PDumpWriteToBuffer(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32Off = 0; IMG_BYTE *pbyDataBuffer; IMG_UINT32 ui32BytesAvailable = 0; - static IMG_UINT32 ui32TotalBytesWritten; PVRSRV_ERROR eError; IMG_UINT32 uiRetries = 0; @@ -1029,8 +1104,6 @@ static IMG_UINT32 PDumpWriteToBuffer(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError == PVRSRV_OK) { - ui32TotalBytesWritten += ui32BytesToBeWritten; - PVR_ASSERT(pbyDataBuffer != NULL); OSDeviceMemCopy((void*)pbyDataBuffer, pui8Data + ui32Off, ui32BytesToBeWritten); @@ -1516,6 +1589,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psParam->sInitStream.pszName, psParam->sInitStream.ui32BufferSize, TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, NULL, NULL, + NULL, NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamInit", end); @@ -1527,6 +1601,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psParam->sMainStream.pszName, psParam->sMainStream.ui32BufferSize, TL_OPMODE_DROP_NEWER , NULL, NULL, + NULL, NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamMain", param_main_failed); @@ -1538,6 +1613,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psParam->sDeinitStream.pszName, psParam->sDeinitStream.ui32BufferSize, TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, NULL, NULL, + NULL, NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamDeinit", param_deinit_failed); @@ -1558,6 +1634,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psScript->sInitStream.pszName, psScript->sInitStream.ui32BufferSize, TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, NULL, NULL, + NULL, 
NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptInit", script_init_failed); @@ -1569,6 +1646,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psScript->sMainStream.pszName, psScript->sMainStream.ui32BufferSize, TL_OPMODE_DROP_NEWER, NULL, NULL, + NULL, NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptMain", script_main_failed); @@ -1580,6 +1658,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psScript->sDeinitStream.pszName, psScript->sDeinitStream.ui32BufferSize, TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP, NULL, NULL, + NULL, NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptDeinit", script_deinit_failed); @@ -1591,6 +1670,7 @@ static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psSc psScript->sBlockStream.pszName, psScript->sBlockStream.ui32BufferSize, TL_OPMODE_DROP_NEWER, NULL, NULL, + NULL, NULL, NULL, NULL); PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptBlock", script_block_failed); @@ -1658,10 +1738,10 @@ static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(PVRSRV_DEVICE_NODE *psD IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; IMG_UINT32 ui32GeneralNon4KHeapPageSize; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, GeneralNon4KHeapPageSize, + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, GeneralNon4KHeapPageSize, &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); /* ZeroPageSize can't be smaller than page size */ g_PDumpParameters.uiZeroPageSize = MAX(ui32GeneralNon4KHeapPageSize, OSGetPageSize()); @@ -2329,7 +2409,8 @@ PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - IMG_UINT32 ui32MaxParamFileSize) + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout) { PVRSRV_ERROR eError; @@ -2375,7 +2456,8 @@ PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, PDumping app may be reading the state data for some checks */ PDumpCtrlLockAcquire(); - PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval); + PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval, ui32AutoTermTimeout); + PDumpSetAutoTerminate(ui32AutoTermTimeout); PDumpCtrlLockRelease(); if (ui32MaxParamFileSize == 0) @@ -3866,7 +3948,7 @@ PVRSRV_ERROR PDumpCommentWithFlagsVA(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR * pszFormat, va_list args) { IMG_INT32 iCount; - PVRSRV_ERROR eErr = PVRSRV_OK; + PVRSRV_ERROR eErr = PVRSRV_ERROR_INVALID_PARAMS; PDUMP_GET_MSG_STRING(); /* Construct the string */ @@ -4126,17 +4208,6 @@ PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888); -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_FBCDC_SIGNATURE_CHECK) - { - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - - /* - * The render data may be corrupted, so write out the raw - * image buffer to avoid errors in the post-processing tools. 
- */ - bRawImageData |= (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN); - } -#endif if (bRawImageData) { @@ -5162,7 +5233,7 @@ void PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode) * Will set module state back to READY. */ eErr = PDumpSetDefaultCaptureParamsKM(NULL, psDeviceNode, PDUMP_CAPMODE_UNSET, - PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 0, 0); + PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 0, 0, 0); PVR_LOG_IF_ERROR(eErr, "PDumpSetDefaultCaptureParamsKM"); } } @@ -5415,7 +5486,7 @@ static void PDumpAssertWriteLockHeld(void) PVR_ASSERT(OSLockIsLocked(g_hPDumpWriteLock)); } -#if defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS) +#if defined(PDUMP_TRACE_STATE) void PDumpCommonDumpState(void) { PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.hTL (In, Mn, De, Bk) ( %p, %p, %p, %p )", @@ -5478,8 +5549,10 @@ void PDumpCommonDumpState(void) } #endif /* defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS) */ +static void PDumpStartTimer(PVRSRV_DEVICE_NODE *psDeviceNode); -PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData, +PVRSRV_ERROR PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, + void *hSyncPrivData, PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, PDUMP_CONNECTION_DATA **ppsPDumpConnectionData) { @@ -5513,6 +5586,26 @@ PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData, *ppsPDumpConnectionData = psPDumpConnectionData; + if (PDumpIsDevicePermitted(psDeviceNode)) + { + IMG_INT iRefCount; + + /* Add this new reference to the global count of active connections */ + iRefCount = OSAtomicIncrement(&gPDumpNumConnex); + PDUMP_REFCOUNT_PRINT("%s: gPDumpNumConnex (%p) = %d", __func__, + &gPDumpNumConnex, iRefCount); + + if (((iRefCount > 1) && + (g_PDumpCtrl.ui32TimeoutFrequency != 0U)) && + (g_PDumpTimerID == NULL)) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Starting Timeout Chain now, refcnt = %d", + __func__, iRefCount)); + PDumpStartTimer(psDeviceNode); + } + } + return PVRSRV_OK; fail_lockcreate: @@ -5522,9 +5615,21 @@ PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData, return eError; } -void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData) +void PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, + PDUMP_CONNECTION_DATA *psPDumpConnectionData) { + IMG_INT iRefCount; + _PDumpConnectionRelease(psPDumpConnectionData); + if (PDumpIsDevicePermitted(psDeviceNode)) + { + /* Remove this connection from the global count */ + iRefCount = OSAtomicDecrement(&gPDumpNumConnex); + PDUMP_REFCOUNT_PRINT("%s: gPDumpNumConnex (%p) = %d", __func__, + &gPDumpNumConnex, iRefCount); + PVR_ASSERT(iRefCount >= 0); + } + PVR_UNREFERENCED_PARAMETER(iRefCount); } @@ -5560,4 +5665,76 @@ PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CH return PVRSRV_OK; } +void PDumpTimerCB(void *pvData); +void PDumpTimerCB(void *pvData) +{ + IMG_INT iRefCount; + + PVR_UNREFERENCED_PARAMETER(pvData); + + if (CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Already flagged as TERMINATED", + __func__)); + } + + /* Simply check to see if the global connection count indicates that all + * subsequent applications requiring PDump logging have terminated. + * In a quiescent state we will have a singleton pdump utility still + * connected. 
+ */ + if ((iRefCount = OSAtomicRead(&gPDumpNumConnex)) == 1) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: No connections active (%d), flagging as AUTO_TERMINATED", + __func__, iRefCount)); + SET_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED); + } +} + +/* + * FunctionName PDumpStartTimer + * Description Start an OSTimer chain running to scan for active connections + * being present in the connections associated with the given + * psDeviceNode. Only started if we have AutoTerminate flagged + * in the internal PDump state. + * Inputs psDeviceNode associated device node to scan for connections + * Returns nothing + */ +static void PDumpStartTimer(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + if (!PDumpIsDevicePermitted(psDeviceNode)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: DeviceID %u not valid", __func__, + psDeviceNode->sDevId.ui32InternalID)); + return; + } + + if (g_PDumpCtrl.ui32TimeoutFrequency == 0U) + { + return; + } + + if (g_PDumpTimerID != NULL) + { + eError = OSDisableTimer(g_PDumpTimerID); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSDisableTimer"); + eError = OSRemoveTimer(g_PDumpTimerID); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSRemoveTimer"); + } + + g_PDumpTimerID = OSAddTimer(PDumpTimerCB, NULL, g_PDumpCtrl.ui32TimeoutFrequency * 1000U); + + if (g_PDumpTimerID != NULL) + { + eError = OSEnableTimer(g_PDumpTimerID); + + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEnableTimer"); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Timer %p now active.", __func__, + g_PDumpTimerID)); + } +} + #endif /* defined(PDUMP) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physheap.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physheap.c index 099cfd028ce0..c5c603024b54 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/physheap.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physheap.c @@ -54,19 +54,33 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "physmem.h" #include "physmem_hostmem.h" #include "physmem_lma.h" +#include "physmem_dlm.h" +#include "physmem_ima.h" #include "physmem_osmem.h" +#include "debug_common.h" struct _PHYS_HEAP_ { /*! The type of this heap */ PHYS_HEAP_TYPE eType; + + /*! The allocation policy for this heap */ + PHYS_HEAP_POLICY uiPolicy; + /* Config flags */ PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; + /* OOM Detection state */ +#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) + ATOMIC_T sOOMDetected; +#endif + /*! Pointer to device node struct */ PPVRSRV_DEVICE_NODE psDevNode; /*! PDump name of this physical memory heap */ IMG_CHAR *pszPDumpMemspaceName; + /*! Physheap name of this physical memory heap */ + IMG_CHAR aszName[PHYS_HEAP_NAME_SIZE]; /*! Private data for the translate routines */ IMG_HANDLE hPrivData; /*! Function callbacks */ @@ -81,10 +95,26 @@ struct _PHYS_HEAP_ /*! Pointer to next physical heap */ struct _PHYS_HEAP_ *psNext; -}; -static PHYS_HEAP *g_psPhysHeapList; -static POS_LOCK g_hPhysHeapLock; +#if defined(SUPPORT_STATIC_IPA) + /*! IPA Policy value from Heap Config */ + IMG_UINT32 ui32IPAPolicyValue; + + /*! IPA Clear Mask value from Heap Config */ + IMG_UINT32 ui32IPAClearMask; + + /*! IPA Bit Shift value from Heap Config */ + IMG_UINT32 ui32IPAShift; +#endif /* defined(SUPPORT_STATIC_IPA) */ +#if defined(PVRSRV_ENABLE_XD_MEM) + /* Pointer to the Spas region the physheap is part of + * or NULL. */ + PHYS_HEAP_SPAS_REGION *psSpasRegion; + /* The Spas regions list of physheaps. + * Each physheap is only part of at most 1 Spas region. 
*/ + DLLIST_NODE sSpasSibling; +#endif +}; #if defined(REFCOUNT_DEBUG) #define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \ @@ -97,12 +127,22 @@ static POS_LOCK g_hPhysHeapLock; #define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) #endif +#define IsOOMError(err) ((err == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) | \ + (err == PVRSRV_ERROR_OUT_OF_MEMORY) | \ + (err == PVRSRV_ERROR_PMR_TOO_LARGE)) + +typedef enum _PVR_LAYER_HEAP_ACTION_ +{ + PVR_LAYER_HEAP_ACTION_IGNORE, /* skip heap during heap init */ + PVR_LAYER_HEAP_ACTION_INSTANTIATE, /* instantiate heap but don't acquire */ + PVR_LAYER_HEAP_ACTION_INITIALISE /* instantiate and acquire */ +} PVR_LAYER_HEAP_ACTION; typedef struct PHYS_HEAP_PROPERTIES_TAG { PVRSRV_PHYS_HEAP eFallbackHeap; - IMG_BOOL bPVRLayerAcquire; + PVR_LAYER_HEAP_ACTION ePVRLayerAction; IMG_BOOL bUserModeAlloc; } PHYS_HEAP_PROPERTIES; @@ -111,57 +151,317 @@ typedef struct PHYS_HEAP_PROPERTIES_TAG */ static PHYS_HEAP_PROPERTIES gasHeapProperties[PVRSRV_PHYS_HEAP_LAST] = { - /* eFallbackHeap, bPVRLayerAcquire, bUserModeAlloc */ - { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* DEFAULT */ - { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* GPU_LOCAL */ - { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* CPU_LOCAL */ - { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* GPU_PRIVATE */ - { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_FALSE, IMG_FALSE }, /* FW_MAIN */ - { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* EXTERNAL */ - { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* GPU_COHERENT */ - { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_TRUE }, /* GPU_SECURE */ - { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CONFIG */ - { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CODE */ - { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_DATA */ - { PVRSRV_PHYS_HEAP_FW_PREMAP0, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP0 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP1, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP1 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP2, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP2 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP3, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP3 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP4, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP4 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP5, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP5 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP6, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP6 */ - { PVRSRV_PHYS_HEAP_FW_PREMAP7, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP7 */ + /* eFallbackHeap, ePVRLayerAction, bUserModeAlloc */ + { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* DEFAULT */ + { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* CPU_LOCAL */ + { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_LOCAL */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_PRIVATE */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_MAIN */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_FALSE }, /* EXTERNAL */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_FALSE }, /* GPU_COHERENT */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_SECURE */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_CONFIG */ + { PVRSRV_PHYS_HEAP_FW_MAIN, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_CODE */ + { PVRSRV_PHYS_HEAP_FW_MAIN, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PRIV_DATA */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* 
FW_PREMAP_PT */ + { PVRSRV_PHYS_HEAP_FW_PREMAP0, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP0 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP1, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP1 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP2, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP2 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP3, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP3 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP4, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP4 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP5, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP5 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP6, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP6 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP7, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP7 */ + { PVRSRV_PHYS_HEAP_WRAP, PVR_LAYER_HEAP_ACTION_INSTANTIATE, IMG_FALSE }, /* WRAP */ + { PVRSRV_PHYS_HEAP_DISPLAY, PVR_LAYER_HEAP_ACTION_INSTANTIATE, IMG_FALSE }, /* DISPLAY */ }; static_assert((ARRAY_SIZE(gasHeapProperties) == PVRSRV_PHYS_HEAP_LAST), "Size or order of gasHeapProperties entries incorrect for PVRSRV_PHYS_HEAP enum"); -void PVRSRVGetDevicePhysHeapCount(PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 *pui32PhysHeapCount) +static IMG_BOOL PhysHeapCreatedByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap); +static IMG_BOOL PhysHeapAcquiredByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap); + +/** + * ! IMPORTANT ! + * Do not change this string array unless the usage flag definitions in + * physheap_config.h have changed. + * + * NOTE: Use DebugCommonFlagStrings or GetPhysHeapUsageString to get + * usage flags string. + */ +static const IMG_FLAGS2DESC g_asPhysHeapUsageFlagStrings[] = +{ + {PHYS_HEAP_USAGE_CPU_LOCAL, "CPU_LOCAL"}, + {PHYS_HEAP_USAGE_GPU_LOCAL, "GPU_LOCAL"}, + {PHYS_HEAP_USAGE_GPU_PRIVATE, "GPU_PRIVATE"}, + {PHYS_HEAP_USAGE_EXTERNAL, "EXTERNAL"}, + {PHYS_HEAP_USAGE_GPU_COHERENT, "GPU_COHERENT"}, + {PHYS_HEAP_USAGE_GPU_SECURE, "GPU_SECURE"}, + {PHYS_HEAP_USAGE_FW_SHARED, "FW_SHARED"}, + {PHYS_HEAP_USAGE_FW_PRIVATE, "FW_PRIVATE"}, + {PHYS_HEAP_USAGE_FW_CODE, "FW_CODE"}, + {PHYS_HEAP_USAGE_FW_PRIV_DATA, "FW_PRIV_DATA"}, + {PHYS_HEAP_USAGE_FW_PREMAP_PT, "FW_PREMAP_PT"}, + {PHYS_HEAP_USAGE_FW_PREMAP, "FW_PREMAP"}, + {PHYS_HEAP_USAGE_WRAP, "WRAP"}, + {PHYS_HEAP_USAGE_DISPLAY, "DISPLAY"}, + {PHYS_HEAP_USAGE_DLM, "DLM"} +}; + +/*************************************************************************/ /*! +@Function PhysHeapCheckValidUsageFlags +@Description Checks if any bits were set outside of the valid ones within + PHYS_HEAP_USAGE_FLAGS. + +@Input ui32PhysHeapUsage The value of the usage flag. + +@Return True or False depending on whether there were only valid bits set. +*/ /**************************************************************************/ +static inline IMG_BOOL PhysHeapCheckValidUsageFlags(PHYS_HEAP_USAGE_FLAGS ui32PhysHeapUsage) { - *pui32PhysHeapCount = psDevNode->ui32UserAllocHeapCount; + return !(ui32PhysHeapUsage & ~PHYS_HEAP_USAGE_MASK); } -static IMG_UINT32 PhysHeapOSGetPageShift(void) +/*************************************************************************/ /*! +@Function GetPhysHeapUsageString +@Description This function is used to create a comma separated string of all + usage flags passed in as a bitfield. + +@Input ui32UsageFlags The bitfield of usage flags. +@Input ui32Size The size of the memory pointed to by + pszUsageString. +@Output pszUsageString A pointer to memory where the created string + will be stored. + +@Return If successful PVRSRV_OK, else a PVRSRV_ERROR. 
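The usage-flag walk below builds a comma-separated name list from the bit positions set in a PHYS_HEAP_USAGE_FLAGS value. A minimal standalone sketch of the same idea, assuming a simplified flag table and hypothetical names (this is not the DDK helper, only an illustration of the technique):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t uiFlag; const char *pszName; } FLAG2DESC;

/* Hypothetical, shortened flag table for illustration only. */
static const FLAG2DESC asUsageNames[] = {
    { 1u << 0, "CPU_LOCAL"   },
    { 1u << 1, "GPU_LOCAL"   },
    { 1u << 2, "GPU_PRIVATE" },
};

/* Build "A, B, C" from the bits set in uiFlags; unknown bits become "INVALID". */
void UsageFlagsToString(uint32_t uiFlags, char *pszBuf, size_t uiLen)
{
    size_t i;
    int bFirst = 1;

    pszBuf[0] = '\0';
    if (uiFlags == 0) { snprintf(pszBuf, uiLen, "NONE"); return; }

    for (i = 0; i < sizeof(asUsageNames) / sizeof(asUsageNames[0]); i++)
    {
        if (uiFlags & asUsageNames[i].uiFlag)
        {
            snprintf(pszBuf + strlen(pszBuf), uiLen - strlen(pszBuf), "%s%s",
                     bFirst ? "" : ", ", asUsageNames[i].pszName);
            bFirst = 0;
            uiFlags &= ~asUsageNames[i].uiFlag;   /* mark this bit handled */
        }
    }

    if (uiFlags != 0)   /* any bits with no table entry */
    {
        snprintf(pszBuf + strlen(pszBuf), uiLen - strlen(pszBuf), "%sINVALID",
                 bFirst ? "" : ", ");
    }
}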
+*/ /**************************************************************************/ +static PVRSRV_ERROR GetPhysHeapUsageString(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags, + IMG_UINT32 ui32Size, + IMG_CHAR *const pszUsageString) { - return (IMG_UINT32)OSGetPageShift(); + IMG_UINT32 i; + IMG_BOOL bFirst = IMG_TRUE; + size_t uiSize = 0; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszUsageString != NULL, "pszUsageString"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Size > 0, "ui32Size"); + + /* Initialise the string to be null terminated at the beginning */ + pszUsageString[0] = '\0'; + + if (ui32UsageFlags == 0) + { + const ssize_t iCopiedCnt = OSStringSafeCopy(pszUsageString, "NONE", (size_t)ui32Size); + PVR_LOG_RETURN_IF_FALSE((iCopiedCnt >= 0), "OSStringSafeCopy", PVRSRV_ERROR_OUT_OF_MEMORY); + + return PVRSRV_OK; + } + + /* Process from left to right. */ + for (i = (sizeof(PHYS_HEAP_USAGE_FLAGS) * BITS_PER_BYTE - 1); i > 0; i--) + { + IMG_UINT32 ui32Flag = BIT(i); + + if (BITMASK_HAS(ui32UsageFlags, ui32Flag)) + { + IMG_CHAR pszString[32] = "\0"; + + if (PhysHeapCheckValidUsageFlags(ui32Flag)) + { + DebugCommonFlagStrings(pszString, + sizeof(pszString), + g_asPhysHeapUsageFlagStrings, + ARRAY_SIZE(g_asPhysHeapUsageFlagStrings), + ui32Flag); + } + else + { + uiSize = OSStringLCat(pszString, + "INVALID", + sizeof(pszString)); + PVR_LOG_RETURN_IF_FALSE((uiSize < sizeof(pszString)), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY); + } + + if (!bFirst) + { + uiSize = OSStringLCat(pszUsageString, + ", ", + (size_t)ui32Size); + PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY); + } + else + { + bFirst = IMG_FALSE; + } + + uiSize = OSStringLCat(pszUsageString, + pszString, + (size_t)ui32Size); + PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY); + } + } + + return PVRSRV_OK; } -static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = -{ - .pfnDestroyData = NULL, - .pfnGetPMRFactoryMemStats = PhysmemGetOSRamMemStats, - .pfnCreatePMR = PhysmemNewOSRamBackedPMR, - .pfnPagesAlloc = &OSPhyContigPagesAlloc, - .pfnPagesFree = &OSPhyContigPagesFree, - .pfnPagesMap = &OSPhyContigPagesMap, - .pfnPagesUnMap = &OSPhyContigPagesUnmap, - .pfnPagesClean = &OSPhyContigPagesClean, - .pfnGetPageShift = &PhysHeapOSGetPageShift, -}; +/*************************************************************************/ /*! +@Function PhysHeapPrintHeapProperties +@Description This function is used to print properties + of the specified physheap. + +@Input psPhysHeap The physheap to create the string from. +@Input pfnDumpDebugPrintf The specified print function that should be + used to dump any debug information + (see PVRSRVDebugRequest). +@Input pvDumpDebugFile Optional file identifier to be passed to + the print function if required. + +@Return If successful PVRSRV_OK, else a PVRSRV_ERROR. 
+*/ /**************************************************************************/ +static PVRSRV_ERROR PhysHeapPrintHeapProperties(PHYS_HEAP *psPhysHeap, + IMG_BOOL bDefaultHeap, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + static const IMG_CHAR *const pszTypeStrings[] = { + #define X(_name) #_name, + PHYS_HEAP_TYPE_LIST + #undef X + }; + + IMG_UINT64 ui64TotalSize; + IMG_UINT64 ui64FreeSize; + IMG_CHAR pszUsageString[127] = "\0"; + PVRSRV_ERROR eError; + + if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings)) + { + PVR_DPF((PVR_DBG_ERROR, + "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE", + psPhysHeap)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAPINFO, failure); + } + + psPhysHeap->psImplFuncs->pfnGetFactoryMemStats(psPhysHeap->pvImplData, + &ui64TotalSize, + &ui64FreeSize); + + eError = GetPhysHeapUsageString(psPhysHeap->ui32UsageFlags, + sizeof(pszUsageString), + pszUsageString); + PVR_LOG_GOTO_IF_ERROR(eError, "GetPhysHeapUsageString", failure); + +#if defined(PVRSRV_ENABLE_XD_MEM) +#define PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTSPEC ", Spas Region: %p" +#define PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTARG(psPhysHeap) (psPhysHeap->psSpasRegion) +#else +#define PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTSPEC "%s" +#define PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTARG(psPhysHeap) "" +#endif + + if ((psPhysHeap->eType == PHYS_HEAP_TYPE_LMA) || + (psPhysHeap->eType == PHYS_HEAP_TYPE_DLM)) + { + IMG_CPU_PHYADDR sCPUPAddr; + IMG_DEV_PHYADDR sGPUPAddr; + + PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL); + PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL); + + eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, + &sCPUPAddr); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "pfnGetCPUPAddr"); + sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX); + } + + eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, + &sGPUPAddr); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "pfnGetDevPAddr"); + sGPUPAddr.uiAddr = IMG_UINT64_MAX; + } + + PVR_DUMPDEBUG_LOG("0x%p -> PdMs: %s, Type: %s, %s, " + "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", " + "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", " + "Usage Flags: 0x%08x (%s), Refs: %d, " + "Free Size: %"IMG_UINT64_FMTSPEC"B, " + "Total Size: %"IMG_UINT64_FMTSPEC"B" + PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTSPEC, + psPhysHeap, + psPhysHeap->pszPDumpMemspaceName, + pszTypeStrings[psPhysHeap->eType], + bDefaultHeap ? "default" : "-", + CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr), + sGPUPAddr.uiAddr, + psPhysHeap->ui32UsageFlags, + pszUsageString, + psPhysHeap->ui32RefCount, + ui64FreeSize, + ui64TotalSize, + PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTARG(psPhysHeap)); + } + else if (psPhysHeap->eType == PHYS_HEAP_TYPE_IMA) + { + IMG_CHAR pszSpanString[128] = "\0"; + void *pvIterHandle = NULL; + + PVR_DUMPDEBUG_LOG("0x%p -> PdMs: %s, Type: %s, %s, " + "Usage Flags: 0x%08x (%s), Refs: %d, " + "Free Size: %"IMG_UINT64_FMTSPEC"B, " + "Total Size: %"IMG_UINT64_FMTSPEC"B Spans:", + psPhysHeap, + psPhysHeap->pszPDumpMemspaceName, + pszTypeStrings[psPhysHeap->eType], + bDefaultHeap ? 
"default" : "-", + psPhysHeap->ui32UsageFlags, + pszUsageString, + psPhysHeap->ui32RefCount, + ui64FreeSize, + ui64TotalSize); + + while (psPhysHeap->psImplFuncs->pfnGetHeapSpansStringIter(psPhysHeap->pvImplData, + pszSpanString, + sizeof(pszSpanString), + &pvIterHandle)) + { + PVR_DUMPDEBUG_LOG("%s", pszSpanString); + } + } + else + { + PVR_DUMPDEBUG_LOG("0x%p -> PdMs: %s, Type: %s, %s, " + "Usage Flags: 0x%08x (%s), Refs: %d, " + "Free Size: %"IMG_UINT64_FMTSPEC"B, " + "Total Size: %"IMG_UINT64_FMTSPEC"B" + PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTSPEC, + psPhysHeap, + psPhysHeap->pszPDumpMemspaceName, + pszTypeStrings[psPhysHeap->eType], + bDefaultHeap ? "default" : "-", + psPhysHeap->ui32UsageFlags, + pszUsageString, + psPhysHeap->ui32RefCount, + ui64FreeSize, + ui64TotalSize, + PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTARG(psPhysHeap)); + } + +#undef PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTSPEC +#undef PHYS_HEAP_DISPLAY_PROPS_SPAS_GROUP_FMTARG + + return PVRSRV_OK; + +failure: + return eError; +} /*************************************************************************/ /*! -@Function _PhysHeapDebugRequest +@Function PhysHeapDebugRequest @Description This function is used to output debug information for a given device's PhysHeaps. @Input pfnDbgRequestHandle Data required by this function that is @@ -175,108 +475,179 @@ static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = the print function if required. @Return void */ /**************************************************************************/ -static void _PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle, +static void PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle, IMG_UINT32 ui32VerbLevel, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile) { - static const IMG_CHAR *const pszTypeStrings[] = { - "UNKNOWN", - "UMA", - "LMA", - "DMA", -#if defined(SUPPORT_WRAP_EXTMEMOBJECT) - "WRAP" -#endif - }; - PPVRSRV_DEVICE_NODE psDeviceNode = (PPVRSRV_DEVICE_NODE)pfnDbgRequestHandle; - PHYS_HEAP *psPhysHeap = NULL; - IMG_UINT64 ui64TotalSize; - IMG_UINT64 ui64FreeSize; - IMG_UINT32 i; + PHYS_HEAP *psPhysHeap; + + PVR_UNREFERENCED_PARAMETER(ui32VerbLevel); PVR_LOG_RETURN_VOID_IF_FALSE(psDeviceNode != NULL, "Phys Heap debug request failed. psDeviceNode was NULL"); PVR_DUMPDEBUG_LOG("------[ Device ID: %d - Phys Heaps ]------", - psDeviceNode->sDevId.i32OsDeviceID); + psDeviceNode->sDevId.i32KernelDeviceID); - for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++) + for (psPhysHeap = psDeviceNode->psPhysHeapList; psPhysHeap != NULL; psPhysHeap = psPhysHeap->psNext) { - psPhysHeap = psDeviceNode->papsRegisteredPhysHeaps[i]; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bDefaultHeap = psPhysHeap == psDeviceNode->apsPhysHeap[psDeviceNode->psDevConfig->eDefaultHeap]; + + eError = PhysHeapPrintHeapProperties(psPhysHeap, + bDefaultHeap, + pfnDumpDebugPrintf, + pvDumpDebugFile); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PhysHeapCreateProperties"); + continue; + } + } + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + OSLockAcquire(psDeviceNode->hPMRZombieListLock); + PVR_DUMPDEBUG_LOG("PMR Zombie Count: %u, PMR Zombie Count In Cleanup: %u", + psDeviceNode->uiPMRZombieCount, + psDeviceNode->uiPMRZombieCountInCleanup); + OSLockRelease(psDeviceNode->hPMRZombieListLock); +#endif + PVR_DUMPDEBUG_LOG("PMR Live Count: %d", PMRGetLiveCount()); +} - if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings)) +/*************************************************************************/ /*! 
+@Function HeapCfgUsedByPVRLayer +@Description Checks if a physheap config must be handled by the PVR Layer +@Input psConfig PhysHeapConfig +@Return IMG_BOOL +*/ /**************************************************************************/ +static IMG_BOOL HeapCfgUsedByPVRLayer(PHYS_HEAP_CONFIG *psConfig) +{ + PVRSRV_PHYS_HEAP eHeap; + IMG_BOOL bPVRHeap = IMG_FALSE; + + /* Heaps are triaged for initialisation by either + * the PVR Layer or the device-specific heap handler. */ + for (eHeap = PVRSRV_PHYS_HEAP_DEFAULT; + eHeap < PVRSRV_PHYS_HEAP_LAST; + eHeap++) + { + if ((BIT_ISSET(psConfig->ui32UsageFlags, eHeap) && + PhysHeapCreatedByPVRLayer(eHeap))) { - PVR_DPF((PVR_DBG_ERROR, - "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE", - psPhysHeap)); + bPVRHeap = IMG_TRUE; break; } + } - psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, - &ui64TotalSize, - &ui64FreeSize); + return bPVRHeap; +} - if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA) - { - IMG_CPU_PHYADDR sCPUPAddr; - IMG_DEV_PHYADDR sGPUPAddr; - PVRSRV_ERROR eError; +#if defined(PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS) +/*************************************************************************/ /*! +@Function PhysHeapCreateDLMIMAHeapsFromConfig +@Description Create new heaps for a device from DLM and IMA configs. + This function will both create and construct the link + between them. +@Input psDevNode Pointer to device node struct +@Input pasConfigs Pointer to array of Heap configurations. +@Input ui32NumConfigs Number of configurations in array. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +static PVRSRV_ERROR +PhysHeapCreateDLMIMAHeapsFromConfig(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_CONFIG *pasConfigs, + IMG_UINT32 ui32NumConfigs) +{ + /* The DLM heaps must be created before IMA heaps and the config order is not well-defined. + * So for each DLM heap create it and then create the IMA heaps associated. */ - PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL); - PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL); + PVRSRV_ERROR eError; + IMG_UINT32 uiDLMIdx, uiIMAIdx; + PHYS_HEAP_POLICY uiIMAPolicy = OSIsMapPhysNonContigSupported() ? 
PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG : PHYS_HEAP_POLICY_DEFAULT; + PHYS_HEAP *psDLMHeap; - eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, - &sCPUPAddr); - if (eError != PVRSRV_OK) + /* Iterate and create the DLM heaps */ + for (uiDLMIdx = 0; uiDLMIdx < ui32NumConfigs; uiDLMIdx++) + { + if (pasConfigs[uiDLMIdx].eType == PHYS_HEAP_TYPE_DLM) + { + IMG_UINT32 uiHeapCount = 0; + eError = PhysmemCreateHeapDLM(psDevNode, + PHYS_HEAP_POLICY_DEFAULT, + &pasConfigs[uiDLMIdx], + pasConfigs[uiDLMIdx].uConfig.sDLM.pszHeapName, + &psDLMHeap); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeapDLM"); + + /* Then iterate and create the IMA heaps linked to this DLM heap */ + for (uiIMAIdx = 0; uiIMAIdx < ui32NumConfigs; uiIMAIdx++) { - PVR_LOG_ERROR(eError, "pfnGetCPUPAddr"); - sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX); + if (pasConfigs[uiIMAIdx].eType == PHYS_HEAP_TYPE_IMA && + pasConfigs[uiIMAIdx].uConfig.sIMA.uiDLMHeapIdx == uiDLMIdx) + { + PhysmemCreateHeapIMA(psDevNode, + uiIMAPolicy, + &pasConfigs[uiIMAIdx], + pasConfigs[uiIMAIdx].uConfig.sIMA.pszHeapName, + psDLMHeap, + pasConfigs[uiDLMIdx].uConfig.sDLM.ui32Log2PMBSize, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeapIMA"); + uiHeapCount++; + } } - eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, - &sGPUPAddr); - if (eError != PVRSRV_OK) + if (uiHeapCount == 0) { - PVR_LOG_ERROR(eError, "pfnGetDevPAddr"); - sGPUPAddr.uiAddr = IMG_UINT64_MAX; + PVR_DPF((PVR_DBG_WARNING, "DLM phys heap config %d: No connected IMA heaps. Phys heap will go unused.", uiDLMIdx)); } + } + } + + return PVRSRV_OK; +} - PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, " - - "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", " - "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", " - "Usage Flags: 0x%08x, Refs: %d, " - "Free Size: %"IMG_UINT64_FMTSPEC", " - "Total Size: %"IMG_UINT64_FMTSPEC, - psPhysHeap, - psPhysHeap->pszPDumpMemspaceName, - pszTypeStrings[psPhysHeap->eType], - CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr), - sGPUPAddr.uiAddr, - psPhysHeap->ui32UsageFlags, - psPhysHeap->ui32RefCount, - ui64FreeSize, - ui64TotalSize); +static void +PhysHeapDestroyDLMIMAHeaps(PVRSRV_DEVICE_NODE *psDevNode) +{ + PHYS_HEAP *psNode = psDevNode->psPhysHeapList; + + /* We need to ensure IMA heaps are destroyed before DLM so that + * we don't cause RA leaks by freeing the DLM first. + */ + while (psNode) + { + PHYS_HEAP *psTmp = psNode; + + psNode = psNode->psNext; + + if (psTmp->eType == PHYS_HEAP_TYPE_IMA) + { + PhysHeapDestroy(psTmp); } - else + } + + /* Reset the loop */ + psNode = psDevNode->psPhysHeapList; + + while (psNode) + { + PHYS_HEAP *psTmp = psNode; + + psNode = psNode->psNext; + + if (psTmp->eType == PHYS_HEAP_TYPE_DLM) { - PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, " - "Usage Flags: 0x%08x, Refs: %d, " - "Free Size: %"IMG_UINT64_FMTSPEC", " - "Total Size: %"IMG_UINT64_FMTSPEC, - psPhysHeap, - psPhysHeap->pszPDumpMemspaceName, - pszTypeStrings[psPhysHeap->eType], - psPhysHeap->ui32UsageFlags, - psPhysHeap->ui32RefCount, - ui64FreeSize, - ui64TotalSize); + PhysHeapDestroy(psTmp); } } } +#endif + /*************************************************************************/ /*! @Function PhysHeapCreateDeviceHeapsFromConfigs @Description Create new heaps for a device from configs. 
@@ -293,21 +664,33 @@ PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 i; PVRSRV_ERROR eError; - /* Register the physical memory heaps */ - psDevNode->papsRegisteredPhysHeaps = - OSAllocZMem(sizeof(*psDevNode->papsRegisteredPhysHeaps) * ui32NumConfigs); - PVR_LOG_RETURN_IF_NOMEM(psDevNode->papsRegisteredPhysHeaps, "OSAllocZMem"); + psDevNode->psPhysHeapList = NULL; + +#if defined(PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS) + /* DLM/IMA heaps must be constructed in a specific order */ + eError = PhysHeapCreateDLMIMAHeapsFromConfig(psDevNode, + pasConfigs, + ui32NumConfigs); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateDLMIMAHeapsFromConfig"); +#endif - psDevNode->ui32RegisteredPhysHeaps = 0; for (i = 0; i < ui32NumConfigs; i++) { - eError = PhysHeapCreateHeapFromConfig(psDevNode, - pasConfigs + i, - psDevNode->papsRegisteredPhysHeaps + i); - PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); - - psDevNode->ui32RegisteredPhysHeaps++; + /* A PhysHeapConfig can have multiple usage flags. If any flag in a + * heap's set points to a heap type that is handled by the PVR Layer + * then we assume that a single heap is shared between multiple + * allocators and it is safe to instantiate it here. If the heap + * is not marked to be initialised by the PVR Layer, leave it + * to the device specific handler. + * DLM Heaps have usage flags but don't have element in the + * fallback table, this check will prevent instantiate them twice. + */ + if (HeapCfgUsedByPVRLayer(&pasConfigs[i])) + { + eError = PhysHeapCreateHeapFromConfig(psDevNode, &pasConfigs[i], NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + } } #if defined(SUPPORT_PHYSMEM_TEST) @@ -318,7 +701,7 @@ PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, { eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hPhysHeapDbgReqNotify, psDevNode, - _PhysHeapDebugRequest, + PhysHeapDebugRequest, DEBUG_REQUEST_SYS, psDevNode); @@ -340,25 +723,42 @@ PhysHeapCreateHeapFromConfig(PVRSRV_DEVICE_NODE *psDevNode, #endif ) { - eResult = PhysHeapCreate(psDevNode, psConfig, NULL, - &_sPHEAPImplFuncs, ppsPhysHeap); + eResult = PhysmemCreateHeapOSMEM(psDevNode, + PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG, + psConfig, + ppsPhysHeap); } - else if (psConfig->eType == PHYS_HEAP_TYPE_LMA) + else if ((psConfig->eType == PHYS_HEAP_TYPE_LMA) || + (psConfig->eType == PHYS_HEAP_TYPE_DMA)) { + PHYS_HEAP_POLICY uiHeapPolicy; + + if (psDevNode->pfnPhysHeapGetLMAPolicy != NULL) + { + uiHeapPolicy = psDevNode->pfnPhysHeapGetLMAPolicy(psConfig->ui32UsageFlags, psDevNode); + } + else + { + uiHeapPolicy = OSIsMapPhysNonContigSupported() ? + PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG : + PHYS_HEAP_POLICY_DEFAULT; + } + eResult = PhysmemCreateHeapLMA(psDevNode, - OSIsMapPhysNonContigSupported() ? PHYSMEM_LMA_POLICY_ALLOC_ALLOW_NONCONTIG : - PHYSMEM_LMA_POLICY_DEFAULT, + uiHeapPolicy, psConfig, - "GPU LMA (Sys)", + (psConfig->eType == PHYS_HEAP_TYPE_LMA) ? 
+ "GPU LMA (Sys)" : + "GPU LMA DMA (Sys)", ppsPhysHeap); } - else if (psConfig->eType == PHYS_HEAP_TYPE_DMA) + else if ((psConfig->eType == PHYS_HEAP_TYPE_DLM) || + (psConfig->eType == PHYS_HEAP_TYPE_IMA)) { - eResult = PhysmemCreateHeapLMA(psDevNode, - PHYSMEM_LMA_POLICY_DEFAULT, - psConfig, - "GPU LMA DMA (Sys)", - ppsPhysHeap); + /* These heaps have already been instantiated in + * PhysHeapCreateDLMIMAHeapsFromConfig + */ + eResult = PVRSRV_OK; } else { @@ -370,7 +770,7 @@ PhysHeapCreateHeapFromConfig(PVRSRV_DEVICE_NODE *psDevNode, return eResult; } -#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (0x100000ULL * 32ULL) /* 32MB */ +#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (IMG_UINT64_C(0x100000) * IMG_UINT64_C(32)) /* 32MB */ static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig) { @@ -383,35 +783,113 @@ static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConf for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) { + /* Flags that may be used by multiple heaps. */ + const PHYS_HEAP_USAGE_FLAGS uiDuplicateFlags = PHYS_HEAP_USAGE_DLM; PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i]; PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0, - PVRSRV_ERROR_PHYSHEAP_CONFIG, - "Phys heap config %d: must specify usage flags.", i); + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: must specify usage flags.", i); - PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0, - PVRSRV_ERROR_PHYSHEAP_CONFIG, - "Phys heap config %d: duplicate usage flags.", i); + PVR_LOG_RETURN_IF_FALSE_VA(((ui32FlagsAccumulate & ~uiDuplicateFlags) & psHeapConf->ui32UsageFlags) == 0, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: duplicate usage flags.", i); ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags; - /* Output message if default heap is LMA and smaller than recommended minimum */ - if ((i == psDevConfig->eDefaultHeap) && + if (BITMASK_ANY((1U << psDevConfig->eDefaultHeap), PHYS_HEAP_USAGE_MASK) && + BITMASK_ANY((1U << psDevConfig->eDefaultHeap), psHeapConf->ui32UsageFlags)) + { + switch (psHeapConf->eType) + { #if defined(__KERNEL__) - ((psHeapConf->eType == PHYS_HEAP_TYPE_LMA) || - (psHeapConf->eType == PHYS_HEAP_TYPE_DMA)) && -#else - (psHeapConf->eType == PHYS_HEAP_TYPE_LMA) && + case PHYS_HEAP_TYPE_DMA: #endif - (psHeapConf->uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)) + case PHYS_HEAP_TYPE_LMA: + { + if (psHeapConf->uConfig.sLMA.uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE) + { + /* Output message if default heap is LMA and smaller than recommended minimum. */ + PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX + " (recommended minimum heap size is 0x%" IMG_UINT64_FMTSPECX ")", + __func__, psHeapConf->uConfig.sLMA.uiSize, + PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)); + } + break; + } + case PHYS_HEAP_TYPE_IMA: + { + /* Check if chained DLM has more than default LMA size. + * This is not perfect as other connected IMAs could reserve all the memory. 
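The IMA-to-DLM linkage rules enforced a little further down reduce to a bounds check on the DLM heap index plus a type check on the referenced config entry. A standalone sketch of that rule, assuming simplified hypothetical types rather than the real PHYS_HEAP_CONFIG:

#include <stdbool.h>
#include <stdint.h>

typedef enum { HEAP_LMA, HEAP_DLM, HEAP_IMA } HEAP_TYPE;

/* Simplified stand-in for a phys heap config entry. */
typedef struct { HEAP_TYPE eType; uint32_t uiDLMHeapIdx; } HEAP_CFG;

/* Every IMA entry must reference an in-bounds config entry of type DLM. */
bool ValidateImaDlmLinks(const HEAP_CFG *pasCfg, uint32_t uiCount)
{
    uint32_t i;

    for (i = 0; i < uiCount; i++)
    {
        if (pasCfg[i].eType != HEAP_IMA)
            continue;

        if (pasCfg[i].uiDLMHeapIdx >= uiCount)
            return false;   /* linked index out of bounds */

        if (pasCfg[pasCfg[i].uiDLMHeapIdx].eType != HEAP_DLM)
            return false;   /* linked entry is not a DLM heap */
    }
    return true;
}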
*/ + IMG_UINT64 uiSize = psDevConfig->pasPhysHeaps[psHeapConf->uConfig.sIMA.uiDLMHeapIdx].uConfig.sDLM.uiSize; + + if (uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Default heap has access to 0x%" IMG_UINT64_FMTSPECX + " (recommended minimum heap size is 0x%" IMG_UINT64_FMTSPECX ")", + __func__, uiSize, + PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)); + } + break; + } + case PHYS_HEAP_TYPE_DLM: + { + PVR_DPF((PVR_DBG_ERROR, "%s: Phys heap config %d: " + "A DLM heap cannot be the default heap.", + __func__, i)); + return PVRSRV_ERROR_PHYSHEAP_CONFIG; + } + default: + break; + } + } + +#if !defined(PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS) + if ((psHeapConf->eType == PHYS_HEAP_TYPE_IMA) || (psHeapConf->eType == PHYS_HEAP_TYPE_DLM)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Phys heap config %d: " + "Dynamic phys heaps are not supported! " + "Please enable PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS", + __func__, i)); + return PVRSRV_ERROR_PHYSHEAP_CONFIG; + } +#else /* PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS */ + if (psHeapConf->eType == PHYS_HEAP_TYPE_DLM) { - PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX - " (recommended minimum heap size is 0x%llx)", - __func__, psHeapConf->uiSize, - PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)); + PVR_LOG_RETURN_IF_FALSE_VA((psHeapConf->ui32UsageFlags & PHYS_HEAP_USAGE_DLM) == PHYS_HEAP_USAGE_DLM, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: DLM heap must specify the DLM usage flag.", i); + + /* If a DLM heap has less space than a PMB, it cannot export a single PMB. The PMB size should be lowered. */ + PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->uConfig.sDLM.uiSize >= psHeapConf->uConfig.sDLM.ui32Log2PMBSize, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: Size of DLM heap is 0x%" IMG_UINT64_FMTSPECX + " but the PMB size is 0x%" IMG_UINT64_FMTSPECX + ". The total size must be greater than or equal to the PMB size.", + i, psHeapConf->uConfig.sDLM.uiSize, + IMG_UINT64_C(1) << psHeapConf->uConfig.sDLM.ui32Log2PMBSize); } + + /* IMA heaps must point to a DLM heap. */ + if (psHeapConf->eType == PHYS_HEAP_TYPE_IMA) + { + PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->uConfig.sIMA.uiDLMHeapIdx < psDevConfig->ui32PhysHeapCount, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: IMA heap is trying to link to a DLM heap out of bounds. " + "Requested Heap Index: %d, Heap Array Count: %d", + i, psHeapConf->uConfig.sIMA.uiDLMHeapIdx, psDevConfig->ui32PhysHeapCount); + PVR_LOG_RETURN_IF_FALSE_VA(psDevConfig->pasPhysHeaps[psHeapConf->uConfig.sIMA.uiDLMHeapIdx].eType == PHYS_HEAP_TYPE_DLM, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: IMA heap is trying to link to a NON-DLM heap type. " + "Requested Heap Idx: %d, Heap Type: %d", + i, psHeapConf->uConfig.sIMA.uiDLMHeapIdx, + psDevConfig->pasPhysHeaps[psHeapConf->uConfig.sIMA.uiDLMHeapIdx].eType); + } +#endif /* PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS */ + } + if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_GPU_LOCAL) { PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0) , @@ -429,6 +907,8 @@ static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConf } #if defined(SUPPORT_GPUVIRT_VALIDATION) +static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode); + /*************************************************************************/ /*! 
@Function CreateGpuVirtValArenas @Description Create virtualization validation arenas @@ -441,107 +921,192 @@ static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) The values are different from base/size of arenas. */ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; - PHYS_HEAP_CONFIG *psGPULocalHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL); - PHYS_HEAP_CONFIG *psDisplayHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY); - IMG_UINT64 uBase; - IMG_UINT64 uSize; - IMG_UINT64 uBaseShared; - IMG_UINT64 uSizeShared; - IMG_UINT64 uSizeSharedReg; + PHYS_HEAP_CONFIG *psGPULocalHeap = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL); + IMG_DEV_PHYADDR sGPULocalCardBase = PhysHeapConfigGetCardBase(psGPULocalHeap); + IMG_UINT64 uiGPULocalSize = PhysHeapConfigGetSize(psGPULocalHeap); + PHYS_HEAP_CONFIG *psDisplayHeap = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY); + + IMG_UINT64 uPrivateRABase; + IMG_UINT64 uPrivateRASize; + IMG_UINT64 uSharedRABase; + IMG_UINT64 uSharedRASize; + IMG_UINT64 uSharedRegionBase; + IMG_UINT64 uSharedRegionSize; IMG_UINT32 i; - /* Shared region is fixed size, the remaining space is divided amongst OSes */ - uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); - uSize = psGPULocalHeap->uiSize - uSizeShared; - uSize /= GPUVIRT_VALIDATION_NUM_OS; - uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */ + //PVR_LOG_RETURN_IF_FALSE((psDeviceNode->psOSSharedArena == NULL), "Validation RAs already created.", PVRSRV_OK); - uBase = psGPULocalHeap->sCardBase.uiAddr; - uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS; - uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase); + /* Shared region is fixed size, the remaining space is divided amongst OSes */ + uPrivateRASize = uiGPULocalSize - PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + uPrivateRASize /= GPUVIRT_VALIDATION_NUM_OS; + uPrivateRASize = uPrivateRASize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */ + uSharedRASize = uiGPULocalSize - uPrivateRASize * GPUVIRT_VALIDATION_NUM_OS; - PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", - psGPULocalHeap->sCardBase.uiAddr, - psGPULocalHeap->uiSize)); + PVR_LOG(("GPUVIRT_VALIDATION: GPU_LOCAL heap base (base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ")" + " split into %u private regions of size 0x%" IMG_UINT64_FMTSPECX " and one shared region of size 0x%" IMG_UINT64_FMTSPECX " ", + sGPULocalCardBase.uiAddr, uiGPULocalSize, GPUVIRT_VALIDATION_NUM_OS, uPrivateRASize, uSharedRASize)); /* If a display heap config exists, include the display heap in the non-secure regions */ if (psDisplayHeap) { - /* Only works when DISPLAY heap follows GPU_LOCAL heap. 
*/ - PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", - psDisplayHeap->sCardBase.uiAddr, - psDisplayHeap->uiSize)); + IMG_DEV_PHYADDR sGPUDisplayCardBase = PhysHeapConfigGetCardBase(psDisplayHeap); + IMG_UINT64 uiGPUDisplaySize = PhysHeapConfigGetSize(psDisplayHeap); + + PVR_LOG(("GPUVIRT_VALIDATION: DISPLAY heap (base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ") merged into shared region.", + sGPUDisplayCardBase.uiAddr, + uiGPUDisplaySize)); + + if (sGPUDisplayCardBase.uiAddr > sGPULocalCardBase.uiAddr) + { + /* + * --------------------------------------------------------------------------------------------------- + * | . . . GPU LOCAL HEAP . . . | DISPLAY HEAP | + * --------------------------------------------------------------------------------------------------- + * \______/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ \_______________/ + * OSID 0 OSID 1 OSID 2 OSID 3 OSID 4 OSID 5 OSID 6 OSID 7 SHARED DISPLAY + * \____SHARED REGION____/ + * OSID 0 has full access over everything + * Shared RA of the GPU Local heap is accessible to all OSIDs for special purpose allocations. + */ + if ((sGPULocalCardBase.uiAddr + uiGPULocalSize) != sGPUDisplayCardBase.uiAddr) + { + PVR_DPF((PVR_DBG_ERROR, "%s: GPU Local heap and display heap must be adjacent in physical memory.", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uPrivateRABase = sGPULocalCardBase.uiAddr; + uSharedRABase = sGPULocalCardBase.uiAddr + uiGPULocalSize - uSharedRASize; + uSharedRegionBase = uSharedRABase; + } + else + { + /* + * --------------------------------------------------------------------------------------------------- + * | DISPLAY HEAP | . . . GPU LOCAL HEAP . . . | + * --------------------------------------------------------------------------------------------------- + * \_______________/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ + * DISPLAY SHARED OSID 0 OSID 1 OSID 2 OSID 3 OSID 4 OSID 5 OSID 6 OSID 7 + * \____SHARED REGION____/ + * + * OSID 0 has full access over everything + * Shared RA of the GPU Local heap is accessible to all OSIDs for special purpose allocations. + */ + if ((sGPUDisplayCardBase.uiAddr + uiGPUDisplaySize) != sGPULocalCardBase.uiAddr) + { + PVR_DPF((PVR_DBG_ERROR, "%s: GPU Local heap and display heap must be adjacent in physical memory.", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } - uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize; + uSharedRABase = sGPULocalCardBase.uiAddr; + uPrivateRABase = sGPULocalCardBase.uiAddr + uSharedRASize; + uSharedRegionBase = sGPUDisplayCardBase.uiAddr; + } + + uSharedRegionSize = uSharedRASize + uiGPUDisplaySize; } else { - uSizeSharedReg = uSizeShared; + /* + * ---------------------------------------------------------------------------------- + * | . . . GPU LOCAL HEAP . . . | + * ---------------------------------------------------------------------------------- + * \______/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ \______/ + * OSID 0 OSID 1 OSID 2 OSID 3 OSID 4 OSID 5 OSID 6 OSID 7 SHARED + * + * OSID 0 has full access over everything + * Shared RA of the GPU Local heap is accessible to all OSIDs for special purpose allocations. 
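The region arithmetic used by CreateGpuVirtValArenas amounts to: align the fixed shared size up to a page, split the remainder evenly across the OSIDs rounded down to a page, and give whatever is left over back to the shared RA. A small arithmetic sketch with made-up sizes (the page size, OSID count and heap size below are illustrative, not values taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE    4096ULL
#define NUM_OSIDS    8ULL
#define SHARED_BYTES (16ULL * 1024 * 1024)   /* fixed shared region, illustrative */

int main(void)
{
    uint64_t uiHeapSize = 1024ULL * 1024 * 1024;   /* 1 GiB GPU_LOCAL heap, illustrative */

    /* Align the shared carve-out up to a page. */
    uint64_t uiShared = (SHARED_BYTES + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

    /* Split the rest evenly per OSID, rounded down to a page. */
    uint64_t uiPrivate = ((uiHeapSize - uiShared) / NUM_OSIDS) & ~(PAGE_SIZE - 1);

    /* Whatever remains after the private regions becomes the shared RA. */
    uint64_t uiSharedReal = uiHeapSize - uiPrivate * NUM_OSIDS;

    printf("private per OSID: 0x%llx, shared: 0x%llx\n",
           (unsigned long long)uiPrivate, (unsigned long long)uiSharedReal);
    return 0;
}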
+ */ + uPrivateRABase = sGPULocalCardBase.uiAddr; + uSharedRABase = uPrivateRABase + uPrivateRASize * GPUVIRT_VALIDATION_NUM_OS; + uSharedRASize = uiGPULocalSize - (uSharedRABase - uPrivateRABase); + uSharedRegionSize = uSharedRASize; + uSharedRegionBase = uSharedRABase; } - PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE); - PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED); + if (uPrivateRASize < GPUVIRT_MIN_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Per-OSID private memory regions are too small (current: 0x%" IMG_UINT64_FMTSPECX "" + " required: 0x%" IMG_UINT64_FMTSPECX "). Increase GPU Local heap size.", + __func__, uPrivateRASize, GPUVIRT_MIN_SIZE)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (uSharedRASize < GPUVIRT_SIZEOF_SHARED) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Memory region shared between all OSIDs is too small (current: 0x%" IMG_UINT64_FMTSPECX "" + " required: 0x%" IMG_UINT64_FMTSPECX "). Increase GPU Local heap size.", + __func__, uSharedRASize, GPUVIRT_SIZEOF_SHARED)); + return PVRSRV_ERROR_INVALID_PARAMS; + } - for (i = 0; i < GPUVIRT_VALIDATION_NUM_OS; i++) + FOREACH_VALIDATION_OSID(i) { IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH]; - PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize)); + PVR_LOG(("GPUVIRT_VALIDATION: create arena Private OSID: %u, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uPrivateRABase, uPrivateRASize)); - OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i); + OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%u", i); psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName, OSGetPageShift(), 0, - uBase, - uSize, + uPrivateRABase, + uPrivateRASize, RA_POLICY_DEFAULT); - PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span"); - - aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase; + PVR_GOTO_IF_FALSE(psDeviceNode->psOSidSubArena[i] != NULL, FreeArenas); + aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uPrivateRABase; if (i == 0) { - /* OSid0 has access to all regions */ - aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL; + /* Host OSID0 has access to all regions */ + aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uiGPULocalSize - 1ULL; } else { - aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL; + /* Guest OSIDs are limited to their private range */ + aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uPrivateRABase + uPrivateRASize - 1ULL; } - /* uSizeSharedReg includes display heap */ - aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared; - aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL; + /* uSharedRegionSize includes display heap */ + aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uSharedRegionBase; + aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uSharedRegionBase + uSharedRegionSize - 1ULL; - PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",", - i, - aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i], - aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i], - aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i], - aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i])); - uBase += uSize; + uPrivateRABase += uPrivateRASize; } - PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared)); - - PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED); + 
PVR_LOG(("GPUVIRT_VALIDATION: create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uSharedRABase, uSharedRASize)); - /* uSizeShared does not include display heap */ + /* uSharedRASize does not include display heap */ psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED", OSGetPageShift(), 0, - uBaseShared, - uSizeShared, + uSharedRABase, + uSharedRASize, RA_POLICY_DEFAULT); - PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span"); + PVR_GOTO_IF_FALSE(psDeviceNode->psOSSharedArena != NULL, FreeArenas); + + FOREACH_VALIDATION_OSID(i) + { + PVR_LOG(("GPUVIRT_VALIDATION: HW regions OSID %u: Private Region = [0x%" IMG_UINT64_FMTSPECX "..0x%" IMG_UINT64_FMTSPECX "]" + " Shared Region = [0x%" IMG_UINT64_FMTSPECX "..0x%" IMG_UINT64_FMTSPECX "]", + i, + aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i], + aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i], + aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i], + aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i])); + } - if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL) + if (psDeviceNode->psDevConfig->pfnSysInitFirewall != NULL) { - psDeviceNode->psDevConfig->pfnSysDevVirtInit(aui64OSidMin, aui64OSidMax); + psDeviceNode->psDevConfig->pfnSysInitFirewall(psDeviceNode->psDevConfig->hSysData, aui64OSidMin, aui64OSidMax); } return PVRSRV_OK; + +FreeArenas: + DestroyGpuVirtValArenas(psDeviceNode); + + return PVRSRV_ERROR_OUT_OF_MEMORY; } /* @@ -551,12 +1116,17 @@ static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) { IMG_UINT32 uiCounter = 0; - /* - * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must - * not free it here as it gets cleared later. - */ - for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) + FOREACH_VALIDATION_OSID(uiCounter) { + if (uiCounter == RGXFW_HOST_DRIVER_ID) + { + /* + * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must + * not free it here as it gets cleared later. 
+ */ + continue; + } + if (psDeviceNode->psOSidSubArena[uiCounter] == NULL) { continue; @@ -582,9 +1152,9 @@ static PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode) PHYS_HEAP_TYPE eHeapType; PVRSRV_ERROR eError; - eError = PhysHeapAcquireByDevPhysHeap(psDeviceNode->psDevConfig->eDefaultHeap, + eError = PhysHeapAcquireByID(psDeviceNode->psDevConfig->eDefaultHeap, psDeviceNode, &psDeviceNode->psMMUPhysHeap); - PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorDeinit); eHeapType = PhysHeapGetType(psDeviceNode->psMMUPhysHeap); @@ -638,6 +1208,9 @@ PVRSRV_ERROR PhysHeapInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DE PVRSRV_ERROR eError; PVRSRV_PHYS_HEAP ePhysHeap; + eError = OSLockCreate(&psDeviceNode->hPhysHeapLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + eError = PVRSRVValidatePhysHeapConfig(psDevConfig); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig"); @@ -646,24 +1219,20 @@ PVRSRV_ERROR PhysHeapInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DE psDevConfig->ui32PhysHeapCount); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateDeviceHeapsFromConfigs", ErrorDeinit); - for (ePhysHeap = PVRSRV_PHYS_HEAP_DEFAULT+1; ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++) + /* Must loop from the 2nd heap to the last */ + PVR_ASSERT(PVRSRV_PHYS_HEAP_DEFAULT == 0); + for (ePhysHeap = (PVRSRV_PHYS_HEAP)(PVRSRV_PHYS_HEAP_DEFAULT+1); ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++) { - if (PhysHeapPVRLayerAcquire(ePhysHeap)) + if (PhysHeapAcquiredByPVRLayer(ePhysHeap)) { - eError = PhysHeapAcquireByDevPhysHeap(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]); - PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit); - } - - /* Calculate the total number of user accessible physical heaps */ - if (psDeviceNode->apsPhysHeap[ePhysHeap] && PhysHeapUserModeAlloc(ePhysHeap)) - { - psDeviceNode->ui32UserAllocHeapCount++; + eError = PhysHeapAcquireByID(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorDeinit); } } if (PhysHeapValidateDefaultHeapExists(psDeviceNode)) { - PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysHeapCheckUsageFlags", ErrorDeinit); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapValidateDefaultHeapExists", ErrorDeinit); } eError = PhysHeapMMUPxSetup(psDeviceNode); @@ -683,18 +1252,10 @@ void PhysHeapDeInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_PHYS_HEAP ePhysHeapIdx; IMG_UINT32 i; -#if defined(SUPPORT_AUTOVZ) - if (psDeviceNode->psFwMMUReservedPhysHeap) - { - PhysHeapDestroy(psDeviceNode->psFwMMUReservedPhysHeap); - psDeviceNode->psFwMMUReservedPhysHeap = NULL; - } -#endif - PhysHeapMMUPxDeInit(psDeviceNode); /* Release heaps */ - for (ePhysHeapIdx = 0; + for (ePhysHeapIdx = PVRSRV_PHYS_HEAP_DEFAULT; ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap); ePhysHeapIdx++) { @@ -704,19 +1265,7 @@ void PhysHeapDeInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) } } - if (psDeviceNode->psFWMainPhysHeap) - { - PhysHeapDestroy(psDeviceNode->psFWMainPhysHeap); - psDeviceNode->psFWMainPhysHeap = NULL; - } - - if (psDeviceNode->psFWCfgPhysHeap) - { - PhysHeapDestroy(psDeviceNode->psFWCfgPhysHeap); - psDeviceNode->psFWCfgPhysHeap = NULL; - } - - for (i = 0; i < RGX_NUM_OS_SUPPORTED; i++) + FOREACH_SUPPORTED_DRIVER(i) { if (psDeviceNode->apsFWPremapPhysHeap[i]) { @@ -726,10 +1275,100 @@ void PhysHeapDeInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) } 
PhysHeapDestroyDeviceHeaps(psDeviceNode); + + OSLockDestroy(psDeviceNode->hPhysHeapLock); } +#if defined(PVRSRV_ENABLE_XD_MEM) +static void PhysHeapSpasRegionLock(PHYS_HEAP_SPAS_REGION *psSpasRegion) +{ + while (OSAtomicCompareExchange(&psSpasRegion->ui32Lock, 0, 1) != 0); + OSMemoryBarrier(NULL); +} +static void PhysHeapSpasRegionUnlock(PHYS_HEAP_SPAS_REGION *psSpasRegion) +{ + OSMemoryBarrier(NULL); + OSAtomicWrite(&psSpasRegion->ui32Lock, 0); +} + +/* Insert the physheap into a given SPAS. */ +static void PhysHeapSpasInsert(PHYS_HEAP* psPhysHeap, PHYS_HEAP_SPAS_REGION *psSpasRegion) +{ + if (psSpasRegion == NULL) + return; /* Not in a SPAS region. */ + /* A physheap may only be a part of a single SPAS region. */ + PVR_ASSERT(psPhysHeap->psSpasRegion == NULL); + + psPhysHeap->psSpasRegion = psSpasRegion; + + PhysHeapSpasRegionLock(psSpasRegion); + dllist_add_to_head(&psSpasRegion->sListHead, &psPhysHeap->sSpasSibling); + PhysHeapSpasRegionUnlock(psSpasRegion); +} + +/* Remove the PhysHeap from its SPAS region if it is in one. */ +static void PhysHeapSpasRemove(PHYS_HEAP *psPhysHeap) +{ + PHYS_HEAP_SPAS_REGION *psSpasRegion = psPhysHeap->psSpasRegion; + + if (psSpasRegion == NULL) + return; /* Not in a region. */ + + PhysHeapSpasRegionLock(psSpasRegion); + psPhysHeap->psSpasRegion = NULL; + dllist_remove_node(&psPhysHeap->sSpasSibling); + PhysHeapSpasRegionUnlock(psSpasRegion); + + psPhysHeap->psSpasRegion = NULL; +} + +PVRSRV_ERROR PhysHeapSpasWithDevice(PHYS_HEAP* psFromPhysHeap, + PPVRSRV_DEVICE_NODE psToDevNode) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_FOUND; + PDLLIST_NODE psNode, psNextNode; + + if (psFromPhysHeap->psSpasRegion == NULL) + return eError; + + PhysHeapSpasRegionLock(psFromPhysHeap->psSpasRegion); + dllist_foreach_node(&psFromPhysHeap->psSpasRegion->sListHead, psNode, psNextNode) + { + PHYS_HEAP *psPhysHeapCursor = IMG_CONTAINER_OF(psNode, PHYS_HEAP, sSpasSibling); + if (psPhysHeapCursor->psDevNode == psToDevNode) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, exit_unlock_); + } + } +exit_unlock_: + PhysHeapSpasRegionUnlock(psFromPhysHeap->psSpasRegion); + return eError; +} + +static IMG_UINT64 PhysHeapSpasDeviceBitmap(PHYS_HEAP* psPhysHeap) +{ + PDLLIST_NODE psNode, psNextNode; + IMG_UINT64 uiDevices = 0; + + static_assert((sizeof(IMG_UINT64) * 8) >= PVRSRV_MAX_DEVICES, "PhysHeapSpasDeviceBitmap() requires updating"); + + if (psPhysHeap->psSpasRegion == NULL) + return uiDevices; + + PhysHeapSpasRegionLock(psPhysHeap->psSpasRegion); + dllist_foreach_node(&psPhysHeap->psSpasRegion->sListHead, psNode, psNextNode) + { + PHYS_HEAP *psPhysHeapCursor = IMG_CONTAINER_OF(psNode, PHYS_HEAP, sSpasSibling); + uiDevices |= IMG_UINT64_C(1) << psPhysHeapCursor->psDevNode->sDevId.ui32InternalID; + } + PhysHeapSpasRegionUnlock(psPhysHeap->psSpasRegion); + return uiDevices; +} +#endif + PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP_POLICY uiPolicy, PHEAP_IMPL_DATA pvImplData, PHEAP_IMPL_FUNCS *psImplFuncs, PHYS_HEAP **ppsPhysHeap) @@ -746,50 +1385,158 @@ PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, } PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs != NULL, "psImplFuncs"); - PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs->pfnCreatePMR != NULL, "psImplFuncs->pfnCreatePMR"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs->pfnCreatePMR != NULL || + psImplFuncs->pfnCreatePMB, + "psImplFuncs->pfnCreatePMR || psImplFuncs->pfnCreatePMB"); psNew = OSAllocMem(sizeof(PHYS_HEAP)); PVR_RETURN_IF_NOMEM(psNew); - psNew->psDevNode = psDevNode; 
+ +#if defined(PVRSRV_ENABLE_XD_MEM) + psNew->psSpasRegion = NULL; +#endif + + switch (psConfig->eType) + { + case PHYS_HEAP_TYPE_LMA: + psNew->psMemFuncs = psConfig->uConfig.sLMA.psMemFuncs; + psNew->hPrivData = psConfig->uConfig.sLMA.hPrivData; + psNew->pszPDumpMemspaceName = psConfig->uConfig.sLMA.pszPDumpMemspaceName; + OSStringSafeCopy(psNew->aszName, + (psConfig->uConfig.sLMA.pszHeapName) ? psConfig->uConfig.sLMA.pszHeapName : + "Unknown PhysHeap", + PHYS_HEAP_NAME_SIZE); +#if defined(PVRSRV_ENABLE_XD_MEM) + PhysHeapSpasInsert(psNew, psConfig->uConfig.sLMA.psSpasRegion); +#endif + break; + case PHYS_HEAP_TYPE_IMA: + psNew->psMemFuncs = psConfig->uConfig.sIMA.psMemFuncs; + psNew->hPrivData = psConfig->uConfig.sIMA.hPrivData; + psNew->pszPDumpMemspaceName = psConfig->uConfig.sIMA.pszPDumpMemspaceName; + OSStringSafeCopy(psNew->aszName, + (psConfig->uConfig.sIMA.pszHeapName) ? psConfig->uConfig.sIMA.pszHeapName : + "Unknown PhysHeap", + PHYS_HEAP_NAME_SIZE); + break; + case PHYS_HEAP_TYPE_DMA: + psNew->psMemFuncs = psConfig->uConfig.sDMA.psMemFuncs; + psNew->hPrivData = psConfig->uConfig.sDMA.hPrivData; + psNew->pszPDumpMemspaceName = psConfig->uConfig.sDMA.pszPDumpMemspaceName; + OSStringSafeCopy(psNew->aszName, + (psConfig->uConfig.sDMA.pszHeapName) ? psConfig->uConfig.sDMA.pszHeapName : + "Unknown PhysHeap", + PHYS_HEAP_NAME_SIZE); + break; + case PHYS_HEAP_TYPE_UMA: + psNew->psMemFuncs = psConfig->uConfig.sUMA.psMemFuncs; + psNew->hPrivData = psConfig->uConfig.sUMA.hPrivData; + psNew->pszPDumpMemspaceName = psConfig->uConfig.sUMA.pszPDumpMemspaceName; + OSStringSafeCopy(psNew->aszName, + (psConfig->uConfig.sUMA.pszHeapName) ? psConfig->uConfig.sUMA.pszHeapName : + "Unknown PhysHeap", + PHYS_HEAP_NAME_SIZE); +#if defined(PVRSRV_ENABLE_XD_MEM) + PhysHeapSpasInsert(psNew, psConfig->uConfig.sUMA.psSpasRegion); +#endif + break; + case PHYS_HEAP_TYPE_DLM: + psNew->psMemFuncs = psConfig->uConfig.sDLM.psMemFuncs; + psNew->hPrivData = psConfig->uConfig.sDLM.hPrivData; + psNew->pszPDumpMemspaceName = "None"; + OSStringSafeCopy(psNew->aszName, + (psConfig->uConfig.sDLM.pszHeapName) ? psConfig->uConfig.sDLM.pszHeapName : + "Unknown PhysHeap", + PHYS_HEAP_NAME_SIZE); + break; +#if defined(SUPPORT_WRAP_EXTMEMOBJECT) + case PHYS_HEAP_TYPE_WRAP: + psNew->psMemFuncs = psConfig->uConfig.sWRAP.psMemFuncs; + psNew->hPrivData = psConfig->uConfig.sWRAP.hPrivData; + psNew->pszPDumpMemspaceName = psConfig->uConfig.sWRAP.pszPDumpMemspaceName; + OSStringSafeCopy(psNew->aszName, + (psConfig->uConfig.sWRAP.pszHeapName) ? psConfig->uConfig.sWRAP.pszHeapName : + "Unknown PhysHeap", + PHYS_HEAP_NAME_SIZE); + break; +#endif + default: + PVR_LOG_ERROR(PVRSRV_ERROR_NOT_IMPLEMENTED, "psConfig->eType not implemented"); + } + psNew->eType = psConfig->eType; - psNew->psMemFuncs = psConfig->psMemFuncs; - psNew->hPrivData = psConfig->hPrivData; - psNew->ui32RefCount = 0; - psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName; psNew->ui32UsageFlags = psConfig->ui32UsageFlags; - + psNew->uiPolicy = uiPolicy; + psNew->ui32RefCount = 0; + psNew->psDevNode = psDevNode; +#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) + OSAtomicWrite(&psNew->sOOMDetected, IMG_FALSE); +#endif psNew->pvImplData = pvImplData; psNew->psImplFuncs = psImplFuncs; - psNew->psNext = g_psPhysHeapList; - g_psPhysHeapList = psNew; +#if defined(SUPPORT_STATIC_IPA) + { + IMG_UINT8 ui8Val; + + /* Ensure we do not cause an address fault by accessing beyond + * the end of the psConfig->sIPAConfig structure. 
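The SPAS region lock introduced above is a plain busy-wait built on an atomic compare-exchange with acquire/release ordering around the critical section. The same pattern in standalone C11 atomics (an equivalent sketch, not the OSAtomicCompareExchange/OSMemoryBarrier wrappers themselves):

#include <stdatomic.h>

typedef struct { atomic_uint uiLock; } SPIN_REGION;

/* Spin until the lock word transitions 0 -> 1; acquire ordering on success. */
void RegionLock(SPIN_REGION *psRegion)
{
    unsigned int uiExpected;
    do {
        uiExpected = 0;   /* expect unlocked */
    } while (!atomic_compare_exchange_weak_explicit(&psRegion->uiLock,
                                                    &uiExpected, 1u,
                                                    memory_order_acquire,
                                                    memory_order_relaxed));
}

/* Release ordering makes the critical section visible before the unlock. */
void RegionUnlock(SPIN_REGION *psRegion)
{
    atomic_store_explicit(&psRegion->uiLock, 0u, memory_order_release);
}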
+ */ + ui8Val = psConfig->sIPAConfig.ui8IPAPolicyDefault; + psNew->ui32IPAPolicyValue = (IMG_UINT32)ui8Val; + + ui8Val = psConfig->sIPAConfig.ui8IPAPolicyMask; + psNew->ui32IPAClearMask = (IMG_UINT32)ui8Val; + + ui8Val = psConfig->sIPAConfig.ui8IPAPolicyShift; + psNew->ui32IPAShift = (IMG_UINT32)ui8Val; + PVR_LOG_VA(PVR_DBG_MESSAGE, "%s: Physheap <%p> ['%s'] Config @ <%p> IPA = [0x%x, 0x%x, 0x%x]", + __func__, psNew, psNew->aszName, + psConfig, psNew->ui32IPAPolicyValue, + psNew->ui32IPAClearMask, psNew->ui32IPAShift); + } +#endif + + if (ppsPhysHeap != NULL) + { + *ppsPhysHeap = psNew; + } - *ppsPhysHeap = psNew; + psNew->psNext = psDevNode->psPhysHeapList; + psDevNode->psPhysHeapList = psNew; - PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap); + PVR_DPF_RETURN_RC1(PVRSRV_OK, psNew); } void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode) { - IMG_UINT32 i; + PHYS_HEAP *psNode; if (psDevNode->hPhysHeapDbgReqNotify) { PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hPhysHeapDbgReqNotify); } - /* Unregister heaps */ - for (i = 0; i < psDevNode->ui32RegisteredPhysHeaps; i++) +#if defined(PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS) + /* DLM/IMA heaps must be destroyed in a specific order */ + PhysHeapDestroyDLMIMAHeaps(psDevNode); +#endif + + psNode = psDevNode->psPhysHeapList; + + while (psNode) { - PhysHeapDestroy(psDevNode->papsRegisteredPhysHeaps[i]); - } + PHYS_HEAP *psTmp = psNode; - OSFreeMem(psDevNode->papsRegisteredPhysHeaps); + psNode = psNode->psNext; + PhysHeapDestroy(psTmp); + } } void PhysHeapDestroy(PHYS_HEAP *psPhysHeap) { PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + PPVRSRV_DEVICE_NODE psDevNode = psPhysHeap->psDevNode; PVR_DPF_ENTERED1(psPhysHeap); @@ -800,13 +1547,17 @@ void PhysHeapDestroy(PHYS_HEAP *psPhysHeap) PVR_ASSERT(psPhysHeap->ui32RefCount == 0); } - if (g_psPhysHeapList == psPhysHeap) +#if defined(PVRSRV_ENABLE_XD_MEM) + PhysHeapSpasRemove(psPhysHeap); +#endif + + if (psDevNode->psPhysHeapList == psPhysHeap) { - g_psPhysHeapList = psPhysHeap->psNext; + psDevNode->psPhysHeapList = psPhysHeap->psNext; } else { - PHYS_HEAP *psTmp = g_psPhysHeapList; + PHYS_HEAP *psTmp = psDevNode->psPhysHeapList; while (psTmp->psNext != psPhysHeap) { @@ -825,123 +1576,152 @@ void PhysHeapDestroy(PHYS_HEAP *psPhysHeap) PVR_DPF_RETURN; } +static void _PhysHeapCountUserModeHeaps(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP_USAGE_FLAGS ui32UsageFlags) +{ + PVRSRV_PHYS_HEAP eHeap; + + for (eHeap = PVRSRV_PHYS_HEAP_DEFAULT; + eHeap <= PVRSRV_PHYS_HEAP_LAST; + eHeap++) + { + if (BIT_ISSET(ui32UsageFlags, eHeap) && + PhysHeapUserModeAlloc(eHeap)) + { + psDevNode->ui32UserAllocHeapCount++; + break; + } + } +} + PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap) { PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); psPhysHeap->ui32RefCount++; + /* When acquiring a heap for the 1st time, perform a check and + * calculate the total number of user accessible physical heaps */ + if (psPhysHeap->ui32RefCount == 1) + { + _PhysHeapCountUserModeHeaps(psPhysHeap->psDevNode, + psPhysHeap->ui32UsageFlags); + } + return PVRSRV_OK; } -PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag, - PPVRSRV_DEVICE_NODE psDevNode, - PHYS_HEAP **ppsPhysHeap) +static PHYS_HEAP * _PhysHeapFindHeapOrFallback(PVRSRV_PHYS_HEAP ePhysHeap, + PPVRSRV_DEVICE_NODE psDevNode) { - PHYS_HEAP *psNode = g_psPhysHeapList; - PVRSRV_ERROR eError = PVRSRV_OK; - - PVR_LOG_RETURN_IF_INVALID_PARAM(ui32UsageFlag != 0, "ui32UsageFlag"); - PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, 
"psDevNode"); + PHYS_HEAP *psPhysHeapNode = psDevNode->psPhysHeapList; + PVRSRV_PHYS_HEAP eFallback; - PVR_DPF_ENTERED1(ui32UsageFlag); + /* Swap the default heap alias for the system's real default PhysHeap */ + if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) + { + ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; + } - OSLockAcquire(g_hPhysHeapLock); + /* Check cache of PhysHeaps to see if it has been resolved before */ + if (psDevNode->apsPhysHeap[ePhysHeap] != NULL) + { + return psDevNode->apsPhysHeap[ePhysHeap]; + } - while (psNode) + /* Cache not ready, carry out search with fallback */ + while (psPhysHeapNode) { - if (psNode->psDevNode != psDevNode) - { - psNode = psNode->psNext; - continue; - } - if (BITMASK_ANY(psNode->ui32UsageFlags, ui32UsageFlag)) + if (BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) { - break; + return psPhysHeapNode; } - psNode = psNode->psNext; + + psPhysHeapNode = psPhysHeapNode->psNext; } - if (psNode == NULL) + /* Find fallback PhysHeap */ + eFallback = gasHeapProperties[ePhysHeap].eFallbackHeap; + if (ePhysHeap == eFallback) { - eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; + return NULL; } else { - psNode->ui32RefCount++; - PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", - __func__, psNode, psNode->ui32RefCount); + return _PhysHeapFindHeapOrFallback(eFallback, psDevNode); } - - OSLockRelease(g_hPhysHeapLock); - - *ppsPhysHeap = psNode; - PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); } -static PHYS_HEAP * _PhysHeapFindHeap(PVRSRV_PHYS_HEAP ePhysHeap, - PPVRSRV_DEVICE_NODE psDevNode) +/* + * Acquire heap, no fallback, no recursion: single loop acquisition + */ +static PHYS_HEAP* _PhysHeapFindRealHeapNoFallback(PVRSRV_PHYS_HEAP ePhysHeap, + PPVRSRV_DEVICE_NODE psDevNode) { - PHYS_HEAP *psPhysHeapNode = g_psPhysHeapList; - PVRSRV_PHYS_HEAP eFallback; + PHYS_HEAP *psPhysHeapNode = psDevNode->psPhysHeapList; + /* Swap the default heap alias for the system's real default PhysHeap */ if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) { ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; } + /* Check cache of PhysHeaps to see if it has been resolved before. + * The physheap must initialised to access its cache. 
*/ + if (psDevNode->apsPhysHeap[ePhysHeap] != NULL && + BIT_ISSET(psDevNode->apsPhysHeap[ePhysHeap]->ui32UsageFlags, ePhysHeap)) + { + return psDevNode->apsPhysHeap[ePhysHeap]; + } + + /* Cache not ready, carry out search for real PhysHeap, no fallback */ while (psPhysHeapNode) { - if ((psPhysHeapNode->psDevNode == psDevNode) && - BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) + if (BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) { return psPhysHeapNode; } psPhysHeapNode = psPhysHeapNode->psNext; } - - eFallback = gasHeapProperties[ePhysHeap].eFallbackHeap; - - if (ePhysHeap == eFallback) - { - return NULL; - } - else - { - return _PhysHeapFindHeap(eFallback, psDevNode); - } + return NULL; } -PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap, - PPVRSRV_DEVICE_NODE psDevNode, - PHYS_HEAP **ppsPhysHeap) +PVRSRV_ERROR PhysHeapAcquireByID(PVRSRV_PHYS_HEAP eDevPhysHeap, + PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP **ppsPhysHeap) { PHYS_HEAP *psPhysHeap; PVRSRV_ERROR eError = PVRSRV_OK; - PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap != PVRSRV_PHYS_HEAP_DEFAULT, "eDevPhysHeap"); PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap < PVRSRV_PHYS_HEAP_LAST, "eDevPhysHeap"); PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); PVR_DPF_ENTERED1(ui32Flags); - OSLockAcquire(g_hPhysHeapLock); + OSLockAcquire(psDevNode->hPhysHeapLock); - psPhysHeap = _PhysHeapFindHeap(eDevPhysHeap, psDevNode); + psPhysHeap = _PhysHeapFindHeapOrFallback(eDevPhysHeap, psDevNode); if (psPhysHeap != NULL) { psPhysHeap->ui32RefCount++; PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __func__, psPhysHeap, psPhysHeap->ui32RefCount); + + /* When acquiring a heap for the 1st time, perform a check and + * calculate the total number of user accessible physical heaps */ + if (psPhysHeap->ui32RefCount == 1) + { + _PhysHeapCountUserModeHeaps(psDevNode, BIT(eDevPhysHeap)); + } } else { eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; } - OSLockRelease(g_hPhysHeapLock); + OSLockRelease(psDevNode->hPhysHeapLock); *ppsPhysHeap = psPhysHeap; PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); @@ -951,11 +1731,11 @@ void PhysHeapRelease(PHYS_HEAP *psPhysHeap) { PVR_DPF_ENTERED1(psPhysHeap); - OSLockAcquire(g_hPhysHeapLock); + OSLockAcquire(psPhysHeap->psDevNode->hPhysHeapLock); psPhysHeap->ui32RefCount--; PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __func__, psPhysHeap, psPhysHeap->ui32RefCount); - OSLockRelease(g_hPhysHeapLock); + OSLockRelease(psPhysHeap->psDevNode->hPhysHeapLock); PVR_DPF_RETURN; } @@ -971,28 +1751,46 @@ PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap) return psPhysHeap->eType; } +PHYS_HEAP_POLICY PhysHeapGetPolicy(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->uiPolicy; +} + PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap) { return psPhysHeap->ui32UsageFlags; } +const IMG_CHAR *PhysHeapName(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->aszName; +} + IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode) { - PHYS_HEAP *psDefaultHeap; - IMG_BOOL bDefaultHeapFound; - PhysHeapAcquireByUsage(1<<(psDevNode->psDevConfig->eDefaultHeap), psDevNode, &psDefaultHeap); - if (psDefaultHeap == NULL) - { - bDefaultHeapFound = IMG_FALSE; - } - else - { - PhysHeapRelease(psDefaultHeap); - bDefaultHeapFound = IMG_TRUE; - } - return bDefaultHeapFound; + PVRSRV_PHYS_HEAP eDefaultHeap = psDevNode->psDevConfig->eDefaultHeap; + + return ((psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_DEFAULT] != NULL) && + ((psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_DEFAULT] == + 
psDevNode->apsPhysHeap[eDefaultHeap]))); } +#if defined(SUPPORT_STATIC_IPA) +IMG_UINT32 PhysHeapGetIPAValue(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->ui32IPAPolicyValue; +} + +IMG_UINT32 PhysHeapGetIPAMask(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->ui32IPAClearMask; +} + +IMG_UINT32 PhysHeapGetIPAShift(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->ui32IPAShift; +} +#endif /* * This function will set the psDevPAddr to whatever the system layer @@ -1049,16 +1847,64 @@ PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, return eResult; } +/* Used to add the total and free sizes of an IMA heap and the backing + * DLM heap + */ +static void PhysHeapIMAGetMemInfo(PHYS_HEAP *psPhysHeap, + IMG_UINT64 *puiTotalSize, + IMG_UINT64 *puiFreeSize) +{ + PHYS_HEAP *psDLMHeap; + IMG_UINT64 ui64TotalSize; + IMG_UINT64 ui64FreeSize; + IMG_UINT64 ui64DLMTotalSize; + IMG_UINT64 ui64DLMFreeSize; + + PVR_LOG_RETURN_VOID_IF_FALSE( + psPhysHeap->eType == PHYS_HEAP_TYPE_IMA, + "Physheap type not IMA"); + + /* Obtain the DLM heap chained to this IMA heap */ + PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetHeapDLMBacking); + psPhysHeap->psImplFuncs->pfnGetHeapDLMBacking(psPhysHeap->pvImplData, + &psDLMHeap); + PVR_LOG_RETURN_VOID_IF_FALSE(psDLMHeap != NULL, "pfnGetHeapDLMBacking"); + + /* Obtain the memstats for the current IMA heap */ + PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetFactoryMemStats); + psPhysHeap->psImplFuncs->pfnGetFactoryMemStats(psPhysHeap->pvImplData, + &ui64TotalSize, + &ui64FreeSize); + + /* Obtain the memstats for the chained DLM heap */ + PVR_ASSERT(psDLMHeap->psImplFuncs->pfnGetFactoryMemStats); + psDLMHeap->psImplFuncs->pfnGetFactoryMemStats(psDLMHeap->pvImplData, + &ui64DLMTotalSize, + &ui64DLMFreeSize); + + /* Total size is just the DLM heap size as the IMA heap imports a + * subset of that total size. + * Free size is a combination of the two because DLM heap may + * have spare PMBs to provide and the IMA heap may have space + * in an imported PMB. 
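The arithmetic described in the comment above (report the DLM total because the IMA heap only ever imports a subset of it, and add the free space on both sides) is easy to get backwards, so here is a minimal sketch; the helper and figures below are illustrative only, not the driver's implementation:

#include <stdint.h>

/* Combine the stats of an IMA heap and its backing DLM heap. */
static void ima_mem_info(uint64_t ima_free, uint64_t dlm_total, uint64_t dlm_free,
                         uint64_t *total_out, uint64_t *free_out)
{
    *total_out = dlm_total;             /* IMA capacity is bounded by the DLM heap  */
    *free_out  = ima_free + dlm_free;   /* room left in imported + unimported PMBs  */
}

/* Example: DLM heap 256 MiB total with 192 MiB still unimported, IMA heap with
 * 16 MiB spare inside its one imported PMB -> reports 256 MiB total, 208 MiB free. */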
+ */ + *puiTotalSize = ui64DLMTotalSize; + *puiFreeSize = ui64FreeSize + ui64DLMFreeSize; +} + PVRSRV_ERROR PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32PhysHeapCount, - PVRSRV_PHYS_HEAP *paePhysHeapID, - PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats) + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP *paePhysHeapID, + PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats) { IMG_UINT32 i = 0; PHYS_HEAP *psPhysHeap; - PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode invalid"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32PhysHeapCount <= MAX_USER_MODE_ALLOC_PHYS_HEAPS, "ui32PhysHeapCount invalid"); + PVR_LOG_RETURN_IF_INVALID_PARAM(paePhysHeapID != NULL, "paePhysHeapID invalid"); + PVR_LOG_RETURN_IF_INVALID_PARAM(paPhysHeapMemStats != NULL, "paPhysHeapMemStats invalid"); for (i = 0; i < ui32PhysHeapCount; i++) { @@ -1067,52 +1913,48 @@ PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode, return PVRSRV_ERROR_PHYSHEAP_ID_INVALID; } - if (paePhysHeapID[i] == PVRSRV_PHYS_HEAP_DEFAULT) - { - return PVRSRV_ERROR_INVALID_PARAMS; - } - - psPhysHeap = _PhysHeapFindHeap(paePhysHeapID[i], psDevNode); + psPhysHeap = _PhysHeapFindRealHeapNoFallback(paePhysHeapID[i], psDevNode); paPhysHeapMemStats[i].ui32PhysHeapFlags = 0; + paPhysHeapMemStats[i].ui64DevicesInSPAS = 0; if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i]) - && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats) + && psPhysHeap->psImplFuncs->pfnGetFactoryMemStats) { - psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, - &paPhysHeapMemStats[i].ui64TotalSize, - &paPhysHeapMemStats[i].ui64FreeSize); - paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap); + if (psPhysHeap->eType == PHYS_HEAP_TYPE_IMA) + { + PhysHeapIMAGetMemInfo(psPhysHeap, + &paPhysHeapMemStats[i].ui64TotalSize, + &paPhysHeapMemStats[i].ui64FreeSize); + } + else + { + psPhysHeap->psImplFuncs->pfnGetFactoryMemStats(psPhysHeap->pvImplData, + &paPhysHeapMemStats[i].ui64TotalSize, + &paPhysHeapMemStats[i].ui64FreeSize); + } if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap) { paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT; } +#if defined(PVRSRV_ENABLE_XD_MEM) + paPhysHeapMemStats[i].ui64DevicesInSPAS = PhysHeapSpasDeviceBitmap(psPhysHeap); +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ + + paPhysHeapMemStats[i].ePhysHeapType = psPhysHeap->eType; } else { paPhysHeapMemStats[i].ui64TotalSize = 0; paPhysHeapMemStats[i].ui64FreeSize = 0; + paPhysHeapMemStats[i].ePhysHeapType = PHYS_HEAP_TYPE_UNKNOWN; } } return PVRSRV_OK; } -void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap, IMG_UINT64 *pui64TotalSize, IMG_UINT64 *pui64FreeSize) -{ - if (psPhysHeap && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats) - { - psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, - pui64TotalSize, - pui64FreeSize); - } - else - { - *pui64TotalSize = *pui64FreeSize = 0; - } -} - void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, IMG_UINT32 ui32NumOfAddr, IMG_DEV_PHYADDR *psDevPAddr, @@ -1140,6 +1982,22 @@ IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap) return psPhysHeap->pszPDumpMemspaceName; } +#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) +static inline void _LogOOMDetection(IMG_BOOL isOOMDetected, PHYS_HEAP *psPhysHeap, PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ + IMG_BOOL bExistingVal = OSAtomicExchange(&psPhysHeap->sOOMDetected, isOOMDetected); + PVRSRV_PHYS_HEAP ePhysIdx = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); + + if 
(bExistingVal != isOOMDetected) + { + PVR_LOG(("Device: %d Physheap: %s OOM: %s", + (psPhysHeap->psDevNode->sDevId.ui32InternalID), + g_asPhysHeapUsageFlagStrings[ePhysIdx-1].pszLabel, + (isOOMDetected) ? "Detected" : "Resolved")); + } +} +#endif + PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, struct _CONNECTION_DATA_ *psConnection, IMG_DEVMEM_SIZE_T uiSize, @@ -1151,11 +2009,19 @@ PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, const IMG_CHAR *pszAnnotation, IMG_PID uiPid, PMR **ppsPMRPtr, - IMG_UINT32 ui32PDumpFlags) + IMG_UINT32 ui32PDumpFlags, + PVRSRV_MEMALLOCFLAGS_T *puiOutFlags) { + PVRSRV_ERROR eError; PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; - - return psImplFuncs->pfnCreatePMR(psPhysHeap, +#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) + IMG_UINT64 uiFreeBytes; + PVRSRV_PHYS_HEAP eDemotionPhysIdx; + PVRSRV_MEMALLOCFLAGS_T uiDemotionFlags = uiFlags; + PVRSRV_PHYS_HEAP ePhysIdx = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); + PHYS_HEAP *psDemotionHeap = NULL; +#endif + eError = psImplFuncs->pfnCreatePMR(psPhysHeap, psConnection, uiSize, ui32NumPhysChunks, @@ -1167,25 +2033,127 @@ PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, uiPid, ppsPMRPtr, ui32PDumpFlags); -} -PVRSRV_ERROR PhysHeapInit(void) -{ - PVRSRV_ERROR eError; +#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) + /* Check for OOM error, return if otherwise */ + _LogOOMDetection(((IsOOMError(eError)) ? IMG_TRUE : IMG_FALSE), psPhysHeap, uiFlags); + if (eError == PVRSRV_OK) + { + if (puiOutFlags) + { + *puiOutFlags = uiFlags; + } + return eError; + } + PVR_LOG_RETURN_IF_FALSE((IsOOMError(eError)), "Failed to allocate PMR", eError); - g_psPhysHeapList = NULL; + /* Skip logic and return if mandate flag is set */ + if (PVRSRV_CHECK_MANDATED_PHYSHEAP(uiFlags)) + { + return eError; + } - eError = OSLockCreate(&g_hPhysHeapLock); - PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + /* Demotion only occurs on CPU_LOCAL,GPU_LOCAL,GPU_PRIVATE */ + if (ePhysIdx > PVRSRV_PHYS_HEAP_GPU_PRIVATE) + { + return eError; + } - return PVRSRV_OK; + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + for (eDemotionPhysIdx = (PVRSRV_PHYS_HEAP)(ePhysIdx-1); eDemotionPhysIdx != PVRSRV_PHYS_HEAP_DEFAULT; eDemotionPhysIdx--) + { + PVRSRV_CHANGE_PHYS_HEAP_HINT(eDemotionPhysIdx, uiDemotionFlags); + PVR_LOG_IF_FALSE_VA(PVR_DBG_MESSAGE, (ePhysIdx-eDemotionPhysIdx < 2), "Demoted from %s to CPU_LOCAL. " + "Expect Performance to be affected!", g_asPhysHeapUsageFlagStrings[ePhysIdx-1].pszLabel); + psDemotionHeap = _PhysHeapFindRealHeapNoFallback(eDemotionPhysIdx, psPhysHeap->psDevNode); + + /* Either no alternative available, or allocation already failed on selected heap */ + if (psDemotionHeap == NULL || psPhysHeap == psDemotionHeap) + { + continue; + } + + if (PhysHeapFreeMemCheck(psDemotionHeap, uiSize, &uiFreeBytes) != PVRSRV_OK) + { + _LogOOMDetection(IMG_TRUE, psDemotionHeap, uiDemotionFlags); + continue; + } + + psImplFuncs = psDemotionHeap->psImplFuncs; + eError = psImplFuncs->pfnCreatePMR(psDemotionHeap, + psConnection, + uiSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2PageSize, + uiDemotionFlags, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + _LogOOMDetection(((IsOOMError(eError)) ? IMG_TRUE : IMG_FALSE), psDemotionHeap, uiDemotionFlags); + + if (eError == PVRSRV_OK) + { + if (puiOutFlags) + { + *puiOutFlags = uiDemotionFlags; + } + break; + } + } + if (eError == PVRSRV_OK) + { + /* Success demotion worked error Ok - emit warning. 
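The demotion loop above walks the heap hint downwards (GPU_PRIVATE, then GPU_LOCAL, then CPU_LOCAL), skipping hints that are absent, identical to the heap that already failed, or too full, and stops as soon as one candidate satisfies the allocation; a mandated hint disables the walk entirely. A condensed standalone sketch of that control flow, using hypothetical callback types rather than the driver's PMR factory interface:

#include <stdbool.h>
#include <stddef.h>

enum hint { HINT_DEFAULT, HINT_CPU_LOCAL, HINT_GPU_LOCAL, HINT_GPU_PRIVATE, HINT_COUNT };

struct heap;                                             /* opaque for the sketch       */
typedef int  (*alloc_fn)(struct heap *h, size_t size);   /* 0 on success, -1 on OOM     */
typedef bool (*has_room_fn)(struct heap *h, size_t size);

static int alloc_with_demotion(enum hint want, bool mandated, size_t size,
                               struct heap *heaps[HINT_COUNT],
                               alloc_fn do_alloc, has_room_fn has_room,
                               enum hint *used)
{
    struct heap *first = heaps[want];
    int h;

    if (first != NULL && do_alloc(first, size) == 0)
    {
        *used = want;
        return 0;                                  /* first-choice heap succeeded   */
    }
    if (mandated || want == HINT_DEFAULT || want > HINT_GPU_PRIVATE)
    {
        return -1;                                 /* demotion not allowed here     */
    }
    for (h = (int)want - 1; h > (int)HINT_DEFAULT; h--)
    {
        struct heap *cand = heaps[h];

        if (cand == NULL || cand == first || !has_room(cand, size))
        {
            continue;                              /* nothing suitable to demote to */
        }
        if (do_alloc(cand, size) == 0)
        {
            *used = (enum hint)h;                  /* report the heap finally used  */
            return 0;
        }
    }
    return -1;                                     /* every candidate also failed   */
}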
*/ + PVR_LOG_VA(PVR_DBG_WARNING, "PhysHeap(%s) failed to allocate PMR. Demoted to %s" , + g_asPhysHeapUsageFlagStrings[ePhysIdx-1].pszLabel, + g_asPhysHeapUsageFlagStrings[eDemotionPhysIdx-1].pszLabel); + } + else + { + /* Unable to create PMR (Heap not found or CreatePMR failed) - emit Error */ + PVR_LOG_VA(PVR_DBG_ERROR, "Error raised %s : Unable to %s." , + PVRSRVGETERRORSTRING(eError), + (psDemotionHeap == NULL) ? "find heaps for demotion" : + "allocate PMR via Demotion heap"); +#if defined(SUPPORT_PMR_DEFERRED_FREE) + { + PPVRSRV_DEVICE_NODE psDevNode = PhysHeapDeviceNode(psPhysHeap); + OSLockAcquire(psDevNode->hPMRZombieListLock); + PVR_LOG_VA(PVR_DBG_ERROR, "PMR Zombie Count: %u, PMR Zombie Count In Cleanup: %u", + psDevNode->uiPMRZombieCount, + psDevNode->uiPMRZombieCountInCleanup); + OSLockRelease(psDevNode->hPMRZombieListLock); + } +#endif + } +#endif + return eError; } -void PhysHeapDeinit(void) +PVRSRV_ERROR PhysHeapCreatePMB(PHYS_HEAP *psPhysHeap, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszAnnotation, + PMB **ppsPMRPtr, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiSize) { - PVR_ASSERT(g_psPhysHeapList == NULL); + PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + + PVR_LOG_RETURN_IF_FALSE( + psPhysHeap->eType == PHYS_HEAP_TYPE_DLM, + "Physheap type not DLM", + PVRSRV_ERROR_INVALID_PARAMS); - OSLockDestroy(g_hPhysHeapLock); + PVR_ASSERT(psImplFuncs->pfnCreatePMB); + + return psImplFuncs->pfnCreatePMB(psPhysHeap, + uiSize, + pszAnnotation, + ppsPMRPtr, + puiBase, + puiSize); } PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap) @@ -1195,11 +2163,18 @@ PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap) return psPhysHeap->psDevNode; } -IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap) +static IMG_BOOL PhysHeapCreatedByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap) +{ + PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); + + return (gasHeapProperties[ePhysHeap].ePVRLayerAction != PVR_LAYER_HEAP_ACTION_IGNORE); +} + +static IMG_BOOL PhysHeapAcquiredByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap) { PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); - return gasHeapProperties[ePhysHeap].bPVRLayerAcquire; + return (gasHeapProperties[ePhysHeap].ePVRLayerAction == PVR_LAYER_HEAP_ACTION_INITIALISE); } IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap) @@ -1316,3 +2291,41 @@ IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap) return ui32PageShift; } + +PVRSRV_ERROR PhysHeapFreeMemCheck(PHYS_HEAP *psPhysHeap, + IMG_UINT64 ui64MinRequiredMem, + IMG_UINT64 *pui64FreeMem) +{ + IMG_UINT64 ui64TotalSize; + IMG_UINT64 ui64FreeSize; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pui64FreeMem != NULL, "pui64FreeMem"); + + + if (psPhysHeap->eType == PHYS_HEAP_TYPE_IMA) + { + PhysHeapIMAGetMemInfo(psPhysHeap->pvImplData, + &ui64TotalSize, + &ui64FreeSize); + } + else + { + psPhysHeap->psImplFuncs->pfnGetFactoryMemStats(psPhysHeap->pvImplData, + &ui64TotalSize, + &ui64FreeSize); + } + + *pui64FreeMem = ui64FreeSize; + if (ui64MinRequiredMem >= *pui64FreeMem) + { + eError = PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY; + } + + return eError; +} + +/****************************************************************************** + End of file (physheap.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem.c index 
a6b73f733f57..ec05b472cc70 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem.c @@ -50,8 +50,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "osfunc.h" #include "pdump_physmem.h" #include "pdump_km.h" -#include "rgx_heaps.h" #include "pvr_ricommon.h" +#include "allocmem.h" #include "physmem_lma.h" #include "physmem_osmem.h" @@ -74,6 +74,12 @@ MODULE_PARM_DESC(PMRAllocFail, "When number of PMR allocs reaches " #include "proc_stats.h" #endif +/** Computes division using log2 of divisor. */ +#define LOG2_DIV(x, log2) ((x) >> (log2)) + +/** Computes modulo of a power of 2. */ +#define LOG2_MOD(x, log2) ((x) & ((1 << (log2)) - 1)) + PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32MemSize, IMG_UINT32 ui32Log2Align, @@ -84,6 +90,7 @@ PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, const IMG_CHAR *pszSymbolicAddress, IMG_HANDLE *phHandlePtr, #endif + IMG_PID uiPid, IMG_HANDLE hMemHandle, IMG_DEV_PHYADDR *psDevPhysAddr) { @@ -99,15 +106,9 @@ PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle; IMG_UINT64 uiMask; IMG_DEV_PHYADDR sDevPhysAddr_int; - IMG_PID uiPid = 0; psMemHandle = hMemHandle; -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ? - PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); -#endif - /* Allocate the pages */ eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap, TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), @@ -119,7 +120,7 @@ PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, /* Check to see if the page allocator returned pages with our desired * alignment, which is not unlikely */ - uiMask = (1 << ui32Log2Align) - 1; + uiMask = IMG_PAGE2BYTES32(ui32Log2Align) - 1; if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask)) { /* use over allocation instead */ @@ -139,7 +140,7 @@ PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, *psDevPhysAddr = sDevPhysAddr_int; #if defined(PDUMP) - ui32PageSize = ui32Log2Align? (1 << ui32Log2Align) : OSGetPageSize(); + ui32PageSize = ui32Log2Align? IMG_PAGE2BYTES32(ui32Log2Align) : OSGetPageSize(); eError = PDumpMalloc(psDevNode, pszDevSpace, pszSymbolicAddress, @@ -272,45 +273,135 @@ void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, } +PVRSRV_ERROR PhysMemValidateMappingTable(IMG_UINT32 ui32TotalNumVirtChunks, + IMG_UINT32 ui32IndexCount, + const IMG_UINT32 *pui32MappingTable) +{ + IMG_UINT8 *paui8TrackedIndices; + IMG_UINT32 ui32BytesToTrackIndicies; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Allocate memory for a bitmask to track indices. + * We allocate 'n' bytes with 1 bit representing each index, to allow + * us to check for any repeated entries in pui32MappingTable. 
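The LOG2_DIV/LOG2_MOD helpers added above are shift-and-mask replacements for division and modulo by a power of two, and with log2 == 3 they give exactly the byte-index / bit-index split needed for the one-bit-per-chunk tracking array the comment describes. A tiny self-contained illustration (array size and index are arbitrary):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOG2_DIV(x, log2) ((x) >> (log2))
#define LOG2_MOD(x, log2) ((x) & ((1 << (log2)) - 1))

int main(void)
{
    uint8_t tracked[4] = {0};              /* 4 bytes -> room to track 32 indices */
    uint32_t idx = 21;

    assert(LOG2_DIV(idx, 3) == 21 / 8);    /* byte index 2 */
    assert(LOG2_MOD(idx, 3) == 21 % 8);    /* bit index 5  */

    /* First sighting sets the bit; a second sighting would be caught here. */
    if (tracked[LOG2_DIV(idx, 3)] & (1u << LOG2_MOD(idx, 3)))
    {
        puts("duplicate index");
    }
    tracked[LOG2_DIV(idx, 3)] |= 1u << LOG2_MOD(idx, 3);
    return 0;
}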
+ */ + ui32BytesToTrackIndicies = LOG2_DIV(ui32TotalNumVirtChunks, 3); + if (LOG2_MOD(ui32TotalNumVirtChunks, 3) != 0) + { + ++ui32BytesToTrackIndicies; + } + paui8TrackedIndices = OSAllocZMem(ui32BytesToTrackIndicies); + if (paui8TrackedIndices == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + for (i = 0; i < ui32IndexCount; i++) + { + IMG_UINT32 ui32LogicalIndex = pui32MappingTable[i]; + + /* Check that index is within the bounds of the allocation */ + if (ui32LogicalIndex >= ui32TotalNumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Index %u is OOB", + __func__, + ui32LogicalIndex)); + eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY; + break; + } + + /* Check that index is not repeated */ + if (BIT_ISSET(paui8TrackedIndices[LOG2_DIV(ui32LogicalIndex, 3)], LOG2_MOD(ui32LogicalIndex, 3))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Duplicate index found: %u", + __func__, + ui32LogicalIndex)); + + eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY; + break; + } + BIT_SET(paui8TrackedIndices[LOG2_DIV(ui32LogicalIndex, 3)], LOG2_MOD(ui32LogicalIndex, 3)); + } + + OSFreeMem(paui8TrackedIndices); + + return eError; +} /* Checks the input parameters and adjusts them if possible and necessary */ -static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 *puiLog2AllocPageSize, - IMG_DEVMEM_SIZE_T *puiSize) +PVRSRV_ERROR PhysMemValidateParams(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_PID uiPid, + IMG_UINT32 *puiLog2AllocPageSize, + IMG_DEVMEM_SIZE_T *puiSize) { IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize; + IMG_UINT32 ui32PageSize = IMG_PAGE2BYTES32(uiLog2AllocPageSize); IMG_DEVMEM_SIZE_T uiSize = *puiSize; /* Sparse if we have different number of virtual and physical chunks plus * in general all allocations with more than one virtual chunk */ IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks || ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE; - if (ui32NumPhysChunks == 0 && ui32NumVirtChunks == 0) + /* Sparse allocations must be backed immediately as the requested + * pui32MappingTable is not retained in any structure if not immediately + * actioned on allocation. 
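The classification used above treats an allocation as sparse whenever the physical and virtual chunk counts differ or more than one virtual chunk is requested, and the sparse plus on-demand combination is then refused because the mapping table would be discarded before it could be applied. A compact sketch of just that rule, with a stand-in flag bit rather than the real memalloc flag value:

#include <stdbool.h>
#include <stdint.h>

#define FLAG_ON_DEMAND (1u << 0)   /* hypothetical stand-in for the real flag bit */

static bool is_sparse(uint32_t phys_chunks, uint32_t virt_chunks)
{
    return (virt_chunks != phys_chunks) || (virt_chunks > 1);
}

static int validate(uint32_t phys_chunks, uint32_t virt_chunks, uint32_t flags)
{
    if ((flags & FLAG_ON_DEMAND) && is_sparse(phys_chunks, virt_chunks))
    {
        return -1;   /* sparse + on-demand: mapping table could not be applied later */
    }
    if (virt_chunks == 0)
    {
        return -1;   /* nothing to allocate */
    }
    return 0;
}

/* e.g. is_sparse(1, 1) == false, is_sparse(2, 4) == true, is_sparse(4, 4) == true */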
+ */ + if (PVRSRV_CHECK_ON_DEMAND(uiFlags) && bIsSparse) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Number of physical chunks and number of virtual chunks " - "cannot be both 0", - __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid to specify ON_DEMAND for a sparse allocation: 0x%" IMG_UINT64_FMTSPECX, __func__, uiFlags)); + return PVRSRV_ERROR_INVALID_FLAGS; + } + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + if (PVRSRV_CHECK_OS_LINUX_MOVABLE(uiFlags) && uiPid == PVR_SYS_ALLOC_PID) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot move system allocated resources PID = PVR_SYS_ALLOC_PID", + __func__)); + + return PVRSRV_ERROR_INVALID_FLAGS; + } + + if (PVRSRV_CHECK_OS_LINUX_MOVABLE(uiFlags) && + PVRSRV_CHECK_OS_LINUX_DENY_MOVE(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot move denied movable allocation", + __func__)); + + return PVRSRV_ERROR_INVALID_FLAGS; + } +#else + PVR_UNREFERENCED_PARAMETER(uiPid); +#endif + + if (ui32NumVirtChunks == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Number of virtual chunks cannot be 0", + __func__)); return PVRSRV_ERROR_INVALID_PARAMS; } - /* Protect against ridiculous page sizes */ - if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT || uiLog2AllocPageSize < RGX_HEAP_4KB_PAGE_SHIFT) + /* Protect against invalid page sizes */ + if ((ui32PageSize & psDevNode->psMMUDevAttrs->ui32ValidPageSizeMask) == 0) { - PVR_DPF((PVR_DBG_ERROR, "Page size is out of range: 2^%u.", uiLog2AllocPageSize)); + PVR_LOG_VA(PVR_DBG_ERROR, "Page size of %u is invalid", ui32PageSize); return PVRSRV_ERROR_INVALID_PARAMS; } - /* Range check of the alloc size PMRs can be a max of 1GB*/ + /* Range check of the alloc size */ if (!PMRValidateSize(uiSize)) { PVR_LOG_VA(PVR_DBG_ERROR, - "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"", - ui32NumVirtChunks, - (IMG_UINT64) 1ULL << uiLog2AllocPageSize); + "PMR size exceeds limit #Chunks: %u ChunkSz 0x%08X", + ui32NumVirtChunks, + IMG_PAGE2BYTES32(uiLog2AllocPageSize)); return PVRSRV_ERROR_PMR_TOO_LARGE; } @@ -354,13 +445,15 @@ static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, return PVRSRV_ERROR_INVALID_PARAMS; } - if (ui32NumVirtChunks * (1 << uiLog2AllocPageSize) != uiSize) + if (IMG_PAGES2BYTES64(ui32NumVirtChunks, uiLog2AllocPageSize) != uiSize) { PVR_DPF((PVR_DBG_ERROR, "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") " "is not equal to virtual chunks * chunk size " "(%#" IMG_UINT64_FMTSPECx ")", - __func__, uiSize, (IMG_UINT64) (ui32NumVirtChunks * (1ULL << uiLog2AllocPageSize)))); + __func__, + uiSize, + IMG_PAGES2BYTES64(ui32NumVirtChunks, uiLog2AllocPageSize))); return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; } @@ -391,16 +484,25 @@ static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); } - if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0) + if ((uiSize & (IMG_PAGE2BYTES32(uiLog2AllocPageSize) - 1)) != 0) { PVR_DPF((PVR_DBG_ERROR, "%s: Total size (%#" IMG_UINT64_FMTSPECx ") " - "must be a multiple of the requested contiguity (%" - IMG_UINT64_FMTSPEC ")", __func__, uiSize, - (IMG_UINT64) (1ULL << uiLog2AllocPageSize))); + "must be a multiple of the requested contiguity (%u)", + __func__, + uiSize, + IMG_PAGE2BYTES32(uiLog2AllocPageSize))); return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; } + /* Parameter validation - Mapping table entries */ + { + PVRSRV_ERROR eErr = PhysMemValidateMappingTable(ui32NumVirtChunks, + ui32NumPhysChunks, + pui32MappingTable); + PVR_RETURN_IF_ERROR(eErr); + } + *puiLog2AllocPageSize = 
uiLog2AllocPageSize; *puiSize = uiSize; @@ -408,7 +510,8 @@ static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, } static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags, - PVRSRV_PHYS_HEAP *peDevPhysHeap) + PVRSRV_PHYS_HEAP *peDevPhysHeap, + PVRSRV_DEVICE_NODE *psDevNode) { PVRSRV_PHYS_HEAP eHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); @@ -424,7 +527,7 @@ static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags, case PVRSRV_PHYS_HEAP_FW_PREMAP7: { /* keep heap (with check) */ - PVR_RETURN_IF_INVALID_PARAM(PVRSRV_VZ_MODE_IS(HOST)); + PVR_RETURN_IF_INVALID_PARAM(!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)); break; } case PVRSRV_PHYS_HEAP_LAST: @@ -442,6 +545,23 @@ static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags, return PVRSRV_OK; } +static INLINE void _PromoteToCpuCached(PVRSRV_MEMALLOCFLAGS_T *puiFlags) +{ + if ((*puiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)) == 0) + { + /* We don't need to upgrade if we don't map into the CPU */ + return; + } + + /* Clear the existing CPU cache flags */ + *puiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK); + + /* Add CPU cached flags */ + *puiFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT; +} + PVRSRV_ERROR PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, @@ -459,21 +579,58 @@ PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags) { PVRSRV_ERROR eError; + IMG_UINT32 i; PVRSRV_PHYS_HEAP ePhysHeapIdx; PVRSRV_MEMALLOCFLAGS_T uiPMRFlags = uiFlags; - PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize = - psDevNode->psDevConfig->pfnCheckMemAllocSize; - + uiPid = (psConnection != NULL) ? OSGetCurrentClientProcessIDKM() : uiPid; + + /* This is where we would expect to validate the uiAnnotationLength parameter + (to confirm it is sufficient to store the string in pszAnnotation plus a + terminating NULL). However, we do not make reference to this value when + we copy the string in PMRCreatePMR() - instead there we use strlcpy() + to copy at most chars and ensure whatever is copied is null-terminated. + The parameter is only used by the generated bridge code. + */ PVR_UNREFERENCED_PARAMETER(uiAnnotationLength); - eError = _ValidateParams(ui32NumPhysChunks, - ui32NumVirtChunks, - uiFlags, - &uiLog2AllocPageSize, - &uiSize); + if (PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && + psDevNode->pfnGetDeviceSnoopMode(psDevNode) == PVRSRV_DEVICE_SNOOP_CPU_ONLY) + { + _PromoteToCpuCached(&uiPMRFlags); + } + + eError = PhysMemValidateParams(psDevNode, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiFlags, + uiPid, + &uiLog2AllocPageSize, + &uiSize); + PVR_RETURN_IF_ERROR(eError); + +#if defined(SUPPORT_STATIC_IPA) +#if !defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) + /* Do not permit IPA PMR allocation flags to be passed through to the + * new PMR. 
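The _PromoteToCpuCached helper introduced above is a small flag rewrite: allocations that are never mapped on the CPU are left untouched, otherwise the requested CPU cache mode bits are cleared and replaced with the cached (incoherent) mode, which is worthwhile when the device snoops the CPU cache. A simplified sketch with invented flag values, not the real memalloc flag layout:

#include <stdint.h>

/* Hypothetical flag layout, local to this sketch. */
#define F_CPU_READABLE         (1u << 0)
#define F_CPU_WRITEABLE        (1u << 1)
#define F_KERNEL_CPU_MAPPABLE  (1u << 2)
#define F_CPU_CACHE_MODE_MASK  (3u << 3)   /* two bits encoding the CPU cache mode */
#define F_CPU_CACHE_INCOHERENT (1u << 3)

static void promote_to_cpu_cached(uint32_t *flags)
{
    if ((*flags & (F_CPU_READABLE | F_CPU_WRITEABLE | F_KERNEL_CPU_MAPPABLE)) == 0)
    {
        return;                           /* never CPU-mapped: nothing to upgrade   */
    }
    *flags &= ~F_CPU_CACHE_MODE_MASK;     /* drop whatever cache mode was requested */
    *flags |= F_CPU_CACHE_INCOHERENT;     /* use a CPU-cached mapping instead       */
}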
+ */ + uiPMRFlags &= ~PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK; +#endif +#endif + +#if defined(PDUMP) + eError = PDumpValidateUMFlags(ui32PDumpFlags); PVR_RETURN_IF_ERROR(eError); +#endif + + for (i = 0; i < ui32NumPhysChunks; i++) + { + PVR_LOG_RETURN_IF_FALSE(pui32MappingTable[i] < ui32NumVirtChunks, + "Mapping table value exceeds ui32NumVirtChunks", + PVRSRV_ERROR_INVALID_PARAMS); + } - eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx); + eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx, psDevNode); PVR_RETURN_IF_ERROR(eError); if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_DEFAULT) @@ -519,25 +676,6 @@ PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, return PVRSRV_ERROR_INVALID_HEAP; } - /* Apply memory budgeting policy */ - if (pfnCheckMemAllocSize) - { - IMG_UINT64 uiMemSize; - - /* If sparse allocation */ - if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1U) - { - uiMemSize = (1ULL<psDevConfig->hSysData, uiMemSize); - PVR_RETURN_IF_ERROR(eError); - } - #if defined(DEBUG) if (PMRAllocFail > 0) { @@ -560,12 +698,19 @@ PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, * should be attributed to the driver (PID 1) rather than to the * process those allocations are made under. Same applies to the memory * allocated for the Firmware. */ - if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + if (psDevNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE || PVRSRV_CHECK_FW_MAIN(uiFlags)) { uiPid = PVR_SYS_ALLOC_PID; } + /* ePhysHeapIdx and PhysHeap hint in uiPMRFlags match and provide the + * intended PhysHeap to use at this point, but systems vary so the next + * call may fallback (apsPhysHeap[]) or demote (OutOfMem) and not be the + * heap that was intended, e.g. GPU_PRIVATE index may fallback to GPU_LOCAL, + * GPU_LOCAL may demote to CPU_LOCAL. + * On output uiPMRFlags show the PhysHeap finally used. 
+ */ eError = PhysHeapCreatePMR(psDevNode->apsPhysHeap[ePhysHeapIdx], psConnection, uiSize, @@ -573,11 +718,12 @@ PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, ui32NumVirtChunks, pui32MappingTable, uiLog2AllocPageSize, - uiFlags, + uiPMRFlags, pszAnnotation, uiPid, ppsPMRPtr, - ui32PDumpFlags); + ui32PDumpFlags, + &uiPMRFlags); if (puiPMRFlags != NULL) { @@ -615,6 +761,7 @@ PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection, { PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); + PVR_LOG_RETURN_IF_INVALID_PARAM(ePhysHeap < PVRSRV_PHYS_HEAP_LAST, "uiFlags"); PVR_LOG_RETURN_IF_INVALID_PARAM(uiAnnotationLength != 0, "uiAnnotationLength"); PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); @@ -630,76 +777,25 @@ PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection, } return PhysmemNewRamBackedPMR_direct(psConnection, - psDevNode, - uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - pui32MappingTable, - uiLog2AllocPageSize, - uiFlags, - uiAnnotationLength, - pszAnnotation, - uiPid, - ppsPMRPtr, - ui32PDumpFlags, - puiPMRFlags); -} - -PVRSRV_ERROR -PhysmemNewRamBackedLockedPMR(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - IMG_UINT32 uiLog2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 uiAnnotationLength, - const IMG_CHAR *pszAnnotation, - IMG_PID uiPid, - PMR **ppsPMRPtr, - IMG_UINT32 ui32PDumpFlags, - PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags) -{ - - PVRSRV_ERROR eError; - eError = PhysmemNewRamBackedPMR(psConnection, - psDevNode, - uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - pui32MappingTable, - uiLog2PageSize, - uiFlags, - uiAnnotationLength, - pszAnnotation, - uiPid, - ppsPMRPtr, - ui32PDumpFlags, - puiPMRFlags); - - if (eError == PVRSRV_OK) - { - eError = PMRLockSysPhysAddresses(*ppsPMRPtr); - } - - return eError; -} - -PVRSRV_ERROR -PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 *pui32PhysHeapCount) -{ - PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRVGetDevicePhysHeapCount(psDevNode, pui32PhysHeapCount); - return PVRSRV_OK; + psDevNode, + uiSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiFlags, + uiAnnotationLength, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags, + puiPMRFlags); } PVRSRV_ERROR PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_PHYS_HEAP *peHeap) + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_PHYS_HEAP *peHeap) { PVR_UNREFERENCED_PARAMETER(psConnection); *peHeap = psDevNode->psDevConfig->eDefaultHeap; @@ -707,50 +803,33 @@ PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection, } PVRSRV_ERROR -PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32PhysHeapCount, - PHYS_HEAP_MEM_STATS *apPhysHeapMemStats) +PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP *paePhysHeapID, + PHYS_HEAP_MEM_STATS_V1 *paPhysHeapMemStats) { - PHYS_HEAP *psPhysHeap; - IMG_UINT uiHeapIndex, i = 0; - PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(ui32PhysHeapCount); + PVR_UNREFERENCED_PARAMETER(paePhysHeapID); + PVR_UNREFERENCED_PARAMETER(paPhysHeapMemStats); - if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount) 
- { - return PVRSRV_ERROR_INVALID_PARAMS; - } - - for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++) - { - psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex]; - - if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex)) - { - PVR_ASSERT(i < ui32PhysHeapCount); - - PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize, - &apPhysHeapMemStats[i].ui64FreeSize); - - i++; - } - } - return PVRSRV_OK; + return PVRSRV_ERROR_NOT_IMPLEMENTED; } PVRSRV_ERROR -PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32PhysHeapCount, - PVRSRV_PHYS_HEAP *paePhysHeapID, - PHYS_HEAP_MEM_STATS *paPhysHeapMemStats) +PVRSRVPhysHeapGetMemInfo2KM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP *paePhysHeapID, + PHYS_HEAP_MEM_STATS_V2 *paPhysHeapMemStats) { PVR_UNREFERENCED_PARAMETER(psConnection); return PhysHeapGetMemInfo(psDevNode, - ui32PhysHeapCount, - paePhysHeapID, - paPhysHeapMemStats); + ui32PhysHeapCount, + paePhysHeapID, + paPhysHeapMemStats); } /* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is @@ -758,6 +837,7 @@ PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, * would then need PVRSRV_DEVICE_NODE (defining this type in pmr.h causes a * typedef redefinition issue). */ +#if defined(SUPPORT_INSECURE_EXPORT) PVRSRV_ERROR PhysmemImportPMR(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, @@ -781,3 +861,4 @@ PhysmemImportPMR(CONNECTION_DATA *psConnection, uiLog2Contig, ppsPMR); } +#endif /* if defined(SUPPORT_INSECURE_EXPORT) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_cpumap_history.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_cpumap_history.c index 10a7d84626a1..6975926a45d8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_cpumap_history.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_cpumap_history.c @@ -428,7 +428,7 @@ void InsertMappingRecord(const IMG_CHAR *pszAnnotation, psRecord->etype = MAP; psRecord->pfnRecordString = MapRecordString; - OSStringLCopy(psRecord->u.sMapData.aszAnnotation, pszAnnotation, MAX_MAPPING_ANNOT_STR); + OSStringSafeCopy(psRecord->u.sMapData.aszAnnotation, pszAnnotation, MAX_MAPPING_ANNOT_STR); psRecord->u.sMapData.uiPID = uiPID; psRecord->u.sMapData.pvAddress = pvAddress; psRecord->u.sMapData.ui32CPUCacheFlags = ui32CPUCacheFlags; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_dlm.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_dlm.c new file mode 100644 index 000000000000..07a92b2973df --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_dlm.c @@ -0,0 +1,341 @@ +/*************************************************************************/ /*! +@File physmem_dlm.c +@Title Dedicated Local Memory allocator for Physical Memory Blocks +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for Dedicated local memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "physheap.h" +#include "physheap_config.h" +#include "device.h" +#include "physmem_dlm.h" + +typedef struct PHYSMEM_DLM_DATA_TAG { + RA_ARENA *psRA; + IMG_CPU_PHYADDR sStartAddr; + IMG_DEV_PHYADDR sCardBase; + IMG_UINT64 uiSize; + IMG_UINT32 uiLog2PMBSize; +} PHYSMEM_DLM_DATA; + +/* PMB (Physical Memory Block) */ +struct _PMB_ +{ + RA_ARENA *pArena; + RA_BASE_T uiBase; + RA_LENGTH_T uiSize; + const IMG_CHAR *pszAnnotation; +}; + +/* PMBCreatePMB + * + * Creates a new PMB used to represent a block of memory + * obtained from a DLM heap. 
+ */ +static +PVRSRV_ERROR PMBCreatePMB(RA_ARENA *pArena, + RA_LENGTH_T uiSize, + RA_LENGTH_T uiAlignment, + const IMG_CHAR *pszAnnotation, + PMB **ppsPMB) +{ + PVRSRV_ERROR eError; + PMB* psPMB = OSAllocMem(sizeof(*psPMB)); + PVR_LOG_GOTO_IF_NOMEM(psPMB, eError, error_Return); + + eError = RA_Alloc(pArena, + uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + uiAlignment, + pszAnnotation, + &psPMB->uiBase, + &psPMB->uiSize, + NULL); /* No private handle */ + PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", error_FreePMB); + + psPMB->pArena = pArena; + psPMB->pszAnnotation = pszAnnotation; + + *ppsPMB = psPMB; + + return PVRSRV_OK; + +error_FreePMB: + OSFreeMem(psPMB); +error_Return: + return eError; +} + +void +PMBDestroy(PMB *psPMB) +{ + PVR_LOG_RETURN_VOID_IF_FALSE(psPMB, "psPMB NULL"); + + RA_Free(psPMB->pArena, psPMB->uiBase); + OSFreeMem(psPMB); +} + + +const IMG_CHAR * +PMBGetAnnotation(PMB *psPMB) +{ + if (psPMB == NULL) + { + PVR_DPF((PVR_DBG_ERROR,"psPMB in %s",__func__)); + return ""; + } + + return psPMB->pszAnnotation; +} + +/* DLM */ + +static void PFNGetLocalRamMemStats(PHEAP_IMPL_DATA pvImplData, + IMG_UINT64 *pui64TotalSize, + IMG_UINT64 *pui64FreeSize) +{ + RA_USAGE_STATS sRAUsageStats; + PHYSMEM_DLM_DATA *psDLMData = (PHYSMEM_DLM_DATA*)pvImplData; + PVR_LOG_RETURN_VOID_IF_FALSE(pvImplData, "pvImplData NULL"); + + RA_Get_Usage_Stats(psDLMData->psRA, &sRAUsageStats); + + *pui64TotalSize = sRAUsageStats.ui64TotalArenaSize; + *pui64FreeSize = sRAUsageStats.ui64FreeArenaSize; +} + +/* +* This function will set the psDevPAddr to whatever the system layer +* has set it for the referenced heap. +* It will not fail if the psDevPAddr is invalid. +*/ +static PVRSRV_ERROR +PFNGetDevPAddr(PHEAP_IMPL_DATA pvImplData, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PHYSMEM_DLM_DATA *psDLMData = (PHYSMEM_DLM_DATA*)pvImplData; + PVR_LOG_RETURN_IF_INVALID_PARAM(pvImplData != NULL, "pvImplData"); + + *psDevPAddr = psDLMData->sCardBase; + + return PVRSRV_OK; +} + +/* +* This function will set the psCpuPAddr to whatever the system layer +* has set it for the referenced heap. +* It will not fail if the psCpuPAddr is invalid. 
+*/ +static PVRSRV_ERROR +PFNGetCPUPAddr(PHEAP_IMPL_DATA pvImplData, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PHYSMEM_DLM_DATA *psDLMData = (PHYSMEM_DLM_DATA*)pvImplData; + PVR_LOG_RETURN_IF_INVALID_PARAM(pvImplData != NULL, "pvImplData"); + + *psCpuPAddr = psDLMData->sStartAddr; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PFNGetSize(PHEAP_IMPL_DATA pvImplData, + IMG_UINT64 *puiSize) +{ + PHYSMEM_DLM_DATA *psDLMData = (PHYSMEM_DLM_DATA*)pvImplData; + PVR_LOG_RETURN_IF_INVALID_PARAM(pvImplData != NULL, "pvImplData"); + + *puiSize = psDLMData->uiSize; + + return PVRSRV_OK; +} + +static IMG_UINT32 +PFNGetPageShift(void) +{ + return PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT; +} + +static PVRSRV_ERROR +CreateArenas(PHYSMEM_DLM_DATA *psDLMData, IMG_CHAR *pszLabel, PHYS_HEAP_POLICY uiPolicy) +{ + psDLMData->psRA = RA_Create_With_Span(pszLabel, + OSGetPageShift(), + psDLMData->sStartAddr.uiAddr, + psDLMData->sCardBase.uiAddr, + psDLMData->uiSize, + RA_POLICY_DEFAULT); + PVR_LOG_RETURN_IF_NOMEM(psDLMData->psRA, "RA_Create_With_Span"); + + return PVRSRV_OK; +} + +static void +DestroyArenas(PHYSMEM_DLM_DATA *psDLMData) +{ + /* Remove RAs and RA names for dedicated local memory */ + if (psDLMData->psRA) + { + RA_Delete(psDLMData->psRA); + psDLMData->psRA = NULL; + } +} + +static void +PFNDestroyImplData(PHEAP_IMPL_DATA pvImplData) +{ + PHYSMEM_DLM_DATA *psDLMData = (PHYSMEM_DLM_DATA*)pvImplData; + PVR_LOG_RETURN_VOID_IF_FALSE(pvImplData, "pvImplData NULL"); + + DestroyArenas(pvImplData); + + OSFreeMem(psDLMData); +} + +static PVRSRV_ERROR +PFNPhysmemNewLocalRamBackedPMB(PHYS_HEAP *psPhysHeap, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszAnnotation, + PMB **ppPMBPtr, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiSize) +{ + PHYSMEM_DLM_DATA *psDLMData; + PVRSRV_ERROR eError; + PMB* pPMB; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppPMBPtr != NULL, "ppPMBPtr"); + + /* Check size is aligned to page size */ + if (uiSize & ((1 << PFNGetPageShift()) - 1ULL)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: uiSize %" IMG_UINT64_FMTSPEC " is not aligned to page size: %u", + __func__, + uiSize, + 1 << PFNGetPageShift())); + return PVRSRV_ERROR_PMB_NOT_PAGE_MULTIPLE; + } + + PVR_ASSERT(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_DLM); + + psDLMData = (PHYSMEM_DLM_DATA*)PhysHeapGetImplData(psPhysHeap); + + eError = PMBCreatePMB(psDLMData->psRA, uiSize, IMG_UINT64_C(1) << psDLMData->uiLog2PMBSize, pszAnnotation, &pPMB); + PVR_LOG_GOTO_IF_ERROR(eError, "PMBCreatePMB", error_Return); + + *ppPMBPtr = pPMB; + *puiBase = pPMB->uiBase; + *puiSize = pPMB->uiSize; + + return PVRSRV_OK; + +error_Return: + return eError; +} + +static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = +{ + .pfnDestroyData = &PFNDestroyImplData, + .pfnGetDevPAddr = &PFNGetDevPAddr, + .pfnGetCPUPAddr = &PFNGetCPUPAddr, + .pfnGetSize = &PFNGetSize, + .pfnGetPageShift = &PFNGetPageShift, + .pfnGetFactoryMemStats = &PFNGetLocalRamMemStats, + .pfnCreatePMB = &PFNPhysmemNewLocalRamBackedPMB, +}; + +PVRSRV_ERROR +PhysmemCreateHeapDLM(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_POLICY uiPolicy, + PHYS_HEAP_CONFIG *psConfig, + IMG_CHAR *pszLabel, + PHYS_HEAP **ppsPhysHeap) +{ + PHYSMEM_DLM_DATA *psDLMData; + PHYS_HEAP *psPhysHeap; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psConfig != NULL, "psConfig"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel"); + + 
PVR_ASSERT(psConfig->eType == PHYS_HEAP_TYPE_DLM); + + psDLMData = OSAllocMem(sizeof(*psDLMData)); + PVR_LOG_RETURN_IF_NOMEM(psDLMData, "OSAllocMem"); + + psDLMData->sStartAddr = PhysHeapConfigGetStartAddr(psConfig); + psDLMData->sCardBase = PhysHeapConfigGetCardBase(psConfig); + psDLMData->uiSize = PhysHeapConfigGetSize(psConfig); + psDLMData->uiLog2PMBSize = psConfig->uConfig.sDLM.ui32Log2PMBSize; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDLMData->uiLog2PMBSize >= OSGetPageShift(), "ui32Log2PMBSize must be greater than or equal to OSPageSize"); + + eError = PhysHeapCreate(psDevNode, + psConfig, + uiPolicy, + (PHEAP_IMPL_DATA)psDLMData, + &_sPHEAPImplFuncs, + &psPhysHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreate", error_FreeDlmData); + + eError = CreateArenas(psDLMData, pszLabel, uiPolicy); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateArenas", error_FreePhysHeap); + + if (ppsPhysHeap != NULL) + { + *ppsPhysHeap = psPhysHeap; + } + + return PVRSRV_OK; + +error_FreePhysHeap: + PhysHeapDestroy(psPhysHeap); +error_FreeDlmData: + OSFreeMem(psDLMData); + return eError; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_hostmem.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_hostmem.c index 3e17346a4099..6c87632bbea5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_hostmem.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_hostmem.c @@ -77,14 +77,12 @@ static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[]; static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] = { { - PHYS_HEAP_TYPE_UMA, - "SYSMEM", - &gsHostMemDevPhysHeapFuncs, - {0}, - {0}, - 0, - (IMG_HANDLE)&gsHostMemDevConfig[0], - PHYS_HEAP_USAGE_CPU_LOCAL, + .eType = PHYS_HEAP_TYPE_UMA, + .ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL, + .uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM", + .uConfig.sUMA.psMemFuncs = &gsHostMemDevPhysHeapFuncs, + .uConfig.sUMA.pszHeapName = "uma_cpu_local", + .uConfig.sUMA.hPrivData = (IMG_HANDLE)&gsHostMemDevConfig[0] } }; @@ -152,7 +150,7 @@ PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode) /* N.B.- In case of any failures in this function, we just return error to the caller, as clean-up is taken care by _HostMemDeviceDestroy function */ - psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); + psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode) + sizeof(*psDeviceNode->psMMUDevAttrs)); PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); /* early save return pointer to aid clean-up */ @@ -160,24 +158,37 @@ PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode) psDeviceNode->sDevId.ui32InternalID = PVRSRV_HOST_DEVICE_ID; psDeviceNode->psDevConfig = psDevConfig; - psDeviceNode->papsRegisteredPhysHeaps = - OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) * - psDevConfig->ui32PhysHeapCount); - PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem"); + psDeviceNode->psPhysHeapList = NULL; + + eError = OSLockCreate(&psDeviceNode->hPhysHeapLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", LockCreateErr); eError = PhysHeapCreateHeapFromConfig(psDeviceNode, &psDevConfig->pasPhysHeaps[0], - &psDeviceNode->papsRegisteredPhysHeaps[0]); - PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); - psDeviceNode->ui32RegisteredPhysHeaps = 1; + NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig", PhysHeapCreateErr); /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */ - eError = 
PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_CPU_LOCAL, - psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); - PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire"); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_CPU_LOCAL, + psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire", AcquirePhysHeapErr); + + psDeviceNode->psMMUDevAttrs = (MMU_DEVICEATTRIBS*)(psDeviceNode + 1); + psDeviceNode->psMMUDevAttrs->ui32ValidPageSizeMask = OSGetPageSize(); + + dllist_init(&psDeviceNode->sCleanupThreadWorkList); return PVRSRV_OK; + +AcquirePhysHeapErr: + PhysHeapDestroyDeviceHeaps(psDeviceNode); +PhysHeapCreateErr: + OSLockDestroy(psDeviceNode->hPhysHeapLock); +LockCreateErr: + OSFreeMem(psDeviceNode); + + return eError; } void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) @@ -186,22 +197,17 @@ void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) { return; } - - if (psDeviceNode->papsRegisteredPhysHeaps) + else { if (psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]) { PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); } - if (psDeviceNode->papsRegisteredPhysHeaps[0]) - { - /* clean-up function as well is aware of only one heap */ - PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1); - PhysHeapDestroy(psDeviceNode->papsRegisteredPhysHeaps[0]); - } - - OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps); + PhysHeapDestroyDeviceHeaps(psDeviceNode); } + + OSLockDestroy(psDeviceNode->hPhysHeapLock); + OSFreeMem(psDeviceNode); } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ima.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ima.c new file mode 100644 index 000000000000..87595e1c8391 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ima.c @@ -0,0 +1,655 @@ +/*************************************************************************/ /*! +@File physmem_ima.c +@Title Import memory allocator +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of memory management. This module is responsible for + implementing the function callbacks for local card memory when + used under a shared heap system. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memalloc_physheap.h" +#include "physheap.h" +#include "physheap_config.h" +#include "allocmem.h" +#include "ra.h" +#include "device.h" +#include "osfunc.h" +#include "physmem_ima.h" +#include "physmem_dlm.h" +#include "physmem_ramem.h" + +typedef struct PHYSMEM_IMA_DATA_TAG { + PHYS_HEAP *psPhysHeap; + RA_ARENA *psRA; + PHYS_HEAP *pDLMHeap; + IMG_UINT32 uiLog2PMBSize; + IMG_UINT32 uiReservedPMBs; + PMB **ppsReservedPMBs; +} PHYSMEM_IMA_DATA; + +static PVRSRV_ERROR +IMAGetDevPAddr(PHEAP_IMPL_DATA pvImplData, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + RA_ARENA_ITERATOR *psRAIter = RA_IteratorAcquire(psIMAData->psRA, IMG_FALSE); + RA_ITERATOR_DATA sData = {0}; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_NOMEM(psRAIter, "RA_IteratorAcquire"); + + if (!RA_IteratorNext(psRAIter, &sData)) + { + PVR_LOG_GOTO_WITH_ERROR("RA_IteratorNext", + eError, + PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR, + err_free_iter); + } + + psDevPAddr->uiAddr = sData.uiAddr; + +err_free_iter: + RA_IteratorRelease(psRAIter); + return eError; +} + +static PVRSRV_ERROR +IMAGetCPUPAddr(PHEAP_IMPL_DATA pvImplData, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + IMG_DEV_PHYADDR sDevPAddr; + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + PVRSRV_ERROR eError = IMAGetDevPAddr(pvImplData, &sDevPAddr); + PVR_LOG_RETURN_IF_ERROR(eError, "IMAGetDevPAddr"); + + PhysHeapDevPAddrToCpuPAddr( + psIMAData->pDLMHeap, + 1, + psCpuPAddr, + &sDevPAddr + ); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +IMAGetSize(PHEAP_IMPL_DATA pvImplData, + IMG_UINT64 *puiSize) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + RA_USAGE_STATS sRAUsageStats; + + RA_Get_Usage_Stats(psIMAData->psRA, &sRAUsageStats); + + *puiSize = sRAUsageStats.ui64TotalArenaSize; + + return PVRSRV_OK; +} + +static void IMAPhysmemGetRAMemRamMemStats(PHEAP_IMPL_DATA pvImplData, + IMG_UINT64 *pui64TotalSize, + IMG_UINT64 *pui64FreeSize) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + RA_USAGE_STATS sRAUsageStats; + + RA_Get_Usage_Stats(psIMAData->psRA, &sRAUsageStats); + + *pui64TotalSize = sRAUsageStats.ui64TotalArenaSize; + *pui64FreeSize = sRAUsageStats.ui64FreeArenaSize; +} + +/* + * We iterate on this function multiple times using the IterHandle. + * First iteration will create the IterHandle. + * We return IMG_TRUE when there are iterations remaining. + * We return IMG_FALSE when there are no iterations remaining, this + * will also free the IterHandle. + * Callers must always iterate until false to ensure IterHandle is + * freed. 
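The iterator comment above describes a drain-to-completion contract: the first call allocates the iterator handle behind the scenes, every IMG_TRUE return yields one span, and the IMG_FALSE return is also the point where the handle is released, so a caller that stops early leaks it. A standalone sketch of the same contract with a toy iterator (not the RA iterator API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct iter { int next, count; };   /* toy state standing in for the RA iterator */

/* Returns true while there is output; frees *handle on the final (false) call. */
static bool get_next_line(char *buf, size_t buflen, void **handle)
{
    struct iter *it = *handle;

    if (it == NULL)
    {
        it = calloc(1, sizeof(*it));
        if (it == NULL) return false;
        it->count = 3;
        *handle = it;
    }
    if (it->next < it->count)
    {
        snprintf(buf, buflen, "span %d", it->next++);
        return true;
    }
    free(it);                       /* the handle is only released here */
    *handle = NULL;
    return false;
}

int main(void)
{
    char line[32];
    void *handle = NULL;

    while (get_next_line(line, sizeof(line), &handle))   /* must drain to free */
    {
        puts(line);
    }
    return 0;
}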
+ * */ +static IMG_BOOL IMAGetHeapSpansStringIter(PHEAP_IMPL_DATA pvImplData, + IMG_CHAR *ppszStrBuf, + IMG_UINT32 uiStrBufSize, + void **ppvIterHandle) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + RA_ITERATOR_DATA sData = {0}; + PVRSRV_ERROR eError; + IMG_BOOL bIter = IMG_FALSE; + + /* If we haven't been given an IterHandle */ + if (!*ppvIterHandle) + { + *ppvIterHandle = RA_IteratorAcquire(psIMAData->psRA, IMG_TRUE); + PVR_LOG_GOTO_IF_NOMEM(*ppvIterHandle, eError, return_false); + } + + bIter = RA_IteratorNextSpan(*ppvIterHandle, &sData); + if (bIter) + { + IMG_DEV_PHYADDR sRangeBase = {sData.uiAddr}; + IMG_CHAR aszCpuAddr[18] = "Non-Addressable"; /* length of CPUPHYADDR_UINT_FMTSPEC */ + IMG_UINT64 uiRangeSize = sData.uiSize; + IMG_INT32 iCount; + + /* Cannot get the CPU addr for private heaps. Display "Non-Addressable" instead. */ + if ((PhysHeapGetFlags(psIMAData->psPhysHeap) & PHYS_HEAP_USAGE_GPU_PRIVATE) != PHYS_HEAP_USAGE_GPU_PRIVATE) + { + IMG_CPU_PHYADDR sCPURangeBase; + PhysHeapDevPAddrToCpuPAddr(psIMAData->pDLMHeap, + 1, + &sCPURangeBase, + &sRangeBase); + OSSNPrintf(aszCpuAddr, ARRAY_SIZE(aszCpuAddr), CPUPHYADDR_UINT_FMTSPEC, CPUPHYADDR_FMTARG(sCPURangeBase.uiAddr)); + } + + iCount = OSSNPrintf(ppszStrBuf, + uiStrBufSize, + " " /* padding */ + "CPU PA Base: %s, " + "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", " + "Size: %"IMG_UINT64_FMTSPEC"B", + aszCpuAddr, + sRangeBase.uiAddr, + uiRangeSize); + if (!(0 < iCount && iCount < (IMG_INT32)uiStrBufSize)) + { + PVR_DPF((PVR_DBG_ERROR, "OSSNPrintf in %s(), " + "Heap Span print may be corrupt!", __func__)); + } + return IMG_TRUE; + } + /* else end iteration and free the iter handle */ + + RA_IteratorRelease(*ppvIterHandle); + +return_false: + return IMG_FALSE; +} + +static void +IMAGetHeapDLMBacking(PHEAP_IMPL_DATA pvImplData, + PHYS_HEAP **psDLMPhysHeap) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + *psDLMPhysHeap = psIMAData->pDLMHeap; +} + +/* When uiSize > DLM PMB size, allocates a single physically contiguous Huge PMB to satisfy the import. + * A huge PMB is a PMB with a size larger than the DLM PMB size (1 << psIMAData->uiLog2PMBSize). + * The requested import size will be rounded up to a multiple of the DLM PMB size. */ +static PVRSRV_ERROR IMAImportDLMAllocHuge(RA_PERARENA_HANDLE hArenaHandle, + RA_LENGTH_T uiSize, + RA_FLAGS_T uiFlags, + RA_LENGTH_T uBaseAlignment, + const IMG_CHAR *pszAnnotation, + RA_IMPORT *psImport) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*) hArenaHandle; + PHYS_HEAP *psPhysHeap = psIMAData->pDLMHeap; + PMB *pPMB; + IMG_UINT64 uiPMBSizeBytes; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(uBaseAlignment); + PVR_ASSERT(psIMAData->uiLog2PMBSize != 0); + /* Round up the allocation size to the nearest multiple of the + * DLM PMB size (1 << psIMAData->uiLog2PMBSize). + * If this is greater than DLM PMB size, a huge PMB will be imported. */ + uiPMBSizeBytes = PVR_ALIGN(uiSize, IMG_UINT64_C(1) << psIMAData->uiLog2PMBSize); + + eError = PhysHeapCreatePMB(psPhysHeap, + uiPMBSizeBytes, + pszAnnotation, + &pPMB, + &psImport->base, + &psImport->uSize); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreatePMB"); + + psImport->hPriv = pPMB; + + return PVRSRV_OK; +} + +/* When uiSize <= DLM PMB size, this returns a single PMB, otherwise + * it requests multiple PMBs from the connected DLM heap. + * The PMBs will be RA_Free'd once there are no busy segments inside of the span. 
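Both DLM import paths round the requested size up to a whole number of PMBs, where the PMB size is 1 << uiLog2PMBSize; the round-up is the usual add-then-mask alignment on a power of two. A short worked example with an assumed 64 MiB PMB size (the ALIGN_UP macro here is a local stand-in, not PVR_ALIGN itself):

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint32_t log2_pmb = 26;                                 /* assumed 64 MiB PMBs */
    uint64_t pmb      = UINT64_C(1) << log2_pmb;

    assert(ALIGN_UP(UINT64_C(1)  << 20, pmb) == pmb);       /*  1 MiB -> one PMB   */
    assert(ALIGN_UP(UINT64_C(65) << 20, pmb) == 2 * pmb);   /* 65 MiB -> two PMBs  */
    assert(ALIGN_UP(pmb,                pmb) == pmb);       /* exact multiple kept */
    return 0;
}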
+ * This will trigger the normal imp_free callback. + * This strategy relies on RA_POLICY_ALLOC_ALLOW_NONCONTIG + * and that all spans in the IMA heap are all the same size of 1 PMB, otherwise + * we might add a span that is not used, invalidating the invariants of the RA. */ +static PVRSRV_ERROR IMAImportDLMAllocMulti(RA_PERARENA_HANDLE hArenaHandle, + RA_LENGTH_T uiSize, + RA_FLAGS_T uiFlags, + RA_LENGTH_T uBaseAlignment, + const IMG_CHAR *pszAnnotation, + IMG_UINT32 *puiImportsCount, + RA_IMPORT **ppsImports) +{ + PVRSRV_ERROR eError; + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*) hArenaHandle; + PHYS_HEAP *psPhysHeap = psIMAData->pDLMHeap; + + IMG_UINT32 i; + IMG_UINT32 uiPMBCount; + + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(uBaseAlignment); + PVR_ASSERT(psIMAData->uiLog2PMBSize != 0); + + /* Round up the allocation size to the nearest multiple of the + * DLM PMB size (1 << psIMAData->uiLog2PMBSize). */ + uiSize = PVR_ALIGN(uiSize, IMG_UINT64_C(1) << psIMAData->uiLog2PMBSize); + /* Get the number of PMBs required to make the allocation. */ + uiPMBCount = uiSize >> psIMAData->uiLog2PMBSize; + + /* Try use the provided ppsImports array instead of allocating a new array + * if the array is large enough to hold all the new PMBs. */ + if (uiPMBCount > *puiImportsCount) + { + *ppsImports = OSAllocMem(uiPMBCount * sizeof(**ppsImports)); + PVR_LOG_RETURN_IF_NOMEM(ppsImports, "ppsImports"); + } + *puiImportsCount = uiPMBCount; + + /* Create the PMB that will be become the spans. */ + for (i = 0; i < uiPMBCount; i++) + { + RA_IMPORT *psImport = &(*ppsImports)[i]; + eError = PhysHeapCreatePMB(psPhysHeap, + IMG_UINT64_C(1) << psIMAData->uiLog2PMBSize, + pszAnnotation, + (PMB**) &psImport->hPriv, + &psImport->base, + &psImport->uSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreatePMB", err_FreePMBs); + } + + return PVRSRV_OK; + +/* Free i PMBs from the array. */ +err_FreePMBs: + uiPMBCount = i; + for (i = 0; i < uiPMBCount; i++) + { + PMBDestroy((*ppsImports)[i].hPriv); + } + return eError; +} + +static void IMAImportDLMFree(RA_PERARENA_HANDLE hArenaHandle, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hPriv) +{ + PMB *pPMB = (PMB*) hPriv; + PVR_ASSERT(pPMB != NULL); + + PVR_UNREFERENCED_PARAMETER(hArenaHandle); + PVR_UNREFERENCED_PARAMETER(uiBase); + + PMBDestroy(pPMB); +} + +static void +FreeReservedMemory(PHYSMEM_IMA_DATA *psIMAData) +{ + IMG_UINT32 i; + if (psIMAData->ppsReservedPMBs) + { + for (i = 0; i < psIMAData->uiReservedPMBs; i++) + { + /* The PMBs will be RA_Free'd by RA_Delete, + * so it doesn't need to be done manually */ + PMBDestroy(psIMAData->ppsReservedPMBs[i]); + } + OSFreeMem(psIMAData->ppsReservedPMBs); + } +} + +static PVRSRV_ERROR +AllocateReservedMemory(PHYSMEM_IMA_DATA *psIMAData, + PHYS_HEAP_POLICY uiPolicy, + IMG_UINT32 uiReservedPMBs) +{ + PVRSRV_ERROR eError; + IMG_BOOL bSuccess; + IMG_UINT32 i; + + IMG_UINT64 uiPMBSize = IMG_UINT64_C(1) << psIMAData->uiLog2PMBSize; + IMG_UINT32 uiPMBCount = uiReservedPMBs; + + RA_BASE_T uiBase; + RA_LENGTH_T uiReserveActSize; + + if (uiReservedPMBs == 0) + { + psIMAData->uiReservedPMBs = 0; + psIMAData->ppsReservedPMBs = NULL; + return PVRSRV_OK; + } + + /* If we do not support non-contiguous, we can allocate a single huge PMB. + * We cannot do this for non-contiguous as IMAImportDLMAllocMulti requires all spans in the IMA-RA + * to be exactly the same: 1 << psIMAData->uiLog2PMBSize. 
*/ + if ((uiPolicy & PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG_MASK) != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) + { + uiPMBSize *= uiPMBCount; + uiPMBCount = 1; + } + + /* Count the number of PMBs that have been successfully created. */ + psIMAData->uiReservedPMBs = 0; + psIMAData->ppsReservedPMBs = OSAllocMem(uiPMBCount * sizeof(*psIMAData->ppsReservedPMBs)); + PVR_LOG_RETURN_IF_NOMEM(psIMAData->ppsReservedPMBs, "OSAllocMem"); + + for (i = 0; i < uiPMBCount; i++) + { + eError = PhysHeapCreatePMB(psIMAData->pDLMHeap, + uiPMBSize, + "PMB Reserved", + &psIMAData->ppsReservedPMBs[i], + &uiBase, + &uiReserveActSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreatePMB", err_FreeReserved); + + psIMAData->uiReservedPMBs++; + + bSuccess = RA_Add(psIMAData->psRA, + uiBase, + uiReserveActSize, + 0, + (RA_PERISPAN_HANDLE) psIMAData->ppsReservedPMBs[i]); + PVR_LOG_GOTO_IF_FALSE(bSuccess, "RA_Add", err_AddFailed); + } + + return PVRSRV_OK; + +err_AddFailed: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + +err_FreeReserved: + FreeReservedMemory(psIMAData); + return eError; +} + +static PVRSRV_ERROR +CreateIMAArena(PHYSMEM_IMA_DATA *psIMAData, + IMG_CHAR *pszLabel, + PHYS_HEAP_POLICY uiPolicy, + IMG_UINT32 uiReservedPMBs) +{ + /* In practice an IMA heap only differs from LMA in the fact it can import more memory + * when it has expended its current extent. */ + + /* If non contiguous mapping is available then we should allow that for an IMA heap.*/ + IMG_UINT32 ui32RAPolicy = + ((uiPolicy & PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG_MASK) == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) + ? RA_POLICY_ALLOC_ALLOW_NONCONTIG : RA_POLICY_DEFAULT; + PVRSRV_ERROR eError; + + PVR_ASSERT(psIMAData != NULL); + + if ((ui32RAPolicy & RA_POLICY_ALLOC_ALLOW_NONCONTIG_MASK) == RA_POLICY_ALLOC_ALLOW_NONCONTIG) + { + psIMAData->psRA = RA_CreateMulti(pszLabel, + OSGetPageShift(), + RA_LOCKCLASS_1, + IMAImportDLMAllocMulti, + IMAImportDLMFree, + psIMAData, + ui32RAPolicy); + } + else + { + psIMAData->psRA = RA_Create(pszLabel, + OSGetPageShift(), + RA_LOCKCLASS_1, + IMAImportDLMAllocHuge, + IMAImportDLMFree, + psIMAData, + ui32RAPolicy); + } + PVR_LOG_RETURN_IF_NOMEM(psIMAData->psRA, "RA_Create"); + + eError = AllocateReservedMemory(psIMAData, uiPolicy, uiReservedPMBs); + PVR_LOG_GOTO_IF_ERROR(eError, "AllocateReservedMemory", err_ra_free); + + return PVRSRV_OK; + +err_ra_free: + RA_Delete(psIMAData->psRA); + psIMAData->psRA = NULL; + return eError; +} + +static void +DestroyIMAArena(PHYSMEM_IMA_DATA *psIMAData) +{ + PVR_ASSERT(psIMAData != NULL); + + /* Locked imports should be implicitly freed has a part of + * the RA_Delete assuming there are no allocations remaining on + * the import + */ + FreeReservedMemory(psIMAData); + + /* Remove RAs and RA names for local card memory */ + if (psIMAData->psRA) + { + RA_Delete(psIMAData->psRA); + psIMAData->psRA = NULL; + } +} + +static void +IMADestroyImplData(PHEAP_IMPL_DATA pvImplData) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)pvImplData; + + DestroyIMAArena(pvImplData); + + OSFreeMem(psIMAData); +} + +static PVRSRV_ERROR +PhysmemGetArenaIMA(PHYS_HEAP *psPhysHeap, + RA_ARENA **ppsArena) +{ + PHYSMEM_IMA_DATA *psIMAData = (PHYSMEM_IMA_DATA*)PhysHeapGetImplData(psPhysHeap); + + PVR_LOG_RETURN_IF_FALSE(psIMAData != NULL, "psIMAData", PVRSRV_ERROR_NOT_IMPLEMENTED); + + *ppsArena = psIMAData->psRA; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +IMAPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, + size_t uiSize, + PG_HANDLE *psMemHandle, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + 
PVRSRV_ERROR eError; + + RA_ARENA *pArena; + IMG_UINT32 ui32Log2NumPages = 0; + PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); + + eError = PhysmemGetArenaIMA(psPhysHeap, &pArena); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLocalMem"); + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + eError = RAMemDoPhyContigPagesAlloc(pArena, uiSize, psDevNode, psMemHandle, + psDevPAddr, uiPid); + PVR_LOG_IF_ERROR(eError, "LocalDoPhyContigPagesAlloc"); + + return eError; +} + +static void +IMAPhyContigPagesFree(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle) +{ + RA_ARENA *pArena; + + PhysmemGetArenaIMA(psPhysHeap, &pArena); + + RAMemDoPhyContigPagesFree(pArena, + psMemHandle); +} + +static PVRSRV_ERROR +IMAPhysmemNewRAMemRamBackedPMR(PHYS_HEAP *psPhysHeap, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + RA_ARENA *pArena; + + eError = PhysmemGetArenaIMA(psPhysHeap, &pArena); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaIMA"); + + eError = PhysmemNewRAMemRamBackedPMR(psPhysHeap, + pArena, + psConnection, + uiSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiFlags, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewLocalRamBackedPMR"); + + return PVRSRV_OK; +} + +static PHEAP_IMPL_FUNCS _sPHEAPImplFuncsIMA = +{ + .pfnDestroyData = &IMADestroyImplData, + .pfnGetDevPAddr = &IMAGetDevPAddr, + .pfnGetCPUPAddr = &IMAGetCPUPAddr, + .pfnGetSize = &IMAGetSize, + .pfnGetPageShift = &RAMemGetPageShift, + .pfnGetFactoryMemStats = &IMAPhysmemGetRAMemRamMemStats, + .pfnGetHeapSpansStringIter = &IMAGetHeapSpansStringIter, + .pfnGetHeapDLMBacking = &IMAGetHeapDLMBacking, + .pfnCreatePMR = &IMAPhysmemNewRAMemRamBackedPMR, + .pfnPagesAlloc = &IMAPhyContigPagesAlloc, + .pfnPagesFree = &IMAPhyContigPagesFree, + .pfnPagesMap = &RAMemPhyContigPagesMap, + .pfnPagesUnMap = &RAMemPhyContigPagesUnmap, + .pfnPagesClean = &RAMemPhyContigPagesClean, +}; + +PVRSRV_ERROR +PhysmemCreateHeapIMA(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_POLICY uiPolicy, + PHYS_HEAP_CONFIG *psConfig, + IMG_CHAR *pszLabel, + PHYS_HEAP *psDLMHeap, + IMG_UINT32 uiLog2PMBSize, + PHYS_HEAP **ppsPhysHeap) +{ + PHYSMEM_IMA_DATA *psIMAData; + PHYS_HEAP *psPhysHeap; + IMG_UINT32 uiPMBStartingMultiple = psConfig->uConfig.sIMA.ui32PMBStartingMultiple; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psConfig != NULL, "psConfig"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel"); + + PVR_ASSERT(psConfig->eType == PHYS_HEAP_TYPE_IMA); + + psIMAData = OSAllocMem(sizeof(*psIMAData)); + PVR_LOG_RETURN_IF_NOMEM(psIMAData, "OSAllocMem"); + + eError = PhysHeapCreate(psDevNode, + psConfig, + uiPolicy, + (PHEAP_IMPL_DATA)psIMAData, + &_sPHEAPImplFuncsIMA, + &psPhysHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreate", err_free_ima_data); + + psIMAData->psPhysHeap = psPhysHeap; + psIMAData->pDLMHeap = psDLMHeap; + psIMAData->uiLog2PMBSize = uiLog2PMBSize; + + eError = CreateIMAArena(psIMAData, + pszLabel, + uiPolicy, + uiPMBStartingMultiple); + PVR_LOG_GOTO_IF_ERROR(eError, 
"CreateIMAArena", err_free_physheap); + + if (ppsPhysHeap != NULL) + { + *ppsPhysHeap = psPhysHeap; + } + + return PVRSRV_OK; + +err_free_physheap: + PhysHeapDestroy(psPhysHeap); +err_free_ima_data: + OSFreeMem(psIMAData); + return eError; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_lma.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_lma.c index 5b12acb6d028..3075166b5366 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_lma.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_lma.c @@ -42,124 +42,22 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ -#include "img_types.h" -#include "img_defs.h" +#include "physmem_lma.h" #include "pvr_debug.h" -#include "pvrsrv_error.h" -#include "pvrsrv_memallocflags.h" -#include "rgx_pdump_panics.h" +#include "pvrsrv_memalloc_physheap.h" +#include "physheap.h" +#include "physheap_config.h" #include "allocmem.h" +#include "ra.h" +#include "device.h" #include "osfunc.h" +#include "physmem_ramem.h" #include "pvrsrv.h" -#include "devicemem_server_utils.h" -#include "physmem_lma.h" -#include "pdump_km.h" -#include "pmr.h" -#include "pmr_impl.h" -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#include "process_stats.h" -#endif #if defined(SUPPORT_GPUVIRT_VALIDATION) #include "rgxutils.h" #endif -#if defined(INTEGRITY_OS) -#include "mm.h" -#include "integrity_memobject.h" -#endif - -/* Assert that the conversions between the RA base type and the device - * physical address are safe. - */ -static_assert(sizeof(IMG_DEV_PHYADDR) == sizeof(RA_BASE_T), - "Size IMG_DEV_PHYADDR != RA_BASE_T"); - -/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid - * page address */ -#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0) -#define ZERO_PAGE_VALUE 0 - -typedef struct _PMR_KERNEL_MAP_HANDLE_ { - void *vma; - void *pvKernelAddress; - /* uiSize has 2 uses: - * In Physically contiguous case it is used to track size of the mapping - * for free. - * In Physically sparse case it is used to determine free path to use, single page - * sparse mapping or multi page - */ - size_t uiSize; -} PMR_KERNEL_MAPPING; - -typedef struct _PMR_LMALLOCARRAY_DATA_ { - IMG_PID uiPid; - - /* - * N.B Chunks referenced in this struct commonly are - * to OS page sized. But in reality it is dependent on - * the uiLog2ChunkSize. - * Chunks will always be one 1 << uiLog2ChunkSize in size. - * */ - - /* - * The number of chunks currently allocated in the PMR. - */ - IMG_INT32 iNumChunksAllocated; - - /* - * Total number of (Virtual) chunks supported by this PMR. - */ - IMG_UINT32 uiTotalNumChunks; - - /* The number of chunks to next be allocated for the PMR. - * This will initially be the number allocated at first alloc - * but may be changed in later calls to change sparse. - * It represents the number of chunks to next be allocated. - * This is used to store this value because we have the ability to - * defer allocation. - */ - IMG_UINT32 uiChunksToAlloc; - - /* - * Log2 representation of the chunksize. 
- */ - IMG_UINT32 uiLog2ChunkSize; - - IMG_BOOL bIsSparse; /* Is the PMR sparse */ - IMG_BOOL bPhysContig; /* Is the alloc Physically contiguous */ - RA_BASE_ARRAY_T paBaseArray; /* Array of RA Bases */ - - IMG_BOOL bZeroOnAlloc; - IMG_BOOL bPoisonOnAlloc; - - IMG_BOOL bOnDemand; - - /* - Record at alloc time whether poisoning will be required when the - PMR is freed. - */ - IMG_BOOL bPoisonOnFree; - - /* Physical heap and arena pointers for this allocation */ - PHYS_HEAP* psPhysHeap; - RA_ARENA* psArena; - PVRSRV_MEMALLOCFLAGS_T uiAllocFlags; - - /* - Connection data for this requests' originating process. NULL for - direct-bridge originating calls - */ - CONNECTION_DATA *psConnection; - -} PMR_LMALLOCARRAY_DATA; - -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) -/* Global structure to manage GPU memory leak */ -static DEFINE_MUTEX(g_sLMALeakMutex); -static IMG_UINT32 g_ui32LMALeakCounter = 0; -#endif - typedef struct PHYSMEM_LMA_DATA_TAG { RA_ARENA *psRA; IMG_CPU_PHYADDR sStartAddr; @@ -210,13 +108,7 @@ _GetSize(PHEAP_IMPL_DATA pvImplData, return PVRSRV_OK; } -static IMG_UINT32 -_GetPageShift(void) -{ - return PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT; -} - -static void PhysmemGetLocalRamMemStats(PHEAP_IMPL_DATA pvImplData, +static void PhysmemGetRAMemRamMemStats(PHEAP_IMPL_DATA pvImplData, IMG_UINT64 *pui64TotalSize, IMG_UINT64 *pui64FreeSize) { @@ -229,7 +121,6 @@ static void PhysmemGetLocalRamMemStats(PHEAP_IMPL_DATA pvImplData, *pui64FreeSize = sRAUsageStats.ui64FreeArenaSize; } -#if !defined(SUPPORT_GPUVIRT_VALIDATION) static PVRSRV_ERROR PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap, RA_ARENA **ppsArena) @@ -242,15 +133,14 @@ PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap, return PVRSRV_OK; } -#endif static PVRSRV_ERROR -_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel, PHYSMEM_LMA_POLICY uiLMAPolicy) +_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel, PHYS_HEAP_POLICY uiPolicy) { PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; IMG_UINT32 ui32RAPolicy = - ((uiLMAPolicy & PHYSMEM_LMA_POLOCY_ALLOC_ALLOW_NONCONTIG_MASK) == PHYSMEM_LMA_POLICY_ALLOC_ALLOW_NONCONTIG) + ((uiPolicy & PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG_MASK) == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) ? 
RA_POLICY_ALLOC_ALLOW_NONCONTIG : RA_POLICY_DEFAULT; psLMAData->psRA = RA_Create_With_Span(pszLabel, @@ -296,7 +186,7 @@ struct _PHYS_HEAP_ITERATOR_ { }; PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, - PHYS_HEAP_USAGE_FLAGS ui32Flags, + PVRSRV_PHYS_HEAP ePhysHeap, PHYS_HEAP_ITERATOR **ppsIter) { PVRSRV_ERROR eError; @@ -307,10 +197,9 @@ PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, PVR_LOG_RETURN_IF_INVALID_PARAM(ppsIter != NULL, "ppsIter"); PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); - PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Flags != 0, "ui32Flags"); - eError = PhysHeapAcquireByUsage(ui32Flags, psDevNode, &psPhysHeap); - PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByUsage"); + eError = PhysHeapAcquireByID(ePhysHeap, psDevNode, &psPhysHeap); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByID"); PVR_LOG_GOTO_IF_FALSE(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA, "PhysHeap must be of LMA type", release_heap); @@ -405,100 +294,6 @@ PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, return PVRSRV_OK; } - -static PVRSRV_ERROR -_LMA_DoPhyContigPagesAlloc(RA_ARENA *pArena, - size_t uiSize, - PG_HANDLE *psMemHandle, - IMG_DEV_PHYADDR *psDevPAddr, - IMG_PID uiPid) -{ - RA_BASE_T uiCardAddr = 0; - RA_LENGTH_T uiActualSize; - PVRSRV_ERROR eError; - IMG_UINT32 ui32Log2NumPages; - -#if defined(DEBUG) -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - static IMG_UINT32 ui32MaxLog2NumPages = 7; /* 128 pages => 512KB */ -#else - static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */ -#endif -#endif /* defined(DEBUG) */ - - PVR_ASSERT(uiSize != 0); - ui32Log2NumPages = OSGetOrder(uiSize); - uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); - - eError = RA_Alloc(pArena, - uiSize, - RA_NO_IMPORT_MULTIPLIER, - 0, /* No flags */ - uiSize, - "LMA_PhyContigPagesAlloc", - &uiCardAddr, - &uiActualSize, - NULL); /* No private handle */ - - if (eError != PVRSRV_OK) - { - RA_USAGE_STATS sRAStats; - RA_Get_Usage_Stats(pArena, &sRAStats); - - PVR_DPF((PVR_DBG_ERROR, - "Failed to Allocate size = 0x"IMG_SIZE_FMTSPECX", align = 0x" - IMG_SIZE_FMTSPECX" Arena Free Space 0x%"IMG_UINT64_FMTSPECX, - uiSize, uiSize, sRAStats.ui64FreeArenaSize)); - return eError; - } - - PVR_ASSERT(uiSize == uiActualSize); - - psMemHandle->u.ui64Handle = uiCardAddr; - psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr; - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, - uiSize, - uiCardAddr, - uiPid); -#else - { - IMG_CPU_PHYADDR sCpuPAddr; - sCpuPAddr.uiAddr = psDevPAddr->uiAddr; - - PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, - NULL, - sCpuPAddr, - uiSize, - uiPid - DEBUG_MEMSTATS_VALUES); - } -#endif -#endif -#if defined(SUPPORT_GPUVIRT_VALIDATION) - PVR_DPF((PVR_DBG_MESSAGE, - "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" - IMG_UINT64_FMTSPECX ", Arena ID %u", - __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid)); -#endif - -#if defined(DEBUG) - PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages)); - if (ui32Log2NumPages > ui32MaxLog2NumPages) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__, - ui32MaxLog2NumPages, ui32Log2NumPages )); - ui32MaxLog2NumPages = ui32Log2NumPages; - } -#endif /* defined(DEBUG) */ - psMemHandle->uiOrder = ui32Log2NumPages; - - return eError; -} - #if defined(SUPPORT_GPUVIRT_VALIDATION) 
static PVRSRV_ERROR LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap, @@ -515,7 +310,7 @@ LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap, PVR_ASSERT(uiSize != 0); ui32Log2NumPages = OSGetOrder(uiSize); - uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + uiSize = IMG_PAGES2BYTES64(OSGetPageSize(),ui32Log2NumPages); PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) @@ -535,9 +330,9 @@ LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap, psMemHandle->uiOSid = ui32OSid; /* For Free() use */ - eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, + eError = RAMemDoPhyContigPagesAlloc(pArena, uiSize, psDevNode, psMemHandle, psDevPAddr, uiPid); - PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); + PVR_LOG_IF_ERROR(eError, "RAMemDoPhyContigPagesAlloc"); return eError; } @@ -545,10 +340,10 @@ LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap, static PVRSRV_ERROR LMA_PhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, - size_t uiSize, - PG_HANDLE *psMemHandle, - IMG_DEV_PHYADDR *psDevPAddr, - IMG_PID uiPid) + size_t uiSize, + PG_HANDLE *psMemHandle, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) { #if defined(SUPPORT_GPUVIRT_VALIDATION) IMG_UINT32 ui32OSid = 0; @@ -559,17 +354,18 @@ LMA_PhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, RA_ARENA *pArena; IMG_UINT32 ui32Log2NumPages = 0; + PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); eError = PhysmemGetArenaLMA(psPhysHeap, &pArena); PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); PVR_ASSERT(uiSize != 0); ui32Log2NumPages = OSGetOrder(uiSize); - uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + uiSize = IMG_PAGES2BYTES64(OSGetPageSize(),ui32Log2NumPages); - eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, + eError = RAMemDoPhyContigPagesAlloc(pArena, uiSize, psDevNode, psMemHandle, psDevPAddr, uiPid); - PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); + PVR_LOG_IF_ERROR(eError, "RAMemDoPhyContigPagesAlloc"); return eError; #endif @@ -577,12 +373,12 @@ LMA_PhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, static void LMA_PhyContigPagesFree(PHYS_HEAP *psPhysHeap, - PG_HANDLE *psMemHandle) + PG_HANDLE *psMemHandle) { - RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; RA_ARENA *pArena; #if defined(SUPPORT_GPUVIRT_VALIDATION) + RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); IMG_UINT32 ui32OSid = psMemHandle->uiOSid; @@ -613,93 +409,47 @@ LMA_PhyContigPagesFree(PHYS_HEAP *psPhysHeap, uiCardAddr, ui32OSid)); #else - PhysmemGetArenaLMA(psPhysHeap, &pArena); -#endif - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, - (IMG_UINT64)uiCardAddr); -#else - PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, - (IMG_UINT64)uiCardAddr, - OSGetCurrentClientProcessIDKM()); -#endif + PVRSRV_ERROR eError = PhysmemGetArenaLMA(psPhysHeap, &pArena); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PhysmemGetArenaLMA"); #endif - RA_Free(pArena, uiCardAddr); - psMemHandle->uiOrder = 0; + RAMemDoPhyContigPagesFree(pArena, psMemHandle); } static PVRSRV_ERROR -LMA_PhyContigPagesMap(PHYS_HEAP *psPhysHeap, - PG_HANDLE *psMemHandle, - size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, - void **pvPtr) -{ - IMG_CPU_PHYADDR sCpuPAddr; - IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); - PVR_UNREFERENCED_PARAMETER(uiSize); - - PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, 
psDevPAddr); - *pvPtr = OSMapPhysToLin(sCpuPAddr, - ui32NumPages * OSGetPageSize(), - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); - PVR_RETURN_IF_NOMEM(*pvPtr); - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, - ui32NumPages * OSGetPageSize(), - OSGetCurrentClientProcessIDKM()); -#else - { - PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, - *pvPtr, - sCpuPAddr, - ui32NumPages * OSGetPageSize(), - OSGetCurrentClientProcessIDKM() - DEBUG_MEMSTATS_VALUES); - } -#endif -#endif - return PVRSRV_OK; -} - -static void -LMA_PhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, - PG_HANDLE *psMemHandle, - void *pvPtr) +LMAPhysmemNewRAMemRamBackedPMR(PHYS_HEAP *psPhysHeap, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) { - IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); - PVR_UNREFERENCED_PARAMETER(psPhysHeap); - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, - ui32NumPages * OSGetPageSize(), - OSGetCurrentClientProcessIDKM()); -#else - PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, - (IMG_UINT64)(uintptr_t)pvPtr, - OSGetCurrentClientProcessIDKM()); -#endif -#endif + PVRSRV_ERROR eError; + RA_ARENA *pArena; - OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize()); -} + eError = PhysmemGetArenaLMA(psPhysHeap, &pArena); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); -static PVRSRV_ERROR -LMA_PhyContigPagesClean(PHYS_HEAP *psPhysHeap, - PG_HANDLE *psMemHandle, - IMG_UINT32 uiOffset, - IMG_UINT32 uiLength) -{ - /* No need to flush because we map as uncached */ - PVR_UNREFERENCED_PARAMETER(psPhysHeap); - PVR_UNREFERENCED_PARAMETER(psMemHandle); - PVR_UNREFERENCED_PARAMETER(uiOffset); - PVR_UNREFERENCED_PARAMETER(uiLength); + eError = PhysmemNewRAMemRamBackedPMR(psPhysHeap, + pArena, + psConnection, + uiSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiFlags, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewLocalRamBackedPMR"); return PVRSRV_OK; } @@ -710,2026 +460,64 @@ static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = .pfnGetDevPAddr = &_GetDevPAddr, .pfnGetCPUPAddr = &_GetCPUPAddr, .pfnGetSize = &_GetSize, - .pfnGetPageShift = &_GetPageShift, - .pfnGetPMRFactoryMemStats = &PhysmemGetLocalRamMemStats, - .pfnCreatePMR = &PhysmemNewLocalRamBackedPMR, + .pfnGetPageShift = &RAMemGetPageShift, + .pfnGetFactoryMemStats = &PhysmemGetRAMemRamMemStats, + .pfnCreatePMR = &LMAPhysmemNewRAMemRamBackedPMR, #if defined(SUPPORT_GPUVIRT_VALIDATION) .pfnPagesAllocGPV = &LMA_PhyContigPagesAllocGPV, #endif .pfnPagesAlloc = &LMA_PhyContigPagesAlloc, .pfnPagesFree = &LMA_PhyContigPagesFree, - .pfnPagesMap = &LMA_PhyContigPagesMap, - .pfnPagesUnMap = &LMA_PhyContigPagesUnmap, - .pfnPagesClean = &LMA_PhyContigPagesClean, + .pfnPagesMap = &RAMemPhyContigPagesMap, + .pfnPagesUnMap = &RAMemPhyContigPagesUnmap, + .pfnPagesClean = &RAMemPhyContigPagesClean, }; PVRSRV_ERROR PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, - PHYSMEM_LMA_POLICY uiLMAPolicy, + PHYS_HEAP_POLICY uiPolicy, PHYS_HEAP_CONFIG *psConfig, 
IMG_CHAR *pszLabel, PHYS_HEAP **ppsPhysHeap) { PHYSMEM_LMA_DATA *psLMAData; + PHYS_HEAP *psPhysHeap; PVRSRV_ERROR eError; PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel"); + PVR_ASSERT(psConfig->eType == PHYS_HEAP_TYPE_LMA || + psConfig->eType == PHYS_HEAP_TYPE_DMA); + psLMAData = OSAllocMem(sizeof(*psLMAData)); PVR_LOG_RETURN_IF_NOMEM(psLMAData, "OSAllocMem"); - psLMAData->sStartAddr = psConfig->sStartAddr; - psLMAData->sCardBase = psConfig->sCardBase; - psLMAData->uiSize = psConfig->uiSize; + psLMAData->sStartAddr = PhysHeapConfigGetStartAddr(psConfig); + psLMAData->sCardBase = PhysHeapConfigGetCardBase(psConfig); + psLMAData->uiSize = PhysHeapConfigGetSize(psConfig); eError = PhysHeapCreate(psDevNode, psConfig, + uiPolicy, (PHEAP_IMPL_DATA)psLMAData, &_sPHEAPImplFuncs, - ppsPhysHeap); - if (eError != PVRSRV_OK) - { - OSFreeMem(psLMAData); - return eError; - } - - eError = _CreateArenas(psLMAData, pszLabel, uiLMAPolicy); - PVR_LOG_RETURN_IF_ERROR(eError, "_CreateArenas"); - - - return eError; -} - -static PVRSRV_ERROR _MapPhysicalContigAlloc(PHYS_HEAP *psPhysHeap, - RA_BASE_ARRAY_T paBaseArray, - size_t uiSize, - PMR_FLAGS_T ulFlags, - PMR_KERNEL_MAPPING *psMapping) -{ - IMG_UINT32 ui32CPUCacheFlags; - PVRSRV_ERROR eError; - IMG_CPU_PHYADDR sCpuPAddr; - IMG_DEV_PHYADDR sDevPAddr; - sDevPAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(*paBaseArray); - - eError = DevmemCPUCacheMode(PhysHeapDeviceNode(psPhysHeap), ulFlags, &ui32CPUCacheFlags); - PVR_RETURN_IF_ERROR(eError); - - PhysHeapDevPAddrToCpuPAddr(psPhysHeap, - 1, - &sCpuPAddr, - &sDevPAddr); - - psMapping->pvKernelAddress = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags); - PVR_LOG_RETURN_IF_FALSE(psMapping->pvKernelAddress, - "OSMapPhyToLin: out of VM Mem", - PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING); - psMapping->vma = NULL; - psMapping->uiSize = uiSize; - - return PVRSRV_OK; -} - -static PVRSRV_ERROR _MapPhysicalSparseAlloc(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, - RA_BASE_ARRAY_T paBaseArray, - size_t uiSize, - PMR_FLAGS_T ulFlags, - PMR_KERNEL_MAPPING *psMapping) -{ - IMG_UINT32 uiChunkCount = uiSize >> psLMAllocArrayData->uiLog2ChunkSize; - IMG_CPU_PHYADDR uiPages[PMR_MAX_TRANSLATION_STACK_ALLOC], *puiPages; - PVRSRV_ERROR eError; - size_t uiPageShift = OSGetPageShift(); - IMG_UINT32 uiOSPageCnt = psLMAllocArrayData->uiLog2ChunkSize - uiPageShift; - - if ((uiChunkCount << uiOSPageCnt) > PMR_MAX_TRANSLATION_STACK_ALLOC) - { - puiPages = OSAllocZMem(sizeof(IMG_CPU_PHYADDR) * (uiChunkCount << uiOSPageCnt)); - PVR_RETURN_IF_NOMEM(puiPages); - } - else - { - puiPages = &uiPages[0]; - } - - if (uiOSPageCnt == 0) - { - IMG_UINT32 i; - PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap, - uiChunkCount, - puiPages, - (IMG_DEV_PHYADDR *)paBaseArray); - - /* If the ghost bit is present then the addrs returned will be off by 1 - * Strip the ghost bit to correct to real page aligned addresses. 
- * */ - for (i = 0; i < uiChunkCount; i++) - { - puiPages[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(puiPages[i].uiAddr); - } - } - else - { - IMG_UINT32 i = 0, j = 0, index = 0; - for (i = 0; i < uiChunkCount; i++) - { - IMG_UINT32 ui32OSPagesPerDeviceChunk = (1 << uiOSPageCnt); - IMG_DEV_PHYADDR uiDevAddr; - uiDevAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(paBaseArray[i]); - for (j = 0; j < ui32OSPagesPerDeviceChunk; j++) - { - uiDevAddr.uiAddr += (1ULL << uiPageShift); - PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap, - 1, - &puiPages[index], - &uiDevAddr); - index++; - } - } - } + &psPhysHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreate", err_free_lma_data); - eError = OSMapPhysArrayToLin(puiPages, - uiChunkCount, - &psMapping->pvKernelAddress, - &psMapping->vma); - if (eError == PVRSRV_OK) - { - psMapping->uiSize = uiSize; - } - - if (puiPages != &uiPages[0]) - { - OSFreeMem(puiPages); - } - - return eError; -} - -static PVRSRV_ERROR _MapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, - RA_BASE_ARRAY_T paBaseArray, - size_t uiSize, - PMR_FLAGS_T ulFlags, - PMR_KERNEL_MAPPING *psMapping) -{ - PVRSRV_ERROR eError; - PHYS_HEAP *psPhysHeap = psLMAllocArrayData->psPhysHeap; - if (!psLMAllocArrayData->bIsSparse) - { - /* Physically Contig */ - if (psLMAllocArrayData->bPhysContig) - { - eError = _MapPhysicalContigAlloc(psPhysHeap, - paBaseArray, - uiSize, - ulFlags, - psMapping); - } - /* Physically Sparse */ - else - { - eError = _MapPhysicalSparseAlloc(psLMAllocArrayData, - paBaseArray, - uiSize, - ulFlags, - psMapping); - } - } - else - { - /* Sparse Alloc Single Chunk */ - if (uiSize == (1 << psLMAllocArrayData->uiLog2ChunkSize)) - { - eError = _MapPhysicalContigAlloc(psPhysHeap, - paBaseArray, - uiSize, - ulFlags, - psMapping); - } - /* Sparse Alloc Multi Chunk */ - else - { - eError = _MapPhysicalSparseAlloc(psLMAllocArrayData, - paBaseArray, - uiSize, - ulFlags, - psMapping); - } - } - - return eError; -} - -static void _UnMapPhysicalContigAlloc(PMR_KERNEL_MAPPING *psKernelMapping) -{ - OSUnMapPhysToLin(psKernelMapping->pvKernelAddress, psKernelMapping->uiSize); -} - -static void _UnMapPhysicalSparseAlloc(PMR_KERNEL_MAPPING *psKernelMapping) -{ - OSUnMapPhysArrayToLin(psKernelMapping->pvKernelAddress, - psKernelMapping->vma); -} - -static void _UnMapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, - PMR_KERNEL_MAPPING *psKernelMapping) -{ - if (!psLMAllocArrayData->bIsSparse) - { - /* Physically Contig */ - if (psLMAllocArrayData->bPhysContig) - { - _UnMapPhysicalContigAlloc(psKernelMapping); - } - /* Physically Sparse */ - else - { - _UnMapPhysicalSparseAlloc(psKernelMapping); - } - } - else - { - /* Sparse Alloc Single Chunk */ - if (psKernelMapping->uiSize == (1 << psLMAllocArrayData->uiLog2ChunkSize)) - { - _UnMapPhysicalContigAlloc(psKernelMapping); - } - /* Sparse Alloc Multi Chunk */ - else - { - _UnMapPhysicalSparseAlloc(psKernelMapping); - } - } -} - -static PVRSRV_ERROR -_PhysPgMemSet(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, - RA_BASE_ARRAY_T paBaseArray, - size_t uiSize, - IMG_BYTE ui8SetValue) -{ - PVRSRV_ERROR eError; - PMR_KERNEL_MAPPING sKernelMapping; - - eError = _MapPMRKernel(psLMAllocArrayData, - paBaseArray, - uiSize, - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, - &sKernelMapping); - PVR_GOTO_IF_ERROR(eError, map_failed); - - OSCachedMemSetWMB(sKernelMapping.pvKernelAddress, ui8SetValue, uiSize); - - _UnMapPMRKernel(psLMAllocArrayData, &sKernelMapping); - - return PVRSRV_OK; - -map_failed: - PVR_DPF((PVR_DBG_ERROR, "Failed to poison/zero allocation")); - 
return eError; -} - -static PVRSRV_ERROR -_AllocLMPageArray(PMR_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 uiLog2AllocPageSize, - IMG_BOOL bZero, - IMG_BOOL bPoisonOnAlloc, - IMG_BOOL bPoisonOnFree, - IMG_BOOL bIsSparse, - IMG_BOOL bOnDemand, - PHYS_HEAP* psPhysHeap, - PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, - IMG_PID uiPid, - PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr, - CONNECTION_DATA *psConnection) -{ - PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL; - PVRSRV_ERROR eError; - IMG_UINT32 uiNumPages; - - PVR_ASSERT(!bZero || !bPoisonOnAlloc); - PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize); - - psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA)); - PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray); - - /* Use of cast below is justified by the assertion that follows to - prove that no significant bits have been truncated */ - uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1); - PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize); - - psPageArrayData->uiTotalNumChunks = uiNumPages; - - psPageArrayData->uiChunksToAlloc = bIsSparse ? ui32NumPhysChunks : uiNumPages; - - psPageArrayData->uiLog2ChunkSize = uiLog2AllocPageSize; - psPageArrayData->paBaseArray = OSAllocMem(sizeof(RA_BASE_T) * - psPageArrayData->uiTotalNumChunks); - PVR_GOTO_IF_NOMEM(psPageArrayData->paBaseArray, eError, errorOnAllocAddr); - - /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */ - OSCachedMemSet(psPageArrayData->paBaseArray, - 0xFF, - sizeof(RA_BASE_T) * - psPageArrayData->uiTotalNumChunks); - - psPageArrayData->psConnection = psConnection; - psPageArrayData->uiPid = uiPid; - psPageArrayData->iNumChunksAllocated = 0; - psPageArrayData->bIsSparse = bIsSparse; - psPageArrayData->bZeroOnAlloc = bZero; - psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc; - psPageArrayData->bPoisonOnFree = bPoisonOnFree; - psPageArrayData->bOnDemand = bOnDemand; - psPageArrayData->psPhysHeap = psPhysHeap; - psPageArrayData->uiAllocFlags = uiAllocFlags; - - *ppsPageArrayDataPtr = psPageArrayData; - - return PVRSRV_OK; - - /* - error exit paths follow: - */ -errorOnAllocAddr: - OSFreeMem(psPageArrayData); - -errorOnAllocArray: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; -} - -static PVRSRV_ERROR -_AllocLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData) -{ - PVRSRV_ERROR eError; - IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; - IMG_UINT64 uiPhysSize = (IMG_UINT64) psPageArrayData->uiChunksToAlloc << uiLog2ChunkSize; - IMG_BOOL bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc; - IMG_BOOL bZeroOnAlloc = psPageArrayData->bZeroOnAlloc; - - - eError = RA_AllocMulti(psPageArrayData->psArena, - uiPhysSize, - uiLog2ChunkSize, - RA_NO_IMPORT_MULTIPLIER, - 0, /* No flags */ - "LMA_Page_Alloc", - (RA_BASE_ARRAY_T) psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks, - &psPageArrayData->bPhysContig); - if (PVRSRV_OK != eError) - { - RA_USAGE_STATS sRAStats; - RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); - - PVR_DPF((PVR_DBG_ERROR, - "Failed to Allocate size = 0x%llx, align = 0x%llx" - " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"", - (unsigned long long)uiPhysSize, - 1ULL << uiLog2ChunkSize, - sRAStats.ui64FreeArenaSize)); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); - } - -#if defined(SUPPORT_GPUVIRT_VALIDATION) -{ - PVR_DPF((PVR_DBG_MESSAGE, - "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, - 
psPageArrayData->paBaseArray[0])); -} -#endif - - if (bPoisonOnAlloc) - { - eError = _PhysPgMemSet(psPageArrayData, - psPageArrayData->paBaseArray, - uiPhysSize, - PVRSRV_POISON_ON_ALLOC_VALUE); - PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison); - } - - if (bZeroOnAlloc) - { - eError = _PhysPgMemSet(psPageArrayData, - psPageArrayData->paBaseArray, - uiPhysSize, - ZERO_PAGE_VALUE); - PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero); - } - - psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc; - - /* We have alloc'd the previous request, set 0 for book keeping */ - psPageArrayData->uiChunksToAlloc = 0; - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiPhysSize, psPageArrayData->uiPid); -#else - { - IMG_UINT32 i, j; - IMG_CPU_PHYADDR sLocalCpuPAddr; - - for (i = 0; i < psPageArrayData->uiTotalNumChunks;) - { - IMG_UINT32 ui32AllocSizeInChunks = 1; - - for (j = i; - j + 1 != psPageArrayData->uiTotalNumChunks && - RA_BASE_IS_GHOST(psPageArrayData->paBaseArray[j + 1]); - j++) - { - ui32AllocSizeInChunks++; - } - - sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->paBaseArray[i]; - PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - NULL, - sLocalCpuPAddr, - ui32AllocSizeInChunks << uiLog2ChunkSize, - psPageArrayData->uiPid - DEBUG_MEMSTATS_VALUES); - - i += ui32AllocSizeInChunks; - } - } -#endif -#endif - - return PVRSRV_OK; - - /* - error exit paths follow: - */ -errorOnZero: -errorOnPoison: - eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; - - RA_FreeMulti(psPageArrayData->psArena, - psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks); - -errorOnRAAlloc: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; -} - -/* - * Fully allocated variant of sparse allocation does not take in as argument an - * array of indices. It is used in cases where the amount of chunks to allocate is - * the same as the total the PMR can represent. I.E when we want to fully populate - * a sparse PMR. 
- */ -static PVRSRV_ERROR -_AllocLMPagesSparseFull(PMR_LMALLOCARRAY_DATA *psPageArrayData) -{ - PVRSRV_ERROR eError; - IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; - IMG_UINT64 uiPhysSize = (IMG_UINT64) psPageArrayData->uiChunksToAlloc << uiLog2ChunkSize; - IMG_BOOL bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc; - IMG_BOOL bZeroOnAlloc = psPageArrayData->bZeroOnAlloc; - - - eError = RA_AllocMultiSparse(psPageArrayData->psArena, - uiLog2ChunkSize, - RA_NO_IMPORT_MULTIPLIER, - 0, /* No flags */ - "LMA_Page_Alloc", - (RA_BASE_ARRAY_T) psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks, - NULL, /* No indices given meaning allocate full base array using chunk count below */ - psPageArrayData->uiChunksToAlloc); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_ERROR, - "RA Failed to Allocate size = 0x%llx", - (unsigned long long) uiPhysSize)); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); - } - -#if defined(SUPPORT_GPUVIRT_VALIDATION) -{ - PVR_DPF((PVR_DBG_MESSAGE, - "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, - psPageArrayData->paBaseArray[0])); -} -#endif - - if (bPoisonOnAlloc) - { - eError = _PhysPgMemSet(psPageArrayData, - psPageArrayData->paBaseArray, - uiPhysSize, - PVRSRV_POISON_ON_ALLOC_VALUE); - PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison); - } - - if (bZeroOnAlloc) - { - eError = _PhysPgMemSet(psPageArrayData, - psPageArrayData->paBaseArray, - uiPhysSize, - ZERO_PAGE_VALUE); - PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero); - } - - psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc; + eError = _CreateArenas(psLMAData, pszLabel, uiPolicy); + PVR_LOG_GOTO_IF_ERROR(eError, "_CreateArenas", err_free_physheap); - /* We have alloc'd the previous request, set 0 for book keeping */ - psPageArrayData->uiChunksToAlloc = 0; - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiPhysSize, psPageArrayData->uiPid); -#else + if (ppsPhysHeap != NULL) { - IMG_UINT32 i; - - for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) - { - IMG_CPU_PHYADDR sLocalCpuPAddr; - sLocalCpuPAddr.uiAddr = - (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->paBaseArray[i]); - PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - NULL, - sLocalCpuPAddr, - 1 << uiLog2ChunkSize, - psPageArrayData->uiPid - DEBUG_MEMSTATS_VALUES); - } + *ppsPhysHeap = psPhysHeap; } -#endif -#endif return PVRSRV_OK; - /* - error exit paths follow: - */ -errorOnZero: -errorOnPoison: - eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; - - RA_FreeMulti(psPageArrayData->psArena, - psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks); - -errorOnRAAlloc: - PVR_ASSERT(eError != PVRSRV_OK); +err_free_physheap: + PhysHeapDestroy(psPhysHeap); +err_free_lma_data: + OSFreeMem(psLMAData); return eError; -} - -static PVRSRV_ERROR -_AllocLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) -{ - PVRSRV_ERROR eError; - IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; - IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize; - IMG_UINT32 uiChunksToAlloc = psPageArrayData->uiChunksToAlloc; - IMG_BOOL bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc; - IMG_BOOL bZeroOnAlloc = psPageArrayData->bZeroOnAlloc; - - if (!pui32MapTable) - { - PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, 
errorOnRAAlloc); - } -#if defined(DEBUG) - /* - * This block performs validation of the mapping table input in the following ways: - * Check that each index in the mapping table does not exceed the number of the chunks - * the whole PMR supports. - * Check that each index given by the mapping table is not already allocated. - * Check that there are no duplicated indices given in the mapping table. - */ - { - IMG_UINT32 i; - IMG_BOOL bIssueDetected = IMG_FALSE; - PVRSRV_ERROR eMapCheckError; - - for (i = 0; i < uiChunksToAlloc; i++) - { - if (pui32MapTable[i] >= psPageArrayData->uiTotalNumChunks) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Page alloc request Index out of bounds for PMR @0x%p", - __func__, - psPageArrayData)); - eMapCheckError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; - bIssueDetected = IMG_TRUE; - break; - } - - if (!RA_BASE_IS_INVALID(psPageArrayData->paBaseArray[pui32MapTable[i]])) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Mapping already exists Index %u Mapping index %u", - __func__, - i, - pui32MapTable[i])); - eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; - bIssueDetected = IMG_TRUE; - break; - } - - if (RA_BASE_IS_SPARSE_PREP(psPageArrayData->paBaseArray[pui32MapTable[i]])) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Mapping already exists in mapping table given Index %u Mapping index %u", - __func__, - i, - pui32MapTable[i])); - eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; - bIssueDetected = IMG_TRUE; - break; - } - else - { - /* Set the To Prep value so we can detect duplicated map indices */ - psPageArrayData->paBaseArray[pui32MapTable[i]] = RA_BASE_SPARSE_PREP_ALLOC_ADDR; - } - } - /* Unwind the Alloc Prep Values */ - if (bIssueDetected) - { - /* We don't want to affect the index of the issue seen - * as it could be a valid mapping. If it is a duplicated - * mapping in the given table then we will clean-up the - * previous instance anyway. - */ - IMG_UINT32 uiUnwind = i; - - for (i = 0; i < uiUnwind; i++) - { - psPageArrayData->paBaseArray[pui32MapTable[i]] = INVALID_BASE_ADDR; - } - - PVR_GOTO_WITH_ERROR(eError, eMapCheckError, errorOnRAAlloc); - } - } -#endif - - eError = RA_AllocMultiSparse(psPageArrayData->psArena, - psPageArrayData->uiLog2ChunkSize, - RA_NO_IMPORT_MULTIPLIER, - 0, - "LMA_Page_Alloc", - psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks, - pui32MapTable, - uiChunksToAlloc); - if (PVRSRV_OK != eError) - { - RA_USAGE_STATS sRAStats; - RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); - - PVR_DPF((PVR_DBG_ERROR, - "Failed to Allocate the pages, size = 0x%x, align = 0x%x" - " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"", - uiChunkSize, - uiChunkSize, - sRAStats.ui64FreeArenaSize)); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); - } - -#if defined(SUPPORT_GPUVIRT_VALIDATION) -{ - PVR_DPF((PVR_DBG_MESSAGE, - "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, - psPageArrayData->paBaseArray[pui32MapTable[0]])); -} -#endif - - if (bPoisonOnAlloc || bZeroOnAlloc) - { - IMG_UINT32 i, ui32Index = 0; - for (i = 0; i < uiChunksToAlloc; i++) - { - ui32Index = pui32MapTable[i]; - - eError = _PhysPgMemSet(psPageArrayData, - &psPageArrayData->paBaseArray[ui32Index], - uiChunkSize, - bPoisonOnAlloc ? 
PVRSRV_POISON_ON_ALLOC_VALUE : - ZERO_PAGE_VALUE); - PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoisonZero); - } - } - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - uiChunksToAlloc << uiLog2ChunkSize, - psPageArrayData->uiPid); -#else - { - IMG_UINT32 i; - - for (i = 0; i < psPageArrayData->uiChunksToAlloc; i++) - { - IMG_UINT32 ui32Index = pui32MapTable[i]; - IMG_CPU_PHYADDR sLocalCpuPAddr; - sLocalCpuPAddr.uiAddr = - (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->paBaseArray[ui32Index]); - PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - NULL, - sLocalCpuPAddr, - uiChunkSize, - psPageArrayData->uiPid - DEBUG_MEMSTATS_VALUES); - } - } -#endif -#endif - - psPageArrayData->iNumChunksAllocated += uiChunksToAlloc; - - /* We have alloc'd the previous request, set 0 for book keeping */ - psPageArrayData->uiChunksToAlloc = 0; - - return PVRSRV_OK; - - /* - error exit paths follow: - */ -errorOnPoisonZero: - eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; - - RA_FreeMultiSparse(psPageArrayData->psArena, - psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks, - psPageArrayData->uiLog2ChunkSize, - pui32MapTable, - &uiChunksToAlloc); - -errorOnRAAlloc: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; - -} - -static PVRSRV_ERROR -_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) -{ - PVRSRV_ERROR eError; - RA_ARENA *pArena; - - PVR_ASSERT(NULL != psPageArrayData); - PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); - - if (psPageArrayData->uiTotalNumChunks < - (psPageArrayData->iNumChunksAllocated + psPageArrayData->uiChunksToAlloc)) - { - PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. " - "Allocated: %u + Requested: %u > Total Allowed: %u", - psPageArrayData->iNumChunksAllocated, - psPageArrayData->uiChunksToAlloc, - psPageArrayData->uiTotalNumChunks)); - return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; - } - - /* If we have a non-backed sparse PMR then we can just return */ - if (psPageArrayData->uiChunksToAlloc == 0) - { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Non-Backed Sparse PMR Created: %p.", - __func__, - psPageArrayData)); - return PVRSRV_OK; - } - -#if defined(SUPPORT_GPUVIRT_VALIDATION) - { - IMG_UINT32 ui32OSid=0; - PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap); - - /* Obtain the OSid specific data from our connection handle */ - if (psPageArrayData->psConnection != NULL) - { - ui32OSid = psPageArrayData->psConnection->ui32OSid; - } - - if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags)) - { - pArena=psDevNode->psOSSharedArena; - PVR_DPF((PVR_DBG_MESSAGE, - "(GPU Virtualization Validation): Giving from shared mem")); - } - else - { - pArena=psDevNode->psOSidSubArena[ui32OSid]; - PVR_DPF((PVR_DBG_MESSAGE, - "(GPU Virtualization Validation): Giving from OS slot %d", - ui32OSid)); - } - } -#else - /* Get suitable local memory region for this GPU physheap allocation */ - eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena); - PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); -#endif - - psPageArrayData->psArena = pArena; - - /* - * 3 cases: - * Sparse allocation populating the whole PMR. - * [**********] - * Sparse allocation partially populating the PMR at given indices. - * [*** *** **] - * Contiguous allocation. 
- * [**********] - * - * Note: Separate cases are required for 1 and 3 due to memstats tracking. - * In Contiguous case we can track the block as a single memstat record as we know - * we will also free in that size record. - * Sparse allocations require a memstat record per chunk as they can be arbitrarily - * free'd. - */ - if (psPageArrayData->bIsSparse) - { - if (psPageArrayData->uiTotalNumChunks == psPageArrayData->uiChunksToAlloc && - !pui32MapTable) - { - eError = _AllocLMPagesSparseFull(psPageArrayData); - } - else - { - eError = _AllocLMPagesSparse(psPageArrayData, pui32MapTable); - } - } - else - { - eError = _AllocLMPagesContig(psPageArrayData); - } - - return eError; -} - -static PVRSRV_ERROR -_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData) -{ - - OSFreeMem(psPageArrayData->paBaseArray); - - PVR_DPF((PVR_DBG_MESSAGE, - "physmem_lma.c: freed local memory array structure for PMR @0x%p", - psPageArrayData)); - - OSFreeMem(psPageArrayData); - - return PVRSRV_OK; -} - -static PVRSRV_ERROR -_FreeLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData) -{ - RA_ARENA *pArena = psPageArrayData->psArena; - IMG_UINT64 uiPhysSize = - (IMG_UINT64) psPageArrayData->uiTotalNumChunks << psPageArrayData->uiLog2ChunkSize; - PVRSRV_ERROR eError; - - PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0); - PVR_ASSERT(psPageArrayData->iNumChunksAllocated == - psPageArrayData->uiTotalNumChunks); - - if (psPageArrayData->bPoisonOnFree) - { - eError = _PhysPgMemSet(psPageArrayData, - psPageArrayData->paBaseArray, - uiPhysSize, - PVRSRV_POISON_ON_FREE_VALUE); - PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); - } - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#if !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - uiPhysSize, - psPageArrayData->uiPid); -#else - { - IMG_UINT32 i; - - for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) - { - if (RA_BASE_IS_REAL(psPageArrayData->paBaseArray[i])) - { - PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - (IMG_UINT64) psPageArrayData->paBaseArray[i], - psPageArrayData->uiPid); - } - } - - } -#endif -#endif - - if (psPageArrayData->bPhysContig) - { - eError = RA_FreeMulti(pArena, - (RA_BASE_ARRAY_T) psPageArrayData->paBaseArray, - 1); - PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); - } - else - { - eError = RA_FreeMulti(pArena, - (RA_BASE_ARRAY_T) psPageArrayData->paBaseArray, - psPageArrayData->iNumChunksAllocated); - PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); - } - - psPageArrayData->iNumChunksAllocated = 0; - - PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); - - PVR_DPF((PVR_DBG_MESSAGE, - "%s: freed %"IMG_UINT64_FMTSPEC" local memory for PMR @0x%p", - __func__, - uiPhysSize, - psPageArrayData)); - - return eError; -} - -static PVRSRV_ERROR -_FreeLMPagesRemainingSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData) -{ - IMG_UINT32 i; - PVRSRV_ERROR eError; - IMG_UINT32 uiChunkSize = 1ULL << psPageArrayData->uiLog2ChunkSize; - IMG_BOOL bPoisonOnFree = psPageArrayData->bPoisonOnFree; - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - psPageArrayData->iNumChunksAllocated << psPageArrayData->uiLog2ChunkSize, - psPageArrayData->uiPid); -#endif - - for (i = 0; i < psPageArrayData->uiTotalNumChunks;) - { - if (RA_BASE_IS_REAL(psPageArrayData->paBaseArray[i])) - { - IMG_UINT32 j; - IMG_UINT32 ui32AccumulatedChunks = 1; - - for (j = i; - j + 1 != 
psPageArrayData->uiTotalNumChunks && - RA_BASE_IS_GHOST(psPageArrayData->paBaseArray[j + 1]); - j++) - { - ui32AccumulatedChunks++; - } - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) - for (j = i; j < (i + ui32AccumulatedChunks); j++) - { - PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - RA_BASE_STRIP_GHOST_BIT(psPageArrayData->paBaseArray[j]), - psPageArrayData->uiPid); - if (bPoisonOnFree) -#else - for (j = i; j < (i + ui32AccumulatedChunks) && bPoisonOnFree; j++) - { -#endif - { - eError = _PhysPgMemSet(psPageArrayData, - &psPageArrayData->paBaseArray[j], - uiChunkSize, - PVRSRV_POISON_ON_FREE_VALUE); - PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); - } - } - - eError = RA_FreeMulti(psPageArrayData->psArena, - &psPageArrayData->paBaseArray[i], - ui32AccumulatedChunks); - PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); - - psPageArrayData->iNumChunksAllocated -= ui32AccumulatedChunks; - i += ui32AccumulatedChunks; - } - else if (RA_BASE_IS_INVALID(psPageArrayData->paBaseArray[i])) - { - i++; - } - } - - /* We have freed all allocations in the previous loop */ - PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); - - return PVRSRV_OK; -} - -static PVRSRV_ERROR -_FreeLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, - IMG_UINT32 *pui32FreeIndices, - IMG_UINT32 ui32FreeChunkCount) -{ - RA_ARENA *pArena = psPageArrayData->psArena; - IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; - IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize; - IMG_BOOL bPoisonOnFree = psPageArrayData->bPoisonOnFree; - IMG_UINT32 uiActualFreeCount = ui32FreeChunkCount; - PVRSRV_ERROR eError; - - PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0); - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) - { - IMG_UINT32 i; - for (i = 0; i < ui32FreeChunkCount; i++) - { - IMG_UINT32 ui32Index = pui32FreeIndices[i]; - - PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT( - psPageArrayData->paBaseArray[ui32Index]), - psPageArrayData->uiPid); - } - } -#endif - - if (bPoisonOnFree) - { - IMG_UINT32 i, ui32Index = 0; - for (i = 0; i < ui32FreeChunkCount; i++) - { - ui32Index = pui32FreeIndices[i]; - - eError = _PhysPgMemSet(psPageArrayData, - &psPageArrayData->paBaseArray[ui32Index], - uiChunkSize, - PVRSRV_POISON_ON_FREE_VALUE); - PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); - } - } - - eError = RA_FreeMultiSparse(pArena, - psPageArrayData->paBaseArray, - psPageArrayData->uiTotalNumChunks, - uiLog2ChunkSize, - pui32FreeIndices, - &uiActualFreeCount); - psPageArrayData->iNumChunksAllocated -= uiActualFreeCount; -#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, - uiActualFreeCount << psPageArrayData->uiLog2ChunkSize, - psPageArrayData->uiPid); -#endif - if (eError == PVRSRV_ERROR_RA_FREE_INVALID_CHUNK) - { - /* Log the RA error but convert it to PMR level to match the interface, - * this is important because other PMR factories may not use the RA but - * still return error, returning a PMR based error - * keeps the interface agnostic to implementation behaviour. 
- */ - PVR_LOG_IF_ERROR(eError, "RA_FreeMultiSparse"); - return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; - } - PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMultiSparse"); - - PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); - - - PVR_DPF((PVR_DBG_MESSAGE, - "%s: freed %d local memory for PMR @0x%p", - __func__, - (uiActualFreeCount * uiChunkSize), - psPageArrayData)); - - return PVRSRV_OK; -} - -static PVRSRV_ERROR -_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, - IMG_UINT32 *pui32FreeIndices, - IMG_UINT32 ui32FreeChunkCount) -{ - PVRSRV_ERROR eError; - - if (psPageArrayData->bIsSparse) - { - if (!pui32FreeIndices) - { - eError = _FreeLMPagesRemainingSparse(psPageArrayData); - } - else - { - eError = _FreeLMPagesSparse(psPageArrayData, pui32FreeIndices, ui32FreeChunkCount); - } - } - else - { - eError = _FreeLMPagesContig(psPageArrayData); - } - - return eError; -} - -/* - * - * Implementation of callback functions - * - */ - -/* destructor func is called after last reference disappears, but - before PMR itself is freed. */ -static PVRSRV_ERROR -PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv) -{ - PVRSRV_ERROR eError; - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; - - psLMAllocArrayData = pvPriv; - - /* We can't free pages until now. */ - if (psLMAllocArrayData->iNumChunksAllocated != 0) - { -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - IMG_UINT32 ui32LMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; - - mutex_lock(&g_sLMALeakMutex); - - g_ui32LMALeakCounter++; - if (ui32LMALeakMax && g_ui32LMALeakCounter >= ui32LMALeakMax) - { - g_ui32LMALeakCounter = 0; - mutex_unlock(&g_sLMALeakMutex); - - PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); - return PVRSRV_OK; - } - - mutex_unlock(&g_sLMALeakMutex); -#endif - eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); - PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */ - } - - eError = _FreeLMPageArray(psLMAllocArrayData); - PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */ - - return PVRSRV_OK; -} - -/* callback function for locking the system physical page addresses. - As we are LMA there is nothing to do as we control physical memory. */ -static PVRSRV_ERROR -PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) -{ - - PVRSRV_ERROR eError; - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; - - psLMAllocArrayData = pvPriv; - - if (psLMAllocArrayData->bOnDemand) - { - /* Allocate Memory for deferred allocation */ - eError = _AllocLMPages(psLMAllocArrayData, NULL); - PVR_RETURN_IF_ERROR(eError); - } - - return PVRSRV_OK; -} - -static PVRSRV_ERROR -PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; - - psLMAllocArrayData = pvPriv; - - if (psLMAllocArrayData->bOnDemand) - { - /* Free Memory for deferred allocation */ - eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); - PVR_RETURN_IF_ERROR(eError); - } - - PVR_ASSERT(eError == PVRSRV_OK); - return eError; -} - -/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! 
*/ -static PVRSRV_ERROR -PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv, - IMG_UINT32 ui32Log2PageSize, - IMG_UINT32 ui32NumOfPages, - IMG_DEVMEM_OFFSET_T *puiOffset, - IMG_BOOL *pbValid, - IMG_DEV_PHYADDR *psDevPAddr) -{ - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; - IMG_UINT32 idx; - IMG_UINT32 uiLog2AllocSize; - IMG_UINT64 uiAllocIndex; - IMG_DEVMEM_OFFSET_T uiInAllocOffset; - IMG_UINT32 uiNumAllocs = psLMAllocArrayData->uiTotalNumChunks; - - if (psLMAllocArrayData->uiLog2ChunkSize < ui32Log2PageSize) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Requested physical addresses from PMR " - "for incompatible contiguity %u!", - __func__, - ui32Log2PageSize)); - return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; - } - - PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0); - PVR_ASSERT(ui32Log2PageSize >= RA_BASE_FLAGS_LOG2); - - if (psLMAllocArrayData->bPhysContig) - { - for (idx=0; idx < ui32NumOfPages; idx++) - { - if (pbValid[idx]) - { - psDevPAddr[idx].uiAddr = psLMAllocArrayData->paBaseArray[0] + puiOffset[idx]; - } - } - } - else - { - uiLog2AllocSize = psLMAllocArrayData->uiLog2ChunkSize; - - for (idx=0; idx < ui32NumOfPages; idx++) - { - if (pbValid[idx]) - { - uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize; - uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize); - - PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs, - "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); - - PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize)); - - /* The base may or may not be a ghost base, but we don't care, - * we just need the real representation of the base. - */ - psDevPAddr[idx].uiAddr = RA_BASE_STRIP_GHOST_BIT( - psLMAllocArrayData->paBaseArray[uiAllocIndex] + uiInAllocOffset); - } - } - } - - return PVRSRV_OK; -} - -static PVRSRV_ERROR -PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, - size_t uiOffset, - size_t uiSize, - void **ppvKernelAddressOut, - IMG_HANDLE *phHandleOut, - PMR_FLAGS_T ulFlags) -{ - PVRSRV_ERROR eError; - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; - PMR_KERNEL_MAPPING *psKernelMapping; - RA_BASE_ARRAY_T paBaseArray; - IMG_UINT32 ui32ChunkIndex = 0; - size_t uiOffsetMask = uiOffset; - - IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize; - IMG_UINT64 uiChunkSize = 1ULL << uiLog2ChunkSize; - IMG_UINT64 uiPhysSize; - - PVR_ASSERT(psLMAllocArrayData); - PVR_ASSERT(ppvKernelAddressOut); - PVR_ASSERT(phHandleOut); - - if (psLMAllocArrayData->bIsSparse) - { - IMG_UINT32 i; - /* Locate the desired physical chunk to map in */ - ui32ChunkIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize; - - if (OSIsMapPhysNonContigSupported()) - { - /* If a size hasn't been supplied assume we are mapping a single page */ - IMG_UINT32 uiNumChunksToMap = uiSize ? 
uiSize >> psLMAllocArrayData->uiLog2ChunkSize : uiChunkSize; - - /* Check we are attempting to map at least a chunk in size */ - if (uiNumChunksToMap < 1) - { - PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, "uiNumChunksToMap < 1"); - } - - /* Check contiguous region doesn't exceed size of PMR */ - if (ui32ChunkIndex + (uiNumChunksToMap - 1) > psLMAllocArrayData->uiTotalNumChunks) - { - PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, - "Mapping range exceeds total num chunks in PMR"); - } - - /* Check the virtually contiguous region given is physically backed */ - for (i = ui32ChunkIndex; i < ui32ChunkIndex + uiNumChunksToMap; i++) - { - if (RA_BASE_IS_INVALID(psLMAllocArrayData->paBaseArray[i])) - { - PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check"); - } - } - /* Size of virtually contiguous sparse alloc */ - uiPhysSize = uiSize; - } - else - { - size_t uiStart = uiOffset; - size_t uiEnd = uiOffset + uiSize - 1; - size_t uiChunkMask = ~((1 << psLMAllocArrayData->uiLog2ChunkSize) - 1); - - /* We can still map if only one chunk is required */ - if ((uiStart & uiChunkMask) != (uiEnd & uiChunkMask)) - { - PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check"); - } - /* Map a single chunk */ - uiPhysSize = uiChunkSize; - } - - paBaseArray = &psLMAllocArrayData->paBaseArray[ui32ChunkIndex]; - - /* Offset mask to be used for address offsets within a chunk */ - uiOffsetMask = (1U << psLMAllocArrayData->uiLog2ChunkSize) - 1; - } - else - { - paBaseArray = psLMAllocArrayData->paBaseArray; - uiPhysSize = (IMG_UINT64) psLMAllocArrayData->uiTotalNumChunks << uiLog2ChunkSize; - } - - PVR_ASSERT(ui32ChunkIndex < psLMAllocArrayData->uiTotalNumChunks); - - psKernelMapping = OSAllocMem(sizeof(*psKernelMapping)); - PVR_RETURN_IF_NOMEM(psKernelMapping); - - eError = _MapPMRKernel(psLMAllocArrayData, - paBaseArray, - uiPhysSize, - ulFlags, - psKernelMapping); - if (eError == PVRSRV_OK) - { - /* uiOffset & uiOffsetMask is used to get the kernel addr within the page */ - *ppvKernelAddressOut = ((IMG_CHAR *) psKernelMapping->pvKernelAddress) + (uiOffset & uiOffsetMask); - *phHandleOut = psKernelMapping; - } - else - { - OSFreeMem(psKernelMapping); - PVR_LOG_ERROR(eError, "_MapPMRKernel"); - } - - return eError; -} - -static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, - IMG_HANDLE hHandle) -{ - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv; - PMR_KERNEL_MAPPING *psKernelMapping = (PMR_KERNEL_MAPPING *) hHandle; - - PVR_ASSERT(psLMAllocArrayData); - PVR_ASSERT(psKernelMapping); - - _UnMapPMRKernel(psLMAllocArrayData, - psKernelMapping); - - OSFreeMem(psKernelMapping); -} - -static PVRSRV_ERROR -CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, - IMG_DEVMEM_OFFSET_T uiOffset, - IMG_UINT8 *pcBuffer, - size_t uiBufSz, - size_t *puiNumBytes, - void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer, - IMG_UINT8 *pcPMR, - size_t uiSize)) -{ - PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; - size_t uiBytesCopied; - size_t uiBytesToCopy; - size_t uiBytesCopyableFromAlloc; - PMR_KERNEL_MAPPING sMapping; - IMG_UINT8 *pcKernelPointer = NULL; - size_t uiBufferOffset; - IMG_UINT64 uiAllocIndex; - IMG_DEVMEM_OFFSET_T uiInAllocOffset; - IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize; - IMG_UINT64 uiChunkSize = 1ULL << uiLog2ChunkSize; - IMG_UINT64 uiPhysSize; - PVRSRV_ERROR eError; - - uiBytesCopied = 0; - uiBytesToCopy = uiBufSz; - uiBufferOffset = 0; - - if 
(psLMAllocArrayData->bIsSparse) - { - while (uiBytesToCopy > 0) - { - /* we have to map one alloc in at a time */ - PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0); - uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize; - uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2ChunkSize); - uiBytesCopyableFromAlloc = uiBytesToCopy; - if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2ChunkSize)) - { - uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2ChunkSize)-uiInAllocOffset); - } - /* Mapping a single chunk at a time */ - uiPhysSize = uiChunkSize; - - PVR_ASSERT(uiBytesCopyableFromAlloc != 0); - PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumChunks); - PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2ChunkSize)); - - eError = _MapPMRKernel(psLMAllocArrayData, - &psLMAllocArrayData->paBaseArray[uiAllocIndex], - uiPhysSize, - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, - &sMapping); - PVR_GOTO_IF_ERROR(eError, e0); - pcKernelPointer = sMapping.pvKernelAddress; - pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc); - - _UnMapPMRKernel(psLMAllocArrayData, - &sMapping); - - uiBufferOffset += uiBytesCopyableFromAlloc; - uiBytesToCopy -= uiBytesCopyableFromAlloc; - uiOffset += uiBytesCopyableFromAlloc; - uiBytesCopied += uiBytesCopyableFromAlloc; - } - } - else - { - uiPhysSize = (IMG_UINT64) psLMAllocArrayData->uiTotalNumChunks << uiLog2ChunkSize; - PVR_ASSERT((uiOffset + uiBufSz) <= uiPhysSize); - PVR_ASSERT(uiChunkSize != 0); - eError = _MapPMRKernel(psLMAllocArrayData, - psLMAllocArrayData->paBaseArray, - uiPhysSize, - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, - &sMapping); - PVR_GOTO_IF_ERROR(eError, e0); - pcKernelPointer = sMapping.pvKernelAddress; - pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); - - _UnMapPMRKernel(psLMAllocArrayData, - &sMapping); - - uiBytesCopied = uiBufSz; - } - *puiNumBytes = uiBytesCopied; - return PVRSRV_OK; -e0: - *puiNumBytes = uiBytesCopied; - return eError; -} - -static void ReadLocalMem(IMG_UINT8 *pcBuffer, - IMG_UINT8 *pcPMR, - size_t uiSize) -{ - /* the memory is mapped as WC (and also aligned to page size) so we can - * safely call "Cached" memcpy */ - OSCachedMemCopy(pcBuffer, pcPMR, uiSize); -} - -static PVRSRV_ERROR -PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, - IMG_DEVMEM_OFFSET_T uiOffset, - IMG_UINT8 *pcBuffer, - size_t uiBufSz, - size_t *puiNumBytes) -{ - return CopyBytesLocalMem(pvPriv, - uiOffset, - pcBuffer, - uiBufSz, - puiNumBytes, - ReadLocalMem); -} - -static void WriteLocalMem(IMG_UINT8 *pcBuffer, - IMG_UINT8 *pcPMR, - size_t uiSize) -{ - /* the memory is mapped as WC (and also aligned to page size) so we can - * safely call "Cached" memcpy but need to issue a write memory barrier - * to flush the write buffers after */ - OSCachedMemCopyWMB(pcPMR, pcBuffer, uiSize); -} - -static PVRSRV_ERROR -PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, - IMG_DEVMEM_OFFSET_T uiOffset, - IMG_UINT8 *pcBuffer, - size_t uiBufSz, - size_t *puiNumBytes) -{ - return CopyBytesLocalMem(pvPriv, - uiOffset, - pcBuffer, - uiBufSz, - puiNumBytes, - WriteLocalMem); -} - -/*************************************************************************/ /*! -@Function PMRChangeSparseMemLocalMem -@Description This function Changes the sparse mapping by allocating and - freeing of pages. It also changes the GPU maps accordingly. 
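The sparse copy loop above walks the allocation one chunk at a time by splitting each byte offset into a chunk index and an intra-chunk offset. A standalone sketch of that arithmetic, assuming power-of-two chunk sizes expressed as a log2 value (hypothetical names, plain C):

#include <stdint.h>
#include <stdio.h>

/* Split a byte offset into a chunk index and an offset within that chunk,
 * assuming the chunk size is 1 << log2_chunk_size. */
static void split_offset(uint64_t offset, unsigned log2_chunk_size,
                         uint64_t *chunk_index, uint64_t *in_chunk_offset)
{
    *chunk_index = offset >> log2_chunk_size;
    *in_chunk_offset = offset - (*chunk_index << log2_chunk_size);
}

int main(void)
{
    uint64_t idx, rem;

    /* 4 KiB chunks: offset 0x3010 falls in chunk 3 at offset 0x10. */
    split_offset(0x3010, 12, &idx, &rem);
    printf("chunk %llu, offset 0x%llx\n",
           (unsigned long long)idx, (unsigned long long)rem);
    return 0;
}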
-@Return PVRSRV_ERROR failure code -*/ /**************************************************************************/ -static PVRSRV_ERROR -PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv, - const PMR *psPMR, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices, - IMG_UINT32 uiFlags) -{ - PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; - - IMG_UINT32 ui32AdtnlAllocPages = 0; - IMG_UINT32 ui32AdtnlFreePages = 0; - IMG_UINT32 ui32CommonRequstCount = 0; - IMG_UINT32 ui32Loop = 0; - IMG_UINT32 ui32Index = 0; - IMG_UINT32 uiAllocpgidx; - IMG_UINT32 uiFreepgidx; - - PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; - IMG_UINT32 uiLog2ChunkSize = psPMRPageArrayData->uiLog2ChunkSize; - IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize; - -#if defined(DEBUG) - IMG_BOOL bPoisonFail = IMG_FALSE; - IMG_BOOL bZeroFail = IMG_FALSE; -#endif - - /* Fetch the Page table array represented by the PMR */ - RA_BASE_ARRAY_T paBaseArray = psPMRPageArrayData->paBaseArray; - PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); - - /* The incoming request is classified into two operations independent of - * each other: alloc & free chunks. - * These operations can be combined with two mapping operations as well - * which are GPU & CPU space mappings. - * - * From the alloc and free chunk requests, the net amount of chunks to be - * allocated or freed is computed. Chunks that were requested to be freed - * will be reused to fulfil alloc requests. - * - * The order of operations is: - * 1. Allocate new Chunks. - * 2. Move the free chunks from free request to alloc positions. - * 3. Free the rest of the chunks not used for alloc - * - * Alloc parameters are validated at the time of allocation - * and any error will be handled then. */ - - if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) - { - ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ? 
- ui32FreePageCount : ui32AllocPageCount; - - PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free chunks not supported"); - } - - if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) - { - ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount; - } - else - { - ui32AllocPageCount = 0; - } - - if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) - { - ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount; - } - else - { - ui32FreePageCount = 0; - } - - PVR_LOG_RETURN_IF_FALSE( - (ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0, - "Invalid combination of parameters: ui32CommonRequstCount," - " ui32AdtnlAllocPages and ui32AdtnlFreePages.", - PVRSRV_ERROR_INVALID_PARAMS - ); - - { - /* Validate the free page indices */ - if (ui32FreePageCount) - { - if (pai32FreeIndices != NULL) - { - for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) - { - uiFreepgidx = pai32FreeIndices[ui32Loop]; - - if (uiFreepgidx > psPMRPageArrayData->uiTotalNumChunks) - { - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); - } - - if (RA_BASE_IS_INVALID(paBaseArray[uiFreepgidx])) - { - PVR_LOG_GOTO_WITH_ERROR("paBaseArray[uiFreepgidx]", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); - } - } - } - else - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Given non-zero free count but missing indices array", - __func__)); - return PVRSRV_ERROR_INVALID_PARAMS; - } - } - - /* The following block of code verifies any issues with common alloc chunk indices */ - for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) - { - uiAllocpgidx = pai32AllocIndices[ui32Loop]; - if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumChunks) - { - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); - } - - if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) - { - if ((!RA_BASE_IS_INVALID(paBaseArray[uiAllocpgidx])) || - (psPMRMapTable->aui32Translation[uiAllocpgidx] != TRANSLATION_INVALID)) - { - PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); - } - } - else - { - if ((RA_BASE_IS_INVALID(paBaseArray[uiAllocpgidx])) || - (psPMRMapTable->aui32Translation[uiAllocpgidx] == TRANSLATION_INVALID)) - { - PVR_LOG_GOTO_WITH_ERROR("Unable to remap memory due to missing page", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); - } - } - } - - ui32Loop = 0; - - /* Allocate new chunks */ - if (0 != ui32AdtnlAllocPages) - { - /* Say how many chunks to allocate */ - psPMRPageArrayData->uiChunksToAlloc = ui32AdtnlAllocPages; - - eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices); - PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0); - - /* Mark the corresponding chunks of translation table as valid */ - for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) - { - psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; - } - - psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; - } - - ui32Index = ui32Loop; - ui32Loop = 0; - - /* Move the corresponding free chunks to alloc request */ - eError = RA_SwapSparseMem(psPMRPageArrayData->psArena, - paBaseArray, - psPMRPageArrayData->uiTotalNumChunks, - psPMRPageArrayData->uiLog2ChunkSize, - &pai32AllocIndices[ui32Index], - &pai32FreeIndices[ui32Loop], - ui32CommonRequstCount); - PVR_LOG_GOTO_IF_ERROR(eError, "RA_SwapSparseMem", unwind_alloc); - - for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++) - { - uiAllocpgidx = 
pai32AllocIndices[ui32Index]; - uiFreepgidx = pai32FreeIndices[ui32Loop]; - - /* Is remap mem used in real world scenario? Should it be turned to a - * debug feature? The condition check needs to be out of loop, will be - * done at later point though after some analysis */ - if ((uiFlags & SPARSE_REMAP_MEM) != SPARSE_REMAP_MEM) - { - psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; - psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; - } - else - { - psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; - psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; - } - - /* Be sure to honour the attributes associated with the allocation - * such as zeroing, poisoning etc. */ - if (psPMRPageArrayData->bPoisonOnAlloc) - { - eError = _PhysPgMemSet(psPMRPageArrayData, - &psPMRPageArrayData->paBaseArray[uiAllocpgidx], - uiChunkSize, - PVRSRV_POISON_ON_ALLOC_VALUE); - - /* Consider this as a soft failure and go ahead but log error to kernel log */ - if (eError != PVRSRV_OK) - { -#if defined(DEBUG) - bPoisonFail = IMG_TRUE; -#endif - } - } - - if (psPMRPageArrayData->bZeroOnAlloc) - { - eError = _PhysPgMemSet(psPMRPageArrayData, - &psPMRPageArrayData->paBaseArray[uiAllocpgidx], - uiChunkSize, - ZERO_PAGE_VALUE); - /* Consider this as a soft failure and go ahead but log error to kernel log */ - if (eError != PVRSRV_OK) - { -#if defined(DEBUG) - /* Don't think we need to zero any chunks further */ - bZeroFail = IMG_TRUE; -#endif - } - } - } - - /* Free the additional free chunks */ - if (0 != ui32AdtnlFreePages) - { - ui32Index = ui32Loop; - eError = _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); - PVR_LOG_GOTO_IF_ERROR(eError, "_FreeLMPages", e0); - - ui32Loop = 0; - - while (ui32Loop++ < ui32AdtnlFreePages) - { - /* Set the corresponding mapping table entry to invalid address */ - psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID; - } - - psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; - } - } - -#if defined(DEBUG) - if (IMG_TRUE == bPoisonFail) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the chunk", __func__)); - } - - if (IMG_TRUE == bZeroFail) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the chunk", __func__)); - } -#endif - - return PVRSRV_OK; - -unwind_alloc: - _FreeLMPages(psPMRPageArrayData, pai32AllocIndices, ui32Index); - - for (ui32Loop = 0; ui32Loop < ui32Index; ui32Loop++) - { - psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = TRANSLATION_INVALID; - } - -e0: - return eError; -} - -/*************************************************************************/ /*! 
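The resize path above first folds the alloc and free requests together: chunks queued for freeing are reused to satisfy allocations, and only the remainders are actually allocated or released. A minimal sketch of that decomposition, with illustrative flag values standing in for the SPARSE_RESIZE_* flags:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the resize flags; values are arbitrary here. */
#define RESIZE_ALLOC  0x1u
#define RESIZE_FREE   0x2u
#define RESIZE_BOTH   (RESIZE_ALLOC | RESIZE_FREE)

/* Chunks requested for free can be reused for alloc ("common"); only the
 * remainder of each list is really allocated or released. */
static void decompose_request(uint32_t flags,
                              uint32_t alloc_count, uint32_t free_count,
                              uint32_t *common,
                              uint32_t *extra_alloc, uint32_t *extra_free)
{
    *common = 0;
    if ((flags & RESIZE_BOTH) == RESIZE_BOTH)
        *common = (alloc_count < free_count) ? alloc_count : free_count;

    *extra_alloc = (flags & RESIZE_ALLOC) ? alloc_count - *common : 0;
    *extra_free  = (flags & RESIZE_FREE)  ? free_count  - *common : 0;
}

int main(void)
{
    uint32_t common, extra_alloc, extra_free;

    decompose_request(RESIZE_BOTH, 5, 3, &common, &extra_alloc, &extra_free);
    printf("common=%u new allocs=%u frees=%u\n", common, extra_alloc, extra_free);
    /* Prints: common=3 new allocs=2 frees=0 */
    return 0;
}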
-@Function PMRChangeSparseMemCPUMapLocalMem -@Description This function Changes CPU maps accordingly -@Return PVRSRV_ERROR failure code -*/ /**************************************************************************/ -static -PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv, - const PMR *psPMR, - IMG_UINT64 sCpuVAddrBase, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices) -{ - PVRSRV_ERROR eError; - IMG_DEV_PHYADDR *psPageArray; - PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; - uintptr_t sCpuVABase = sCpuVAddrBase; - IMG_CPU_PHYADDR sCpuAddrPtr; - IMG_BOOL bValid = IMG_FALSE; - IMG_UINT32 i; - - /* Get the base address of the heap */ - eError = PMR_CpuPhysAddr(psPMR, - psPMRPageArrayData->uiLog2ChunkSize, - 1, - 0, /* offset zero here mean first page in the PMR */ - &sCpuAddrPtr, - &bValid); - PVR_LOG_RETURN_IF_ERROR(eError, "PMR_CpuPhysAddr"); - - /* Phys address of heap is computed here by subtracting the offset of this page - * basically phys address of any page = Base address of heap + offset of the page */ - sCpuAddrPtr.uiAddr -= RA_BASE_STRIP_GHOST_BIT(psPMRPageArrayData->paBaseArray[0]); - - /* We still have ghost bits in the base array, this interface expects true page - * addresses so we need to pre mask / translate the base array - */ - psPageArray = OSAllocMem(sizeof(IMG_DEV_PHYADDR)* - psPMRPageArrayData->uiTotalNumChunks); - PVR_LOG_RETURN_IF_NOMEM(psPageArray, "Page translation array"); - - for (i = 0; i < psPMRPageArrayData->uiTotalNumChunks; i++) - { - psPageArray[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(psPMRPageArrayData->paBaseArray[i]); - } - - eError = OSChangeSparseMemCPUAddrMap((void**) psPageArray, - sCpuVABase, - sCpuAddrPtr, - ui32AllocPageCount, - pai32AllocIndices, - ui32FreePageCount, - pai32FreeIndices, - IMG_TRUE); - - OSFreeMem(psPageArray); - - return eError; -} - -static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { - .pfnLockPhysAddresses = &PMRLockSysPhysAddressesLocalMem, - .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesLocalMem, - .pfnDevPhysAddr = &PMRSysPhysAddrLocalMem, - .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataLocalMem, - .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataLocalMem, - .pfnReadBytes = &PMRReadBytesLocalMem, - .pfnWriteBytes = &PMRWriteBytesLocalMem, - .pfnChangeSparseMem = &PMRChangeSparseMemLocalMem, - .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapLocalMem, - .pfnMMap = NULL, - .pfnFinalize = &PMRFinalizeLocalMem -}; - -PVRSRV_ERROR -PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, - CONNECTION_DATA *psConnection, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - IMG_UINT32 uiLog2AllocPageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - const IMG_CHAR *pszAnnotation, - IMG_PID uiPid, - PMR **ppsPMRPtr, - IMG_UINT32 ui32PDumpFlags) -{ - PVRSRV_ERROR eError; - PVRSRV_ERROR eError2; - PMR *psPMR = NULL; - PMR_LMALLOCARRAY_DATA *psPrivData = NULL; - PMR_FLAGS_T uiPMRFlags; - IMG_BOOL bZero; - IMG_BOOL bPoisonOnAlloc; - IMG_BOOL bPoisonOnFree; - IMG_BOOL bOnDemand; - IMG_BOOL bIsSparse; - - /* This path is checking for the type of PMR to create, if sparse we - * have to perform additional validation as we can only map sparse ranges - * if the os functionality to do so is present. We can also only map virtually - * contiguous sparse regions. Non backed gaps in a range cannot be mapped. 
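The classification the factory applies below can be summarised as: a PMR is treated as sparse unless it is a single, fully backed virtual chunk. A small sketch of that check, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* A PMR is sparse when it is not a single, fully backed virtual range. */
static bool pmr_is_sparse(unsigned phys_chunks, unsigned virt_chunks)
{
    return phys_chunks != virt_chunks || virt_chunks > 1;
}

int main(void)
{
    printf("%d\n", pmr_is_sparse(1, 1));  /* 0: single fully backed chunk */
    printf("%d\n", pmr_is_sparse(4, 8));  /* 1: only half the range is backed */
    printf("%d\n", pmr_is_sparse(8, 8));  /* 1: multi-chunk, handled as sparse */
    return 0;
}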
- */ - if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1) - { - if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) && - !OSIsMapPhysNonContigSupported()) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: LMA kernel mapping functions not available " - "for physically discontiguous memory.", - __func__)); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); - } - bIsSparse = IMG_TRUE; - } - else - { - bIsSparse = IMG_FALSE; - } - - bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE; - bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; - bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; -#if defined(DEBUG) - bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE; -#else - bPoisonOnFree = IMG_FALSE; -#endif - - /* Create Array structure that holds the physical pages */ - eError = _AllocLMPageArray(uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - uiLog2AllocPageSize, - bZero, - bPoisonOnAlloc, - bPoisonOnFree, - bIsSparse, - bOnDemand, - psPhysHeap, - uiFlags, - uiPid, - &psPrivData, - psConnection); - PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray); - - if (!bOnDemand) - { - /* Allocate the physical pages */ - eError = _AllocLMPages(psPrivData, pui32MappingTable); - PVR_GOTO_IF_ERROR(eError, errorOnAllocPages); - } - - /* In this instance, we simply pass flags straight through. - - Generically, uiFlags can include things that control the PMR - factory, but we don't need any such thing (at the time of - writing!), and our caller specifies all PMR flags so we don't - need to meddle with what was given to us. - */ - uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); - /* check no significant bits were lost in cast due to different - bit widths for flags */ - PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); - - if (bOnDemand) - { - PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (LMA)"); - } - - eError = PMRCreatePMR(psPhysHeap, - uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - pui32MappingTable, - uiLog2AllocPageSize, - uiPMRFlags, - pszAnnotation, - &_sPMRLMAFuncTab, - psPrivData, - PMR_TYPE_LMA, - &psPMR, - ui32PDumpFlags); - PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate); - - *ppsPMRPtr = psPMR; - return PVRSRV_OK; - -errorOnCreate: - if (!bOnDemand && psPrivData->iNumChunksAllocated) - { - eError2 = _FreeLMPages(psPrivData, NULL, 0); - PVR_ASSERT(eError2 == PVRSRV_OK); - } - -errorOnAllocPages: - eError2 = _FreeLMPageArray(psPrivData); - PVR_ASSERT(eError2 == PVRSRV_OK); - -errorOnAllocPageArray: -errorOnParam: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_osmem.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_osmem.c new file mode 100644 index 000000000000..cef1ea620821 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_osmem.c @@ -0,0 +1,91 @@ +/*************************************************************************/ /*! +@File physmem_osmem.c +@Title OS Memory PMR Factory common definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of Services memory management. This file defines the + OS memory PMR factory API that must be defined so that the + common & device layer code in the Services Server can allocate + new PMRs back with pages from the OS page allocator. 
Applicable + for UMA based platforms, such platforms must implement this API + in the OS Porting layer, in the "env" directory for that + system. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" + +/* services/server/include/ */ +#include "physheap.h" +#include "osfunc.h" +#include "physmem_osmem.h" + + +static IMG_UINT32 PhysHeapOSGetPageShift(void) +{ + return (IMG_UINT32)OSGetPageShift(); +} + +static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = +{ + .pfnDestroyData = NULL, + .pfnGetFactoryMemStats = PhysmemGetOSRamMemStats, + .pfnCreatePMR = PhysmemNewOSRamBackedPMR, + .pfnPagesAlloc = &OSPhyContigPagesAlloc, + .pfnPagesFree = &OSPhyContigPagesFree, + .pfnPagesMap = &OSPhyContigPagesMap, + .pfnPagesUnMap = &OSPhyContigPagesUnmap, + .pfnPagesClean = &OSPhyContigPagesClean, + .pfnGetPageShift = &PhysHeapOSGetPageShift, +}; + + +PVRSRV_ERROR +PhysmemCreateHeapOSMEM(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_POLICY uiPolicy, + PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP **ppsPhysHeap) +{ + return PhysHeapCreate(psDevNode, + psConfig, + uiPolicy, + NULL, + &_sPHEAPImplFuncs, + ppsPhysHeap); +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ramem.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ramem.c new file mode 100644 index 000000000000..6597042770d9 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/physmem_ramem.c @@ -0,0 +1,2609 @@ +/*************************************************************************/ /*! +@File physmem_ramem.c +@Title Resource allocator managed PMR Factory common definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of Services memory management. This file defines the + RA managed memory PMR factory API that is shared between local + physheap implementations (LMA & IMA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "physmem_ramem.h" +#include "physheap.h" +#include "allocmem.h" +#include "ra.h" +#include "connection_server.h" +#include "device.h" +#include "devicemem_server_utils.h" +#include "osfunc.h" +#include "pmr.h" +#include "pmr_impl.h" +#include "rgx_pdump_panics.h" +#include "pdump_km.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "rgxutils.h" +#endif + +#if defined(INTEGRITY_OS) +#include "mm.h" +#include "integrity_memobject.h" +#endif + +#include "physmem_dlm.h" + + +#if defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) +#define PHYSHEAP_DPF_LVL PVR_DBG_ERROR +#else +#define PHYSHEAP_DPF_LVL PVR_DBG_WARNING +#endif + +/* Common Physheap Callback implementations */ + +IMG_UINT32 +RAMemGetPageShift(void) +{ + return PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT; +} + +PVRSRV_ERROR +RAMemDoPhyContigPagesAlloc(RA_ARENA *pArena, + size_t uiSize, + PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + RA_BASE_T uiCardAddr = 0; + RA_LENGTH_T uiActualSize; + PVRSRV_ERROR eError; + IMG_UINT32 ui32Log2NumPages; + +#if defined(DEBUG) + static IMG_UINT32 ui32MaxLog2NumPagesHistory = 0; +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* Firmware heaps on VZ are usually LMA, and several 64kb allocations need to + * be made on Guest drivers: FwGuardPage, FwSysInit, FwConnectionCtl, FwOsInit. + * Increase the maximum number to avoid driver warnings. 
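The contiguous page allocator below rounds each request up to a power-of-two number of pages before asking the RA for memory. A standalone sketch of that rounding, assuming 4 KiB pages and modelled loosely on the usual get_order computation (the driver's OSGetOrder may differ in detail):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12u              /* assume 4 KiB pages for the sketch */

/* Smallest order n such that (1 << n) pages cover 'size' bytes. */
static unsigned size_to_order(uint64_t size)
{
    uint64_t pages = (size + (1ull << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
    unsigned order = 0;

    while ((1ull << order) < pages)
        order++;
    return order;
}

int main(void)
{
    /* 20 KiB needs 5 pages, which rounds up to order 3 (8 pages, 32 KiB). */
    printf("order=%u\n", size_to_order(20 * 1024));
    return 0;
}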
*/ + IMG_UINT32 ui32MaxLog2NumPages = 7; /* 128 pages => 512KB */ +#else + IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */ +#endif + ui32MaxLog2NumPages = MAX(ui32MaxLog2NumPages, OSGetOrder(1 << psDevNode->ui32Non4KPageSizeLog2)); +#else /* defined(DEBUG) */ + PVR_UNREFERENCED_PARAMETER(psDevNode); +#endif /* defined(DEBUG) */ + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = IMG_PAGES2BYTES64(OSGetPageSize(),ui32Log2NumPages); + + eError = RA_Alloc(pArena, + uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + uiSize, + "RAMemPhyContigPagesAlloc", + &uiCardAddr, + &uiActualSize, + NULL); /* No private handle */ + + if (eError != PVRSRV_OK) + { + RA_USAGE_STATS sRAStats; + RA_Get_Usage_Stats(pArena, &sRAStats); + + PVR_DPF((PVR_DBG_ERROR, + "Failed to Allocate size = 0x"IMG_SIZE_FMTSPECX", align = 0x" + IMG_SIZE_FMTSPECX" Arena Free Space 0x%"IMG_UINT64_FMTSPECX, + uiSize, uiSize, sRAStats.ui64FreeArenaSize)); + return eError; + } + + PVR_ASSERT(uiSize == uiActualSize); + + psMemHandle->u.ui64Handle = uiCardAddr; + psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + uiSize, + uiCardAddr, + uiPid); +#else + { + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = psDevPAddr->uiAddr; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + NULL, + sCpuPAddr, + uiSize, + uiPid + DEBUG_MEMSTATS_VALUES); + } +#endif +#else /* PVRSRV_ENABLE_PROCESS_STATS */ + PVR_UNREFERENCED_PARAMETER(uiPid); +#endif /* PVRSRV_ENABLE_PROCESS_STATS */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVR_DPF((PVR_DBG_MESSAGE, + "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" + IMG_UINT64_FMTSPECX ", Arena ID %u", + __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid)); +#endif + +#if defined(DEBUG) + if (ui32Log2NumPages > ui32MaxLog2NumPages && ui32Log2NumPages > ui32MaxLog2NumPagesHistory) + { + PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages)); + PVR_DPF((PVR_DBG_ERROR, + "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__, + ui32MaxLog2NumPages, ui32Log2NumPages )); + ui32MaxLog2NumPagesHistory = ui32Log2NumPages; + } +#endif /* defined(DEBUG) */ + psMemHandle->uiOrder = ui32Log2NumPages; + + return eError; +} + +void +RAMemDoPhyContigPagesFree(RA_ARENA *pArena, + PG_HANDLE *psMemHandle) +{ + RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + (IMG_UINT64)uiCardAddr); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + (IMG_UINT64)uiCardAddr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + RA_Free(pArena, uiCardAddr); + psMemHandle->uiOrder = 0; +} + +PVRSRV_ERROR +RAMemPhyContigPagesMap(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr) +{ + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); + PVR_UNREFERENCED_PARAMETER(uiSize); + + PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr); + *pvPtr = OSMapPhysToLin(sCpuPAddr, + ui32NumPages * OSGetPageSize(), + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); + PVR_RETURN_IF_NOMEM(*pvPtr); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + 
PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM()); +#else + { + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + *pvPtr, + sCpuPAddr, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_VALUES); + } +#endif +#endif + return PVRSRV_OK; +} + +void +RAMemPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle, + void *pvPtr) +{ + IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); + PVR_UNREFERENCED_PARAMETER(psPhysHeap); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM()); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + (IMG_UINT64)(uintptr_t)pvPtr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize()); +} + +PVRSRV_ERROR +RAMemPhyContigPagesClean(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength) +{ + /* No need to flush because we map as uncached */ + PVR_UNREFERENCED_PARAMETER(psPhysHeap); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiLength); + + return PVRSRV_OK; +} + +/* Local memory allocation routines */ + +/* Assert that the conversions between the RA base type and the device + * physical address are safe. + */ +static_assert(sizeof(IMG_DEV_PHYADDR) == sizeof(RA_BASE_T), + "Size IMG_DEV_PHYADDR != RA_BASE_T"); + +/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid + * page address */ +#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0) +#define ZERO_PAGE_VALUE 0 + +typedef struct _PMR_KERNEL_MAP_HANDLE_ { + void *vma; + void *pvKernelAddress; + /* uiSize has 2 uses: + * In Physically contiguous case it is used to track size of the mapping + * for free. + * In Physically sparse case it is used to determine free path to use, single page + * sparse mapping or multi page + */ + size_t uiSize; +} PMR_KERNEL_MAPPING; + +typedef struct _PMR_LMALLOCARRAY_DATA_ { + +#define FLAG_ZERO (0U) +#define FLAG_POISON_ON_FREE (1U) +#define FLAG_POISON_ON_ALLOC (2U) +#define FLAG_ONDEMAND (3U) +#define FLAG_SPARSE (4U) +#define FLAG_PHYS_CONTIG (5U) +#define FLAG_ZOMBIE (6U) + + IMG_PID uiPid; + + /* + * N.B Chunks referenced in this struct commonly are + * to OS page sized. But in reality it is dependent on + * the uiLog2ChunkSize. + * Chunks will always be one 1 << uiLog2ChunkSize in size. + * */ + + /* + * The number of chunks currently allocated in the PMR. + */ + IMG_INT32 iNumChunksAllocated; + + /* + * Total number of (Virtual) chunks supported by this PMR. + */ + IMG_UINT32 uiTotalNumChunks; + + /* The number of chunks to next be allocated for the PMR. + * This will initially be the number allocated at first alloc + * but may be changed in later calls to change sparse. + * It represents the number of chunks to next be allocated. + * This is used to store this value because we have the ability to + * defer allocation. + */ + IMG_UINT32 uiChunksToAlloc; + + /* + * Log2 representation of the chunksize. + */ + IMG_UINT32 uiLog2ChunkSize; + + /* Physical heap and arena pointers for this allocation */ + PHYS_HEAP* psPhysHeap; + RA_ARENA* psArena; + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags; + + /* + Connection data for this requests' originating process. 
NULL for + direct-bridge originating calls + */ + CONNECTION_DATA *psConnection; + + /* + * Allocation flags related to the pages: + * Zero - Should we Zero memory on alloc + * Poison on free - Should we Poison the memory on free. + * Poison on alloc - Should we Poison the memory on alloc. + * On demand - Is the allocation on Demand i.e Do we defer allocation to time of use. + * Sparse - Is the PMR sparse. + * Phys Contig - Is the alloc Physically contiguous + * Zombie - Is zombie + * */ + IMG_UINT32 ui32Flags; + + RA_BASE_ARRAY_T aBaseArray; /* Array of RA Bases */ + +} PMR_LMALLOCARRAY_DATA; + +static PVRSRV_ERROR +_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, + IMG_UINT32 *pui32FreeIndices, + IMG_UINT32 ui32FreeChunkCount); + +static PVRSRV_ERROR _MapPhysicalContigAlloc(PHYS_HEAP *psPhysHeap, + RA_BASE_ARRAY_T paBaseArray, + size_t uiSize, + PMR_FLAGS_T ulFlags, + PMR_KERNEL_MAPPING *psMapping) +{ + IMG_UINT32 ui32CPUCacheFlags; + PVRSRV_ERROR eError; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + sDevPAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(*paBaseArray); + + eError = DevmemCPUCacheMode(ulFlags, &ui32CPUCacheFlags); + PVR_RETURN_IF_ERROR(eError); + + PhysHeapDevPAddrToCpuPAddr(psPhysHeap, + 1, + &sCpuPAddr, + &sDevPAddr); + + psMapping->pvKernelAddress = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags); + PVR_LOG_RETURN_IF_FALSE(psMapping->pvKernelAddress, + "OSMapPhyToLin: out of VM Mem", + PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING); + psMapping->vma = NULL; + psMapping->uiSize = uiSize; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _MapPhysicalSparseAlloc(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, + RA_BASE_ARRAY_T paBaseArray, + size_t uiSize, + PMR_KERNEL_MAPPING *psMapping) +{ + IMG_UINT32 uiChunkCount = uiSize >> psLMAllocArrayData->uiLog2ChunkSize; + IMG_CPU_PHYADDR uiPages[PMR_MAX_TRANSLATION_STACK_ALLOC], *puiPages; + PVRSRV_ERROR eError; + size_t uiPageShift = OSGetPageShift(); + IMG_UINT32 uiOSPagesPerChunkShift = psLMAllocArrayData->uiLog2ChunkSize - uiPageShift; + IMG_UINT32 uiOSPageCount = uiChunkCount << uiOSPagesPerChunkShift; + + if (uiOSPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + puiPages = OSAllocZMem(sizeof(IMG_CPU_PHYADDR) * uiOSPageCount); + PVR_RETURN_IF_NOMEM(puiPages); + } + else + { + puiPages = &uiPages[0]; + } + + if (uiOSPagesPerChunkShift == 0) + { + IMG_UINT32 i; + PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap, + uiChunkCount, + puiPages, + (IMG_DEV_PHYADDR *)paBaseArray); + + /* If the ghost bit is present then the addrs returned will be off by 1 + * Strip the ghost bit to correct to real page aligned addresses. 
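The ghost-bit handling above relies on base addresses being page aligned, which leaves the low bits free to carry bookkeeping state that must be masked off before an address is used. A minimal model with a hypothetical GHOST_BIT, not the driver's RA_BASE_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Model of a tag bit stored in the low bits of a page-aligned base address.
 * Alignment guarantees the low bits are zero, so bit 0 can carry state and
 * is stripped before the address is handed out. */
#define GHOST_BIT          0x1ull
#define SET_GHOST(base)    ((base) | GHOST_BIT)
#define STRIP_GHOST(base)  ((base) & ~GHOST_BIT)

int main(void)
{
    uint64_t base   = 0x80001000ull;        /* page-aligned physical base */
    uint64_t tagged = SET_GHOST(base);      /* as stored in the base array */

    printf("tagged=0x%llx real=0x%llx\n",
           (unsigned long long)tagged,
           (unsigned long long)STRIP_GHOST(tagged));
    return 0;
}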
+ * */ + for (i = 0; i < uiChunkCount; i++) + { + puiPages[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(puiPages[i].uiAddr); + } + } + else + { + IMG_UINT32 i = 0, j = 0, index = 0; + for (i = 0; i < uiChunkCount; i++) + { + IMG_UINT32 ui32OSPagesPerDeviceChunk = (1 << uiOSPagesPerChunkShift); + IMG_DEV_PHYADDR uiDevAddr; + uiDevAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(paBaseArray[i]); + for (j = 0; j < ui32OSPagesPerDeviceChunk; j++) + { + PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap, + 1, + &puiPages[index], + &uiDevAddr); + uiDevAddr.uiAddr += (1ULL << uiPageShift); + index++; + } + } + } + + eError = OSMapPhysArrayToLin(puiPages, + uiOSPageCount, + &psMapping->pvKernelAddress, + &psMapping->vma); + if (eError == PVRSRV_OK) + { + psMapping->uiSize = uiSize; + } + + if (puiPages != &uiPages[0]) + { + OSFreeMem(puiPages); + } + + return eError; +} + +static PVRSRV_ERROR _MapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, + RA_BASE_ARRAY_T paBaseArray, + size_t uiSize, + PMR_FLAGS_T ulFlags, + PMR_KERNEL_MAPPING *psMapping) +{ + PVRSRV_ERROR eError; + PHYS_HEAP *psPhysHeap = psLMAllocArrayData->psPhysHeap; + if (!BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) + { + /* Physically Contig */ + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_PHYS_CONTIG)) + { + eError = _MapPhysicalContigAlloc(psPhysHeap, + paBaseArray, + uiSize, + ulFlags, + psMapping); + } + /* Physically Sparse */ + else + { + eError = _MapPhysicalSparseAlloc(psLMAllocArrayData, + paBaseArray, + uiSize, + psMapping); + } + } + else + { + /* Sparse Alloc Single Chunk */ + if (uiSize == IMG_PAGE2BYTES64(psLMAllocArrayData->uiLog2ChunkSize)) + { + eError = _MapPhysicalContigAlloc(psPhysHeap, + paBaseArray, + uiSize, + ulFlags, + psMapping); + } + /* Sparse Alloc Multi Chunk */ + else + { + eError = _MapPhysicalSparseAlloc(psLMAllocArrayData, + paBaseArray, + uiSize, + psMapping); + } + } + + return eError; +} + +static void _UnMapPhysicalContigAlloc(PMR_KERNEL_MAPPING *psKernelMapping) +{ + OSUnMapPhysToLin(psKernelMapping->pvKernelAddress, psKernelMapping->uiSize); +} + +static void _UnMapPhysicalSparseAlloc(PMR_KERNEL_MAPPING *psKernelMapping) +{ + OSUnMapPhysArrayToLin(psKernelMapping->pvKernelAddress, + psKernelMapping->vma); +} + +static void _UnMapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, + PMR_KERNEL_MAPPING *psKernelMapping) +{ + if (!BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) + { + /* Physically Contig */ + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_PHYS_CONTIG)) + { + _UnMapPhysicalContigAlloc(psKernelMapping); + } + /* Physically Sparse */ + else + { + _UnMapPhysicalSparseAlloc(psKernelMapping); + } + } + else + { + /* Sparse Alloc Single Chunk */ + if (psKernelMapping->uiSize == IMG_PAGE2BYTES64(psLMAllocArrayData->uiLog2ChunkSize)) + { + _UnMapPhysicalContigAlloc(psKernelMapping); + } + /* Sparse Alloc Multi Chunk */ + else + { + _UnMapPhysicalSparseAlloc(psKernelMapping); + } + } +} + +static PVRSRV_ERROR +_PhysPgMemSet(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, + RA_BASE_ARRAY_T paBaseArray, + size_t uiSize, + IMG_BYTE ui8SetValue) +{ + PVRSRV_ERROR eError; + PMR_KERNEL_MAPPING sKernelMapping; + + eError = _MapPMRKernel(psLMAllocArrayData, + paBaseArray, + uiSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, + &sKernelMapping); + PVR_GOTO_IF_ERROR(eError, map_failed); + + OSCachedMemSetWMB(sKernelMapping.pvKernelAddress, ui8SetValue, uiSize); + + _UnMapPMRKernel(psLMAllocArrayData, &sKernelMapping); + + return PVRSRV_OK; + +map_failed: + PVR_DPF((PVR_DBG_ERROR, "Failed to 
poison/zero allocation")); + return eError; +} + +static PVRSRV_ERROR +_AllocLMPageArray(PMR_SIZE_T uiSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 uiLog2AllocPageSize, + IMG_UINT32 ui32Flags, + PHYS_HEAP* psPhysHeap, + RA_ARENA* pArena, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, + IMG_PID uiPid, + PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr, + CONNECTION_DATA *psConnection) +{ + PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL; + PVRSRV_ERROR eError; + IMG_UINT32 uiNumPages; + + PVR_ASSERT(!BIT_ISSET(ui32Flags, FLAG_ZERO) || !BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC)); + PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize); + + /* Use of cast below is justified by the assertion that follows to + prove that no significant bits have been truncated */ + uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1); + PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize); + + psPageArrayData = OSAllocMem(sizeof(PMR_LMALLOCARRAY_DATA) + IMG_FLEX_ARRAY_SIZE(sizeof(RA_BASE_T), uiNumPages)); + PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray); + + if (BIT_ISSET(ui32Flags, FLAG_SPARSE)) + { + /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */ + OSCachedMemSet(psPageArrayData->aBaseArray, + 0xFF, + sizeof(RA_BASE_T) * + uiNumPages); + } + else + { + /* Base pointers have been allocated for the full PMR in case we require a non + * physically contiguous backing for the virtually contiguous allocation but the most + * common case will be contiguous and so only require the first Base to be present + */ + psPageArrayData->aBaseArray[0] = INVALID_BASE_ADDR; + } + + psPageArrayData->uiTotalNumChunks = uiNumPages; + psPageArrayData->uiChunksToAlloc = BIT_ISSET(ui32Flags, FLAG_SPARSE) ? ui32NumPhysChunks : uiNumPages; + psPageArrayData->uiLog2ChunkSize = uiLog2AllocPageSize; + + psPageArrayData->psConnection = psConnection; + psPageArrayData->uiPid = uiPid; + psPageArrayData->iNumChunksAllocated = 0; + psPageArrayData->ui32Flags = ui32Flags; + psPageArrayData->psPhysHeap = psPhysHeap; + psPageArrayData->psArena = pArena; + psPageArrayData->uiAllocFlags = uiAllocFlags; + + *ppsPageArrayDataPtr = psPageArrayData; + + return PVRSRV_OK; + +/* + error exit path follows: +*/ + +errorOnAllocArray: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR +_AllocLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64PhysSize = IMG_PAGES2BYTES64(psPageArrayData->uiChunksToAlloc, uiLog2ChunkSize); + IMG_BOOL bPhysContig; + IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; + + + eError = RA_AllocMulti(psPageArrayData->psArena, + ui64PhysSize, + uiLog2ChunkSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + "LMA_Page_Alloc", + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks, + &bPhysContig); + + if (PVRSRV_OK != eError) + { + RA_USAGE_STATS sRAStats; + RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); + + PVR_DPF((PHYSHEAP_DPF_LVL, + "Contig: Failed to Allocate size = 0x%llx, align = 0x%llx" + " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"" + " Arena Name: '%s'", + (unsigned long long)ui64PhysSize, + 1ULL << uiLog2ChunkSize, + sRAStats.ui64FreeArenaSize, + RA_GetArenaName(psPageArrayData->psArena))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); + } + + if (bPhysContig) + { + BIT_SET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG); + } + +#if 
defined(SUPPORT_GPUVIRT_VALIDATION) +{ + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, + psPageArrayData->aBaseArray[0])); +} +#endif + + if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC)) + { + eError = _PhysPgMemSet(psPageArrayData, + psPageArrayData->aBaseArray, + ui64PhysSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison); + } + + if (BIT_ISSET(ui32Flags, FLAG_ZERO)) + { + eError = _PhysPgMemSet(psPageArrayData, + psPageArrayData->aBaseArray, + ui64PhysSize, + ZERO_PAGE_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero); + } + + psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc; + + /* We have alloc'd the previous request, set 0 for book keeping */ + psPageArrayData->uiChunksToAlloc = 0; + + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, ui64PhysSize, psPageArrayData->uiPid); +#else + if (bPhysContig) + { + IMG_CPU_PHYADDR sLocalCpuPAddr; + sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->aBaseArray[0]; + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + NULL, + sLocalCpuPAddr, + IMG_PAGES2BYTES64(psPageArrayData->uiTotalNumChunks, uiLog2ChunkSize), + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); + } + else + { + IMG_UINT32 i, j; + IMG_CPU_PHYADDR sLocalCpuPAddr; + + for (i = 0; i < psPageArrayData->uiTotalNumChunks;) + { + IMG_UINT32 ui32AllocSizeInChunks = 1; + + for (j = i; + j + 1 != psPageArrayData->uiTotalNumChunks && + RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]); + j++) + { + ui32AllocSizeInChunks++; + } + + sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->aBaseArray[i]; + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + NULL, + sLocalCpuPAddr, + IMG_PAGES2BYTES64(ui32AllocSizeInChunks, uiLog2ChunkSize), + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); + + i += ui32AllocSizeInChunks; + } + } +#endif +#endif + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnZero: +errorOnPoison: + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + + RA_FreeMulti(psPageArrayData->psArena, + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks); + +errorOnRAAlloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + * Fully allocated variant of sparse allocation does not take in as argument an + * array of indices. It is used in cases where the amount of chunks to allocate is + * the same as the total the PMR can represent. I.E when we want to fully populate + * a sparse PMR. 
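The fully-allocated variant described above is selected only when the request covers every chunk and no index table is supplied; otherwise the indexed, partially populated path is taken. A small sketch of that dispatch, with illustrative names:

#include <stdint.h>
#include <stdio.h>

enum sparse_path { SPARSE_FULL, SPARSE_INDEXED };

/* With no index table and a request covering every chunk, the "full" path
 * can be used; otherwise allocate only at the supplied indices. */
static enum sparse_path choose_sparse_path(uint32_t chunks_to_alloc,
                                           uint32_t total_chunks,
                                           const uint32_t *index_table)
{
    if (chunks_to_alloc == total_chunks && index_table == NULL)
        return SPARSE_FULL;
    return SPARSE_INDEXED;
}

int main(void)
{
    uint32_t indices[] = { 0, 2, 5 };

    printf("%d\n", choose_sparse_path(8, 8, NULL));     /* 0: SPARSE_FULL */
    printf("%d\n", choose_sparse_path(3, 8, indices));  /* 1: SPARSE_INDEXED */
    return 0;
}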
+ */ +static PVRSRV_ERROR +_AllocLMPagesSparseFull(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64PhysSize = IMG_PAGES2BYTES64(psPageArrayData->uiChunksToAlloc, uiLog2ChunkSize); + IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; + + + eError = RA_AllocMultiSparse(psPageArrayData->psArena, + uiLog2ChunkSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + "LMA_Page_Alloc", + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks, + NULL, /* No indices given meaning allocate full base array using chunk count below */ + psPageArrayData->uiChunksToAlloc); + if (PVRSRV_OK != eError) + { + RA_USAGE_STATS sRAStats; + RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); + + PVR_DPF((PHYSHEAP_DPF_LVL, + "SparseFull: Failed to Allocate size = 0x%llx, align = 0x%llx" + " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"" + " Arena Name: '%s'", + (unsigned long long)ui64PhysSize, + 1ULL << uiLog2ChunkSize, + sRAStats.ui64FreeArenaSize, + RA_GetArenaName(psPageArrayData->psArena))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, + psPageArrayData->aBaseArray[0])); +} +#endif + + if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC)) + { + eError = _PhysPgMemSet(psPageArrayData, + psPageArrayData->aBaseArray, + ui64PhysSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison); + } + + if (BIT_ISSET(ui32Flags, FLAG_ZERO)) + { + eError = _PhysPgMemSet(psPageArrayData, + psPageArrayData->aBaseArray, + ui64PhysSize, + ZERO_PAGE_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero); + } + + psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc; + + /* We have alloc'd the previous request, set 0 for book keeping */ + psPageArrayData->uiChunksToAlloc = 0; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, ui64PhysSize, psPageArrayData->uiPid); +#else + { + IMG_UINT32 i; + + for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) + { + IMG_CPU_PHYADDR sLocalCpuPAddr; + sLocalCpuPAddr.uiAddr = + (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[i]); + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + NULL, + sLocalCpuPAddr, + 1 << uiLog2ChunkSize, + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); + } + } +#endif +#endif + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnZero: +errorOnPoison: + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + + RA_FreeMulti(psPageArrayData->psArena, + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks); + +errorOnRAAlloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR +_AllocLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64ChunkSize = IMG_PAGE2BYTES64(uiLog2ChunkSize); + IMG_UINT32 uiChunksToAlloc = psPageArrayData->uiChunksToAlloc; + IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; + + if (!pui32MapTable) + { + PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc); + } + +#if defined(DEBUG) + /* + * This block 
performs validation of the mapping table input in the following ways: + * Check that each index in the mapping table does not exceed the number of the chunks + * the whole PMR supports. + * Check that each index given by the mapping table is not already allocated. + * Check that there are no duplicated indices given in the mapping table. + */ + { + IMG_UINT32 i; + IMG_BOOL bIssueDetected = IMG_FALSE; + PVRSRV_ERROR eMapCheckError; + + for (i = 0; i < uiChunksToAlloc; i++) + { + if (pui32MapTable[i] >= psPageArrayData->uiTotalNumChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Page alloc request Index out of bounds for PMR @0x%p", + __func__, + psPageArrayData)); + eMapCheckError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + bIssueDetected = IMG_TRUE; + break; + } + + if (!RA_BASE_IS_INVALID(psPageArrayData->aBaseArray[pui32MapTable[i]])) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping already exists Index %u Mapping index %u", + __func__, + i, + pui32MapTable[i])); + eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; + bIssueDetected = IMG_TRUE; + break; + } + + if (RA_BASE_IS_SPARSE_PREP(psPageArrayData->aBaseArray[pui32MapTable[i]])) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping already exists in mapping table given Index %u Mapping index %u", + __func__, + i, + pui32MapTable[i])); + eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; + bIssueDetected = IMG_TRUE; + break; + } + else + { + /* Set the To Prep value so we can detect duplicated map indices */ + psPageArrayData->aBaseArray[pui32MapTable[i]] = RA_BASE_SPARSE_PREP_ALLOC_ADDR; + } + } + /* Unwind the Alloc Prep Values */ + if (bIssueDetected) + { + /* We don't want to affect the index of the issue seen + * as it could be a valid mapping. If it is a duplicated + * mapping in the given table then we will clean-up the + * previous instance anyway. + */ + IMG_UINT32 uiUnwind = i; + + for (i = 0; i < uiUnwind; i++) + { + psPageArrayData->aBaseArray[pui32MapTable[i]] = INVALID_BASE_ADDR; + } + + PVR_GOTO_WITH_ERROR(eError, eMapCheckError, errorOnRAAlloc); + } + } +#endif + + eError = RA_AllocMultiSparse(psPageArrayData->psArena, + psPageArrayData->uiLog2ChunkSize, + RA_NO_IMPORT_MULTIPLIER, + 0, + "LMA_Page_Alloc", + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks, + pui32MapTable, + uiChunksToAlloc); + if (PVRSRV_OK != eError) + { + RA_USAGE_STATS sRAStats; + RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); + + PVR_DPF((PHYSHEAP_DPF_LVL, + "Sparse: Failed to Allocate size = 0x%llx, align = 0x%llx" + " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"" + " Arena Name: '%s'", + (unsigned long long) uiChunksToAlloc << uiLog2ChunkSize, + 1ULL << uiLog2ChunkSize, + sRAStats.ui64FreeArenaSize, + RA_GetArenaName(psPageArrayData->psArena))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, + psPageArrayData->aBaseArray[pui32MapTable[0]])); +} +#endif + + if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC) || BIT_ISSET(ui32Flags, FLAG_ZERO)) + { + IMG_UINT32 i, ui32Index = 0; + for (i = 0; i < uiChunksToAlloc; i++) + { + ui32Index = pui32MapTable[i]; + + eError = _PhysPgMemSet(psPageArrayData, + &psPageArrayData->aBaseArray[ui32Index], + ui64ChunkSize, + BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC) ? 
PVRSRV_POISON_ON_ALLOC_VALUE : + ZERO_PAGE_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoisonZero); + } + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + uiChunksToAlloc << uiLog2ChunkSize, + psPageArrayData->uiPid); +#else + { + IMG_UINT32 i; + + for (i = 0; i < psPageArrayData->uiChunksToAlloc; i++) + { + IMG_UINT32 ui32Index = pui32MapTable[i]; + IMG_CPU_PHYADDR sLocalCpuPAddr; + sLocalCpuPAddr.uiAddr = + (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[ui32Index]); + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + NULL, + sLocalCpuPAddr, + ui64ChunkSize, + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); + } + } +#endif +#endif + + psPageArrayData->iNumChunksAllocated += uiChunksToAlloc; + + /* We have alloc'd the previous request, set 0 for book keeping */ + psPageArrayData->uiChunksToAlloc = 0; + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnPoisonZero: + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + + RA_FreeMultiSparse(psPageArrayData->psArena, + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks, + psPageArrayData->uiLog2ChunkSize, + pui32MapTable, + &uiChunksToAlloc); + +errorOnRAAlloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; + +} + +static PVRSRV_ERROR +_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(NULL != psPageArrayData); + PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); + + if (psPageArrayData->uiTotalNumChunks < + (psPageArrayData->iNumChunksAllocated + psPageArrayData->uiChunksToAlloc)) + { + PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. " + "Allocated: %u + Requested: %u > Total Allowed: %u", + psPageArrayData->iNumChunksAllocated, + psPageArrayData->uiChunksToAlloc, + psPageArrayData->uiTotalNumChunks)); + return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; + } + + /* If we have a non-backed sparse PMR then we can just return */ + if (psPageArrayData->uiChunksToAlloc == 0) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Non-Backed Sparse PMR Created: %p.", + __func__, + psPageArrayData)); + return PVRSRV_OK; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + { + IMG_UINT32 ui32OSid=0; + PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap); + + /* Obtain the OSid specific data from our connection handle */ + if (psPageArrayData->psConnection != NULL) + { + ui32OSid = psPageArrayData->psConnection->ui32OSid; + } + + /* Replace the given RA Arena with OS Arena */ + if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags)) + { + psPageArrayData->psArena = psDevNode->psOSSharedArena; + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Giving from shared mem")); + } + else + { + psPageArrayData->psArena = psDevNode->psOSidSubArena[ui32OSid]; + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Giving from OS slot %d", + ui32OSid)); + } + } +#endif + + /* + * 3 cases: + * Sparse allocation populating the whole PMR. + * [**********] + * Sparse allocation partially populating the PMR at given indices. + * [*** *** **] + * Contiguous allocation. + * [**********] + * + * Note: Separate cases are required for 1 and 3 due to memstats tracking. + * In Contiguous case we can track the block as a single memstat record as we know + * we will also free in that size record. 
+ * Sparse allocations require a memstat record per chunk as they can be arbitrarily + * free'd. + */ + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE)) + { + if (psPageArrayData->uiTotalNumChunks == psPageArrayData->uiChunksToAlloc && + !pui32MapTable) + { + eError = _AllocLMPagesSparseFull(psPageArrayData); + } + else + { + eError = _AllocLMPagesSparse(psPageArrayData, pui32MapTable); + } + } + else + { + eError = _AllocLMPagesContig(psPageArrayData); + } + + return eError; +} + +static void +_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ + PVR_DPF((PVR_DBG_MESSAGE, + "physmem_lma.c: freed local memory array structure for PMR @0x%p", + psPageArrayData)); + + OSFreeMem(psPageArrayData); +} + +static PVRSRV_ERROR +_FreeLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ + RA_ARENA *pArena = psPageArrayData->psArena; + IMG_UINT64 ui64PhysSize = IMG_PAGES2BYTES64(psPageArrayData->uiTotalNumChunks, psPageArrayData->uiLog2ChunkSize); + PVRSRV_ERROR eError; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_ZOMBIE)) + { + uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES; + } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + + PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0); + PVR_ASSERT(psPageArrayData->iNumChunksAllocated == + psPageArrayData->uiTotalNumChunks); + + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_POISON_ON_FREE)) + { + eError = _PhysPgMemSet(psPageArrayData, + psPageArrayData->aBaseArray, + ui64PhysSize, + PVRSRV_POISON_ON_FREE_VALUE); + PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStat(uiStat, + ui64PhysSize, + psPageArrayData->uiPid); +#else + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG)) + { + PVRSRVStatsRemoveMemAllocRecord(uiStat, + (IMG_UINT64) psPageArrayData->aBaseArray[0], + psPageArrayData->uiPid); + } + else + { + IMG_UINT32 i; + + for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) + { + if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) + { + PVRSRVStatsRemoveMemAllocRecord(uiStat, + (IMG_UINT64) psPageArrayData->aBaseArray[i], + psPageArrayData->uiPid); + } + } + } +#endif +#endif + + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG)) + { + eError = RA_FreeMulti(pArena, + psPageArrayData->aBaseArray, + 1); + PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); + } + else + { + eError = RA_FreeMulti(pArena, + psPageArrayData->aBaseArray, + psPageArrayData->iNumChunksAllocated); + PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); + } + + psPageArrayData->iNumChunksAllocated = 0; + + PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: freed %"IMG_UINT64_FMTSPEC" local memory for PMR @0x%p", + __func__, + ui64PhysSize, + psPageArrayData)); + + return eError; +} + +static PVRSRV_ERROR +_FreeLMPagesRemainingSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError; + IMG_UINT64 ui64ChunkSize = IMG_PAGE2BYTES64(psPageArrayData->uiLog2ChunkSize); + IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; + IMG_BOOL bPoisonOnFree = (BIT_ISSET(ui32Flags, FLAG_POISON_ON_FREE)); +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + if 
(BIT_ISSET(psPageArrayData->ui32Flags, FLAG_ZOMBIE)) + { + uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES; + } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStat(uiStat, + psPageArrayData->iNumChunksAllocated << psPageArrayData->uiLog2ChunkSize, + psPageArrayData->uiPid); +#endif + + for (i = 0; i < psPageArrayData->uiTotalNumChunks;) + { + if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) + { + IMG_UINT32 j; + IMG_UINT32 ui32AccumulatedChunks = 1; + + for (j = i; + j + 1 != psPageArrayData->uiTotalNumChunks && + RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]); + j++) + { + ui32AccumulatedChunks++; + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + for (j = i; j < (i + ui32AccumulatedChunks); j++) + { + PVRSRVStatsRemoveMemAllocRecord(uiStat, + RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[j]), + psPageArrayData->uiPid); + if (bPoisonOnFree) +#else + for (j = i; j < (i + ui32AccumulatedChunks) && bPoisonOnFree; j++) + { +#endif + { + eError = _PhysPgMemSet(psPageArrayData, + &psPageArrayData->aBaseArray[j], + ui64ChunkSize, + PVRSRV_POISON_ON_FREE_VALUE); + PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); + } + } + + eError = RA_FreeMulti(psPageArrayData->psArena, + &psPageArrayData->aBaseArray[i], + ui32AccumulatedChunks); + PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); + + psPageArrayData->iNumChunksAllocated -= ui32AccumulatedChunks; + i += ui32AccumulatedChunks; + } + else if (RA_BASE_IS_INVALID(psPageArrayData->aBaseArray[i])) + { + i++; + } + } + + /* We have freed all allocations in the previous loop */ + PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_FreeLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, + IMG_UINT32 *pui32FreeIndices, + IMG_UINT32 ui32FreeChunkCount) +{ + RA_ARENA *pArena = psPageArrayData->psArena; + IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64ChunkSize = IMG_PAGE2BYTES64(uiLog2ChunkSize); + IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; + IMG_UINT32 uiActualFreeCount = ui32FreeChunkCount; + PVRSRV_ERROR eError; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_ZOMBIE)) + { + uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES; + } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + + PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + { + IMG_UINT32 i; + + for (i = 0; i < ui32FreeChunkCount; i++) + { + IMG_UINT32 ui32Index = pui32FreeIndices[i]; + + PVRSRVStatsRemoveMemAllocRecord(uiStat, + (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT( + psPageArrayData->aBaseArray[ui32Index]), + psPageArrayData->uiPid); + } + } +#endif + + if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_FREE)) + { + IMG_UINT32 i, ui32Index = 0; + for (i = 0; i < ui32FreeChunkCount; i++) + { + ui32Index = pui32FreeIndices[i]; + + eError = _PhysPgMemSet(psPageArrayData, + &psPageArrayData->aBaseArray[ui32Index], + ui64ChunkSize, + PVRSRV_POISON_ON_FREE_VALUE); + PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); + } + } + + eError = RA_FreeMultiSparse(pArena, + psPageArrayData->aBaseArray, + psPageArrayData->uiTotalNumChunks, + 
uiLog2ChunkSize, + pui32FreeIndices, + &uiActualFreeCount); + psPageArrayData->iNumChunksAllocated -= uiActualFreeCount; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStat(uiStat, + uiActualFreeCount << psPageArrayData->uiLog2ChunkSize, + psPageArrayData->uiPid); +#endif + if (eError == PVRSRV_ERROR_RA_FREE_INVALID_CHUNK) + { + /* Log the RA error but convert it to PMR level to match the interface, + * this is important because other PMR factories may not use the RA but + * still return error, returning a PMR based error + * keeps the interface agnostic to implementation behaviour. + */ + PVR_LOG_IF_ERROR(eError, "RA_FreeMultiSparse"); + return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; + } + PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMultiSparse"); + + PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); + + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: freed %" IMG_UINT64_FMTSPEC " local memory for PMR @0x%p", + __func__, + (uiActualFreeCount * ui64ChunkSize), + psPageArrayData)); + + return PVRSRV_OK; +} + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +static PVRSRV_ERROR PMRFreeZombiePagesRAMem(PMR_IMPL_ZOMBIEPAGES pvPriv) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psZombiePageArray = pvPriv; + + eError = _FreeLMPages(psZombiePageArray, NULL, 0); + PVR_GOTO_IF_ERROR(eError, e0); + + _FreeLMPageArray(psZombiePageArray); + return PVRSRV_OK; +e0: + return eError; +} + +/* Allocates a new PMR_LMALLOCARRAY_DATA object and fills it with + * pages to be extracted from psSrcPageArrayData. + */ +static PVRSRV_ERROR +_ExtractPages(PMR_LMALLOCARRAY_DATA *psSrcPageArrayData, + IMG_UINT32 *pai32ExtractIndices, + IMG_UINT32 ui32ExtractPageCount, + PMR_LMALLOCARRAY_DATA **psOutPageArrayData) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32ExtractPageCountSaved; + PMR_LMALLOCARRAY_DATA* psDstPageArrayData; + + /* Alloc PMR_LMALLOCARRAY_DATA for the extracted pages */ + eError = _AllocLMPageArray(ui32ExtractPageCount << psSrcPageArrayData->uiLog2ChunkSize, + ui32ExtractPageCount, + psSrcPageArrayData->uiLog2ChunkSize, + psSrcPageArrayData->ui32Flags, + psSrcPageArrayData->psPhysHeap, + psSrcPageArrayData->psArena, + psSrcPageArrayData->uiAllocFlags, + psSrcPageArrayData->uiPid, + &psDstPageArrayData, + psSrcPageArrayData->psConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPageArray", alloc_error); + + ui32ExtractPageCountSaved = ui32ExtractPageCount; + /* Transfer pages from source base array to newly allocated page array */ + eError = RA_TransferMultiSparseIndices(psSrcPageArrayData->psArena, + psSrcPageArrayData->aBaseArray, + psSrcPageArrayData->uiTotalNumChunks, + psDstPageArrayData->aBaseArray, + psDstPageArrayData->uiTotalNumChunks, + psSrcPageArrayData->uiLog2ChunkSize, + pai32ExtractIndices, + &ui32ExtractPageCountSaved); + PVR_LOG_GOTO_IF_FALSE((eError == PVRSRV_OK) && (ui32ExtractPageCountSaved == ui32ExtractPageCount), + "RA_TransferMultiSparseIndices failed", + transfer_error); + + + /* Update page counts */ + psSrcPageArrayData->iNumChunksAllocated -= ui32ExtractPageCount; + psDstPageArrayData->iNumChunksAllocated += ui32ExtractPageCount; + + *psOutPageArrayData = psDstPageArrayData; + + return PVRSRV_OK; +transfer_error: + _FreeLMPageArray(psDstPageArrayData); +alloc_error: + return eError; +} + +/* Extracts all allocated pages referenced psSrcPageArrayData + * Allocates a new PMR_OSPAGEARRAY_DATA object and fills it with the extracted + * pages information. 
+ */ +static PVRSRV_ERROR +_ExtractAllPages(PMR_LMALLOCARRAY_DATA *psSrcPageArrayData, + PMR_LMALLOCARRAY_DATA **psOutPageArrayData) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA* psDstPageArrayData; + IMG_UINT32 ui32IdxSrc, ui32IdxDst; + + if (psSrcPageArrayData->iNumChunksAllocated == 0) + { + /* Do nothing if psSrcPageArrayData contains no allocated pages */ + return PVRSRV_OK; + } + + /* Alloc PMR_LMALLOCARRAY_DATA for the extracted pages */ + eError = _AllocLMPageArray(psSrcPageArrayData->iNumChunksAllocated << psSrcPageArrayData->uiLog2ChunkSize, + psSrcPageArrayData->iNumChunksAllocated, + psSrcPageArrayData->uiLog2ChunkSize, + psSrcPageArrayData->ui32Flags, + psSrcPageArrayData->psPhysHeap, + psSrcPageArrayData->psArena, + psSrcPageArrayData->uiAllocFlags, + psSrcPageArrayData->uiPid, + &psDstPageArrayData, + psSrcPageArrayData->psConnection); + PVR_LOG_RETURN_IF_ERROR_VA(eError, "_AllocLMPageArray failed in %s", __func__); + + /* Now do the transfer */ + ui32IdxDst=0; + for (ui32IdxSrc=0; ((ui32IdxDst<psSrcPageArrayData->iNumChunksAllocated) && + (psDstPageArrayData->iNumChunksAllocated<psSrcPageArrayData->iNumChunksAllocated)); ui32IdxSrc++) + { + if (psSrcPageArrayData->aBaseArray[ui32IdxSrc] != INVALID_BASE_ADDR) + { + psDstPageArrayData->aBaseArray[ui32IdxDst++] = psSrcPageArrayData->aBaseArray[ui32IdxSrc]; + psSrcPageArrayData->aBaseArray[ui32IdxSrc] = INVALID_BASE_ADDR; + psDstPageArrayData->iNumChunksAllocated++; + } + } + + /* Update src page count */ + psSrcPageArrayData->iNumChunksAllocated = 0; + + *psOutPageArrayData = psDstPageArrayData; + + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */ + +static PVRSRV_ERROR +_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, + IMG_UINT32 *pui32FreeIndices, + IMG_UINT32 ui32FreeChunkCount) +{ + PVRSRV_ERROR eError; + + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE)) + { + if (!pui32FreeIndices) + { + eError = _FreeLMPagesRemainingSparse(psPageArrayData); + } + else + { + eError = _FreeLMPagesSparse(psPageArrayData, pui32FreeIndices, ui32FreeChunkCount); + } + } + else + { + eError = _FreeLMPagesContig(psPageArrayData); + } + + return eError; +} + +/* + * + * Implementation of PMR callback functions + * + */ + +/* destructor func is called after last reference disappears, but + before PMR itself is freed. */ +static void +PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; + + /* We can't free pages until now. 
*/ + if (psLMAllocArrayData->iNumChunksAllocated != 0) + { + eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); + PVR_LOG_IF_ERROR(eError, "_FreeLMPages"); + PVR_ASSERT (eError == PVRSRV_OK); + } + + _FreeLMPageArray(psLMAllocArrayData); +} + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +static inline void _TransferToMemZombieRecord_LmaPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, + RA_BASE_T uiBase) +{ + IMG_CPU_PHYADDR sCPUPhysAddr = { + .uiAddr = RA_BASE_STRIP_GHOST_BIT(uiBase) + }; + + PVRSRVStatsTransferMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, + sCPUPhysAddr.uiAddr, + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); +} +#endif + +static PVRSRV_ERROR PMRZombifyLocalMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR) +{ + PMR_LMALLOCARRAY_DATA *psPageArrayData = pvPriv; + + BIT_SET(psPageArrayData->ui32Flags, FLAG_ZOMBIE); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + { + IMG_PID uiPid = psPageArrayData->uiPid; + IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; + IMG_UINT64 uiSize = BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE) ? + (IMG_UINT64) psPageArrayData->iNumChunksAllocated << uiLog2ChunkSize : + (IMG_UINT64) psPageArrayData->uiTotalNumChunks << uiLog2ChunkSize; + + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiSize, uiPid); + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, uiSize, uiPid); + } +#else /* !defined(PVRSRV_ENABLE_MEMORY_STATS) */ + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE)) + { + /* _FreeLMPagesRemainingSparse path */ + + IMG_UINT32 i; + + for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) + { + if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) + { + IMG_UINT32 j; + IMG_UINT32 ui32AccumulatedChunks = 1; + + for (j = i; + j + 1 != psPageArrayData->uiTotalNumChunks && + RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]); + j++) + { + ui32AccumulatedChunks++; + } + + for (j = i; j < (i + ui32AccumulatedChunks); j++) + { + _TransferToMemZombieRecord_LmaPages(psPageArrayData, + psPageArrayData->aBaseArray[j]); + } + } + } + } + else + { + /* _FreeLMPagesContig path */ + + if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG)) + { + _TransferToMemZombieRecord_LmaPages(psPageArrayData, + psPageArrayData->aBaseArray[0]); + } + else + { + IMG_UINT32 i; + + for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) + { + if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) + { + _TransferToMemZombieRecord_LmaPages(psPageArrayData, + psPageArrayData->aBaseArray[i]); + } + } + + } + } +#endif /* !defined(PVRSRV_ENABLE_MEMORY_STATS) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + + PVR_UNREFERENCED_PARAMETER(psPMR); + + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + +/* callback function for locking the system physical page addresses. + As we are LMA there is nothing to do as we control physical memory. 
*/ +static PVRSRV_ERROR +PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) +{ + + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; + + psLMAllocArrayData = pvPriv; + + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_ONDEMAND)) + { + /* Allocate Memory for deferred allocation */ + eError = _AllocLMPages(psLMAllocArrayData, NULL); + PVR_RETURN_IF_ERROR(eError); + } + + return PVRSRV_OK; +} + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +static PVRSRV_ERROR +PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages) +#else +static PVRSRV_ERROR +PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) +#endif +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_LMALLOCARRAY_DATA *psExtractedPagesPageArray = NULL; + + *ppvZombiePages = NULL; +#endif + + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_ONDEMAND)) + { +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (psLMAllocArrayData->iNumChunksAllocated == 0) + { + *ppvZombiePages = NULL; + return PVRSRV_OK; + } + + eError = _ExtractAllPages(psLMAllocArrayData, + &psExtractedPagesPageArray); + PVR_LOG_GOTO_IF_ERROR(eError, "_ExtractAllPages", e0); + + if (psExtractedPagesPageArray) + { + /* Zombify pages to get proper stats */ + eError = PMRZombifyLocalMem(psExtractedPagesPageArray, NULL); + PVR_WARN_IF_ERROR(eError, "PMRZombifyLocalMem"); + } + *ppvZombiePages = psExtractedPagesPageArray; +#else + /* Free Memory for deferred allocation */ + eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); + PVR_RETURN_IF_ERROR(eError); +#endif + } + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +e0: +#endif + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! 
*/ +static PVRSRV_ERROR +PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, +#if defined(SUPPORT_STATIC_IPA) + IMG_UINT64 ui64IPAPolicyValue, + IMG_UINT64 ui64IPAClearMask, +#endif + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; + IMG_UINT32 idx; + IMG_UINT32 uiLog2AllocSize; + IMG_UINT64 uiAllocIndex; + IMG_DEVMEM_OFFSET_T uiInAllocOffset; + IMG_UINT32 uiNumAllocs = psLMAllocArrayData->uiTotalNumChunks; + +#if defined(SUPPORT_STATIC_IPA) + PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); + PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); +#endif + + if (psLMAllocArrayData->uiLog2ChunkSize < ui32Log2PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requested physical addresses from PMR " + "for incompatible contiguity %u!", + __func__, + ui32Log2PageSize)); + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0); + PVR_ASSERT(ui32Log2PageSize >= RA_BASE_FLAGS_LOG2); + + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_PHYS_CONTIG)) + { + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + psDevPAddr[idx].uiAddr = psLMAllocArrayData->aBaseArray[0] + puiOffset[idx]; +#if defined(SUPPORT_STATIC_IPA) + /* Modify the physical address with the associated IPA values */ + psDevPAddr[idx].uiAddr &= ~ui64IPAClearMask; + psDevPAddr[idx].uiAddr |= ui64IPAPolicyValue; +#endif + } + } + } + else + { + uiLog2AllocSize = psLMAllocArrayData->uiLog2ChunkSize; + + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize; + uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize); + + PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs, + "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); + + PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize)); + + /* The base may or may not be a ghost base, but we don't care, + * we just need the real representation of the base. 
+ */ + psDevPAddr[idx].uiAddr = RA_BASE_STRIP_GHOST_BIT( + psLMAllocArrayData->aBaseArray[uiAllocIndex]) + uiInAllocOffset; +#if defined(SUPPORT_STATIC_IPA) + /* Modify the physical address with the associated IPA values */ + psDevPAddr[idx].uiAddr &= ~ui64IPAClearMask; + psDevPAddr[idx].uiAddr |= ui64IPAPolicyValue; +#endif + } + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; + PMR_KERNEL_MAPPING *psKernelMapping; + RA_BASE_T *paBaseArray; + IMG_UINT32 ui32ChunkIndex = 0; + size_t uiOffsetMask = uiOffset; + + IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64ChunkSize = IMG_PAGE2BYTES64(uiLog2ChunkSize); + IMG_UINT64 ui64PhysSize; + + PVR_ASSERT(psLMAllocArrayData); + PVR_ASSERT(ppvKernelAddressOut); + PVR_ASSERT(phHandleOut); + + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) + { + IMG_UINT32 i; + /* Locate the desired physical chunk to map in */ + ui32ChunkIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize; + + if (OSIsMapPhysNonContigSupported()) + { + /* If a size hasn't been supplied assume we are mapping a single page */ + IMG_UINT32 uiNumChunksToMap; + + /* This is to support OSMapPMR originated parameters */ + if (uiOffset == 0 && uiSize == 0) + { + uiNumChunksToMap = psLMAllocArrayData->iNumChunksAllocated; + } + else + { + uiNumChunksToMap = uiSize >> psLMAllocArrayData->uiLog2ChunkSize; + } + + /* Check we are attempting to map at least a chunk in size */ + if (uiNumChunksToMap < 1) + { + PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, "uiNumChunksToMap < 1"); + } + + /* Check contiguous region doesn't exceed size of PMR */ + if (ui32ChunkIndex + (uiNumChunksToMap - 1) > psLMAllocArrayData->uiTotalNumChunks) + { + PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, + "Mapping range exceeds total num chunks in PMR"); + } + + /* Check the virtually contiguous region given is physically backed */ + for (i = ui32ChunkIndex; i < ui32ChunkIndex + uiNumChunksToMap; i++) + { + if (RA_BASE_IS_INVALID(psLMAllocArrayData->aBaseArray[i])) + { + PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check"); + } + } + /* Size of virtually contiguous sparse alloc */ + ui64PhysSize = IMG_PAGES2BYTES64(uiNumChunksToMap, psLMAllocArrayData->uiLog2ChunkSize); + } + else + { + size_t uiStart = uiOffset; + size_t uiEnd = uiOffset + uiSize - 1; + size_t uiChunkMask = ~(IMG_PAGE2BYTES64(psLMAllocArrayData->uiLog2ChunkSize) - 1); + + /* We can still map if only one chunk is required */ + if ((uiStart & uiChunkMask) != (uiEnd & uiChunkMask)) + { + PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check"); + } + /* Map a single chunk */ + ui64PhysSize = ui64ChunkSize; + } + + paBaseArray = &psLMAllocArrayData->aBaseArray[ui32ChunkIndex]; + + /* Offset mask to be used for address offsets within a chunk */ + uiOffsetMask = (1U << psLMAllocArrayData->uiLog2ChunkSize) - 1; + } + else + { + paBaseArray = psLMAllocArrayData->aBaseArray; + ui64PhysSize = IMG_PAGES2BYTES64(psLMAllocArrayData->uiTotalNumChunks, uiLog2ChunkSize); + } + + PVR_ASSERT(ui32ChunkIndex < psLMAllocArrayData->uiTotalNumChunks); + + psKernelMapping = OSAllocMem(sizeof(*psKernelMapping)); + PVR_RETURN_IF_NOMEM(psKernelMapping); + + eError = 
_MapPMRKernel(psLMAllocArrayData, + paBaseArray, + ui64PhysSize, + ulFlags, + psKernelMapping); + if (eError == PVRSRV_OK) + { + /* uiOffset & uiOffsetMask is used to get the kernel addr within the page */ + *ppvKernelAddressOut = ((IMG_CHAR *) psKernelMapping->pvKernelAddress) + (uiOffset & uiOffsetMask); + *phHandleOut = psKernelMapping; + } + else + { + OSFreeMem(psKernelMapping); + PVR_LOG_ERROR(eError, "_MapPMRKernel"); + } + + return eError; +} + +static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv; + PMR_KERNEL_MAPPING *psKernelMapping = (PMR_KERNEL_MAPPING *) hHandle; + + PVR_ASSERT(psLMAllocArrayData); + PVR_ASSERT(psKernelMapping); + + _UnMapPMRKernel(psLMAllocArrayData, + psKernelMapping); + + OSFreeMem(psKernelMapping); +} + +static PVRSRV_ERROR +CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes, + void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize)) +{ + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; + size_t uiBytesCopied; + size_t uiBytesToCopy; + size_t uiBytesCopyableFromAlloc; + PMR_KERNEL_MAPPING sMapping; + IMG_UINT8 *pcKernelPointer = NULL; + size_t uiBufferOffset; + IMG_UINT64 uiAllocIndex; + IMG_DEVMEM_OFFSET_T uiInAllocOffset; + IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64ChunkSize = IMG_PAGE2BYTES64(uiLog2ChunkSize); + IMG_UINT64 ui64PhysSize; + PVRSRV_ERROR eError; + + uiBytesCopied = 0; + uiBytesToCopy = uiBufSz; + uiBufferOffset = 0; + + if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) + { + while (uiBytesToCopy > 0) + { + /* we have to map one alloc in at a time */ + PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0); + uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize; + uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2ChunkSize); + uiBytesCopyableFromAlloc = uiBytesToCopy; + if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2ChunkSize)) + { + uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2ChunkSize)-uiInAllocOffset); + } + /* Mapping a single chunk at a time */ + ui64PhysSize = ui64ChunkSize; + + PVR_ASSERT(uiBytesCopyableFromAlloc != 0); + PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumChunks); + PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2ChunkSize)); + + eError = _MapPMRKernel(psLMAllocArrayData, + &psLMAllocArrayData->aBaseArray[uiAllocIndex], + ui64PhysSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, + &sMapping); + PVR_GOTO_IF_ERROR(eError, e0); + pcKernelPointer = sMapping.pvKernelAddress; + pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc); + + _UnMapPMRKernel(psLMAllocArrayData, + &sMapping); + + uiBufferOffset += uiBytesCopyableFromAlloc; + uiBytesToCopy -= uiBytesCopyableFromAlloc; + uiOffset += uiBytesCopyableFromAlloc; + uiBytesCopied += uiBytesCopyableFromAlloc; + } + } + else + { + ui64PhysSize = IMG_PAGES2BYTES64(psLMAllocArrayData->uiTotalNumChunks, uiLog2ChunkSize); + PVR_ASSERT((uiOffset + uiBufSz) <= ui64PhysSize); + PVR_ASSERT(ui64ChunkSize != 0); + eError = _MapPMRKernel(psLMAllocArrayData, + psLMAllocArrayData->aBaseArray, + ui64PhysSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, + &sMapping); + PVR_GOTO_IF_ERROR(eError, e0); + pcKernelPointer = sMapping.pvKernelAddress; + 
pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); + + _UnMapPMRKernel(psLMAllocArrayData, + &sMapping); + + uiBytesCopied = uiBufSz; + } + *puiNumBytes = uiBytesCopied; + return PVRSRV_OK; +e0: + *puiNumBytes = uiBytesCopied; + return eError; +} + +static void ReadLocalMem(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize) +{ + /* the memory is mapped as WC (and also aligned to page size) so we can + * safely call "Cached" memcpy */ + OSCachedMemCopy(pcBuffer, pcPMR, uiSize); +} + +static PVRSRV_ERROR +PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + return CopyBytesLocalMem(pvPriv, + uiOffset, + pcBuffer, + uiBufSz, + puiNumBytes, + ReadLocalMem); +} + +static void WriteLocalMem(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize) +{ + /* the memory is mapped as WC (and also aligned to page size) so we can + * safely call "Cached" memcpy but need to issue a write memory barrier + * to flush the write buffers after */ + OSCachedMemCopyWMB(pcPMR, pcBuffer, uiSize); +} + +static PVRSRV_ERROR +PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + return CopyBytesLocalMem(pvPriv, + uiOffset, + pcBuffer, + uiBufSz, + puiNumBytes, + WriteLocalMem); +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemLocalMem +@Description This function Changes the sparse mapping by allocating and + freeing of pages. It also changes the GPU maps accordingly. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static PVRSRV_ERROR +PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages, +#endif + IMG_UINT32 uiFlags) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; + + IMG_UINT32 ui32AdtnlAllocPages = 0; + IMG_UINT32 ui32AdtnlFreePages = 0; + IMG_UINT32 ui32CommonRequstCount = 0; + IMG_UINT32 ui32Loop = 0; + IMG_UINT32 ui32Index = 0; + IMG_UINT32 uiAllocpgidx; + IMG_UINT32 uiFreepgidx; + + PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; + IMG_UINT32 uiLog2ChunkSize = psPMRPageArrayData->uiLog2ChunkSize; + IMG_UINT64 ui64ChunkSize = IMG_PAGE2BYTES64(uiLog2ChunkSize); + +#if defined(DEBUG) + IMG_BOOL bPoisonFail = IMG_FALSE; + IMG_BOOL bZeroFail = IMG_FALSE; +#endif + + /* Fetch the Page table array represented by the PMR */ + RA_BASE_T *paBaseArray = psPMRPageArrayData->aBaseArray; + PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + *ppvZombiePages = NULL; +#endif + /* The incoming request is classified into two operations independent of + * each other: alloc & free chunks. + * These operations can be combined with two mapping operations as well + * which are GPU & CPU space mappings. + * + * From the alloc and free chunk requests, the net amount of chunks to be + * allocated or freed is computed. Chunks that were requested to be freed + * will be reused to fulfil alloc requests. + * + * The order of operations is: + * 1. Allocate new Chunks. + * 2. Move the free chunks from free request to alloc positions. + * 3. 
Free the rest of the chunks not used for alloc + * + * Alloc parameters are validated at the time of allocation + * and any error will be handled then. */ + + if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) + { + ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ? + ui32FreePageCount : ui32AllocPageCount; + + PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free chunks not supported"); + } + + if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) + { + ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount; + } + else + { + ui32AllocPageCount = 0; + } + + if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) + { + ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount; + } + else + { + ui32FreePageCount = 0; + } + + PVR_LOG_RETURN_IF_FALSE( + (ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0, + "Invalid combination of parameters: ui32CommonRequstCount," + " ui32AdtnlAllocPages and ui32AdtnlFreePages.", + PVRSRV_ERROR_INVALID_PARAMS + ); + + { + /* Validate the free page indices */ + if (ui32FreePageCount) + { + if (pai32FreeIndices != NULL) + { + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumChunks) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + if (RA_BASE_IS_INVALID(paBaseArray[uiFreepgidx])) + { + PVR_LOG_GOTO_WITH_ERROR("paBaseArray[uiFreepgidx]", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Given non-zero free count but missing indices array", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + /* The following block of code verifies any issues with common alloc chunk indices */ + for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + uiAllocpgidx = pai32AllocIndices[ui32Loop]; + if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumChunks) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + if ((!RA_BASE_IS_INVALID(paBaseArray[uiAllocpgidx])) || + (psPMRMapTable->aui32Translation[uiAllocpgidx] != TRANSLATION_INVALID)) + { + PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + + ui32Loop = 0; + + /* Allocate new chunks */ + if (0 != ui32AdtnlAllocPages) + { + /* Say how many chunks to allocate */ + psPMRPageArrayData->uiChunksToAlloc = ui32AdtnlAllocPages; + + eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices); + PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0); + + /* Mark the corresponding chunks of translation table as valid */ + for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; + } + + psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; + } + + ui32Index = ui32Loop; + ui32Loop = 0; + + /* Move the corresponding free chunks to alloc request */ + eError = RA_SwapSparseMem(psPMRPageArrayData->psArena, + paBaseArray, + psPMRPageArrayData->uiTotalNumChunks, + psPMRPageArrayData->uiLog2ChunkSize, + &pai32AllocIndices[ui32Index], + &pai32FreeIndices[ui32Loop], + ui32CommonRequstCount); + PVR_LOG_GOTO_IF_ERROR(eError, "RA_SwapSparseMem", unwind_alloc); + + for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++) + { + uiAllocpgidx = pai32AllocIndices[ui32Index]; + uiFreepgidx = 
pai32FreeIndices[ui32Loop]; + + psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + + /* Be sure to honour the attributes associated with the allocation + * such as zeroing, poisoning etc. */ + if (BIT_ISSET(psPMRPageArrayData->ui32Flags, FLAG_POISON_ON_ALLOC)) + { + eError = _PhysPgMemSet(psPMRPageArrayData, + &psPMRPageArrayData->aBaseArray[uiAllocpgidx], + ui64ChunkSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + + /* Consider this as a soft failure and go ahead but log error to kernel log */ + if (eError != PVRSRV_OK) + { +#if defined(DEBUG) + bPoisonFail = IMG_TRUE; +#endif + } + } + + if (BIT_ISSET(psPMRPageArrayData->ui32Flags, FLAG_ZERO)) + { + eError = _PhysPgMemSet(psPMRPageArrayData, + &psPMRPageArrayData->aBaseArray[uiAllocpgidx], + ui64ChunkSize, + ZERO_PAGE_VALUE); + /* Consider this as a soft failure and go ahead but log error to kernel log */ + if (eError != PVRSRV_OK) + { +#if defined(DEBUG) + /* Don't think we need to zero any chunks further */ + bZeroFail = IMG_TRUE; +#endif + } + } + } + + /* Free or zombie the additional free chunks */ + if (0 != ui32AdtnlFreePages) + { +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_LMALLOCARRAY_DATA *psExtractedPagesPageArray = NULL; + + eError = _ExtractPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages, &psExtractedPagesPageArray); + PVR_LOG_GOTO_IF_ERROR(eError, "_ExtractPages", e0); + + /* Zombify pages to get proper stats */ + eError = PMRZombifyLocalMem(psExtractedPagesPageArray, NULL); + PVR_LOG_IF_ERROR(eError, "PMRZombifyLocalMem"); + + *ppvZombiePages = psExtractedPagesPageArray; +#else + eError = _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); + PVR_LOG_GOTO_IF_ERROR(eError, "_FreeLMPages", e0); +#endif /* SUPPORT_PMR_PAGES_DEFERRED_FREE */ + ui32Index = ui32Loop; + ui32Loop = 0; + + while (ui32Loop++ < ui32AdtnlFreePages) + { + /* Set the corresponding mapping table entry to invalid address */ + psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID; + } + + psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; + } + } + +#if defined(DEBUG) + if (IMG_TRUE == bPoisonFail) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the chunk", __func__)); + } + + if (IMG_TRUE == bZeroFail) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the chunk", __func__)); + } +#endif + + return PVRSRV_OK; + +unwind_alloc: + _FreeLMPages(psPMRPageArrayData, pai32AllocIndices, ui32Index); + + for (ui32Loop = 0; ui32Loop < ui32Index; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = TRANSLATION_INVALID; + } + +e0: + return eError; +} + +static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { + .pfnLockPhysAddresses = &PMRLockSysPhysAddressesLocalMem, + .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesLocalMem, + .pfnDevPhysAddr = &PMRSysPhysAddrLocalMem, + .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataLocalMem, + .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataLocalMem, + .pfnReadBytes = &PMRReadBytesLocalMem, + .pfnWriteBytes = &PMRWriteBytesLocalMem, + .pfnChangeSparseMem = &PMRChangeSparseMemLocalMem, + .pfnMMap = NULL, + .pfnFinalize = &PMRFinalizeLocalMem, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + .pfnFreeZombiePages = &PMRFreeZombiePagesRAMem, +#endif +#if defined(SUPPORT_PMR_DEFERRED_FREE) + .pfnZombify = &PMRZombifyLocalMem, +#endif +}; + +PVRSRV_ERROR +PhysmemNewRAMemRamBackedPMR(PHYS_HEAP *psPhysHeap, + RA_ARENA 
*pArena, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PMR *psPMR = NULL; + PMR_LMALLOCARRAY_DATA *psPrivData = NULL; + PMR_FLAGS_T uiPMRFlags; + IMG_UINT32 ui32LMAllocFlags = 0; + + /* This path is checking for the type of PMR to create, if sparse we + * have to perform additional validation as we can only map sparse ranges + * if the os functionality to do so is present. We can also only map virtually + * contiguous sparse regions. Non backed gaps in a range cannot be mapped. + */ + if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1) + { + if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) && + !OSIsMapPhysNonContigSupported()) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: LMA kernel mapping functions not available " + "for physically discontiguous memory.", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); + } + BIT_SET(ui32LMAllocFlags, FLAG_SPARSE); + } + + if (PVRSRV_CHECK_ON_DEMAND(uiFlags)) + { + BIT_SET(ui32LMAllocFlags, FLAG_ONDEMAND); + } + + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) + { + BIT_SET(ui32LMAllocFlags, FLAG_ZERO); + } + + if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) + { + BIT_SET(ui32LMAllocFlags, FLAG_POISON_ON_ALLOC); + } + +#if defined(DEBUG) + if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) + { + BIT_SET(ui32LMAllocFlags, FLAG_POISON_ON_FREE); + } +#endif + + /* Create Array structure that holds the physical pages */ + eError = _AllocLMPageArray(uiSize, + ui32NumPhysChunks, + uiLog2AllocPageSize, + ui32LMAllocFlags, + psPhysHeap, + pArena, + uiFlags, + uiPid, + &psPrivData, + psConnection); + PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray); + + if (!BIT_ISSET(ui32LMAllocFlags, FLAG_ONDEMAND)) + { + /* Allocate the physical pages */ + eError = _AllocLMPages(psPrivData, pui32MappingTable); + PVR_GOTO_IF_ERROR(eError, errorOnAllocPages); + } + + /* In this instance, we simply pass flags straight through. + + Generically, uiFlags can include things that control the PMR + factory, but we don't need any such thing (at the time of + writing!), and our caller specifies all PMR flags so we don't + need to meddle with what was given to us. 
+ */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + /* check no significant bits were lost in cast due to different + bit widths for flags */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (BIT_ISSET(ui32LMAllocFlags, FLAG_ONDEMAND)) + { + PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (LMA)"); + } + + eError = PMRCreatePMR(psPhysHeap, + uiSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiPMRFlags, + pszAnnotation, + &_sPMRLMAFuncTab, + psPrivData, + PMR_TYPE_LMA, + &psPMR, + ui32PDumpFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate); + + *ppsPMRPtr = psPMR; + return PVRSRV_OK; + +errorOnCreate: + if (!BIT_ISSET(ui32LMAllocFlags, FLAG_ONDEMAND) && psPrivData->iNumChunksAllocated) + { + eError2 = _FreeLMPages(psPrivData, NULL, 0); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + +errorOnAllocPages: + _FreeLMPageArray(psPrivData); + +errorOnAllocPageArray: +errorOnParam: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pmr.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pmr.c index 4d173201910d..588b076a4ed5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pmr.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pmr.c @@ -60,12 +60,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdump.h" #include "devicemem_server_utils.h" +#include "devicemem_server.h" #include "osfunc.h" #include "pdump_km.h" #include "pdump_physmem.h" #include "pmr_impl.h" #include "pmr_os.h" +#include "pmr_env.h" #include "pvrsrv.h" #include "allocmem.h" @@ -95,6 +97,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdump_km.h" +#define PMR_FLAG_INTERNAL_SPARSE_ALLOC (1 << 0) +#define PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE (1 << 1) +#if defined(SUPPORT_PMR_DEFERRED_FREE) +#define PMR_FLAG_INTERNAL_DEFER_FREE (1 << 2) +#define PMR_FLAG_INTERNAL_IS_ZOMBIE (1 << 3) + +/* Indicates PMR should be destroyed immediately and not deferred. */ +#define PMR_NO_ZOMBIE_FENCE IMG_UINT64_MAX +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + /* Memalloc flags can be converted into pmr, ra or psplay flags. * Ensure flags types are same size. */ @@ -125,7 +137,7 @@ static struct _PMR_CTX_ IMG_UINT64 uiNextKey; /* For debugging only, I guess: Number of live PMRs */ - IMG_UINT32 uiNumLivePMRs; + ATOMIC_T uiNumLivePMRs; /* Lock for this structure */ POS_LOCK hLock; @@ -135,8 +147,59 @@ static struct _PMR_CTX_ * count is zero. */ IMG_BOOL bModuleInitialised; -} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE }; +} _gsSingletonPMRContext = { 1, 0, {0}, NULL, IMG_FALSE }; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +typedef enum _PMR_ZOMBIE_TYPE_ { + PMR_ZOMBIE_TYPE_PMR, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_ZOMBIE_TYPE_PAGES, +#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */ +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + PMR_ZOMBIE_TYPE_DEVICE_IMPORT, +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ +} PMR_ZOMBIE_TYPE; + +typedef struct _PMR_HEADER_ +{ + /* List node used to put the header on the zombie list + * (psDevNode->sPMRZombieList). */ + DLLIST_NODE sZombieNode; + + PMR_ZOMBIE_TYPE eZombieType; +} PMR_HEADER; +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +/* + * A structure describing zombie pages. 
+ */ +typedef struct _PMR_ZOMBIE_PAGES_ +{ + PMR_HEADER sHeader; + PMR_IMPL_ZOMBIEPAGES pvFactoryPages; + PFN_FREE_ZOMBIE_PAGES_FN pfnFactoryFreeZombies; +} PMR_ZOMBIE_PAGES; +#endif + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +typedef enum _PMR_STATE_ +{ + PMR_STATE_INIT, + PMR_STATE_ACTIVE, + PMR_STATE_PAGES_IN_MIGRATE +} PMR_STATE; +#endif +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) +typedef struct _PMR_DEVICE_IMPORT_ +{ + PMR_HEADER sHeader; /* psDevNode zombie queue list node. */ + DLLIST_NODE sNext; /* PMR::sXDeviceImports list node. */ + PVRSRV_DEVICE_NODE *psDevNode; /* Device this import is representing. */ + PMR *psParent; /* PMR the import belongs to. */ +} PMR_DEVICE_IMPORT; +#endif /* A PMR. One per physical allocation. May be "shared". * @@ -154,6 +217,11 @@ static struct _PMR_CTX_ */ struct _PMR_ { +#if defined(SUPPORT_PMR_DEFERRED_FREE) + /* A Common header structure shared between PMR and PMR-like PMR_ZOMBIE_PAGES object */ + PMR_HEADER sHeader; +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + /* This object is strictly refcounted. References include: * - mapping * - live handles (to this object) @@ -167,7 +235,48 @@ struct _PMR_ */ PHYS_HEAP *psPhysHeap; - ATOMIC_T iRefCount; + /* Reference count of the PMR. */ + IMG_UINT32 uiRefCount; + /* Lock protecting reference counting (uiRefCount). */ + POS_SPINLOCK hRefCountLock; + + /* Client CPU mapping count - this is the number of times the PMR has been + * mapped by the client to the CPU. It is used to determine when it + * is safe to permit modification of a sparse allocation's layout. + * Note that the process of mapping also increments uiRefCount + * independently (as that is used to determine when a PMR may safely + * be destroyed). + */ + ATOMIC_T iClientCpuMapCount; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + + /* Kernel CPU mapping count - number of times the PMR has been + * mapped into the kernel. Used to signal if migrate is allowed + * to occur. + */ + ATOMIC_T iKernelCpuMapCount; + /* + * GPU mappings associated with PMR. + * Must be protected with PMR lock. + */ + DLLIST_NODE sGpuMappingListHead; + + /* + * Current state of PMR + * Must be protected by PMR lock. + * Used to describe the current state of the PMR and determine which interactions + * are possible. Pages could be in progress of migration during which physical + * addresses cannot be requested. + */ + PMR_STATE eState; +#endif + + /* Count of how many reservations refer to this + * PMR as a part of a GPU mapping. Must be protected + * by PMR lock. + */ + IMG_INT32 iAssociatedResCount; /* Lock count - this is the number of times PMRLockSysPhysAddresses() * has been called, less the number of PMRUnlockSysPhysAddresses() @@ -181,6 +290,18 @@ struct _PMR_ /* Lock for this structure */ POS_LOCK hLock; + /* Protects: `uiInternalFlags` & `uiDevImportBitmap` */ + POS_SPINLOCK hBitmapLock; + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) + /* See PMR_ImportedDevicesMask() + * Protected by hBitmapLock. */ + IMG_UINT64 uiDevImportBitmap; +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) */ +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + /* List of PMR_DEVICE_IMPORT's */ + DLLIST_NODE sXDeviceImports; +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ /* Incrementing serial number to each allocation. 
*/ IMG_UINT64 uiSerialNum; @@ -196,6 +317,9 @@ struct _PMR_ /* Data associated with the "subtype" */ PMR_IMPL_PRIVDATA pvFlavourData; + /* Environment specific data */ + DECLARE_PMR_ENV_DATA(sEnvData) + /* What kind of PMR do we have? */ PMR_IMPL_TYPE eFlavour; @@ -229,25 +353,6 @@ struct _PMR_ */ PMR_MAPPING_TABLE *psMappingTable; - /* Indicates whether this PMR has been allocated as sparse. - * The condition for this variable to be set at allocation time is: - * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1) - */ - IMG_BOOL bSparseAlloc; - - /* - * Flag that conveys mutability of the PMR: - * - TRUE indicates the PMR is immutable (no more memory changes) - * - FALSE means the memory layout associated with the PMR is mutable - * - * A PMR is always mutable by default but is marked immutable on the - * first export for the rest of its life. - * - * Also, any PMRs that track the same memory through imports are - * marked immutable as well. - */ - IMG_BOOL bNoLayoutChange; - /* Minimum Physical Contiguity Guarantee. Might be called "page size", * but that would be incorrect, as page size is something meaningful * only in virtual realm. This contiguity guarantee provides an @@ -283,6 +388,32 @@ struct _PMR_ */ PMR_FLAGS_T uiFlags; + /* Various flags informing about PMR's state: + * + * SPARSE_ALLOC: + * indicates whether this PMR has been allocated as sparse. + * The condition for this variable to be set at allocation time is: + * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1) + * + * NO_LAYOUT_CHANGE: + * Flag that conveys mutability of the PMR: + * - set: indicates the PMR is immutable (no more memory changes) + * - unset: means the memory layout associated with the PMR is mutable + * + * A PMR is always mutable by default but is marked immutable on the + * first export for the rest of its life. + * + * Also, any PMRs that track the same memory through imports are + * marked immutable as well. + * + * DEFER_FREE: + * If present the PMR is marked to be freed by the CleanupThread. + * + * IS_ZOMBIE: + * Indicates if the PMR is in the zombie state (marked for free in the + * CleanupThread). */ + IMG_UINT32 uiInternalFlags; + /* Do we really need this? * For now we'll keep it, until we know we don't. 
* NB: this is not the "memory context" in client terms - this is @@ -311,6 +442,19 @@ struct _PMR_PAGELIST_ struct _PMR_ *psReferencePMR; }; +static INLINE IMG_UINT32 +_GetRef(const PMR *const psPMR) +{ + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT32 uiRefCount; + + OSSpinLockAcquire(psPMR->hRefCountLock, uiFlags); + uiRefCount = psPMR->uiRefCount; + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + return uiRefCount; +} + #if defined(PDUMP) static INLINE IMG_BOOL _IsHostDevicePMR(const PMR *const psPMR) { @@ -330,7 +474,7 @@ PDumpPMRMallocPMR(PMR *psPMR, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32ChunkSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *puiMappingTable, IMG_UINT32 uiLog2Contiguity, IMG_BOOL bInitialise, @@ -350,6 +494,11 @@ PDumpPMRChangeSparsePMR(PMR *psPMR, IMG_HANDLE *phPDumpAllocInfoOut); #endif /* defined PDUMP */ +IMG_INT32 PMRGetLiveCount(void) +{ + return OSAtomicRead(&_gsSingletonPMRContext.uiNumLivePMRs); +} + PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) { PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL; @@ -360,11 +509,17 @@ PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) PVR_ASSERT(psExportPMR->psPMR != NULL); if (psExportPMR->psPMR) { - PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0); - if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0) + IMG_UINT32 uiRefCount = _GetRef(psExportPMR->psPMR); + if (uiRefCount > 0) { psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR); } +#ifdef PVRSRV_NEED_PVR_ASSERT + else + { + PVR_ASSERT(IMG_FALSE); + } +#endif } } @@ -374,7 +529,7 @@ PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) static PVRSRV_ERROR _PMRCreate(PMR_SIZE_T uiLogicalSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *pui32MappingTable, PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, PMR_FLAGS_T uiFlags, @@ -392,8 +547,8 @@ _PMRCreate(PMR_SIZE_T uiLogicalSize, psContext = &_gsSingletonPMRContext; /* Do we have a sparse allocation? 
*/ - if ( (ui32NumVirtChunks != ui32NumPhysChunks) || - (ui32NumVirtChunks > 1) ) + if ( (ui32NumLogicalChunks != ui32NumPhysChunks) || + (ui32NumLogicalChunks > 1) ) { bSparse = IMG_TRUE; uiChunkSize = 1ULL << uiLog2ContiguityGuarantee; @@ -407,15 +562,15 @@ _PMRCreate(PMR_SIZE_T uiLogicalSize, if (bSparse) { /* Check the logical size and chunk information agree with each other */ - if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks)) + if (uiLogicalSize != (uiChunkSize * ui32NumLogicalChunks)) { - PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)", - __func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks)); + PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumLogicalChunks = %d)", + __func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumLogicalChunks)); return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; } } - pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks); + pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + IMG_FLEX_ARRAY_SIZE(sizeof(IMG_UINT32), ui32NumLogicalChunks)); PVR_RETURN_IF_NOMEM(pvPMRLinAddr); psPMR = (PMR *) pvPMRLinAddr; @@ -423,33 +578,34 @@ _PMRCreate(PMR_SIZE_T uiLogicalSize, /* Setup the mapping table */ psMappingTable->uiChunkSize = uiChunkSize; - psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks; + psMappingTable->ui32NumLogicalChunks = ui32NumLogicalChunks; psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks; OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])* - ui32NumVirtChunks); + ui32NumLogicalChunks); for (i=0; i<ui32NumPhysChunks; i++) { ui32Temp = pui32MappingTable[i]; - if (ui32Temp < ui32NumVirtChunks) - { - psMappingTable->aui32Translation[ui32Temp] = ui32Temp; - } - else - { - OSFreeMem(psPMR); - return PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY; - } + PVR_ASSERT(ui32Temp < ui32NumLogicalChunks); + psMappingTable->aui32Translation[ui32Temp] = ui32Temp; } eError = OSLockCreate(&psPMR->hLock); - if (eError != PVRSRV_OK) - { - OSFreeMem(psPMR); - return eError; - } + PVR_GOTO_IF_ERROR(eError, ErrFreePMR); + + eError = OSSpinLockCreate(&psPMR->hBitmapLock); + PVR_GOTO_IF_ERROR(eError, ErrFreePMRLock); + + eError = OSSpinLockCreate(&psPMR->hRefCountLock); + PVR_GOTO_IF_ERROR(eError, ErrFreeBitmapLock); /* Setup the PMR */ - OSAtomicWrite(&psPMR->iRefCount, 0); + psPMR->uiRefCount = 0; + OSAtomicWrite(&psPMR->iClientCpuMapCount, 0); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + OSAtomicWrite(&psPMR->iKernelCpuMapCount, 0); + dllist_init(&psPMR->sGpuMappingListHead); +#endif + psPMR->iAssociatedResCount = 0; /* If allocation is not made on demand, it will be backed now and * backing will not be removed until the PMR is destroyed, therefore @@ -462,195 +618,955 @@ _PMRCreate(PMR_SIZE_T uiLogicalSize, psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee; psPMR->uiFlags = uiFlags; psPMR->psMappingTable = psMappingTable; - psPMR->bSparseAlloc = bSparse; - psPMR->bNoLayoutChange = IMG_FALSE; + psPMR->uiInternalFlags = bSparse ? 
PMR_FLAG_INTERNAL_SPARSE_ALLOC : 0; psPMR->szAnnotation[0] = '\0'; + PMR_ENV_INITIALIZE(psPMR, sEnvData); + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) + psPMR->uiDevImportBitmap = 0; +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) */ + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + psPMR->sHeader.eZombieType = PMR_ZOMBIE_TYPE_PMR; + dllist_init(&psPMR->sHeader.sZombieNode); +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + dllist_init(&psPMR->sXDeviceImports); +#endif /* defined(SUPPORT_DEVICE_IMPORT_DEFERRED_FREE) */ +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) psPMR->hRIHandle = NULL; #endif + OSLockAcquire(psContext->hLock); + psPMR->uiKey = psContext->uiNextKey; + psPMR->uiSerialNum = psContext->uiNextSerialNum; + psContext->uiNextKey = (0x80200003 * psContext->uiNextKey) + ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr); + psContext->uiNextSerialNum++; + *ppsPMR = psPMR; + OSLockRelease(psContext->hLock); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d", + __func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs))); + + /* Increment live PMR count */ + OSAtomicIncrement(&psContext->uiNumLivePMRs); + + return PVRSRV_OK; + +ErrFreeBitmapLock: + OSSpinLockDestroy(psPMR->hBitmapLock); +ErrFreePMRLock: + OSLockDestroy(psPMR->hLock); +ErrFreePMR: + OSFreeMem(psPMR); + + return eError; +} + +static PVRSRV_ERROR +_Ref(PMR *psPMR, IMG_UINT32 uiRefCount) +{ + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psPMR->hRefCountLock, uiFlags); + + if (psPMR->uiRefCount == 0) + { + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + PVR_DPF((PVR_DBG_ERROR, "pmr.c: Ref Count == 0 PMR: @0x%p Annot: %s", + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + + return PVRSRV_ERROR_REFCOUNT_OVERFLOW; + } + else if (psPMR->uiRefCount >= IMG_UINT32_MAX - uiRefCount) + { + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + PVR_DPF((PVR_DBG_ERROR, "pmr.c: Ref Count >= IMG_UINT32_MAX PMR: @0x%p " + "Annot: %s RefCount: %u", + psPMR, + psPMR->szAnnotation, + uiRefCount)); + OSWarnOn(1); + + return PVRSRV_ERROR_REFCOUNT_OVERFLOW; + } + + psPMR->uiRefCount += uiRefCount; + + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_Unref(PMR *psPMR, IMG_INT32 uiRefCount, IMG_UINT32 *pui32RefCount) +{ + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psPMR->hRefCountLock, uiFlags); + + if (psPMR->uiRefCount == 0) + { + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + PVR_DPF((PVR_DBG_ERROR, "pmr.c: Unref Count = 0 PMR: @0x%p Annot: %s " + "RefCount: %u", + psPMR, + psPMR->szAnnotation, + uiRefCount)); + OSWarnOn(1); + + return PVRSRV_ERROR_REFCOUNT_OVERFLOW; + } + + psPMR->uiRefCount -= uiRefCount; + + if (pui32RefCount != NULL) + { + *pui32RefCount = psPMR->uiRefCount; + } + + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + return PVRSRV_OK; +} + +#if defined(DEBUG) +void PMRLockHeldAssert(const PMR *psPMR) +{ + OSLockHeldAssert(psPMR->hLock); +} +#endif + +void +PMRLockPMR(const PMR *psPMR) +{ + OSLockAcquire(psPMR->hLock); /* Uses same lock as PhysAddresses */ +} + +void +PMRUnlockPMR(const PMR *psPMR) +{ + OSLockRelease(psPMR->hLock); /* Uses same lock as PhysAddresses */ +} + +static INLINE void _IntFlagSet(PMR *psPMR, const IMG_UINT32 uiValue) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + 
BITMASK_SET(psPMR->uiInternalFlags, uiValue); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); +} + +static INLINE void _IntFlagClr(PMR *psPMR, const IMG_UINT32 uiValue) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + BITMASK_UNSET(psPMR->uiInternalFlags, uiValue); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); +} + +static INLINE IMG_BOOL _IntFlagIsSet(const PMR *psPMR, const IMG_UINT32 uiValue) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + IMG_BOOL bIsSet; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + bIsSet = BITMASK_HAS(psPMR->uiInternalFlags, uiValue); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); + + return bIsSet; +} + +static INLINE void +_FactoryLock(const PMR_IMPL_FUNCTAB *psFuncTable) +{ + if (psFuncTable->pfnGetPMRFactoryLock != NULL) + { + psFuncTable->pfnGetPMRFactoryLock(); + } +} + +static INLINE void +_FactoryUnlock(const PMR_IMPL_FUNCTAB *psFuncTable) +{ + if (psFuncTable->pfnReleasePMRFactoryLock != NULL) + { + psFuncTable->pfnReleasePMRFactoryLock(); + } +} + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +/* Protects: + * - `psDevNode->sPMRZombieList` + * - `uiPMRZombieCount` + * - `uiPMRZombieCountInCleanup` + * + * and all `PMR_ZOMBIE_CLEANUP_ITEM::sZombieList` where + * `PMR_ZOMBIE_CLEANUP_ITEM::psDevNode == psDevNode` */ +static INLINE void +_ZombieListLock(PPVRSRV_DEVICE_NODE psDevNode) +{ + OSLockAcquire(psDevNode->hPMRZombieListLock); +} + +static INLINE void +_ZombieListUnlock(PPVRSRV_DEVICE_NODE psDevNode) +{ + OSLockRelease(psDevNode->hPMRZombieListLock); +} + +static IMG_BOOL _IsDeviceOnAndOperating(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + + eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState); + if (eError != PVRSRV_OK) + { + /* Treat unknown power state as ON. */ + ePowerState = PVRSRV_DEV_POWER_STATE_ON; + } + + /* The device does not accept zombies when its power is OFF as + * the cache invalidation comes as a given. 
*/ + return !( ePowerState == PVRSRV_DEV_POWER_STATE_OFF + || psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR); +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) +static IMG_UINT64 +_DeviceImportBitmapGet(const PMR *psPMR) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + IMG_UINT64 uiDevImportBitmap; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + uiDevImportBitmap = psPMR->uiDevImportBitmap; + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); + + return uiDevImportBitmap; +} +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) */ + +#if defined(PVRSRV_ENABLE_XD_MEM) && !defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) +static void +_DeviceImportBitmapSet(PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + BITMASK_SET(psPMR->uiDevImportBitmap, IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); +} +#endif /* defined(PVRSRV_ENABLE_XD_MEM) && !defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) +static void +_DeviceImportBitmapClr(PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + BITMASK_UNSET(psPMR->uiDevImportBitmap, IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); +} + +static IMG_BOOL +_DeviceImportBitmapIsSet(const PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + IMG_BOOL bIsSet; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + bIsSet = BITMASK_HAS(psPMR->uiDevImportBitmap, + IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); + + return bIsSet; +} + +static IMG_BOOL +/* Atomically, return if the `psDevNode` is set in the bitmap and then set it. */ +_DeviceImportBitmapFetchAndSet(PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + IMG_BOOL bIsSet; + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + bIsSet = BITMASK_HAS(psPMR->uiDevImportBitmap, + IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID); + BITMASK_SET(psPMR->uiDevImportBitmap, + IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID); + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); + + return bIsSet; +} + +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) +static PVRSRV_ERROR +_DeviceImportRegister(PMR *psPMR, PPVRSRV_DEVICE_NODE psDevNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PMR_DEVICE_IMPORT *psImport; + + PVR_ASSERT(psPMR); + PVR_ASSERT(psDevNode); + PVR_ASSERT(PMR_DeviceNode(psPMR) != psDevNode); + + /* Explicitly reject: + * - PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC + * - !PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE + * as XD PMRs don't have support for + * SUPPORT_PMR_PAGES_DEFERRED_FREE. 
*/ + if (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) || + !_IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE)) + { + eError = PVRSRV_ERROR_PMR_NOT_PERMITTED; + PVR_LOG_ERROR(eError, + "PVRSRV_CHECK_ON_DEMAND || !PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE"); + return eError; + } + + /* Check if the device is already imported */ + if (_DeviceImportBitmapFetchAndSet(psPMR, psDevNode)) + { + return PVRSRV_OK; + } + + psImport = OSAllocMem(sizeof(*psImport)); + PVR_LOG_RETURN_IF_NOMEM(psImport, "PMR_DEVICE_IMPORT"); + + psImport->psParent = psPMR; + psImport->psDevNode = psDevNode; + dllist_init(&psImport->sHeader.sZombieNode); + psImport->sHeader.eZombieType = PMR_ZOMBIE_TYPE_DEVICE_IMPORT; + + PMRLockPMR(psPMR); + dllist_add_to_tail(&psPMR->sXDeviceImports, &psImport->sNext); + PMRUnlockPMR(psPMR); + + return eError; +} + +static void +_DeviceImportFreeImportZombie(PMR_DEVICE_IMPORT *psImport) +{ + PVR_ASSERT(_DeviceImportBitmapIsSet(psImport->psParent, psImport->psDevNode)); + _DeviceImportBitmapClr(psImport->psParent, psImport->psDevNode); + + PMRLockPMR(psImport->psParent); + dllist_remove_node(&psImport->sNext); + PMRUnlockPMR(psImport->psParent); + + OSFreeMem(psImport); +} + +static IMG_BOOL +_DeviceImportEnqueueZombie(PMR_DEVICE_IMPORT *psImport) +{ + PVR_ASSERT(_DeviceImportBitmapIsSet(psImport->psParent, psImport->psDevNode)); + + if (!_IsDeviceOnAndOperating(psImport->psDevNode)) + { + _DeviceImportFreeImportZombie(psImport); + return IMG_FALSE; + } + + _ZombieListLock(psImport->psDevNode); + dllist_add_to_tail(&psImport->psDevNode->sPMRZombieList, + &psImport->sHeader.sZombieNode); + psImport->psDevNode->uiPMRZombieCount++; + _ZombieListUnlock(psImport->psDevNode); + + return IMG_TRUE; +} + +static void +_DeviceImportsReviveZombies(PMR *psPMR) +{ + PDLLIST_NODE psNode, psNext; + PMR_DEVICE_IMPORT *psImport; + + dllist_foreach_node(&psPMR->sXDeviceImports, psNode, psNext) + { + psImport = IMG_CONTAINER_OF(psNode, PMR_DEVICE_IMPORT, sNext); + _ZombieListLock(psImport->psDevNode); + if (!dllist_is_empty(&psImport->sHeader.sZombieNode)) + { + dllist_remove_node(&psImport->sHeader.sZombieNode); + psImport->psDevNode->uiPMRZombieCount--; + } + _ZombieListUnlock(psImport->psDevNode); + } +} + +static IMG_BOOL +_DeviceImportsEnqueueZombies(PMR *psPMR) +{ + PDLLIST_NODE psNode, psNext; + PMR_DEVICE_IMPORT *psImport; + IMG_BOOL bEnqueued = IMG_FALSE; + + PMRLockPMR(psPMR); + + dllist_foreach_node(&psPMR->sXDeviceImports, psNode, psNext) + { + psImport = IMG_CONTAINER_OF(psNode, PMR_DEVICE_IMPORT, sNext); + bEnqueued |= _DeviceImportEnqueueZombie(psImport); + } + + PMRUnlockPMR(psPMR); + + return bEnqueued; +} + +static void +_DeviceImportsUnregisterAll(PMR *psPMR) +{ + OS_SPINLOCK_FLAGS uiLockingFlags; + PDLLIST_NODE psNode, psNext; + + PMRLockPMR(psPMR); + dllist_foreach_node(&psPMR->sXDeviceImports, psNode, psNext) + { + PMR_DEVICE_IMPORT *psImport = IMG_CONTAINER_OF(psNode, PMR_DEVICE_IMPORT, sNext); + PVR_ASSERT(_DeviceImportBitmapIsSet(psPMR, psImport->psDevNode)); + OSFreeMem(psImport); + } + dllist_init(&psPMR->sXDeviceImports); + + OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags); + psPMR->uiDevImportBitmap = 0; + OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags); + PMRUnlockPMR(psPMR); +} +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + +static void +_PMRDestroy(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + PMR_ENV_FINALIZE(psPMR, sEnvData); + + if (psPMR->psFuncTab->pfnFinalize != NULL) + { + psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData); + } + +#if 
defined(PDUMP) + /* if allocation is done on the host node don't include it in the PDUMP */ + if (!_IsHostDevicePMR(psPMR)) + { + PDumpPMRFreePMR(psPMR, + psPMR->uiLogicalSize, + IMG_PAGE2BYTES64(psPMR->uiLog2ContiguityGuarantee), + psPMR->uiLog2ContiguityGuarantee, + psPMR->hPDumpAllocHandle); + } +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + /* This PMR is about to be destroyed, update its mmap stats record (if present) + * to avoid dangling pointer. Additionally, this is required because mmap stats + * are identified by PMRs and a new PMR down the line "might" get the same address + * as the one we're about to free and we'd like 2 different entries in mmaps + * stats for such cases */ + MMapStatsRemovePMR(psPMR); +#endif + +#ifdef PVRSRV_NEED_PVR_ASSERT + /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */ + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Delete RI entry */ + if (psPMR->hRIHandle) + { + PVRSRV_ERROR eError = RIDeletePMREntryKM(psPMR->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIDeletePMREntryKM"); + /* continue destroying the PMR */ + } +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + _DeviceImportsUnregisterAll(psPMR); +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + /* Decrement live PMR count. Probably only of interest for debugging */ + PVR_ASSERT(OSAtomicRead(&psPMR->psContext->uiNumLivePMRs) > 0); + OSAtomicDecrement(&psPMR->psContext->uiNumLivePMRs); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d", + __func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs))); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /* Detect programming errors here, either a reference on the PMR + * has not been taken by a user or the mapping records added + * to the list head have not been destroyed correctly. 
+ */ + PVR_ASSERT(dllist_is_empty(&psPMR->sGpuMappingListHead)); +#endif + + OSSpinLockDestroy(psPMR->hRefCountLock); + OSSpinLockDestroy(psPMR->hBitmapLock); + OSLockDestroy(psPMR->hLock); + OSFreeMem(psPMR); +} + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +static INLINE PMR_ZOMBIE_TYPE +PMR_GetZombieTypeFromNode(const DLLIST_NODE *psNode) +{ + PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode); + PVR_ASSERT(psPMRHeader != NULL); + return psPMRHeader->eZombieType; +} + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +static INLINE PMR_ZOMBIE_PAGES* +PMR_GetZombiePagesFromNode(const DLLIST_NODE *psNode) +{ + PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode); + PVR_ASSERT(psPMRHeader != NULL); + return IMG_CONTAINER_OF(psPMRHeader, PMR_ZOMBIE_PAGES, sHeader); +} +#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */ + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) +static INLINE PMR_DEVICE_IMPORT* +PMR_GetDeviceImportFromNode(const DLLIST_NODE *psNode) +{ + PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode); + PVR_ASSERT(psPMRHeader != NULL); + return IMG_CONTAINER_OF(psPMRHeader, PMR_DEVICE_IMPORT, sHeader); +} +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + +static INLINE PMR* +PMR_GetPMRFromNode(const DLLIST_NODE *psNode) +{ + PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode); + PVR_ASSERT(psPMRHeader != NULL); + return IMG_CONTAINER_OF(psPMRHeader, PMR, sHeader); +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + +static PVRSRV_ERROR +_UnrefAndMaybeDestroy(PMR *psPMR, IMG_UINT32 uiRefCount) +{ + const PMR_IMPL_FUNCTAB *psFuncTable; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PVRSRV_DEVICE_NODE *psDevNode; + IMG_BOOL bQueuedDeviceImports = IMG_FALSE; +#endif + PVRSRV_ERROR eError; + + PVR_ASSERT(psPMR != NULL); + + psFuncTable = psPMR->psFuncTab; + + _FactoryLock(psFuncTable); + + eError = _Unref(psPMR, uiRefCount, &uiRefCount); + PVR_LOG_GOTO_IF_ERROR(eError, "_Unref", ErrFactoryUnlock); + + if (uiRefCount > 0) + { + /* PMR is still referenced so just return */ + _FactoryUnlock(psFuncTable); + return PVRSRV_OK; + } + +#if !defined(SUPPORT_PMR_DEFERRED_FREE) + /* Don't defer PMR destruction in NoHW and PDUMP drivers. */ + _PMRDestroy(psPMR); +#else /* !defined(SUPPORT_PMR_DEFERRED_FREE) */ + psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); + + /* PMRs that are not marked for deferred free can be freed right away. + * Those are the PMRs that have not been mapped to the device. + * All PMRs that have been mapped to the device need to go through + * the defer free path unless the power is OFF for the PMR's device + * and for all of the device imports. If power is OFF + * the cache invalidation comes as a given. */ + if (!_IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_DEFER_FREE)) + { + _PMRDestroy(psPMR); + goto exit_; + } + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + bQueuedDeviceImports = _DeviceImportsEnqueueZombies(psPMR); +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + if (!bQueuedDeviceImports + && !_IsDeviceOnAndOperating(psDevNode) +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + && !PVRSRV_CHECK_OS_LINUX_MOVABLE(psPMR->uiFlags) +#endif + ) + { + _PMRDestroy(psPMR); + } + else + { + /* Defer freeing the PMR until the Firmware invalidates the caches. 
*/ + _ZombieListLock(psDevNode); + + _IntFlagSet(psPMR, PMR_FLAG_INTERNAL_IS_ZOMBIE); + + dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMR->sHeader.sZombieNode); + psDevNode->uiPMRZombieCount++; + + /* PMR pages are accounted by the driver/process stats. Those stats + * are available on page level hence they need to be adjusted by + * the factories. This is done by the pfnZombify callback. + * Operation needs to be done while holding hPMRZombieListLock + * to prevent CleanupThread from freeing pages while memory stats + * accounting is ongoing. */ + if (psPMR->psFuncTab->pfnZombify != NULL) + { + eError = psPMR->psFuncTab->pfnZombify(psPMR->pvFlavourData, psPMR); + PVR_LOG_IF_ERROR(eError, "pfnZombify"); + } - OSLockAcquire(psContext->hLock); - psPMR->uiKey = psContext->uiNextKey; - psPMR->uiSerialNum = psContext->uiNextSerialNum; - psContext->uiNextKey = (0x80200003 * psContext->uiNextKey) - ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr); - psContext->uiNextSerialNum++; - *ppsPMR = psPMR; - PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR)); - /* Increment live PMR count */ - psContext->uiNumLivePMRs++; - OSLockRelease(psContext->hLock); + _ZombieListUnlock(psDevNode); + } +exit_: +#endif /* !defined(SUPPORT_PMR_DEFERRED_FREE) */ + + _FactoryUnlock(psFuncTable); return PVRSRV_OK; -} -/* This function returns true if the PMR is in use and false otherwise. - * This function is not thread safe and hence the caller - * needs to ensure the thread safety by explicitly taking - * the lock on the PMR or through other means */ -IMG_BOOL PMRIsPMRLive(PMR *psPMR) -{ - return (OSAtomicRead(&psPMR->iRefCount) > 0); -} +ErrFactoryUnlock: + _FactoryUnlock(psFuncTable); -static IMG_UINT32 -_Ref(PMR *psPMR) -{ - if (OSAtomicRead(&psPMR->iRefCount) == 0) - { - PVR_DPF((PVR_DBG_ERROR, "pmr.c: Ref Count == 0 PMR: @0x%p Annot: %s", - psPMR, - psPMR->szAnnotation)); - OSWarnOn(1); - } - return OSAtomicIncrement(&psPMR->iRefCount); + return eError; } -static IMG_UINT32 -_Unref(PMR *psPMR) +#if defined(SUPPORT_PMR_DEFERRED_FREE) +typedef struct _PMR_ZOMBIE_CLEANUP_ITEM_ { - if (OSAtomicRead(&psPMR->iRefCount) <= 0) - { - PVR_DPF((PVR_DBG_ERROR, "pmr.c: Unref Count <= 0 PMR: @0x%p Annot: %s RefCount: %d", - psPMR, - psPMR->szAnnotation, - (IMG_INT32) OSAtomicRead(&psPMR->iRefCount))); - OSWarnOn(1); - } - return OSAtomicDecrement(&psPMR->iRefCount); + PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; + DLLIST_NODE sZombieList; /*!< See _ZombieListLock */ + PPVRSRV_DEVICE_NODE psDevNode; + PVRSRV_CLIENT_SYNC_PRIM *psSync; + IMG_UINT32 uiRequiredSyncValue; + IMG_UINT32 uiRequiredPowerOffCounter; +} PMR_ZOMBIE_CLEANUP_ITEM; + +static INLINE IMG_BOOL +_CanNotFreeZombies(const PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem) +{ + const PVRSRV_DEVICE_NODE *psDevNode = psCleanupItem->psDevNode; + + /* For a zombie PMR to be eligible to be freed either the GPU MMU caches + * need to be flushed (the Firmware updates the sync) or the GPU power needs + * to be off. 
*/ + return !PVRSRVHasCounter32Advanced(OSReadDeviceMem32(psCleanupItem->psSync->pui32LinAddr), + psCleanupItem->uiRequiredSyncValue) && + !PVRSRVHasCounter32Advanced(psDevNode->uiPowerOffCounter, + psCleanupItem->uiRequiredPowerOffCounter); } -static void -_UnrefAndMaybeDestroy(PMR *psPMR) +static PVRSRV_ERROR _PmrZombieCleanup(void *pvData) { - PVRSRV_ERROR eError2; - struct _PMR_CTX_ *psCtx; - IMG_INT iRefCount; - - PVR_ASSERT(psPMR != NULL); + PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem = pvData; + DLLIST_NODE *psNode; + DLLIST_NODE sRetryHead; + IMG_UINT32 uiRetryCount = 0; + PVRSRV_ERROR eError = PVRSRV_OK; - /* Acquire PMR factory lock if provided */ - if (psPMR->psFuncTab->pfnGetPMRFactoryLock) + if (_CanNotFreeZombies(psCleanupItem)) { - psPMR->psFuncTab->pfnGetPMRFactoryLock(); + return PVRSRV_ERROR_RETRY; } + dllist_init(&sRetryHead); - iRefCount = _Unref(psPMR); - - if (iRefCount == 0) + do { - if (psPMR->psFuncTab->pfnFinalize != NULL) + /* hPMRZombieListLock will prevent removing a node while the list is + * processed. If the lock is already acquired by other process which + * intends to remove an item from the list it'll assure the list + * consistency. + * If this thread acquires the lock first it's possible that another + * thread might be holding PMR factory lock. */ + + _ZombieListLock(psCleanupItem->psDevNode); + psNode = dllist_get_next_node(&psCleanupItem->sZombieList); + _ZombieListUnlock(psCleanupItem->psDevNode); + + if (psNode == NULL) + { + continue; + } + + switch (PMR_GetZombieTypeFromNode(psNode)) { - eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData); - - /* PMR unref can be called asynchronously by the kernel or other - * third party modules (eg. display) which doesn't go through the - * usual services bridge. The same PMR can be referenced simultaneously - * in a different path that results in a race condition. - * Hence depending on the race condition, a factory may refuse to destroy - * the resource associated with this PMR if a reference on it was taken - * prior to unref. In that case the PMR factory function returns the error. - * - * When such an error is encountered, the factory needs to ensure the state - * associated with PMR is undisturbed. At this point we just bail out from - * freeing the PMR itself. The PMR handle will then be freed at a later point - * when the same PMR is unreferenced. - * */ - if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2) +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + case PMR_ZOMBIE_TYPE_PAGES: { - if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + PMR_ZOMBIE_PAGES* psZombiePages = PMR_GetZombiePagesFromNode(psNode); + eError = psZombiePages->pfnFactoryFreeZombies(psZombiePages->pvFactoryPages); + _ZombieListLock(psCleanupItem->psDevNode); + dllist_remove_node(psNode); + psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--; + _ZombieListUnlock(psCleanupItem->psDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot free zombie pages! Skipping object %p", psZombiePages)); + dllist_add_to_tail(&sRetryHead, psNode); + uiRetryCount++; + } + else { - psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + OSFreeMem(psZombiePages); } - return; + break; } - PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? 
*/ - } -#if defined(PDUMP) - /* if allocation is done on the host node don't include it in the PDUMP */ - if (!_IsHostDevicePMR(psPMR)) - { - PDumpPMRFreePMR(psPMR, - psPMR->uiLogicalSize, - (1 << psPMR->uiLog2ContiguityGuarantee), - psPMR->uiLog2ContiguityGuarantee, - psPMR->hPDumpAllocHandle); - } -#endif - -#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) - /* This PMR is about to be destroyed, update its mmap stats record (if present) - * to avoid dangling pointer. Additionally, this is required because mmap stats - * are identified by PMRs and a new PMR down the line "might" get the same address - * as the one we're about to free and we'd like 2 different entries in mmaps - * stats for such cases */ - MMapStatsRemovePMR(psPMR); -#endif - -#ifdef PVRSRV_NEED_PVR_ASSERT - /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */ - PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); #endif -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) - { - PVRSRV_ERROR eError; - - /* Delete RI entry */ - if (psPMR->hRIHandle) +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + case PMR_ZOMBIE_TYPE_DEVICE_IMPORT: { - eError = RIDeletePMREntryKM (psPMR->hRIHandle); + PMR_DEVICE_IMPORT *psImport = PMR_GetDeviceImportFromNode(psNode); + _ZombieListLock(psCleanupItem->psDevNode); + dllist_remove_node(psNode); + psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--; + _ZombieListUnlock(psCleanupItem->psDevNode); + _DeviceImportFreeImportZombie(psImport); + break; + } +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ - if (eError != PVRSRV_OK) + case PMR_ZOMBIE_TYPE_PMR: + { + PMR* psPMR = PMR_GetPMRFromNode(psNode); + const PMR_IMPL_FUNCTAB *psFuncTable = psPMR->psFuncTab; + + _FactoryLock(psFuncTable); + _ZombieListLock(psCleanupItem->psDevNode); + /* It is possible that the element might have been removed so + * we have to check if the PMR is still a zombie. + * It's also possible that the PMR has been revived + * (PMRReviveZombieAndRef()), mapped, unmapped and zombified + * again while the lock was not held. + * Considering above only immediately free the PMR if the + * PMR is still a part of this cleanup item. */ + if (psNode == dllist_get_next_node(&psCleanupItem->sZombieList)) + { + dllist_remove_node(psNode); + psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--; + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + /* The PMR cannot be freed if other devices are + * still waiting for the cache flush. */ + if (_DeviceImportBitmapGet(psPMR) != 0) + { + /* Request it to be retried and continue + * to the next zombie item. */ + dllist_add_to_tail(&sRetryHead, psNode); + uiRetryCount++; + _ZombieListUnlock(psCleanupItem->psDevNode); + _FactoryUnlock(psFuncTable); + continue; + } +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + /* Unlock here to avoid locking dependency with the power lock. + * It's okay to do it here since the factory lock is the one + * that needs to be held during PMR destruction. 
*/ + _ZombieListUnlock(psCleanupItem->psDevNode); + _PMRDestroy(psPMR); + } + else { - PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s", - __func__, - PVRSRVGetErrorString(eError))); - /* continue destroying the PMR */ + _ZombieListUnlock(psCleanupItem->psDevNode); } + _FactoryUnlock(psFuncTable); + break; } } -#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ - psCtx = psPMR->psContext; + } while (psNode != NULL); - OSLockDestroy(psPMR->hLock); + if (uiRetryCount) + { + eError = PVRSRV_ERROR_RETRY; + _ZombieListLock(psCleanupItem->psDevNode); + /* Add the retry items back to this cleanup item for when the + * cleanup item is retried. Oldest items will reside at the head of + * the list. The cleanup item will be placed at the back of the cleanup + * queue to process other dependencies first. */ + dllist_insert_list_at_head(&psCleanupItem->sZombieList, &sRetryHead); + psCleanupItem->psDevNode->uiPMRZombieCountInCleanup += uiRetryCount; + _ZombieListUnlock(psCleanupItem->psDevNode); + } + else + { + OSFreeMem(psCleanupItem); + } - /* Release PMR factory lock acquired if any */ - if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) - { - psPMR->psFuncTab->pfnReleasePMRFactoryLock(); - } + return eError; +} + +IMG_BOOL PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem; - OSFreeMem(psPMR); + /* Don't defer the freeing if we are currently unloading the driver + * or if the sync has been destroyed */ + if (psPVRSRVData->bUnload || psDevNode->psMMUCacheSyncPrim == NULL) + { + return IMG_FALSE; + } - /* Decrement live PMR count. Probably only of interest for debugging */ - PVR_ASSERT(psCtx->uiNumLivePMRs > 0); + _ZombieListLock(psDevNode); - OSLockAcquire(psCtx->hLock); - psCtx->uiNumLivePMRs--; - OSLockRelease(psCtx->hLock); + if (dllist_is_empty(&psDevNode->sPMRZombieList)) + { + _ZombieListUnlock(psDevNode); + return IMG_FALSE; } - else + + psCleanupItem = OSAllocMem(sizeof(*psCleanupItem)); + if (psCleanupItem == NULL) + { + _ZombieListUnlock(psDevNode); + return IMG_FALSE; + } + + psCleanupItem->sCleanupThreadFn.pfnFree = _PmrZombieCleanup; + psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem; + psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; + psCleanupItem->sCleanupThreadFn.eCleanupType = PVRSRV_CLEANUP_TYPE_PMR; + CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn, + CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); + + psCleanupItem->psDevNode = psDevNode; + psCleanupItem->psSync = psDevNode->psMMUCacheSyncPrim; + psCleanupItem->uiRequiredSyncValue = psDevNode->ui32NextMMUInvalidateUpdate; + psCleanupItem->uiRequiredPowerOffCounter = psDevNode->uiPowerOffCounterNext; + + /* This moves the zombie list to the cleanup item. */ + dllist_replace_head(&psDevNode->sPMRZombieList, &psCleanupItem->sZombieList); + psDevNode->uiPMRZombieCountInCleanup += psDevNode->uiPMRZombieCount; + psDevNode->uiPMRZombieCount = 0; + + OSLockRelease(psDevNode->hPMRZombieListLock); + + PVRSRVCleanupThreadAddWork(psDevNode, &psCleanupItem->sCleanupThreadFn); + + return IMG_TRUE; +} + +void +PMRReviveZombieAndRef(PMR *psPMR) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + DLLIST_NODE *psThis, *psNext; + IMG_BOOL bIsOnZombieList = IMG_FALSE; + OS_SPINLOCK_FLAGS uiFlags; + + PVR_ASSERT(psPMR != NULL); + + psDeviceNode = PhysHeapDeviceNode(psPMR->psPhysHeap); + + /* If this was on a list then it's brought back to life. 
*/ + _ZombieListLock(psDeviceNode); + + /* Need to reference this PMR since it was about to be destroyed and its + * reference count must be 0 (can't use _Ref() due to the warning). */ + OSSpinLockAcquire(psPMR->hRefCountLock, uiFlags); + psPMR->uiRefCount++; + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + +#if defined(DEBUG) + PVR_LOG(("%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d", + __func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs))); +#endif + + /* If we got to this point the PMR must be on a list. If it's not + * it should mean a race of some sort. */ + PVR_ASSERT(!dllist_is_empty(&psPMR->sHeader.sZombieNode)); + + /* For the sake of correct accounting check if the PMR is in the zombie + * list or in the cleanup item. */ + dllist_foreach_node(&psDeviceNode->sPMRZombieList, psThis, psNext) { - /* Release PMR factory lock acquired if any */ - if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + if (psThis == &psPMR->sHeader.sZombieNode) { - psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + bIsOnZombieList = IMG_TRUE; + break; } } +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + if (_DeviceImportBitmapGet(psPMR) != 0) { + PMRLockPMR(psPMR); + _DeviceImportsReviveZombies(psPMR); + PMRUnlockPMR(psPMR); + } +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + /* Revive the PMR (remove it from the zombie list) and therefore + * prevent it's destruction. */ + dllist_remove_node(&psPMR->sHeader.sZombieNode); + _IntFlagClr(psPMR, PMR_FLAG_INTERNAL_IS_ZOMBIE); + + if (bIsOnZombieList) + { + psDeviceNode->uiPMRZombieCount--; + } + else + { + psDeviceNode->uiPMRZombieCountInCleanup--; + } + + _ZombieListUnlock(psDeviceNode); +} + +void +PMRMarkForDeferFree(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + if (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags)) + { + /* If PMR pages are allocated on demand the freeing is handled + * by `SUPPORT_PMR_PAGES_DEFERRED_FREE` path in + * `PMRUnlockSysPhysAddressesNested()`. 
*/ + return; + } + + _IntFlagSet(psPMR, PMR_FLAG_INTERNAL_DEFER_FREE); } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ static INLINE IMG_BOOL _PMRIsSparse(const PMR *psPMR) { - return psPMR->bSparseAlloc; + return _IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_SPARSE_ALLOC); } PVRSRV_ERROR PMRCreatePMR(PHYS_HEAP *psPhysHeap, PMR_SIZE_T uiLogicalSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *pui32MappingTable, PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, PMR_FLAGS_T uiFlags, @@ -668,7 +1584,7 @@ PMRCreatePMR(PHYS_HEAP *psPhysHeap, eError = _PMRCreate(uiLogicalSize, ui32NumPhysChunks, - ui32NumVirtChunks, + ui32NumLogicalChunks, pui32MappingTable, uiLog2ContiguityGuarantee, uiFlags, @@ -680,9 +1596,9 @@ PMRCreatePMR(PHYS_HEAP *psPhysHeap, psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap); psPMR->pvFlavourData = pvPrivData; psPMR->eFlavour = eType; - OSAtomicWrite(&psPMR->iRefCount, 1); + psPMR->uiRefCount = 1; - OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); + OSStringSafeCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); #if defined(PDUMP) /* if allocation was done on the host node don't include it in the PDUMP */ @@ -704,9 +1620,9 @@ PMRCreatePMR(PHYS_HEAP *psPhysHeap, PDumpPMRMallocPMR(psPMR, uiLogicalSize, - 1ULL<uiLog2ContiguityGuarantee), ui32NumPhysChunks, - ui32NumVirtChunks, + ui32NumLogicalChunks, pui32MappingTable, uiLog2ContiguityGuarantee, bInitialise, @@ -714,6 +1630,12 @@ PMRCreatePMR(PHYS_HEAP *psPhysHeap, &psPMR->hPDumpAllocHandle, ui32PDumpFlags); } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + psPMR->eState = PMR_STATE_ACTIVE; #endif *ppsPMRPtr = psPMR; @@ -726,10 +1648,24 @@ PMRCreatePMR(PHYS_HEAP *psPhysHeap, return eError; } +PVRSRV_ERROR +PMRLockSysPhysAddresses(PMR *psPMR) +{ + return PMRLockSysPhysAddressesNested(psPMR, 1, 0); +} + +PVRSRV_ERROR +PMRLockSysPhysAddressesN(PMR *psPMR, IMG_UINT32 uiLockCount) +{ + return PMRLockSysPhysAddressesNested(psPMR, uiLockCount, 0); +} + PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, - IMG_UINT32 ui32NestingLevel) + IMG_UINT32 uiLockCount, + IMG_UINT32 ui32NestingLevel) { PVRSRV_ERROR eError; + IMG_UINT32 uiCallbackValue; PVR_ASSERT(psPMR != NULL); @@ -746,14 +1682,22 @@ PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, * that physical addresses are valid after this point, and remain valid * until the corresponding PMRUnlockSysPhysAddressesOSMem() */ - _Ref(psPMR); + eError = _Ref(psPMR, uiLockCount); + if (eError != PVRSRV_OK) + { + OSLockRelease(psPMR->hLock); + return eError; + } /* Also count locks separately from other types of references, to * allow for debug assertions */ - /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */ - if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2)) + /* Only call callback if lock count transitions from 0 to 1 (or 1 to 2 if not + * backed on demand) */ + uiCallbackValue = uiLockCount + (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 
0 : 1); + + if (OSAtomicAdd(&psPMR->iLockCount, uiLockCount) == uiCallbackValue) { if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL) { @@ -770,30 +1714,36 @@ PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, return PVRSRV_OK; e1: - OSAtomicDecrement(&psPMR->iLockCount); - _Unref(psPMR); - PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0); + OSAtomicSubtract(&psPMR->iLockCount, uiLockCount); + PVR_ASSERT(_GetRef(psPMR) != 0); OSLockRelease(psPMR->hLock); PVR_ASSERT(eError != PVRSRV_OK); + (void) _Unref(psPMR, uiLockCount, NULL); return eError; } PVRSRV_ERROR -PMRLockSysPhysAddresses(PMR *psPMR) +PMRUnlockSysPhysAddresses(PMR *psPMR) { - return PMRLockSysPhysAddressesNested(psPMR, 0); + return PMRUnlockSysPhysAddressesNested(psPMR, 1, 2); } PVRSRV_ERROR -PMRUnlockSysPhysAddresses(PMR *psPMR) +PMRUnlockSysPhysAddressesN(PMR *psPMR, IMG_UINT32 uiLockCount) { - return PMRUnlockSysPhysAddressesNested(psPMR, 2); + return PMRUnlockSysPhysAddressesNested(psPMR, uiLockCount, 2); } PVRSRV_ERROR -PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel) +PMRUnlockSysPhysAddressesNested(PMR *psPMR, + IMG_UINT32 uiLockCount, + IMG_UINT32 ui32NestingLevel) { - PVRSRV_ERROR eError; + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_IMPL_ZOMBIEPAGES pvZombiePages = NULL; + PMR_ZOMBIE_PAGES* psPMRZombiePages = NULL; +#endif PVR_ASSERT(psPMR != NULL); @@ -802,44 +1752,128 @@ PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel) * an atomic operation */ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); - PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); - if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)) + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) >= uiLockCount); + + if (OSAtomicSubtract(&psPMR->iLockCount, uiLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)) { if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL) { PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL); +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData, + &pvZombiePages); +#else eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData); - /* must never fail */ - PVR_ASSERT(eError == PVRSRV_OK); +#endif + PVR_LOG_IF_ERROR(eError, "pfnUnlockPhysAddresses"); } } +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (pvZombiePages != NULL) + { + psPMRZombiePages = OSAllocZMem(sizeof(PMR_ZOMBIE_PAGES)); + PVR_GOTO_IF_NOMEM(psPMRZombiePages, eError, ErrRelockPhysAddresses); + } +#endif + OSLockRelease(psPMR->hLock); +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (pvZombiePages != NULL) + { + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_DEVICE_NODE *psDevNode; + + psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); + eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState); + if (eError != PVRSRV_OK) + { + /* Treat unknown power state as ON. 
*/ + ePowerState = PVRSRV_DEV_POWER_STATE_ON; + } + + if (ePowerState == PVRSRV_DEV_POWER_STATE_OFF || + psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR) + { + /* Free preallocated psPMRZombiePages as these won't be used*/ + OSFreeMem(psPMRZombiePages); + + eError = psPMR->psFuncTab->pfnFreeZombiePages(pvZombiePages); + PVR_LOG_GOTO_IF_ERROR(eError, "Error when trying to free zombies immediately.", + ErrReturn); + } + else + { + PVR_ASSERT(psPMRZombiePages != NULL); + psPMRZombiePages->sHeader.eZombieType = PMR_ZOMBIE_TYPE_PAGES; + psPMRZombiePages->pfnFactoryFreeZombies = psPMR->psFuncTab->pfnFreeZombiePages; + psPMRZombiePages->pvFactoryPages = pvZombiePages; + + _ZombieListLock(psDevNode); + dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMRZombiePages->sHeader.sZombieNode); + psDevNode->uiPMRZombieCount++; + _ZombieListUnlock(psDevNode); + } + } + else + { + OSFreeMem(psPMRZombiePages); + } +#endif + /* We also count the locks as references, so that the PMR is not * freed while someone is using a physical address. */ - _UnrefAndMaybeDestroy(psPMR); +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + (void) _UnrefAndMaybeDestroy(psPMR, uiLockCount); - return PVRSRV_OK; + return eError; +#else + return _UnrefAndMaybeDestroy(psPMR, uiLockCount); +#endif + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +ErrRelockPhysAddresses: + { + IMG_UINT32 uiCallbackValue = + uiLockCount + (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1); + + if (OSAtomicAdd(&psPMR->iLockCount, uiLockCount) == uiCallbackValue) + { + if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL) + { + eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData); + PVR_LOG_IF_ERROR(eError, "pfnLockPhysAddresses"); + } + } + } + + OSLockRelease(psPMR->hLock); + +ErrReturn: + return eError; +#endif } PVRSRV_ERROR PMRMakeLocalImportHandle(PMR *psPMR, PMR **ppsPMR) { - PMRRefPMR(psPMR); + PVRSRV_ERROR eError = PMRRefPMR(psPMR); + PVR_RETURN_IF_ERROR(eError); + *ppsPMR = psPMR; + return PVRSRV_OK; } PVRSRV_ERROR PMRUnmakeLocalImportHandle(PMR *psPMR) { - PMRUnrefPMR(psPMR); - return PVRSRV_OK; + return PMRUnrefPMR(psPMR); } /* @@ -854,22 +1888,31 @@ PMRLocalImportPMR(PMR *psPMR, IMG_DEVMEM_SIZE_T *puiSize, IMG_DEVMEM_ALIGN_T *puiAlign) { - _Ref(psPMR); + PVRSRV_ERROR eError = _Ref(psPMR, 1); + PVR_RETURN_IF_ERROR(eError); /* Return the PMR */ *ppsPMR = psPMR; *puiSize = psPMR->uiLogicalSize; - *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee; + *puiAlign = IMG_PAGE2BYTES64(psPMR->uiLog2ContiguityGuarantee); return PVRSRV_OK; } +inline IMG_UINT64 +PMRInternalGetUID(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->uiSerialNum; +} + PVRSRV_ERROR PMRGetUID(PMR *psPMR, IMG_UINT64 *pui64UID) { PVR_ASSERT(psPMR != NULL); - *pui64UID = psPMR->uiSerialNum; + *pui64UID = PMRInternalGetUID(psPMR); return PVRSRV_OK; } @@ -884,6 +1927,7 @@ PMRExportPMR(PMR *psPMR, { IMG_UINT64 uiPassword; PMR_EXPORT *psPMRExport; + PVRSRV_ERROR eError; uiPassword = psPMR->uiKey; @@ -891,11 +1935,14 @@ PMRExportPMR(PMR *psPMR, PVR_RETURN_IF_NOMEM(psPMRExport); psPMRExport->psPMR = psPMR; - _Ref(psPMR); + + eError = _Ref(psPMR, 1); + PVR_GOTO_IF_ERROR(eError, ErrFreePMRExport); + /* The layout of a PMR can't change once exported * to make sure the importers view of the memory is * the same as exporter. 
*/ - psPMR->bNoLayoutChange = IMG_TRUE; + PMR_SetLayoutFixed(psPMR, IMG_TRUE); *ppsPMRExportPtr = psPMRExport; *puiSize = psPMR->uiLogicalSize; @@ -903,17 +1950,25 @@ PMRExportPMR(PMR *psPMR, *puiPassword = uiPassword; return PVRSRV_OK; + +ErrFreePMRExport: + OSFreeMem(psPMRExport); + + return eError; } PVRSRV_ERROR PMRUnexportPMR(PMR_EXPORT *psPMRExport) { + PVRSRV_ERROR eError; + PVR_ASSERT(psPMRExport != NULL); PVR_ASSERT(psPMRExport->psPMR != NULL); - PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); + PVR_ASSERT(psPMRExport->psPMR->uiRefCount > 0); - _UnrefAndMaybeDestroy(psPMRExport->psPMR); + eError = _UnrefAndMaybeDestroy(psPMRExport->psPMR, 1); + PVR_LOG_RETURN_IF_ERROR(eError, "_UnrefAndMaybeDestroy"); OSFreeMem(psPMRExport); @@ -929,12 +1984,13 @@ PMRImportPMR(PMR_EXPORT *psPMRExport, PMR **ppsPMR) { PMR *psPMR; + PVRSRV_ERROR eError; - PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); + PVR_ASSERT(psPMRExport->psPMR->uiRefCount > 0); psPMR = psPMRExport->psPMR; - PVR_ASSERT((psPMR->bNoLayoutChange == IMG_TRUE)); + PVR_ASSERT(PMR_IsMemLayoutFixed(psPMR)); if (psPMR->uiKey != uiPassword) { @@ -948,7 +2004,8 @@ PMRImportPMR(PMR_EXPORT *psPMRExport, return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES; } - _Ref(psPMR); + eError = _Ref(psPMR, 1); + PVR_RETURN_IF_ERROR(eError); *ppsPMR = psPMR; @@ -958,67 +2015,15 @@ PMRImportPMR(PMR_EXPORT *psPMRExport, PVRSRV_ERROR PMRUnimportPMR(PMR *psPMR) { - _UnrefAndMaybeDestroy(psPMR); - - return PVRSRV_OK; -} - -#else /* if defined(SUPPORT_INSECURE_EXPORT) */ - -PVRSRV_ERROR -PMRExportPMR(PMR *psPMR, - PMR_EXPORT **ppsPMRExportPtr, - PMR_SIZE_T *puiSize, - PMR_LOG2ALIGN_T *puiLog2Contig, - PMR_PASSWORD_T *puiPassword) -{ - PVR_UNREFERENCED_PARAMETER(psPMR); - PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr); - PVR_UNREFERENCED_PARAMETER(puiSize); - PVR_UNREFERENCED_PARAMETER(puiLog2Contig); - PVR_UNREFERENCED_PARAMETER(puiPassword); - - return PVRSRV_OK; -} - - -PVRSRV_ERROR -PMRUnexportPMR(PMR_EXPORT *psPMRExport) -{ - PVR_UNREFERENCED_PARAMETER(psPMRExport); - return PVRSRV_OK; -} - - -PVRSRV_ERROR -PMRImportPMR(PMR_EXPORT *psPMRExport, - PMR_PASSWORD_T uiPassword, - PMR_SIZE_T uiSize, - PMR_LOG2ALIGN_T uiLog2Contig, - PMR **ppsPMR) -{ - PVR_UNREFERENCED_PARAMETER(psPMRExport); - PVR_UNREFERENCED_PARAMETER(uiPassword); - PVR_UNREFERENCED_PARAMETER(uiSize); - PVR_UNREFERENCED_PARAMETER(uiLog2Contig); - PVR_UNREFERENCED_PARAMETER(ppsPMR); - - return PVRSRV_OK; + return _UnrefAndMaybeDestroy(psPMR, 1); } -PVRSRV_ERROR -PMRUnimportPMR(PMR *psPMR) -{ - PVR_UNREFERENCED_PARAMETER(psPMR); - return PVRSRV_OK; -} #endif /* if defined(SUPPORT_INSECURE_EXPORT) */ #if defined(SUPPORT_SECURE_EXPORT) PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR) { - _UnrefAndMaybeDestroy(psPMR); - return PVRSRV_OK; + return _UnrefAndMaybeDestroy(psPMR, 1); } static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport) @@ -1041,7 +2046,8 @@ PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, /* We are acquiring reference to PMR here because OSSecureExport * releases bridge lock and PMR lock for a moment and we don't want PMR * to be removed by other thread in the meantime. 
*/ - _Ref(psPMR); + eError = _Ref(psPMR, 1); + PVR_RETURN_IF_ERROR(eError); eError = OSSecureExport("secure_pmr", _ReleaseSecurePMR, @@ -1054,12 +2060,12 @@ PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, /* Mark the PMR immutable once exported * This allows the importers and exporter to have * the same view of the memory */ - psPMR->bNoLayoutChange = IMG_TRUE; + PMR_SetLayoutFixed(psPMR, IMG_TRUE); return PVRSRV_OK; e0: PVR_ASSERT(eError != PVRSRV_OK); - _UnrefAndMaybeDestroy(psPMR); + (void) _UnrefAndMaybeDestroy(psPMR, 1); return eError; } @@ -1076,32 +2082,35 @@ PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection, PVR_UNREFERENCED_PARAMETER(psConnection); eError = OSSecureImport(hSecure, (void **) &psPMR); - PVR_GOTO_IF_ERROR(eError, e0); + PVR_GOTO_IF_ERROR(eError, ErrReturnError); PVR_LOG_RETURN_IF_FALSE(PhysHeapDeviceNode(psPMR->psPhysHeap) == psDevNode, - "PMR invalid for this device", - PVRSRV_ERROR_PMR_NOT_PERMITTED); + "PMR invalid for this device", + PVRSRV_ERROR_PMR_NOT_PERMITTED); + + eError = _Ref(psPMR, 1); + PVR_GOTO_IF_ERROR(eError, ErrReturnError); - _Ref(psPMR); /* The PMR should be immutable once exported * This allows the importers and exporter to have * the same view of the memory */ - PVR_ASSERT(psPMR->bNoLayoutChange == IMG_TRUE); + PVR_ASSERT(PMR_IsMemLayoutFixed(psPMR)); /* Return the PMR */ *ppsPMR = psPMR; *puiSize = psPMR->uiLogicalSize; - *puiAlign = 1ull << psPMR->uiLog2ContiguityGuarantee; + *puiAlign = IMG_PAGE2BYTES64(psPMR->uiLog2ContiguityGuarantee); + return PVRSRV_OK; -e0: + +ErrReturnError: PVR_ASSERT(eError != PVRSRV_OK); return eError; } PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR) { - _UnrefAndMaybeDestroy(psPMR); - return PVRSRV_OK; + return _UnrefAndMaybeDestroy(psPMR, 1); } #endif @@ -1165,7 +2174,7 @@ _PMRAcquireKernelMappingData(PMR *psPMR, /* Zero size means map in the whole PMR ... */ *puiLengthOut = (size_t)psPMR->uiLogicalSize; } - else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee)) + else if (uiSize > IMG_PAGE2BYTES64(psPMR->uiLog2ContiguityGuarantee)) { /* ... map in the requested pages ... */ *puiLengthOut = uiSize; @@ -1173,7 +2182,7 @@ _PMRAcquireKernelMappingData(PMR *psPMR, else { /* ... 
otherwise we just map in one page */ - *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee; + *puiLengthOut = IMG_PAGE2BYTES64(psPMR->uiLog2ContiguityGuarantee); } *phPrivOut = hPriv; @@ -1192,13 +2201,46 @@ PMRAcquireKernelMappingData(PMR *psPMR, size_t *puiLengthOut, IMG_HANDLE *phPrivOut) { - return _PMRAcquireKernelMappingData(psPMR, - uiLogicalOffset, - uiSize, - ppvKernelAddressOut, - puiLengthOut, - phPrivOut, - IMG_FALSE); + PVRSRV_ERROR eError; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRKernelCpuMapCountIncr(psPMR); + + if (PVRSRV_CHECK_OS_LINUX_MOVABLE(PMR_Flags(psPMR))) + { + do + { + eError = _PMRAcquireKernelMappingData(psPMR, + uiLogicalOffset, + uiSize, + ppvKernelAddressOut, + puiLengthOut, + phPrivOut, + IMG_FALSE); + } + while (eError == PVRSRV_ERROR_RETRY); + PVR_LOG_GOTO_IF_ERROR(eError, "_PMRAcquireKernelMappingData", error_fail_decr); + } + else +#endif + { + eError = _PMRAcquireKernelMappingData(psPMR, + uiLogicalOffset, + uiSize, + ppvKernelAddressOut, + puiLengthOut, + phPrivOut, + IMG_FALSE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PMRAcquireKernelMappingData", error_fail_decr); + } + + return eError; + +error_fail_decr: +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRKernelCpuMapCountDecr(psPMR); +#endif + return eError; } PVRSRV_ERROR @@ -1209,13 +2251,28 @@ PMRAcquireSparseKernelMappingData(PMR *psPMR, size_t *puiLengthOut, IMG_HANDLE *phPrivOut) { - return _PMRAcquireKernelMappingData(psPMR, + PVRSRV_ERROR eError; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRKernelCpuMapCountIncr(psPMR); +#endif + + eError = _PMRAcquireKernelMappingData(psPMR, uiLogicalOffset, uiSize, ppvKernelAddressOut, puiLengthOut, phPrivOut, IMG_TRUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PMRAcquireKernelMappingData", error_fail_decr); + + return eError; + +error_fail_decr: +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRKernelCpuMapCountDecr(psPMR); +#endif + return eError; } PVRSRV_ERROR @@ -1225,9 +2282,14 @@ PMRReleaseKernelMappingData(PMR *psPMR, PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL); PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, hPriv); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRKernelCpuMapCountDecr(psPMR); +#endif + return PVRSRV_OK; } @@ -1245,7 +2307,7 @@ PMRReleaseKernelMappingData(PMR *psPMR, Log2PageSize else argument is redundant (set to zero). 
*/ -static void +static PVRSRV_ERROR _PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, IMG_UINT32 ui32Log2PageSize, IMG_UINT32 ui32NumOfPages, @@ -1262,12 +2324,12 @@ _PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, IMG_UINT32 idx; /* Must be translating at least a page */ - PVR_ASSERT(ui32NumOfPages); + PVR_ASSERT(ui32NumOfPages > 0); - if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks) + if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumLogicalChunks) { /* Fast path the common case, as logical and physical offsets are - equal we assume the ui32NumOfPages span is also valid */ + * equal we assume the ui32NumOfPages span is also valid */ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset); puiPhysicalOffset[0] = uiOffset; bValid[0] = IMG_TRUE; @@ -1276,7 +2338,7 @@ _PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, { /* initial offset may not be page aligned, round down */ uiOffset &= ~(uiPageSize-1); - for (idx=1; idx < ui32NumOfPages; idx++) + for (idx = 1; idx < ui32NumOfPages; idx++) { uiOffset += uiPageSize; puiPhysicalOffset[idx] = uiOffset; @@ -1286,46 +2348,50 @@ _PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, } else { - for (idx=0; idx < ui32NumOfPages; idx++) + for (idx = 0; idx < ui32NumOfPages; idx++) { + IMG_UINT32 uiTranslation; + const IMG_UINT32 uiChunkSize = psMappingTable->uiChunkSize; + ui64ChunkIndex = OSDivide64r64( uiOffset, - TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize), + TRUNCATE_64BITS_TO_32BITS(uiChunkSize), &ui32Remain); - if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID) + /* In some cases ui32NumOfPages can come from the user space which + * means that the uiOffset could go out-of-bounds when the number + * of pages is invalid. 
*/ + if (ui64ChunkIndex >= psMappingTable->ui32NumLogicalChunks) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + + uiTranslation = psMappingTable->aui32Translation[ui64ChunkIndex]; + + if (uiTranslation == TRANSLATION_INVALID) { bValid[idx] = IMG_FALSE; + /* explicitly set to an invalid value */ + puiPhysicalOffset[idx] = IMG_UINT64_C(0xffffffffffffffff); } else { bValid[idx] = IMG_TRUE; + puiPhysicalOffset[idx] = uiTranslation * uiChunkSize + ui32Remain; } if (idx == 0) { - if (ui32Remain == 0) - { - /* Start of chunk so return the chunk size */ - *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize); - } - else - { - *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain); - } - - puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain; - + *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(uiChunkSize - ui32Remain); /* initial offset may not be page aligned, round down */ uiOffset &= ~(uiPageSize-1); } - else - { - puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain; - } + uiOffset += uiPageSize; } } + + return PVRSRV_OK; } static PVRSRV_ERROR @@ -1380,7 +2446,9 @@ _PMR_ReadBytesPhysical(PMR *psPMR, else { OSPanic(); +#ifndef __CHECKER__ /* exclude for Smatch */ PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); +#endif } return PVRSRV_OK; @@ -1403,6 +2471,12 @@ PMR_ReadBytes(PMR *psPMR, IMG_DEVMEM_OFFSET_T uiPhysicalOffset; size_t uiBytesCopied = 0; + /* Check for integer overflow as uiLogicalOffset might come from the client */ + if (uiLogicalOffset + uiBufSz < uiLogicalOffset) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) { uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); @@ -1424,13 +2498,15 @@ PMR_ReadBytes(PMR *psPMR, size_t uiRead; IMG_BOOL bValid; - _PMRLogicalOffsetToPhysicalOffset(psPMR, - 0, - 1, - uiLogicalOffset, - &uiPhysicalOffset, - &ui32Remain, - &bValid); + eError = _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + PVR_LOG_RETURN_IF_ERROR(eError, "_PMRLogicalOffsetToPhysicalOffset"); + /* Copy till either then end of the chunk or end * of the buffer */ @@ -1531,7 +2607,9 @@ _PMR_WriteBytesPhysical(PMR *psPMR, * debug tools */ OSPanic(); +#ifndef __CHECKER__ /* exclude for Smatch */ PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); +#endif } return PVRSRV_OK; @@ -1554,6 +2632,12 @@ PMR_WriteBytes(PMR *psPMR, IMG_DEVMEM_OFFSET_T uiPhysicalOffset; size_t uiBytesCopied = 0; + /* Check for integer overflow as uiLogicalOffset might come from the client */ + if (uiLogicalOffset + uiBufSz < uiLogicalOffset) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) { uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); @@ -1575,13 +2659,14 @@ PMR_WriteBytes(PMR *psPMR, size_t uiWrite; IMG_BOOL bValid; - _PMRLogicalOffsetToPhysicalOffset(psPMR, - 0, - 1, - uiLogicalOffset, - &uiPhysicalOffset, - &ui32Remain, - &bValid); + eError = _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + PVR_LOG_RETURN_IF_ERROR(eError, "_PMRLogicalOffsetToPhysicalOffset"); /* Copy till either then end of the chunk or end of the buffer */ @@ -1628,6 +2713,8 @@ PMRMMapPMR(PMR 
*psPMR, PMR_MMAP_DATA pOSMMapData, PVRSRV_MEMALLOCFLAGS_T uiFlags !PVRSRV_CHECK_CPU_WRITEABLE(uiFlags), PVRSRV_ERROR_PMR_NOT_PERMITTED); + PVR_LOG_RETURN_IF_TRUE(PMR_PhysicalSize(psPMR) == 0, "PVRSRV_ERROR_BAD_MAPPING can not map PMR of 0 physical size", PVRSRV_ERROR_BAD_MAPPING); + if (psPMR->psFuncTab->pfnMMap) { return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData); @@ -1636,30 +2723,237 @@ PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData, PVRSRV_MEMALLOCFLAGS_T uiFlags return OSMMapPMRGeneric(psPMR, pOSMMapData); } -void +PVRSRV_ERROR PMRRefPMR(PMR *psPMR) { PVR_ASSERT(psPMR != NULL); - _Ref(psPMR); + return _Ref(psPMR, 1); } PVRSRV_ERROR PMRUnrefPMR(PMR *psPMR) { - _UnrefAndMaybeDestroy(psPMR); - return PVRSRV_OK; + return _UnrefAndMaybeDestroy(psPMR, 1); +} + +PVRSRV_ERROR +PMRRefPMRN(PMR *psPMR, IMG_UINT32 uiRefCount) +{ + PVR_ASSERT(psPMR != NULL); + return _Ref(psPMR, uiRefCount); } PVRSRV_ERROR -PMRUnrefUnlockPMR(PMR *psPMR) +PMRUnrefPMRN(PMR *psPMR, IMG_UINT32 uiRefCount) +{ + PVR_ASSERT(psPMR != NULL); + return _UnrefAndMaybeDestroy(psPMR, uiRefCount); +} + +#define PMR_MAPCOUNT_MIN 0 +#define PMR_MAPCOUNT_MAX IMG_INT32_MAX + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +PVRSRV_ERROR PMRTryRefPMR(PMR *psPMR) { - PMRUnlockSysPhysAddresses(psPMR); + OS_SPINLOCK_FLAGS uiFlags; + + PVR_ASSERT(psPMR != NULL); + + OSSpinLockAcquire(psPMR->hRefCountLock, uiFlags); + + if (psPMR->uiRefCount == 0 || psPMR->uiRefCount == IMG_UINT32_MAX) + { + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); + + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + psPMR->uiRefCount++; - PMRUnrefPMR(psPMR); + OSSpinLockRelease(psPMR->hRefCountLock, uiFlags); return PVRSRV_OK; } +void +PMRKernelCpuMapCountIncr(PMR *psPMR) +{ + if (OSAtomicAddUnless(&psPMR->iKernelCpuMapCount, 1, PMR_MAPCOUNT_MAX) == PMR_MAPCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s: iKernelCpuMapCount for PMR: @0x%p (%s) has overflowed.", + __func__, + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + } +} + +void +PMRKernelCpuMapCountDecr(PMR *psPMR) +{ + if (OSAtomicSubtractUnless(&psPMR->iKernelCpuMapCount, 1, PMR_MAPCOUNT_MIN) == PMR_MAPCOUNT_MIN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: iKernelCpuMapCount (now %d) for PMR: @0x%p (%s) has underflowed.", + __func__, + (IMG_INT32) OSAtomicRead(&psPMR->iKernelCpuMapCount), + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + } +} + +IMG_BOOL +PMR_IsKernelCpuMapped(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return (OSAtomicRead(&psPMR->iKernelCpuMapCount) > 0); +} + +#endif /* #if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) */ + +void +PMRClientCpuMapCountIncr(PMR *psPMR) +{ + if (OSAtomicAddUnless(&psPMR->iClientCpuMapCount, 1, PMR_MAPCOUNT_MAX) == PMR_MAPCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s: iClientCpuMapCount for PMR: @0x%p (%s) has overflowed.", + __func__, + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + } +} + +void +PMRClientCpuMapCountDecr(PMR *psPMR) +{ + if (OSAtomicSubtractUnless(&psPMR->iClientCpuMapCount, 1, PMR_MAPCOUNT_MIN) == PMR_MAPCOUNT_MIN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: iClientCpuMapCount (now %d) for PMR: @0x%p (%s) has underflowed.", + __func__, + (IMG_INT32) OSAtomicRead(&psPMR->iClientCpuMapCount), + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + } +} + +IMG_BOOL +PMR_IsClientCpuMapped(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return (OSAtomicRead(&psPMR->iClientCpuMapCount) > 0); +} + +static void +PMRGpuResCountIncr(PMR *psPMR) +{ + if (psPMR->iAssociatedResCount == PMR_MAPCOUNT_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 
iAssociatedResCount for PMR: @0x%p (%s) has overflowed.", + __func__, + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + return; + } + + psPMR->iAssociatedResCount++; +} + +static void +PMRGpuResCountDecr(PMR *psPMR) +{ + if (psPMR->iAssociatedResCount == PMR_MAPCOUNT_MIN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: iAssociatedResCount (now %d) for PMR: @0x%p (%s) has underflowed.", + __func__, + psPMR->iAssociatedResCount, + psPMR, + psPMR->szAnnotation)); + OSWarnOn(1); + return; + } + + psPMR->iAssociatedResCount--; +} + +IMG_BOOL +PMR_IsGpuMultiMapped(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->iAssociatedResCount > 1; +} + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +void +PMRLinkGPUMapping(PMR *psPMR, DLLIST_NODE *psMappingNode) +#else +void +PMRLinkGPUMapping(PMR *psPMR) +#endif +{ + PMRLockHeldAssert(psPMR); + + PMRGpuResCountIncr(psPMR); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + dllist_add_to_head(&psPMR->sGpuMappingListHead, psMappingNode); +#endif +} + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +void +PMRUnlinkGPUMapping(PMR *psPMR, DLLIST_NODE *psMappingNode) +#else +void +PMRUnlinkGPUMapping(PMR *psPMR) +#endif + +{ + PMRLockHeldAssert(psPMR); + + PMRGpuResCountDecr(psPMR); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + dllist_remove_node(psMappingNode); +#endif +} + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +void +PMRNotifyMigrateInProgress(PMR *psPMR) +{ + PMRLockHeldAssert(psPMR); + + psPMR->eState = PMR_STATE_PAGES_IN_MIGRATE; +} + +void +PMRNotifyMigrateComplete(PMR *psPMR) +{ + PMRLockHeldAssert(psPMR); + + psPMR->eState = PMR_STATE_ACTIVE; +} + +PVRSRV_ERROR +PMRRemapGPUPMR(PMR *psPMR, IMG_UINT32 ui32LogicalPgOffset) +{ + if (psPMR->eState != PMR_STATE_PAGES_IN_MIGRATE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Remap requested on PMR not in migrate state.", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + return DevmemIntRemapPageInPMR(psPMR, &psPMR->sGpuMappingListHead, ui32LogicalPgOffset); +} +#endif + PVRSRV_DEVICE_NODE * PMR_DeviceNode(const PMR *psPMR) { @@ -1684,6 +2978,16 @@ PMR_IsSparse(const PMR *psPMR) return _PMRIsSparse(psPMR); } +#if defined(SUPPORT_PMR_DEFERRED_FREE) +IMG_BOOL +PMR_IsZombie(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return _IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_IS_ZOMBIE); +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + /* Function that alters the mutability property * of the PMR * Setting it to TRUE makes sure the PMR memory layout @@ -1693,54 +2997,43 @@ PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag) { PVR_ASSERT(psPMR != NULL); - psPMR->bNoLayoutChange = bFlag; + if (bFlag) + { + _IntFlagSet(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE); + } + else + { + _IntFlagClr(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE); + } } IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR) { - PVR_ASSERT(psPMR != NULL); - - return psPMR->bNoLayoutChange; -} -void -PMR_LogicalSize(const PMR *psPMR, - IMG_DEVMEM_SIZE_T *puiLogicalSize) -{ PVR_ASSERT(psPMR != NULL); - *puiLogicalSize = psPMR->uiLogicalSize; + return _IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE); } -PVRSRV_ERROR -PMR_PhysicalSize(const PMR *psPMR, - IMG_DEVMEM_SIZE_T *puiPhysicalSize) +IMG_DEVMEM_SIZE_T +PMR_PhysicalSize(const PMR *psPMR) { PVR_ASSERT(psPMR != NULL); /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */ if (OSAtomicRead(&psPMR->iLockCount) > 0) { - if (psPMR->bSparseAlloc) + if (_PMRIsSparse(psPMR)) { - *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks; + return 
psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks; } else { - *puiPhysicalSize = psPMR->uiLogicalSize; + return psPMR->uiLogicalSize; } } - else - { - *puiPhysicalSize = 0; - } - return PVRSRV_OK; -} -PHYS_HEAP * -PMR_PhysHeap(const PMR *psPMR) -{ - return psPMR->psPhysHeap; + return 0; } PVRSRV_ERROR @@ -1768,13 +3061,14 @@ PMR_IsOffsetValid(const PMR *psPMR, PVR_GOTO_IF_NOMEM(pui32BytesRemain, eError, e0); } - _PMRLogicalOffsetToPhysicalOffset(psPMR, - ui32Log2PageSize, - ui32NumOfPages, - uiLogicalOffset, - puiPhysicalOffset, - pui32BytesRemain, - pbValid); + eError = _PMRLogicalOffsetToPhysicalOffset(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + uiLogicalOffset, + puiPhysicalOffset, + pui32BytesRemain, + pbValid); + PVR_LOG_IF_ERROR(eError, "_PMRLogicalOffsetToPhysicalOffset"); e0: if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL) @@ -1790,6 +3084,12 @@ PMR_IsOffsetValid(const PMR *psPMR, return eError; } +PHYS_HEAP * +PMR_PhysHeap(const PMR *psPMR) +{ + return psPMR->psPhysHeap; +} + PMR_MAPPING_TABLE * PMR_GetMappingTable(const PMR *psPMR) { @@ -1805,10 +3105,19 @@ PMR_GetLog2Contiguity(const PMR *psPMR) return psPMR->uiLog2ContiguityGuarantee; } -IMG_UINT32 PMRGetMaxChunkCount(PMR *psPMR) +IMG_DEVMEM_SIZE_T +PMR_LogicalSize(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->uiLogicalSize; +} + +IMG_UINT32 PMR_GetLogicalChunkCount(const PMR *psPMR) { PVR_ASSERT(psPMR != NULL); - return (PMR_MAX_SUPPORTED_SIZE >> psPMR->uiLog2ContiguityGuarantee); + + return psPMR->psMappingTable->ui32NumLogicalChunks; } const IMG_CHAR * @@ -1818,19 +3127,110 @@ PMR_GetAnnotation(const PMR *psPMR) return psPMR->szAnnotation; } -PMR_IMPL_TYPE -PMR_GetType(const PMR *psPMR) +PMR_IMPL_TYPE +PMR_GetType(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->eFlavour; +} + +IMG_CHAR * +PMR_GetTypeStr(const PMR *psPMR) +{ + static IMG_CHAR *pszFlavour[] = { +#define X(type) #type + PMR_IMPL_TYPES +#undef X + }; + + if (psPMR->eFlavour >= PMR_TYPE_LAST) + { + return "INVALID"; + } + + return pszFlavour[psPMR->eFlavour]; +} + +IMG_INT32 +PMR_GetRefCount(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return _GetRef(psPMR); +} + +#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) +PVRSRV_ERROR +PMRGetIPAPolicy(PMR *psPMR, + IMG_UINT8 *pui8IPAPolicy) +{ + IMG_UINT32 ui32FlagsIPAPolicy; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pui8IPAPolicy != NULL, "pui8IPAPolicy"); + ui32FlagsIPAPolicy = PVRSRV_MEMALLOCFLAG_IPA_POLICY(psPMR->uiFlags); + + *pui8IPAPolicy = (IMG_UINT8)ui32FlagsIPAPolicy; + return PVRSRV_OK; +} +#endif /* defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) */ + +#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) +PVRSRV_ERROR +PMRGetIPAInfo(PMR *psPMR, IMG_UINT32 *pui32IPAPolicy, IMG_UINT32 *pui32IPAShift, + IMG_UINT32 *pui32IPAMask, IMG_UINT32 *pui32IPAFlagsValue) { - PVR_ASSERT(psPMR != NULL); - return psPMR->eFlavour; + IMG_UINT32 ui32IPAFlagsPolicyValue; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAPolicy != NULL, "pui32IPAPolicy"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAShift != NULL, "pui32IPAShift"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAMask != NULL, "pui32IPAMask"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAFlagsValue != NULL, "pui32IPAFlagsValue"); + + /* Get the underlying heap-provided default IPA settings (if any) */ + *pui32IPAShift = PhysHeapGetIPAShift(psPMR->psPhysHeap); + *pui32IPAPolicy = PhysHeapGetIPAValue(psPMR->psPhysHeap); + *pui32IPAMask = PhysHeapGetIPAMask(psPMR->psPhysHeap); + + 
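 /* At this point the three outputs hold the phys-heap's default IPA
  * (Intermediate Physical Address) settings; the policy encoded in this
  * PMR's own allocation flags is read below and returned separately
  * through pui32IPAFlagsValue. */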
PVR_DPF((PVR_DBG_MESSAGE, + "%s: Shift, Policy, Mask for Heap %p = %d, %d, %d", __func__, + psPMR->psPhysHeap, *pui32IPAShift, *pui32IPAPolicy, *pui32IPAMask)); + + /* Query the current PMR flags settings for current IPA policy */ + ui32IPAFlagsPolicyValue = PVRSRV_MEMALLOCFLAG_IPA_POLICY(psPMR->uiFlags); + *pui32IPAFlagsValue = ui32IPAFlagsPolicyValue; + return PVRSRV_OK; } +#endif /* PVRSRV_INTERNAL_IPA_FEATURE_TESTING */ -IMG_INT32 -PMR_GetRefCount(const PMR *psPMR) +#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) +PVRSRV_ERROR +PMRModifyIPAPolicy(PMR *psPMR, + IMG_UINT8 ui8NewIPAPolicy) { - PVR_ASSERT(psPMR != NULL); - return OSAtomicRead(&psPMR->iRefCount); + IMG_UINT64 ui64IPAFlagsPolicyValue; + IMG_UINT32 ui32IPAPolicyMaskValue; + + ui32IPAPolicyMaskValue = (IMG_UINT32)(PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK >> PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET); + + /* ui8NewIPAPolicy must be between 0 .. uiMaskValue inclusive */ + PVR_LOG_RETURN_IF_INVALID_PARAM(ui8NewIPAPolicy <= ui32IPAPolicyMaskValue, + "ui8NewIPAPolicy"); + + /* Set the appropriate policy bits in the PMR */ + ui64IPAFlagsPolicyValue = ((IMG_UINT64)(ui8NewIPAPolicy) << PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET) & + PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK; + + psPMR->uiFlags &= ~PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK; + psPMR->uiFlags |= ui64IPAFlagsPolicyValue; + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: ui32IPAPolicy Mask = 0x%x, Value = 0x%x", + __func__, ui32IPAPolicyMaskValue, (IMG_UINT32)ui8NewIPAPolicy)); + PVR_DPF((PVR_DBG_MESSAGE, + "%s: uiFlags = 0x%016" IMG_UINT64_FMTSPECx ", ui64IPAFlags = 0x%016" IMG_UINT64_FMTSPECx, __func__, psPMR->uiFlags, ui64IPAFlagsPolicyValue)); + + return PVRSRV_OK; } +#endif /* defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) */ /* must have called PMRLockSysPhysAddresses() before calling this! */ PVRSRV_ERROR @@ -1839,20 +3239,31 @@ PMR_DevPhysAddr(const PMR *psPMR, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T uiLogicalOffset, IMG_DEV_PHYADDR *psDevAddrPtr, - IMG_BOOL *pbValid) + IMG_BOOL *pbValid, + PMR_PHYSADDRMODE_TYPE uiPMRUsage) { IMG_UINT32 ui32Remain; PVRSRV_ERROR eError = PVRSRV_OK; IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; +#if defined(SUPPORT_STATIC_IPA) + IMG_UINT32 ui32FlagsIPAPolicy; /* Local value for the IPA policy */ + IMG_UINT32 ui32IPAHeapShift; /* Phys-heap bit-shift value */ + IMG_UINT32 ui32IPAHeapPolicyValue; /* Phys-heap default policy value */ + IMG_UINT32 ui32IPAHeapClearMask; /* Phys-heap ClearMask bitmask */ + IMG_UINT64 ui64IPAPolicy; /* IPAPolicy value to be applied to physical address(es) */ + IMG_UINT64 ui64IPAClearMask; /* IPAClearMask to be applied to physical address(es) */ +#endif PVR_ASSERT(psPMR != NULL); PVR_ASSERT(ui32NumOfPages > 0); PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL); -#ifdef PVRSRV_NEED_PVR_ASSERT - PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 
0 : 1)); -#endif + if (OSAtomicRead(&psPMR->iLockCount) <= 0) + { + PVR_DPF((PVR_DBG_ERROR,"Attempt to obtain DevPhysAddr of non-backed PMR")); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) { @@ -1860,22 +3271,139 @@ PMR_DevPhysAddr(const PMR *psPMR, PVR_RETURN_IF_NOMEM(puiPhysicalOffset); } - _PMRLogicalOffsetToPhysicalOffset(psPMR, - ui32Log2PageSize, - ui32NumOfPages, - uiLogicalOffset, - puiPhysicalOffset, - &ui32Remain, - pbValid); + eError = _PMRLogicalOffsetToPhysicalOffset(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + uiLogicalOffset, + puiPhysicalOffset, + &ui32Remain, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "_PMRLogicalOffsetToPhysicalOffset", FreeOffsetArray); + +#if defined(SUPPORT_STATIC_IPA) + /* Need to determine the values to pass into the pfnDevPhysAddr + * for Intermediate Physical Address settings associated with + * this PMR. + * If the ui32FlagsIPAPolicy value is non-zero, the value will be used in + * preference to the default value specified in the physheap config. + * Whichever value is used the associated physheap configuration bit shift and + * mask values are passed to the pfnDevPhysAddr PMR factory function to modify + * the returned address(es). + */ + + PVR_ASSERT(psPMR->psPhysHeap != NULL); + + ui32IPAHeapShift = PhysHeapGetIPAShift(psPMR->psPhysHeap); + ui32IPAHeapPolicyValue = PhysHeapGetIPAValue(psPMR->psPhysHeap); + ui32IPAHeapClearMask = PhysHeapGetIPAMask(psPMR->psPhysHeap); +#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) + ui32FlagsIPAPolicy = PVRSRV_MEMALLOCFLAG_IPA_POLICY(psPMR->uiFlags); + if (ui32FlagsIPAPolicy == 0U) + { + ui32FlagsIPAPolicy = ui32IPAHeapPolicyValue; + } +#else + ui32FlagsIPAPolicy = ui32IPAHeapPolicyValue; /* Use heap default values*/ +#endif /* PVRSRV_INTERNAL_IPA_FEATURE_TESTING */ + /* To handle the 'disabling' of IPAPolicy setting for some callers we + * check to see if the uiPMRUsage is set to DEVICE_USE. + * If so, we simply use the calculated shifts and policy values determined + * above. If disabled BIT_ISSET(uiPMRUsage, CPU_USE) we pass 0 values to the PMR + * factory which will result in no IPA modification being made to the + * phys_heap physical addresses. + */ + if (unlikely(BITMASK_HAS(uiPMRUsage, CPU_USE))) + { + ui32IPAHeapClearMask = 0U; + ui32FlagsIPAPolicy = 0U; + } + ui64IPAPolicy = (IMG_UINT64)ui32FlagsIPAPolicy << ui32IPAHeapShift; + ui64IPAClearMask = (IMG_UINT64)ui32IPAHeapClearMask << ui32IPAHeapShift; +#endif /* SUPPORT_STATIC_IPA */ + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /* PMRs marked with migrate ability need to wait for migrate to complete + * whilst in the CPU mapping path. PMR lock must be held in this path to + * synchronise obtaining phys addrs. 
+ */ + if (BITMASK_HAS(uiPMRUsage, MAPPING_USE) && BITMASK_HAS(uiPMRUsage, CPU_USE)) + { + PMRLockHeldAssert(psPMR); + + /* We must wait and retry until we are allowed to obtain phys addrs, no timeout + * because we need this to complete and migrate is guaranteed to finish eventually + */ + while (1) + { + if (psPMR->eState == PMR_STATE_ACTIVE) + { + break; + } + /* Allow another thread to take the lock, this should allow migration to continue + * and complete before we take physical addresses for use + */ + PMRUnlockPMR(psPMR); + OSReleaseThreadQuanta(); + PMRLockPMR(psPMR); + } + + eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, + ui32Log2PageSize, + ui32NumOfPages, + puiPhysicalOffset, +#if defined(SUPPORT_STATIC_IPA) + ui64IPAPolicy, + ui64IPAClearMask, +#endif + pbValid, + psDevAddrPtr); + } + /* PMRs marked with migrate ability may need to retry an attempt at Device mappings + * for a migrate to complete whilst in the mapping path. This is because we hold higher + * synchronisation methods in device mapping paths that also need to release for the + * migrate to complete. + */ + else if (BITMASK_HAS(uiPMRUsage, MAPPING_USE) && BITMASK_HAS(uiPMRUsage, DEVICE_USE)) + { + PMRLockHeldAssert(psPMR); + + /* If PMR is in migrate state then ask the requester to retry */ + if (psPMR->eState != PMR_STATE_ACTIVE) + { + eError = PVRSRV_ERROR_RETRY; + goto FreeOffsetArray; + } - /* Sparse PMR may not always have the first page valid */ - eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, - ui32Log2PageSize, - ui32NumOfPages, - puiPhysicalOffset, - pbValid, - psDevAddrPtr); - PVR_GOTO_IF_ERROR(eError, FreeOffsetArray); + eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, + ui32Log2PageSize, + ui32NumOfPages, + puiPhysicalOffset, +#if defined(SUPPORT_STATIC_IPA) + ui64IPAPolicy, + ui64IPAClearMask, +#endif + pbValid, + psDevAddrPtr); + } + else + /* All other paths that are not used in GPU mapping will never return + * PVRSRV_ERROR_RETRY. 
+ */ +#endif + { + /* Sparse PMR may not always have the first page valid */ + eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, + ui32Log2PageSize, + ui32NumOfPages, + puiPhysicalOffset, +#if defined(SUPPORT_STATIC_IPA) + ui64IPAPolicy, + ui64IPAClearMask, +#endif + pbValid, + psDevAddrPtr); + PVR_GOTO_IF_ERROR(eError, FreeOffsetArray); + } #if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) /* Currently excluded from the default build because of performance @@ -1919,7 +3447,8 @@ PMR_CpuPhysAddr(const PMR *psPMR, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T uiLogicalOffset, IMG_CPU_PHYADDR *psCpuAddrPtr, - IMG_BOOL *pbValid) + IMG_BOOL *pbValid, + PMR_PHYSADDRMODE_TYPE uiPMRUsage) { IMG_UINT32 idx; PVRSRV_ERROR eError; @@ -1933,7 +3462,7 @@ PMR_CpuPhysAddr(const PMR *psPMR, } eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages, - uiLogicalOffset, psDevPAddr, pbValid); + uiLogicalOffset, psDevPAddr, pbValid, uiPMRUsage); PVR_GOTO_IF_ERROR(eError, e1); if (_PMRIsSparse(psPMR)) @@ -1980,11 +3509,40 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, { PVRSRV_ERROR eError; - if (IMG_TRUE == psPMR->bNoLayoutChange) + PMRLockPMR(psPMR); + + eError = PMR_ChangeSparseMemUnlocked(psPMR, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + uiSparseFlags); + + PMRUnlockPMR(psPMR); + + return eError; +} + +PVRSRV_ERROR PMR_ChangeSparseMemUnlocked(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiSparseFlags) +{ + PVRSRV_ERROR eError; +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_IMPL_ZOMBIEPAGES pvZombiePages = NULL; + PMR_ZOMBIE_PAGES* psPMRZombiePages = NULL; +#endif + + if (PMR_IsMemLayoutFixed(psPMR) || PMR_IsClientCpuMapped(psPMR)) { PVR_DPF((PVR_DBG_ERROR, - "%s: This PMR layout cannot be changed", - __func__)); + "%s: This PMR layout cannot be changed - PMR_IsMemLayoutFixed()=%c, _PMR_IsClientCpuMapped()=%c", + __func__, + PMR_IsMemLayoutFixed(psPMR) ? 'Y' : 'n', + PMR_IsClientCpuMapped(psPMR) ? 'Y' : 'n')); return PVRSRV_ERROR_PMR_NOT_PERMITTED; } @@ -1996,12 +3554,24 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, return PVRSRV_ERROR_NOT_IMPLEMENTED; } +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (uiSparseFlags & SPARSE_RESIZE_FREE) + { + /* Speculatively preallocate in order to simplify error handling later */ + psPMRZombiePages = OSAllocZMem(sizeof(PMR_ZOMBIE_PAGES)); + PVR_GOTO_IF_NOMEM(psPMRZombiePages, eError, e0); + } +#endif + eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData, psPMR, ui32AllocPageCount, pai32AllocIndices, ui32FreePageCount, pai32FreeIndices, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + &pvZombiePages, +#endif uiSparseFlags); if (eError != PVRSRV_OK) { @@ -2014,8 +3584,51 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, OSGetCurrentClientProcessIDKM()); } #endif - goto e0; + goto e1; + } + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (pvZombiePages != NULL) + { + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_DEVICE_NODE *psDevNode; + + psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); + eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState); + if (eError != PVRSRV_OK) + { + /* Treat unknown power state as ON. 
*/ + ePowerState = PVRSRV_DEV_POWER_STATE_ON; + } + + if (ePowerState == PVRSRV_DEV_POWER_STATE_OFF || + psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR) + { + /* Free preallocated psPMRZombiePages as these won't be used*/ + OSFreeMem(psPMRZombiePages); + + eError = psPMR->psFuncTab->pfnFreeZombiePages(pvZombiePages); + PVR_LOG_GOTO_IF_ERROR(eError, "Error when trying to free zombies immediately.", e0); + } + else + { + PVR_ASSERT(psPMRZombiePages != NULL); + psPMRZombiePages->sHeader.eZombieType = PMR_ZOMBIE_TYPE_PAGES; + psPMRZombiePages->pfnFactoryFreeZombies = psPMR->psFuncTab->pfnFreeZombiePages; + psPMRZombiePages->pvFactoryPages = pvZombiePages; + + _ZombieListLock(psDevNode); + dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMRZombiePages->sHeader.sZombieNode); + psDevNode->uiPMRZombieCount++; + _ZombieListUnlock(psDevNode); + } + } + else + { + /* Free psPMRZombiePages as change sparse has not produced zombie pages */ + OSFreeMem(psPMRZombiePages); } +#endif #if defined(PDUMP) { @@ -2033,7 +3646,7 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, } PDumpPMRChangeSparsePMR(psPMR, - 1 << psPMR->uiLog2ContiguityGuarantee, + IMG_PAGE2BYTES32(psPMR->uiLog2ContiguityGuarantee), ui32AllocPageCount, pai32AllocIndices, ui32FreePageCount, @@ -2045,49 +3658,18 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, #endif -e0: - return eError; -} - - -PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, - IMG_UINT64 sCpuVAddrBase, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices) -{ - PVRSRV_ERROR eError; - - if ((NULL == psPMR->psFuncTab) || - (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap)) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: This type of sparse PMR cannot be changed.", - __func__)); - return PVRSRV_ERROR_NOT_IMPLEMENTED; - } - - if (IMG_TRUE == psPMR->bNoLayoutChange) + return PVRSRV_OK; +e1: +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (uiSparseFlags & SPARSE_RESIZE_FREE) { - PVR_DPF((PVR_DBG_ERROR, - "%s: This PMR layout cannot be changed", - __func__)); - return PVRSRV_ERROR_PMR_NOT_PERMITTED; + OSFreeMem(psPMRZombiePages); } - - eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData, - psPMR, - sCpuVAddrBase, - ui32AllocPageCount, - pai32AllocIndices, - ui32FreePageCount, - pai32FreeIndices); - +e0: +#endif return eError; } - #if defined(PDUMP) static PVRSRV_ERROR @@ -2113,8 +3695,7 @@ _PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR, } else #endif - if (DevmemCPUCacheCoherency(psDevNode, psPMR->uiFlags) || - DevmemDeviceCacheCoherency(psDevNode, psPMR->uiFlags)) + if (DevmemDeviceCacheCoherency(psDevNode, psPMR->uiFlags)) { OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, @@ -2141,7 +3722,7 @@ _PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR, } - *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1); + *puiNewOffset = uiPhysicalOffset & (IMG_PAGE2BYTES64(PMR_GetLog2Contiguity(psPMR))-1); *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1) << PMR_GetLog2Contiguity(psPMR)); @@ -2163,6 +3744,7 @@ PMR_PDumpSymbolicAddr(const PMR *psPMR, IMG_DEVMEM_OFFSET_T uiPhysicalOffset; IMG_UINT32 ui32Remain; IMG_BOOL bValid; + PVRSRV_ERROR eError; PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize); @@ -2174,13 +3756,14 @@ PMR_PDumpSymbolicAddr(const PMR *psPMR, return PVRSRV_OK; } - _PMRLogicalOffsetToPhysicalOffset(psPMR, - 0, - 1, - uiLogicalOffset, - &uiPhysicalOffset, - &ui32Remain, - &bValid); + eError = 
_PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + PVR_LOG_RETURN_IF_ERROR(eError, "_PMRLogicalOffsetToPhysicalOffset"); if (!bValid) { @@ -2386,7 +3969,7 @@ PMRPDumpLoadMemValue64(PMR *psPMR, IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; IMG_DEVMEM_OFFSET_T uiNextSymName; - IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + PMR_SIZE_T ui64PMRPageSize = IMG_PAGE2BYTES64(psPMR->uiLog2ContiguityGuarantee); /* Confirm that the device node's ui32InternalID matches the bound * PDump device stored in PVRSRV_DATA. @@ -2398,8 +3981,8 @@ PMRPDumpLoadMemValue64(PMR *psPMR, PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize); /* Especially make sure to not cross a block boundary */ - PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value)) - <= uiPMRPageSize)); + PVR_ASSERT(( ((uiLogicalOffset & (ui64PMRPageSize-1)) + sizeof(ui64Value)) + <= ui64PMRPageSize)); eError = PMRLockSysPhysAddresses(psPMR); PVR_ASSERT(eError == PVRSRV_OK); @@ -2456,18 +4039,18 @@ PMRPDumpCopyMem64(PMR *psDstPMR, IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; IMG_DEVMEM_OFFSET_T uiNextSymName; - const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; - const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; + const PMR_SIZE_T ui64DstPMRPageSize = IMG_PAGE2BYTES64(psDstPMR->uiLog2ContiguityGuarantee); + const PMR_SIZE_T ui64SrcPMRPageSize = IMG_PAGE2BYTES64(psSrcPMR->uiLog2ContiguityGuarantee); PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); /* Especially make sure to not cross a block boundary */ - PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) - <= uiSrcPMRPageSize)); + PVR_ASSERT(( ((uiSrcLogicalOffset & (ui64SrcPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= ui64SrcPMRPageSize)); PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); /* Especially make sure to not cross a block boundary */ - PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) - <= uiDstPMRPageSize)); + PVR_ASSERT(( ((uiDstLogicalOffset & (ui64DstPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= ui64DstPMRPageSize)); eError = PMRLockSysPhysAddresses(psSrcPMR); PVR_ASSERT(eError == PVRSRV_OK); @@ -2560,7 +4143,7 @@ PMRPDumpLoadMem(PMR *psPMR, #define PMR_MAX_PDUMP_BUFSZ (1<<21) IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME]; IMG_UINT8 *pcBuffer = NULL; - size_t uiBufSz; + size_t ui64BufSz; IMG_BOOL bValid; IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize; PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); @@ -2573,6 +4156,9 @@ PMRPDumpLoadMem(PMR *psPMR, return PVRSRV_OK; } + /* Forcibly initialise the name to a 'NULL' 0-length string */ + aszParamStreamFilename[0] = '\0'; + PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); /* Check if pdump client is connected */ @@ -2594,16 +4180,16 @@ PMRPDumpLoadMem(PMR *psPMR, /* get the zero page information. 
it is constant for this function */ PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset, - &uiBufSz, + &ui64BufSz, &pszParamStreamFileName); } else { - uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR); - PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ); + ui64BufSz = IMG_PAGE2BYTES64(PMR_GetLog2Contiguity(psPMR)); + PVR_ASSERT(IMG_PAGE2BYTES64(PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ); - pcBuffer = OSAllocMem(uiBufSz); + pcBuffer = OSAllocMem(ui64BufSz); PVR_LOG_RETURN_IF_NOMEM(pcBuffer, "OSAllocMem"); @@ -2627,7 +4213,7 @@ PMRPDumpLoadMem(PMR *psPMR, &uiOutOffset, &uiNextSymName); PVR_ASSERT(eError == PVRSRV_OK); - PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz); + PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= ui64BufSz); PMR_IsOffsetValid(psPMR, 0, @@ -2640,6 +4226,7 @@ PMRPDumpLoadMem(PMR *psPMR, if (bValid) { size_t uiNumBytes; + IMG_BOOL bOk2Write = IMG_TRUE; if (bZero) { @@ -2666,29 +4253,35 @@ PMRPDumpLoadMem(PMR *psPMR, &aszParamStreamFilename[0], sizeof(aszParamStreamFilename), &uiParamStreamFileOffset); + if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED) { /* Write to parameter file prevented under the flags and * current state of the driver so skip further writes. */ eError = PVRSRV_OK; + bOk2Write = IMG_FALSE; /* Do *NOT* write anything */ } else if (eError != PVRSRV_OK) { PDUMP_ERROR(psDevNode, eError, "Failed to write PMR memory to parameter file"); + bOk2Write = IMG_FALSE; /* Do *NOT* write anything */ } } - /* Emit the LDB command to the current symbolic address */ - eError = PDumpPMRLDB(psDevNode, - aszMemspaceName, - aszSymbolicName, - uiOutOffset, - uiNumBytes, - pszParamStreamFileName, - uiParamStreamFileOffset, - uiPDumpFlags); + if (bOk2Write) + { + /* Emit the LDB command to the current symbolic address */ + eError = PDumpPMRLDB(psDevNode, + aszMemspaceName, + aszSymbolicName, + uiOutOffset, + uiNumBytes, + pszParamStreamFileName, + uiParamStreamFileOffset, + uiPDumpFlags); + } uiSizeRemain = uiSizeRemain - uiNumBytes; } uiCurrentOffset = uiNextSymName; @@ -3019,17 +4612,20 @@ PDumpPMRFreePMR(PMR *psPMR, /* (IMG_HANDLE*) <- (IMG_HANDLE) */ IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle; + PDUMP_LOCK(PDUMP_FLAGS_NONE); + for (i = 0; i < psPMR->uiNumPDumpBlocks; i++) { if (ahPDumpAllocHandleArray[i] != NULL) { - eError = PDumpFree(PMR_DeviceNode(psPMR), - ahPDumpAllocHandleArray[i]); + eError = PDumpFreeUnlocked(PMR_DeviceNode(psPMR), + ahPDumpAllocHandleArray[i]); PVR_ASSERT(eError == PVRSRV_OK); ahPDumpAllocHandleArray[i] = NULL; } } + PDUMP_UNLOCK(PDUMP_FLAGS_NONE); OSFreeMem(ahPDumpAllocHandleArray); } @@ -3038,7 +4634,7 @@ PDumpPMRMallocPMR(PMR *psPMR, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32ChunkSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *puiMappingTable, IMG_UINT32 uiLog2Contiguity, IMG_BOOL bInitialise, @@ -3077,6 +4673,7 @@ PDumpPMRMallocPMR(PMR *psPMR, phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE)); + PDUMP_LOCK(ui32PDumpFlags); for (i = 0; i < uiNumPhysBlocks; i++) { @@ -3092,20 +4689,22 @@ PDumpPMRMallocPMR(PMR *psPMR, &uiNextSymName); PVR_ASSERT(eError == PVRSRV_OK); - eError = PDumpMalloc(PMR_DeviceNode(psPMR), - aszMemspaceName, - aszSymbolicName, - ui32ChunkSize, - ui32ChunkSize, - bInitialise, - ui8InitValue, - &phPDumpAllocInfo[uiIndex], - ui32PDumpFlags); + eError = PDumpMallocUnlocked(PMR_DeviceNode(psPMR), + aszMemspaceName, + aszSymbolicName, + ui32ChunkSize, + 
ui32ChunkSize, + bInitialise, + ui8InitValue, + &phPDumpAllocInfo[uiIndex], + ui32PDumpFlags); PVR_LOG_RETURN_VOID_IF_FALSE((eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE), "PDumpPMRMalloc PDump capture bound to other device"); PVR_ASSERT(eError == PVRSRV_OK); } + PDUMP_UNLOCK(ui32PDumpFlags); + /* (IMG_HANDLE) <- (IMG_HANDLE*) */ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; @@ -3147,7 +4746,7 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ IMG_DEVMEM_OFFSET_T uiNextSymName; #endif #if !defined(NO_HARDWARE) - IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee; + PMR_SIZE_T ui64PageListPageSize = IMG_PAGE2BYTES64(psPageListPMR->uiLog2ContiguityGuarantee); IMG_UINT64 uiPageListPMRPage = 0; IMG_UINT64 uiPrevPageListPMRPage = 0; IMG_HANDLE hPrivData = NULL; @@ -3158,6 +4757,10 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ IMG_DEV_PHYADDR *pasDevAddrPtr; IMG_BOOL *pbPageIsValid; #endif + PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPageListPMR->psPhysHeap); + IMG_BOOL bCPUCacheSnoop = + (PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && + psDevNode->pfnGetDeviceSnoopMode(psDevNode) == PVRSRV_DEVICE_SNOOP_CPU_ONLY); uiWordSize = PMR_PM_WORD_SIZE; @@ -3179,6 +4782,8 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); } + /* Check for integer overflow */ + PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength > uiTableOffset, eError, return_error); /* Check we're not being asked to write off the end of the PMR */ PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength <= psPageListPMR->uiLogicalSize, eError, return_error); @@ -3194,6 +4799,21 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error); } + /* the PMR into which we are writing must not be user CPU cacheable: */ + if (!bCPUCacheSnoop && + (PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) || + PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || + PVRSRV_CHECK_CPU_CACHED(uiFlags))) + { + PVR_DPF((PVR_DBG_ERROR, + "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, + (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))); + PVR_DPF((PVR_DBG_ERROR, + "Page list PMR allows CPU caching (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")", + uiFlags)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error); + } + if (_PMRIsSparse(psPageListPMR)) { PVR_LOG_GOTO_WITH_ERROR("psPageListPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); @@ -3221,13 +4841,7 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ PVR_LOG_GOTO_IF_NOMEM(pasDevAddrPtr, eError, unlock_phys_addrs); pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL)); - if (pbPageIsValid == NULL) - { - /* Clean-up before exit */ - OSFreeMem(pasDevAddrPtr); - - PVR_LOG_GOTO_WITH_ERROR("pbPageIsValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_devaddr_array); - } + PVR_LOG_GOTO_IF_NOMEM(pbPageIsValid, eError, free_devaddr_array); } else { @@ -3236,7 +4850,7 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ } eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0, - pasDevAddrPtr, pbPageIsValid); + pasDevAddrPtr, pbPageIsValid, DEVICE_USE); PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", free_valid_array); #endif @@ -3256,7 +4870,7 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ PVR_ASSERT(eError == PVRSRV_OK); eError = PMR_PDumpSymbolicAddr(psReferencePMR, - 
(IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, + IMG_PAGES2BYTES64(uiPageIndex, uiLog2PageSize), sizeof(aszPageMemspaceName), &aszPageMemspaceName[0], sizeof(aszPageSymbolicName), @@ -3297,13 +4911,13 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ */ PVR_ASSERT(pbPageIsValid[uiPageIndex]); PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0); - PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); + PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000LL) == 0); uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee; if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage)) { - size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1)); + size_t uiMappingOffset = uiPMROffset & (~(ui64PageListPageSize - 1)); size_t uiMappedSize; /* If we already had a page list mapped, we need to unmap it... */ @@ -3314,7 +4928,7 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ eError = PMRAcquireKernelMappingData(psPageListPMR, uiMappingOffset, - uiPageListPageSize, + ui64PageListPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); @@ -3326,13 +4940,13 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ } uiPrevPageListPMRPage = uiPageListPMRPage; - PVR_ASSERT(uiMappedSize >= uiPageListPageSize); + PVR_ASSERT(uiMappedSize >= ui64PageListPageSize); PVR_ASSERT(pvKernAddr != NULL); - pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (uiPageListPageSize - 1))); + pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (ui64PageListPageSize - 1))); } - PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); + PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000LL) == 0); /* Write the physical page index into the page list PMR */ *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize); @@ -3345,12 +4959,7 @@ PMRWritePMPageList(/* Target PMR, offset, and length */ #endif } - /* if this memory is allocated as write-combine we must flush write - * buffers */ - if (PVRSRV_CHECK_CPU_WRITE_COMBINE(psPageListPMR->uiFlags)) - { - OSWriteMemoryBarrier(NULL); - } + OSWriteMemoryBarrier(NULL); #if !defined(NO_HARDWARE) if (pasDevAddrPtr != asDevPAddr) @@ -3408,7 +5017,7 @@ PMRZeroingPMR(PMR *psPMR, { IMG_UINT32 uiNumPages; IMG_UINT32 uiPageIndex; - IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize; + PMR_SIZE_T ui64PageSize = IMG_PAGE2BYTES64(uiLog2PageSize); IMG_HANDLE hPrivData = NULL; void *pvKernAddr = NULL; PVRSRV_ERROR eError = PVRSRV_OK; @@ -3423,9 +5032,9 @@ PMRZeroingPMR(PMR *psPMR, if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) { PVR_DPF((PVR_DBG_ERROR, - "%s: PMR is not a multiple of %u", + "%s: PMR is not a multiple of %" IMG_UINT64_FMTSPEC, __func__, - ui32PageSize)); + ui64PageSize)); PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); } @@ -3439,20 +5048,20 @@ PMRZeroingPMR(PMR *psPMR, { /* map the physical page (for a given PMR offset) into kernel space */ eError = PMRAcquireKernelMappingData(psPMR, - (size_t)uiPageIndex << uiLog2PageSize, - ui32PageSize, + IMG_PAGES2BYTES64(uiPageIndex, uiLog2PageSize), + ui64PageSize, &pvKernAddr, &uiMappedSize, &hPrivData); PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", AcquireKernelMapping_Error); /* ensure the mapped page size is the same as the physical page size */ - if (uiMappedSize != ui32PageSize) + if (uiMappedSize != 
ui64PageSize) { PVR_DPF((PVR_DBG_ERROR, - "%s: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx, + "%s: Physical Page size = 0x%08" IMG_UINT64_FMTSPECx ", Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx, __func__, - ui32PageSize, + ui64PageSize, (IMG_UINT64)uiMappedSize)); PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, MappingSize_Error); } @@ -3460,7 +5069,7 @@ PMRZeroingPMR(PMR *psPMR, /* Use the conservative 'DeviceMemSet' here because we can't know * if this PMR will be mapped cached. */ - OSDeviceMemSet(pvKernAddr, 0, ui32PageSize); + OSDeviceMemSet(pvKernAddr, 0, ui64PageSize); /* release mapping */ PMRReleaseKernelMappingData(psPMR, hPrivData); @@ -3468,11 +5077,11 @@ PMRZeroingPMR(PMR *psPMR, } PVR_DPF((PVR_DBG_MESSAGE, - "%s: Zeroing PMR %p done (num pages %u, page size %u)", + "%s: Zeroing PMR %p done (num pages %u, page size %" IMG_UINT64_FMTSPEC ")", __func__, psPMR, uiNumPages, - ui32PageSize)); + ui64PageSize)); return PVRSRV_OK; @@ -3529,9 +5138,10 @@ PMRDumpPageList(PMR *psPMR, eError = PMR_DevPhysAddr(psPMR, uiLog2PageSize, 1, - (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, + IMG_PAGES2BYTES64(uiPageIndex, uiLog2PageSize), &sDevAddrPtr, - &bPageIsValid); + &bPageIsValid, + DEVICE_USE); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p failed to get DevPhysAddr with error %u", @@ -3565,6 +5175,8 @@ PMRDumpPageList(PMR *psPMR, return eError; } +DEFINE_PMR_ENV_GET_DATA(psPMR, sEnvData) + PVRSRV_ERROR PMRInit(void) { @@ -3585,15 +5197,21 @@ PMRInit(void) _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE; - _gsSingletonPMRContext.uiNumLivePMRs = 0; + OSAtomicWrite(&_gsSingletonPMRContext.uiNumLivePMRs, 0); #if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) eError = MMapStatsInit(); - PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", out); + PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", destroy_context_lock); #endif + return PVRSRV_OK; + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +destroy_context_lock: + OSLockDestroy(_gsSingletonPMRContext.hLock); + _gsSingletonPMRContext.hLock = NULL; +#endif out: - PVR_ASSERT(eError == PVRSRV_OK); return eError; } @@ -3618,17 +5236,20 @@ PMRDeInit(void) MMapStatsDeInit(); #endif - if (_gsSingletonPMRContext.uiNumLivePMRs != 0) + if (OSAtomicRead(&_gsSingletonPMRContext.uiNumLivePMRs) != 0) { PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain", __func__, - _gsSingletonPMRContext.uiNumLivePMRs)); + OSAtomicRead(&_gsSingletonPMRContext.uiNumLivePMRs))); PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable", __func__)); PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); } - OSLockDestroy(_gsSingletonPMRContext.hLock); + if (_gsSingletonPMRContext.hLock != NULL) + { + OSLockDestroy(_gsSingletonPMRContext.hLock); + } _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE; @@ -3636,3 +5257,212 @@ PMRDeInit(void) PVR_ASSERT(eError == PVRSRV_OK); return eError; } + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +PVRSRV_ERROR +PMRInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = OSLockCreate(&psDeviceNode->hPMRZombieListLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + dllist_init(&psDeviceNode->sPMRZombieList); + psDeviceNode->uiPMRZombieCount = 0; + psDeviceNode->uiPMRZombieCountInCleanup = 0; + + return PVRSRV_OK; +} + +void +PMRFreeZombies(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + DECLARE_DLLIST(sZombieList); + DLLIST_NODE *psThis, *psNext; + IMG_INT32 uiZombieCount; + + 
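 /* Each zombie entry is one of up to three kinds, depending on build
  * options: factory pages, a cross-device import, or a whole PMR.
  * Anything that cannot be freed on this pass (a failed page free, or a
  * PMR with outstanding device imports) is re-queued on a device's
  * zombie list to be picked up on a later pass. */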
_ZombieListLock(psDeviceNode); + /* Move the zombie list to a local copy. The original list will become + * an empty list. This will allow us to process the list without holding + * the list lock. */ + dllist_replace_head(&psDeviceNode->sPMRZombieList, &sZombieList); + uiZombieCount = psDeviceNode->uiPMRZombieCount; + psDeviceNode->uiPMRZombieCount = 0; + _ZombieListUnlock(psDeviceNode); + + dllist_foreach_node(&sZombieList, psThis, psNext) + { + dllist_remove_node(psThis); + switch (PMR_GetZombieTypeFromNode(psThis)) + { +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + case PMR_ZOMBIE_TYPE_PAGES: + { + PVRSRV_ERROR eError; + PMR_ZOMBIE_PAGES* psZombiePages = PMR_GetZombiePagesFromNode(psThis); + + eError = psZombiePages->pfnFactoryFreeZombies(psZombiePages->pvFactoryPages); + if (eError != PVRSRV_OK) + { + /* In case of failure to free zombie pages, remove it from + * the sZombieList and add back to the original list. */ + _ZombieListLock(psDeviceNode); + dllist_add_to_tail(&psDeviceNode->sPMRZombieList, psThis); + psDeviceNode->uiPMRZombieCount++; + _ZombieListUnlock(psDeviceNode); + + PVR_DPF((PVR_DBG_ERROR, "Cannot free zombie pages!")); + continue; + } + + OSFreeMem(psZombiePages); + break; + } +#endif + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + case PMR_ZOMBIE_TYPE_DEVICE_IMPORT: + { + PMR_DEVICE_IMPORT *psImport = PMR_GetDeviceImportFromNode(psThis); + _DeviceImportFreeImportZombie(psImport); + break; + } +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + case PMR_ZOMBIE_TYPE_PMR: + { + PMR *psPMR = PMR_GetPMRFromNode(psThis); + const PMR_IMPL_FUNCTAB *psFuncTable = psPMR->psFuncTab; + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + /* The PMR cannot be freed as other devices are + * still waiting for the cache flush. */ + PMRLockPMR(psPMR); + if (_DeviceImportBitmapGet(psPMR) != 0) + { + PDLLIST_NODE psNodeImport; + PMR_DEVICE_IMPORT *psImport; + /* Transfer the ownership to a different + * device queue that has not been processed yet. + * There will be a PMR_DEVICE_IMPORT on the same + * queue, however, this doesn't have any knock on affects as + * it will be freed before the PMR is reached again. 
*/ + psNodeImport = dllist_get_next_node(&psPMR->sXDeviceImports); + PVR_ASSERT(psNodeImport); + psImport = IMG_CONTAINER_OF(psNodeImport, PMR_DEVICE_IMPORT, sNext); + _ZombieListLock(psImport->psDevNode); + dllist_add_to_tail(&psImport->psDevNode->sPMRZombieList, psThis); + psImport->psDevNode->uiPMRZombieCount++; + _ZombieListUnlock(psImport->psDevNode); + PMRUnlockPMR(psPMR); + break; + } + PMRUnlockPMR(psPMR); +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + _FactoryLock(psFuncTable); + _PMRDestroy(psPMR); + _FactoryUnlock(psFuncTable); + break; + } + } + uiZombieCount--; + } + + PVR_ASSERT(uiZombieCount == 0); +} + +void +PMRDumpZombies(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + DLLIST_NODE *psThis, *psNext; + + _ZombieListLock(psDeviceNode); + + PVR_DPF((PVR_DBG_ERROR, "Items in zombie list: %u", + psDeviceNode->uiPMRZombieCount)); + + dllist_foreach_node(&psDeviceNode->sPMRZombieList, psThis, psNext) + { + switch (PMR_GetZombieTypeFromNode(psThis)) + { +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + case PMR_ZOMBIE_TYPE_PAGES: + { + PMR_ZOMBIE_PAGES* psZombiePages = PMR_GetZombiePagesFromNode(psThis); + PVR_DPF((PVR_DBG_ERROR, "Zombie Pages = %p", psZombiePages)); + break; + } +#endif + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + case PMR_ZOMBIE_TYPE_DEVICE_IMPORT: + { + PMR_DEVICE_IMPORT* psImport = PMR_GetDeviceImportFromNode(psThis); + PVR_DPF((PVR_DBG_ERROR, "Device Import = %p, DevID = %u, PMR = %px (%s)", + psImport, + psImport->psDevNode->sDevId.ui32InternalID, + psImport->psParent, + PMR_GetAnnotation(psImport->psParent))); + break; + } +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + + case PMR_ZOMBIE_TYPE_PMR: + { + PMR *psPMR = PMR_GetPMRFromNode(psThis); + PVR_DPF((PVR_DBG_ERROR, "PMR = %px, Flavour = %s, Annotation: %s", + psPMR, PMR_GetTypeStr(psPMR), PMR_GetAnnotation(psPMR))); + break; + } + } + } + + _ZombieListUnlock(psDeviceNode); +} + +void +PMRDeInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PMRFreeZombies(psDeviceNode); + + OSLockDestroy(psDeviceNode->hPMRZombieListLock); +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + +#if defined(PVRSRV_ENABLE_XD_MEM) +IMG_UINT64 +PMR_ImportedDevicesMask(const PMR* psPMR) +{ + return _DeviceImportBitmapGet(psPMR) & ((IMG_UINT64_C(1) << PVRSRV_MAX_DEVICES) - 1); +} +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) +PVRSRV_ERROR +PMR_RegisterDeviceImport(PMR* psPMR, PPVRSRV_DEVICE_NODE psDevNode) +{ +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PVR_ASSERT(!PMR_IsZombie(psPMR)); +#endif /* !defined(SUPPORT_PMR_DEFERRED_FREE) */ + + if (PMR_DeviceNode(psPMR) != psDevNode) + { +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) + PVRSRV_ERROR eError = _DeviceImportRegister(psPMR, psDevNode); + PVR_LOG_RETURN_IF_ERROR(eError, "_DeviceImportRegister"); +#else + /* `_DeviceImportRegister` already sets the bitmap. + * This is still needs to be set without device import zombie support + * for debugging information, i.e. the RI. */ + _DeviceImportBitmapSet(psPMR, psDevNode); +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */ + } + /* else: We explicitly don't add the PMR's dev node to the list because + * this bitmask lets us know if the PMR is cross device. It's not + * an error to register with the original dev node, as the user is + * declaring "The PMR is using `psDevNode`", not that it's a new + * devnode. 
*/ + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/power.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/power.c index 6d0007d12ad6..43647aa9c78c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/power.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/power.c @@ -40,6 +40,9 @@ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ +#if !defined(__linux__) +#include +#endif /* #if !defined(__linux__) */ #include "pdump_km.h" #include "allocmem.h" @@ -47,10 +50,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "lock.h" #include "pvrsrv.h" +#include "power.h" #include "pvr_debug.h" -#include "htbuffer.h" +#include "htbserver.h" #include "di_server.h" +#define OS_POWERLOCK_TIMEOUT_MS (5000UL) +#define OS_POWERLOCK_TIMEOUT_US (OS_POWERLOCK_TIMEOUT_MS * 1000UL) +#define OS_POWERLOCK_RETRY_COUNT (50) + #if defined(PVRSRV_ENABLE_PROCESS_STATS) typedef struct _EXTRA_POWER_STATS_ { @@ -88,14 +96,19 @@ struct _PVRSRV_POWER_DEV_TAG_ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest; PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest; + PFN_FORCED_IDLE_CANCEL_REQUEST_ASYNC pfnForcedIdleCancelRequestAsync; PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange; IMG_HANDLE hSysData; - IMG_HANDLE hDevCookie; + IMG_HANDLE psDevNode; PVRSRV_DEV_POWER_STATE eDefaultPowerState; ATOMIC_T eCurrentPowerState; #if defined(PVRSRV_ENABLE_PROCESS_STATS) PVRSRV_POWER_STATS sPowerStats; #endif + /* Counts number of device idle references that have been taken, + * must be protected by power lock. 
+ */ + IMG_INT32 iIdleReqInFlight; }; #if defined(PVRSRV_ENABLE_PROCESS_STATS) @@ -286,7 +299,7 @@ int PVRSRVPowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) ui32Idx != psPowerStats->ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS) { - DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n", + DIPrintf(psEntry, "%12" IMG_UINT64_FMTSPEC "\t%11" IMG_UINT64_FMTSPEC "\t%9" IMG_UINT64_FMTSPEC "\n", psPowerStats->asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration, psPowerStats->asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration, psPowerStats->asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration); @@ -363,7 +376,12 @@ const char *PVRSRVDevPowerStateToString(PVRSRV_DEV_POWER_STATE eState) device powerlock after releasing it temporarily for some timeout period in function PVRSRVDeviceIdleRequestKM */ +#if defined(DEBUG) +typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode, + const char *pszFile, const unsigned int ui32LineNum); +#else typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode); +#endif static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void) { @@ -426,43 +444,74 @@ IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode) OSGetCurrentClientProcessIDKM() == psDeviceNode->uiPwrLockOwnerPID; } + +#if defined(DEBUG) +static void _PVRSRVPowerLockOwner(PPVRSRV_DEVICE_NODE psDeviceNode, + const char *pszFile, const unsigned int ui32LineNum) +{ + psDeviceNode->sPowerLockOwner.pszFile = pszFile; + psDeviceNode->sPowerLockOwner.ui32LineNum = ui32LineNum; + psDeviceNode->sPowerLockOwner.ui64Timestamp = OSClockus64(); +} +#endif + +#if defined(DEBUG) +PVRSRV_ERROR PVRSRVPowerLock_Debug(PPVRSRV_DEVICE_NODE psDeviceNode, + const char *pszFile, const unsigned int ui32LineNum) +#else PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) +#endif { OSLockAcquire(psDeviceNode->hPowerLock); + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); +#if defined(DEBUG) + _PVRSRVPowerLockOwner(psDeviceNode, pszFile, ui32LineNum); +#endif - /* Only allow to take powerlock when the system power is on */ - if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) - { - psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); - return PVRSRV_OK; - } - - OSLockRelease(psDeviceNode->hPowerLock); - - return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; + return PVRSRV_OK; } +#if defined(DEBUG) +PVRSRV_ERROR PVRSRVPowerTryLock_Debug(PPVRSRV_DEVICE_NODE psDeviceNode, + const char *pszFile, const unsigned int ui32LineNum) +#else PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode) +#endif { if (!(OSTryLockAcquire(psDeviceNode->hPowerLock))) { return PVRSRV_ERROR_RETRY; } - /* Only allow to take powerlock when the system power is on */ - if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); +#if defined(DEBUG) + _PVRSRVPowerLockOwner(psDeviceNode, pszFile, ui32LineNum); +#endif + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVPowerTryLockWaitForTimeout(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Take power lock, retry if it's in use in another task. 
*/ + LOOP_UNTIL_TIMEOUT_US(OS_POWERLOCK_TIMEOUT_US) { - psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + eError = PVRSRVPowerTryLock(psDeviceNode); + if (eError != PVRSRV_ERROR_RETRY) + { + return eError; + } - /* System is powered ON, return OK */ - return PVRSRV_OK; - } - else + OSSleepms(OS_POWERLOCK_TIMEOUT_MS / OS_POWERLOCK_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if (eError == PVRSRV_ERROR_RETRY) { - /* System is powered OFF, release the lock and return error */ - OSLockRelease(psDeviceNode->hPowerLock); - return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; + return PVRSRV_ERROR_TIMEOUT; } + return eError; } /*! @@ -477,6 +526,20 @@ PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode) PFN_POWER_LOCK_ACQUIRE ******************************************************************************/ +#if defined(DEBUG) +#define _PVRSRVForcedPowerLock(DEV_NODE) \ + _PVRSRVForcedPowerLock_Debug(DEV_NODE, __FILE__, __LINE__) + +static PVRSRV_ERROR _PVRSRVForcedPowerLock_Debug(PPVRSRV_DEVICE_NODE psDeviceNode, + const char *pszFile, const unsigned int ui32LineNum) +{ + OSLockAcquire(psDeviceNode->hPowerLock); + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + _PVRSRVPowerLockOwner(psDeviceNode, pszFile, ui32LineNum); + + return PVRSRV_OK; +} +#else static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) { OSLockAcquire(psDeviceNode->hPowerLock); @@ -484,6 +547,7 @@ static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) return PVRSRV_OK; } +#endif void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode) { @@ -518,12 +582,36 @@ PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_ERROR PVRSRVSetDeviceCurrentPowerState(PVRSRV_POWER_DEV *psPowerDevice, PVRSRV_DEV_POWER_STATE eNewPowerState) { +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_DEV_POWER_STATE eOldPowerState; +#endif + if (psPowerDevice == NULL) { return PVRSRV_ERROR_INVALID_DEVICE; } +#if !defined(SUPPORT_PMR_DEFERRED_FREE) OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState); +#else + eOldPowerState = OSAtomicExchange(&psPowerDevice->eCurrentPowerState, + eNewPowerState); + + psDeviceNode = psPowerDevice->psDevNode; + PVR_ASSERT(psDeviceNode); + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF && + eNewPowerState != eOldPowerState) + { + psDeviceNode->uiPowerOffCounter = psDeviceNode->uiPowerOffCounterNext; + + /* It's not really important to know if any zombies were queued. 
*/ + (void) PMRQueueZombiesForCleanup(psDeviceNode); + + psDeviceNode->uiPowerOffCounterNext++; + } +#endif return PVRSRV_OK; } @@ -545,9 +633,9 @@ static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) && (pfnIsDefaultStateOff == NULL || pfnIsDefaultStateOff(psPowerDev))) { - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie, + eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->psDevNode, bDeviceOffPermitted); if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) { @@ -557,7 +645,11 @@ static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); +#if defined(DEBUG) + eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode, __FILE__, __LINE__); +#else eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode); +#endif if (eErrPwrLockAcq != PVRSRV_OK) { /* We only understand PVRSRV_ERROR_RETRY, so assert on others. @@ -578,7 +670,7 @@ static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, /* idle request successful or some other error occurred, return */ break; } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); } else { @@ -599,7 +691,12 @@ inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, return _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff, bDeviceOffPermitted, - PVRSRVPowerLock); +#if defined(DEBUG) + PVRSRVPowerLock_Debug +#else + PVRSRVPowerLock +#endif + ); } PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) @@ -608,20 +705,78 @@ PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest) { - return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie); + return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->psDevNode); } return PVRSRV_OK; } +PVRSRV_ERROR PVRSRVDeviceIdleLatchedGetKM(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + if ((++psPowerDev->iIdleReqInFlight) > 1) + { + PVRSRVPowerUnlock(psDeviceNode); + return PVRSRV_OK; + } + + eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, + &PVRSRVDeviceIsDefaultStateOFF, + IMG_TRUE, +#if defined(DEBUG) + PVRSRVPowerLock_Debug +#else + PVRSRVPowerLock +#endif + ); + if (eError != PVRSRV_OK) + { + psPowerDev->iIdleReqInFlight--; + } + + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + + return eError; +} + +PVRSRV_ERROR PVRSRVDeviceIdleLatchedPutAsyncKM(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorOut); + + if (psPowerDev && + (--psPowerDev->iIdleReqInFlight) == 0 && + psPowerDev->pfnForcedIdleCancelRequestAsync) + { + eError = psPowerDev->pfnForcedIdleCancelRequestAsync(psPowerDev->psDevNode); + } + + PVRSRVPowerUnlock(psDeviceNode); + +ErrorOut: + return eError; +} + /*! 
****************************************************************************** - @Function PVRSRVDevicePrePowerStateKM + @Function PVRSRVDeviceSystemPrePowerStateKM @Description - Perform device-specific processing required before a power transition + Perform device and system-specific processing required before a power + transition @Input psPowerDevice : Power device @Input eNewPowerState : New power state @@ -631,9 +786,9 @@ PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) ******************************************************************************/ static -PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) +PVRSRV_ERROR PVRSRVDeviceSystemPrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_POWER_FLAGS ePwrFlags) { PVRSRV_DEV_POWER_STATE eCurrentPowerState; IMG_UINT64 ui64SysTimer1 = 0; @@ -651,7 +806,7 @@ PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); /* Call the device's power callback. */ - eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, + eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->psDevNode, eNewPowerState, eCurrentPowerState, ePwrFlags); @@ -699,7 +854,7 @@ PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, if (psPowerDevice->pfnDevicePrePower != NULL) { /* Call the device's power callback. */ - eError2 = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, + eError2 = psPowerDevice->pfnDevicePrePower(psPowerDevice->psDevNode, eCurrentPowerState, eNewPowerState, ePwrFlags); @@ -708,7 +863,7 @@ PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, if (eError2 == PVRSRV_OK && psPowerDevice->pfnDevicePostPower != NULL) { /* Call the device's power callback. */ - eError2 = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, + eError2 = psPowerDevice->pfnDevicePostPower(psPowerDevice->psDevNode, eCurrentPowerState, eNewPowerState, ePwrFlags); @@ -722,11 +877,12 @@ PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, /*! ****************************************************************************** - @Function PVRSRVDevicePostPowerStateKM + @Function PVRSRVDeviceSystemPostPowerStateKM @Description - Perform device-specific processing required after a power transition + Perform device and system-specific processing required after a power + transition @Input psPowerDevice : Power device @Input eNewPowerState : New power state @@ -736,9 +892,9 @@ PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, ******************************************************************************/ static -PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) +PVRSRV_ERROR PVRSRVDeviceSystemPostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_POWER_FLAGS ePwrFlags) { PVRSRV_DEV_POWER_STATE eCurrentPowerState; IMG_UINT64 ui64SysTimer1 = 0; @@ -775,7 +931,7 @@ PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); /* Call the device's power callback. 
*/ - eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, + eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->psDevNode, eNewPowerState, eCurrentPowerState, ePwrFlags); @@ -817,18 +973,20 @@ PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, /* Call power function if the state change or if this is an OS request. */ if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState || - BITMASK_ANY(ePwrFlags, PVRSRV_POWER_FLAGS_SUSPEND_REQ | PVRSRV_POWER_FLAGS_RESUME_REQ)) + BITMASK_ANY(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ | PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ)) { - eError = PVRSRVDevicePrePowerStateKM(psPowerDevice, - eNewPowerState, - ePwrFlags); + eError = PVRSRVDeviceSystemPrePowerStateKM(psPowerDevice, + eNewPowerState, + ePwrFlags); PVR_GOTO_IF_ERROR(eError, ErrorExit); - eError = PVRSRVDevicePostPowerStateKM(psPowerDevice, - eNewPowerState, - ePwrFlags); + eError = PVRSRVDeviceSystemPostPowerStateKM(psPowerDevice, + eNewPowerState, + ePwrFlags); PVR_GOTO_IF_ERROR(eError, ErrorExit); + psDeviceNode->eCurrentSysPowerState = eNewPowerState; + /* Signal Device Watchdog Thread about power mode change. */ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) { @@ -878,13 +1036,14 @@ PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, { PVRSRV_ERROR eError; IMG_UINT uiStage = 0; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); +#endif - PVRSRV_DEV_POWER_STATE eNewDevicePowerState = _IsSystemStatePowered(eNewSysPowerState) - ? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF; - - /* If setting devices to default state, force idle all devices whose default state is off */ - PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff = - (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL; + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_POWER_DEV *psPowerDevice; + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff; /* Require a proper power state */ if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified) @@ -892,8 +1051,23 @@ PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, return PVRSRV_ERROR_INVALID_PARAMS; } + if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE) + { + /* Power device is not initialised. */ + PVR_DPF((PVR_DBG_ERROR, "%s: Device not initialised", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* Prevent simultaneous SetPowerStateKM calls */ _PVRSRVForcedPowerLock(psDeviceNode); + psPowerDevice = psDeviceNode->psPowerDev; + + eNewDevicePowerState = _IsSystemStatePowered(eNewSysPowerState) + ? psPowerDevice->eDefaultPowerState : PVRSRV_DEV_POWER_STATE_OFF; + + /* If setting devices to default state, force idle all devices whose default state is off */ + pfnIsDefaultStateOff = + (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_OFF) ? PVRSRVDeviceIsDefaultStateOFF : NULL; /* No power transition requested, so do nothing */ if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState) @@ -903,10 +1077,19 @@ PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, } /* If the device is already off don't send the idle request. 
*/ - if (psDeviceNode->eCurrentSysPowerState != PVRSRV_SYS_POWER_STATE_OFF) + if ((psDeviceNode->eCurrentSysPowerState != PVRSRV_SYS_POWER_STATE_OFF) && (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + && (eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) +#endif + ) { eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff, - IMG_TRUE, _PVRSRVForcedPowerLock); +#if defined(DEBUG) + IMG_TRUE, _PVRSRVForcedPowerLock_Debug +#else + IMG_TRUE, _PVRSRVForcedPowerLock +#endif + ); if (eError != PVRSRV_OK) { PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM"); @@ -915,12 +1098,42 @@ PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, } } - eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState, - ePwrFlags | PVRSRV_POWER_FLAGS_FORCED); - if (eError != PVRSRV_OK) + /* Call power function if the state change or if this is an OS request. */ + if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewDevicePowerState || + BITMASK_ANY(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ | PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ)) { + eError = PVRSRVDeviceSystemPrePowerStateKM(psPowerDevice, + eNewDevicePowerState, + ePwrFlags); uiStage = 2; - goto ErrorExit; + PVR_GOTO_IF_ERROR(eError, ErrorExit); + + eError = PVRSRVDeviceSystemPostPowerStateKM(psPowerDevice, + eNewDevicePowerState, + ePwrFlags); + PVR_GOTO_IF_ERROR(eError, ErrorExit); + + /* Signal Device Watchdog Thread about power mode change. */ + if (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) + { + psPVRSRVData->ui32DevicesWatchdogPwrTrans++; +#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT) +#endif + { + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + } +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + else if (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* signal watchdog thread and give it a chance to switch to + * longer / infinite wait time */ + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ } psDeviceNode->eCurrentSysPowerState = eNewSysPowerState; @@ -950,15 +1163,7 @@ PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG *psDevConfig, PVRSRV_DEVICE_NODE *psDevNode = psDevConfig->psDevNode; PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; - if (psDevNode != NULL) - { - eCurrentSysPowerState = psDevNode->eCurrentSysPowerState; - } - else - { - /* assume power is off if no device node */ - eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; - } + PVRSRVGetSystemPowerState(psDevNode, &eCurrentSysPowerState); /* no power transition requested, so do nothing */ if (eNewSysPowerState == eCurrentSysPowerState) @@ -1001,11 +1206,12 @@ void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, PFN_SYS_PRE_POWER pfnSystemPrePower, PFN_SYS_POST_POWER pfnSystemPostPower, PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, - PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest) + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST_ASYNC pfnForcedIdleCancelRequestAsync) { if (psPowerDevice != NULL) { - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) || (psDeviceNode->bAutoVzFwIsUp)) { 
psPowerDevice->pfnSystemPrePower = NULL; psPowerDevice->pfnSystemPostPower = NULL; @@ -1020,6 +1226,7 @@ void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, psPowerDevice->pfnDevicePostPower = pfnDevicePostPower; psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest; psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest; + psPowerDevice->pfnForcedIdleCancelRequestAsync = pfnForcedIdleCancelRequestAsync; } } @@ -1032,8 +1239,9 @@ PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST_ASYNC pfnForcedIdleCancelRequestAsync, PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, - IMG_HANDLE hDevCookie, + IMG_HANDLE psDevNode, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_DEV_POWER_STATE eDefaultPowerState) { @@ -1055,15 +1263,17 @@ PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, pfnSystemPrePower, pfnSystemPostPower, pfnForcedIdleRequest, - pfnForcedIdleCancelRequest); + pfnForcedIdleCancelRequest, + pfnForcedIdleCancelRequestAsync); psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange; psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData; - psPowerDevice->hDevCookie = hDevCookie; + psPowerDevice->psDevNode = psDevNode; PVRSRVSetDeviceCurrentPowerState(psPowerDevice, eCurrentPowerState); psPowerDevice->eDefaultPowerState = eDefaultPowerState; + psPowerDevice->iIdleReqInFlight = 0; #if defined(PVRSRV_ENABLE_PROCESS_STATS) OSCachedMemSet(&psPowerDevice->sPowerStats, 0, sizeof(psPowerDevice->sPowerStats)); @@ -1078,6 +1288,8 @@ void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode) { if (psDeviceNode->psPowerDev) { + /* RGX device/firmware should now be powered down */ + PVR_ASSERT(PVRSRVIsDevicePowered(psDeviceNode) == IMG_FALSE); OSFreeMem(psDeviceNode->psPowerDev); psDeviceNode->psPowerDev = NULL; } @@ -1111,6 +1323,42 @@ IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode) return (ePowerState == PVRSRV_DEV_POWER_STATE_ON); } +PVRSRV_ERROR PVRSRVGetSystemPowerState(PVRSRV_DEVICE_NODE *psDeviceNode, + PPVRSRV_SYS_POWER_STATE peCurrentSysPowerState) +{ + if (psDeviceNode != NULL) + { + if (psDeviceNode->psDevConfig->pfnGpuDomainPower) + { + *peCurrentSysPowerState = psDeviceNode->psDevConfig->pfnGpuDomainPower(psDeviceNode); + } + else + { + *peCurrentSysPowerState = psDeviceNode->eCurrentSysPowerState; + } + } + else + { + /* assume power is off if no device node */ + *peCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + + return PVRSRV_OK; +} + +IMG_BOOL PVRSRVIsSystemPowered(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_SYS_POWER_STATE eSysPowerState; + + if (PVRSRVGetSystemPowerState(psDeviceNode, &eSysPowerState) != PVRSRV_OK) + { + return IMG_FALSE; + } + + return (eSysPowerState == PVRSRV_SYS_POWER_STATE_ON); +} + PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, IMG_BOOL bIdleDevice, @@ -1157,7 +1405,7 @@ PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, } } - eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, + eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->psDevNode, eCurrentPowerState); ui64StopTimer = 
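A reduced model of the PVRSRVGetSystemPowerState / PVRSRVIsSystemPowered pattern added above: prefer a platform "GPU domain power" callback when one is registered, otherwise fall back to the cached state, and treat a missing device node as powered off. All names below are placeholders, not driver symbols.

#include <stdbool.h>
#include <stddef.h>

typedef enum { SYS_POWER_OFF, SYS_POWER_ON } sys_power_t;

typedef struct node {
    sys_power_t cached_state;
    /* Optional platform hook that reads the real power-domain state. */
    sys_power_t (*gpu_domain_power)(const struct node *node);
} node_t;

static int get_sys_power_state(const node_t *node, sys_power_t *out)
{
    if (node == NULL) {
        *out = SYS_POWER_OFF;    /* assume off when there is no device */
        return -1;
    }
    *out = node->gpu_domain_power ? node->gpu_domain_power(node)
                                  : node->cached_state;
    return 0;
}

static bool is_sys_powered(const node_t *node)
{
    sys_power_t state;
    return get_sys_power_state(node, &state) == 0 && state == SYS_POWER_ON;
}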
PVRSRVProcessStatsGetTimeUs(); @@ -1188,7 +1436,7 @@ PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); - eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, + eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->psDevNode, eCurrentPowerState); if (eError != PVRSRV_OK) { @@ -1245,7 +1493,7 @@ PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, if (psPowerDevice->pfnGPUUnitsPowerChange != NULL) { - PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->hDevCookie, ui32NewValue); + PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->psDevNode, ui32NewValue); if (eError2 != PVRSRV_OK) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/process_stats.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/process_stats.c index 4aeb68055e59..77441c578089 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/process_stats.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/process_stats.c @@ -48,7 +48,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "lock.h" #include "allocmem.h" #include "osfunc.h" -#include "lists.h" #include "process_stats.h" #include "ri_server.h" #include "hash.h" @@ -57,10 +56,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "proc_stats.h" #include "pvr_ricommon.h" #include "di_server.h" +#include "dllist.h" #if defined(__linux__) #include "trace_events.h" #endif +#if defined(SUPPORT_LINUX_FDINFO) && defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "dkp_impl.h" + +#define PROC_STATS_FDINFO_ENABLED +#endif + /* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */ #if defined(__linux__) && ( \ defined(PVRSRV_ENABLE_PERPID_STATS) || \ @@ -81,14 +87,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define MAX_DEAD_LIST_PROCESSES (10) +/* + * Maximum size of a process name to be displayed in process_stats. + */ +#define MAX_PROC_NAME_LEN (16) + /* * Definition of all the strings used to format process based statistics. */ #if defined(PVRSRV_ENABLE_PERPID_STATS) /* Array of Process stat type defined using the X-Macro */ -#define X(stat_type, stat_str) stat_str, +#define X(stat_type, stat_str, drm_str) stat_str, static const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY }; +#undef X +#define X(stat_type, stat_str) stat_str, static const IMG_CHAR *const pszDeviceStatType[PVRSRV_DEVICE_STAT_TYPE_COUNT] = { PVRSRV_DEVICE_STAT_KEY }; #undef X #endif @@ -122,6 +135,7 @@ int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); #define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui64StatValue[idx] #define GET_GPUMEM_GLOBAL_STAT_VALUE() \ + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL) + \ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + \ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA) + \ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + \ @@ -152,6 +166,17 @@ int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); #define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1)) #define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? 
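The stat key list above now carries a third column (an fdinfo key name) and the string tables are produced by redefining X between expansions of the same list. The self-contained example below shows only that X-macro mechanism; the entries are made up.

#include <stdio.h>

/* One row per stat: enum name, debugfs label, fdinfo label. */
#define STAT_KEYS \
    X(STAT_KMALLOC, "MemoryUsageKMalloc", "kmalloc") \
    X(STAT_VMALLOC, "MemoryUsageVMalloc", "vmalloc")

/* First expansion: the enum itself. */
#define X(e, s, d) e,
typedef enum { STAT_KEYS STAT_COUNT } stat_t;
#undef X

/* Second expansion: debugfs strings, indexed by the enum. */
#define X(e, s, d) s,
static const char *const stat_str[STAT_COUNT] = { STAT_KEYS };
#undef X

/* Third expansion: fdinfo strings, same order. */
#define X(e, s, d) d,
static const char *const stat_drm_str[STAT_COUNT] = { STAT_KEYS };
#undef X

int main(void)
{
    for (int i = 0; i < STAT_COUNT; i++)
        printf("%-20s -> drm-memory-%s\n", stat_str[i], stat_drm_str[i]);
    return 0;
}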
(MAX_CACHEOP_STAT-1) : (x-1)) +/* + * Track the search of one process when PVRSRV_DEBUG_LINUX_MEMORY_STATS + * is enabled. + */ +typedef enum _PVRSRV_PROC_SEARCH_STATE_ +{ + PVRSRV_PROC_NOTFOUND, + PVRSRV_PROC_FOUND, + PVRSRV_PROC_RESURRECTED, +} PVRSRV_PROC_SEARCH_STATE; + /* * Structures for holding statistics... */ @@ -159,7 +184,6 @@ int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); typedef struct _PVRSRV_MEM_ALLOC_REC_ { PVRSRV_MEM_ALLOC_TYPE eAllocType; - IMG_UINT64 ui64Key; void* pvCpuVAddr; IMG_CPU_PHYADDR sCpuPAddr; size_t uiBytes; @@ -167,16 +191,20 @@ typedef struct _PVRSRV_MEM_ALLOC_REC_ void* pvAllocdFromFile; IMG_UINT32 ui32AllocdFromLine; #endif - struct _PVRSRV_MEM_ALLOC_REC_* psNext; - struct _PVRSRV_MEM_ALLOC_REC_** ppsThis; } PVRSRV_MEM_ALLOC_REC; + +typedef struct PVRSRV_MEM_ALLOC_PRINT_DATA_TAG +{ + OSDI_IMPL_ENTRY *psEntry; + IMG_PID pid; + IMG_UINT32 ui32NumEntries; +} PVRSRV_MEM_ALLOC_PRINT_DATA; #endif typedef struct _PVRSRV_PROCESS_STATS_ { /* Linked list pointers */ - struct _PVRSRV_PROCESS_STATS_* psNext; - struct _PVRSRV_PROCESS_STATS_* psPrev; + DLLIST_NODE sNode; /* Create per process lock that need to be held * to edit of its members */ @@ -184,6 +212,7 @@ typedef struct _PVRSRV_PROCESS_STATS_ { /* OS level process ID */ IMG_PID pid; + IMG_CHAR processName[MAX_PROC_NAME_LEN]; IMG_UINT32 ui32RefCount; /* Process memory stats */ @@ -196,7 +225,6 @@ typedef struct _PVRSRV_PROCESS_STATS_ { #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) IMG_DEV_VIRTADDR sDevVAddr; IMG_DEV_PHYADDR sDevPAddr; - RGXFWIF_DM eFenceOpType; #endif IMG_DEVMEM_SIZE_T uiOffset; IMG_DEVMEM_SIZE_T uiSize; @@ -210,13 +238,17 @@ typedef struct _PVRSRV_PROCESS_STATS_ { /* Other statistics structures */ #if defined(PVRSRV_ENABLE_MEMORY_STATS) - PVRSRV_MEM_ALLOC_REC* psMemoryRecords; + HASH_TABLE* psMemoryRecords; #endif /* Device stats */ IMG_UINT32 ui32DevCount; IMG_INT32 ai32DevStats[][PVRSRV_DEVICE_STAT_TYPE_COUNT]; } PVRSRV_PROCESS_STATS; +#if defined(PROC_STATS_FDINFO_ENABLED) +static PVRDKF_DKP_HANDLE ghDKPHandle; +#endif + #if defined(ENABLE_DEBUGFS_PIDS) typedef struct _PVRSRV_OS_STAT_ENTRY_ @@ -303,7 +335,7 @@ _prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGene IMG_CHAR szStatsHeaderFooterStr[75]; /* Prepare text content of the header in a local string */ - OSStringLCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr)); + OSStringSafeCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr)); OSStringLCat(szStatsHeaderFooterStr, pszGenericHeaderStr, ARRAY_SIZE(szStatsHeaderFooterStr)); /* Write all '-' characters to the header string */ @@ -366,11 +398,6 @@ _prepareStatsPrivateData(void) #endif -#if defined(PVRSRV_ENABLE_MEMORY_STATS) -static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC) -static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC) -#endif - /* * Global Boolean to flag when the statistics are ready to monitor * memory allocations. @@ -381,8 +408,8 @@ static IMG_BOOL bProcessStatsInitialised = IMG_FALSE; * Linked lists for process stats. Live stats are for processes which are still running * and the dead list holds those that have exited. */ -static PVRSRV_PROCESS_STATS *g_psLiveList; -static PVRSRV_PROCESS_STATS *g_psDeadList; +static DLLIST_NODE gsLiveList; +static DLLIST_NODE gsDeadList; static POS_LOCK g_psLinkedListLock; /* Lockdep feature in the kernel cannot differentiate between different instances of same lock type. 
@@ -391,8 +418,6 @@ static POS_LOCK g_psLinkedListLock; * a false warning message about the possible occurrence of deadlock due to recursive lock acquisition. * Hence we create the following sub classes to explicitly appraise Lockdep of such safe lock nesting */ #define PROCESS_LOCK_SUBCLASS_CURRENT 1 -#define PROCESS_LOCK_SUBCLASS_PREV 2 -#define PROCESS_LOCK_SUBCLASS_NEXT 3 #if defined(ENABLE_DEBUGFS_PIDS) /* * Pointer to OS folder to hold PID folders. @@ -426,15 +451,11 @@ static POS_LOCK gpsSizeTrackingHashTableLock; static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid); -static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats); -static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats); -static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats); - static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats); -static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, - PVRSRV_PROCESS_STATS* psProcessStats, - IMG_UINT64 uiBytes); +static void _DecreaseProcStatValueUnlocked(PVRSRV_MEM_ALLOC_TYPE eAllocType, + PVRSRV_PROCESS_STATS* psProcessStats, + IMG_UINT64 uiBytes); /*************************************************************************/ /*! @Function _FindProcessStatsInLiveList @@ -446,18 +467,18 @@ static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, static PVRSRV_PROCESS_STATS* _FindProcessStatsInLiveList(IMG_PID pid) { - PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; + DLLIST_NODE *psNode, *psNext; - while (psProcessStats != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { + PVRSRV_PROCESS_STATS* psProcessStats; + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); + if (psProcessStats->pid == pid) { return psProcessStats; } - - psProcessStats = psProcessStats->psNext; } - return NULL; } /* _FindProcessStatsInLiveList */ @@ -471,18 +492,18 @@ _FindProcessStatsInLiveList(IMG_PID pid) static PVRSRV_PROCESS_STATS* _FindProcessStatsInDeadList(IMG_PID pid) { - PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; + DLLIST_NODE *psNode, *psNext; - while (psProcessStats != NULL) + dllist_foreach_node(&gsDeadList, psNode, psNext) { + PVRSRV_PROCESS_STATS* psProcessStats; + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); + if (psProcessStats->pid == pid) { return psProcessStats; } - - psProcessStats = psProcessStats->psNext; } - return NULL; } /* _FindProcessStatsInDeadList */ @@ -514,9 +535,9 @@ _FindProcessStats(IMG_PID pid) static void _CompressMemoryUsage(void) { - PVRSRV_PROCESS_STATS* psProcessStats; - PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed; - IMG_UINT32 ui32ItemsRemaining; + IMG_INT32 i32ItemsRemaining; + DLLIST_NODE *psNode, *psNext; + DLLIST_NODE sToBeFreedHead; /* * We hold the lock whilst checking the list, but we'll release it @@ -525,35 +546,31 @@ _CompressMemoryUsage(void) OSLockAcquire(g_psLinkedListLock); /* Check that the dead list is not bigger than the max size... */ - psProcessStats = g_psDeadList; - psProcessStatsToBeFreed = NULL; - ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES; + i32ItemsRemaining = MAX_DEAD_LIST_PROCESSES; - while (psProcessStats != NULL && ui32ItemsRemaining > 0) + dllist_init(&sToBeFreedHead); + + dllist_foreach_node(&gsDeadList, psNode, psNext) { - ui32ItemsRemaining--; - if (ui32ItemsRemaining == 0) + i32ItemsRemaining--; + if (i32ItemsRemaining < 0) { /* This is the last allowed process, cut the linked list here! 
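The live/dead process lists are switched above from hand-rolled next/prev pointers to an intrusive DLLIST_NODE plus a foreach/container-of lookup. A standalone approximation of that idiom in plain C follows; list_node, container_of and the helpers are local to this sketch rather than the DDK dllist API.

#include <stddef.h>

typedef struct list_node {
    struct list_node *prev, *next;
} list_node;

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(list_node *head) { head->prev = head->next = head; }

static void list_add_head(list_node *head, list_node *node)
{
    node->next = head->next;
    node->prev = head;
    head->next->prev = node;
    head->next = node;
}

static void list_remove(list_node *node)
{
    node->prev->next = node->next;
    node->next->prev = node->prev;
}

/* Removal-safe iteration, like dllist_foreach_node. */
#define list_foreach_safe(head, pos, tmp) \
    for ((pos) = (head)->next, (tmp) = (pos)->next; \
         (pos) != (head); (pos) = (tmp), (tmp) = (pos)->next)

typedef struct {
    int       pid;
    list_node node;   /* intrusive link embedded in the record */
} proc_stats;

static proc_stats *find_by_pid(list_node *head, int pid)
{
    list_node *pos, *tmp;

    list_foreach_safe(head, pos, tmp) {
        proc_stats *ps = container_of(pos, proc_stats, node);
        if (ps->pid == pid)
            return ps;
    }
    return NULL;
}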
*/ - psProcessStatsToBeFreed = psProcessStats->psNext; - psProcessStats->psNext = NULL; - } - else - { - psProcessStats = psProcessStats->psNext; + dllist_remove_node(psNode); + dllist_add_to_tail(&sToBeFreedHead, psNode); } } OSLockRelease(g_psLinkedListLock); - /* Any processes stats remaining will need to be destroyed... */ - while (psProcessStatsToBeFreed != NULL) + dllist_foreach_node(&sToBeFreedHead, psNode, psNext) { - PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext; - - psProcessStatsToBeFreed->psNext = NULL; + PVRSRV_PROCESS_STATS *psProcessStatsToBeFreed; + psProcessStatsToBeFreed = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RIDeleteEntriesForPID(psProcessStatsToBeFreed->pid); +#endif _DestroyProcessStat(psProcessStatsToBeFreed); - psProcessStatsToBeFreed = psNextProcessStats; } } /* _CompressMemoryUsage */ @@ -567,11 +584,11 @@ static void _MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats) { /* Take the element out of the live list and append to the dead list... */ - _RemoveProcessStatsFromList(psProcessStats); - _AddProcessStatsToFrontOfDeadList(psProcessStats); + PVR_ASSERT(psProcessStats != NULL); + dllist_remove_node(&psProcessStats->sNode); + dllist_add_to_head(&gsDeadList, &psProcessStats->sNode); } /* _MoveProcessToDeadList */ -#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) /* These functions move the process stats from the dead to the live list. * _MoveProcessToLiveList moves the entry in the global lists and * it needs to be protected by g_psLinkedListLock. @@ -582,131 +599,10 @@ static void _MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats) { /* Take the element out of the live list and append to the dead list... */ - _RemoveProcessStatsFromList(psProcessStats); - _AddProcessStatsToFrontOfLiveList(psProcessStats); -} /* _MoveProcessToLiveList */ -#endif - -/*************************************************************************/ /*! -@Function _AddProcessStatsToFrontOfLiveList -@Description Add a statistic to the live list head. -@Input psProcessStats Process stats to add. -*/ /**************************************************************************/ -static void -_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats) -{ - /* This function should always be called under global list lock g_psLinkedListLock. - */ - PVR_ASSERT(psProcessStats != NULL); - - OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - - if (g_psLiveList != NULL) - { - PVR_ASSERT(psProcessStats != g_psLiveList); - OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV); - g_psLiveList->psPrev = psProcessStats; - OSLockRelease(g_psLiveList->hLock); - psProcessStats->psNext = g_psLiveList; - } - - g_psLiveList = psProcessStats; - - OSLockRelease(psProcessStats->hLock); -} /* _AddProcessStatsToFrontOfLiveList */ - -/*************************************************************************/ /*! -@Function _AddProcessStatsToFrontOfDeadList -@Description Add a statistic to the dead list head. -@Input psProcessStats Process stats to add. 
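The reworked _CompressMemoryUsage above caps the dead list at MAX_DEAD_LIST_PROCESSES by unlinking overflow nodes onto a local list while the lock is held and destroying them after it is released. A compact sketch of that pattern, reusing list_node, list_foreach_safe, list_remove, list_add_head and proc_stats from the previous sketch (lock, unlock and destroy are placeholders):

#define MAX_DEAD 10

/* Trim the dead list to MAX_DEAD entries: move anything past the cap onto a
 * local list under the lock, destroy it with no lock held. */
static void compress_dead_list(list_node *dead,
                               void (*lock)(void), void (*unlock)(void),
                               void (*destroy)(proc_stats *))
{
    list_node to_free, *pos, *tmp;
    int remaining = MAX_DEAD;

    list_init(&to_free);

    lock();
    list_foreach_safe(dead, pos, tmp) {
        if (--remaining < 0) {             /* past the cap: unlink it */
            list_remove(pos);
            list_add_head(&to_free, pos);
        }
    }
    unlock();

    list_foreach_safe(&to_free, pos, tmp) {
        list_remove(pos);
        destroy(container_of(pos, proc_stats, node));
    }
}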
-*/ /**************************************************************************/ -static void -_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats) -{ - PVR_ASSERT(psProcessStats != NULL); - OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - - if (g_psDeadList != NULL) - { - PVR_ASSERT(psProcessStats != g_psDeadList); - OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV); - g_psDeadList->psPrev = psProcessStats; - OSLockRelease(g_psDeadList->hLock); - psProcessStats->psNext = g_psDeadList; - } - - g_psDeadList = psProcessStats; - - OSLockRelease(psProcessStats->hLock); -} /* _AddProcessStatsToFrontOfDeadList */ - -/*************************************************************************/ /*! -@Function _RemoveProcessStatsFromList -@Description Detaches a process from either the live or dead list. -@Input psProcessStats Process stats to remove. -*/ /**************************************************************************/ -static void -_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats) -{ PVR_ASSERT(psProcessStats != NULL); - - OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - - /* Remove the item from the linked lists... */ - if (g_psLiveList == psProcessStats) - { - g_psLiveList = psProcessStats->psNext; - - if (g_psLiveList != NULL) - { - PVR_ASSERT(psProcessStats != g_psLiveList); - OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV); - g_psLiveList->psPrev = NULL; - OSLockRelease(g_psLiveList->hLock); - - } - } - else if (g_psDeadList == psProcessStats) - { - g_psDeadList = psProcessStats->psNext; - - if (g_psDeadList != NULL) - { - PVR_ASSERT(psProcessStats != g_psDeadList); - OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV); - g_psDeadList->psPrev = NULL; - OSLockRelease(g_psDeadList->hLock); - } - } - else - { - PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext; - PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev; - - if (psProcessStats->psNext != NULL) - { - PVR_ASSERT(psProcessStats != psNext); - OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT); - psProcessStats->psNext->psPrev = psPrev; - OSLockRelease(psNext->hLock); - } - if (psProcessStats->psPrev != NULL) - { - PVR_ASSERT(psProcessStats != psPrev); - OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV); - psProcessStats->psPrev->psNext = psNext; - OSLockRelease(psPrev->hLock); - } - } - - - /* Reset the pointers in this cell, as it is not attached to anything */ - psProcessStats->psNext = NULL; - psProcessStats->psPrev = NULL; - - OSLockRelease(psProcessStats->hLock); - -} /* _RemoveProcessStatsFromList */ + dllist_remove_node(&psProcessStats->sNode); + dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); +} /* _MoveProcessToLiveList */ static PVRSRV_ERROR _AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid) @@ -728,18 +624,46 @@ _AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid) psProcessStats->pid = ownerPid; psProcessStats->ui32RefCount = 1; psProcessStats->ui32DevCount = ui32DevCount; +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + psProcessStats->psMemoryRecords = HASH_Create(HASH_INITIAL_SIZE); + PVR_GOTO_IF_NOMEM(psProcessStats->psMemoryRecords, eError, free_process_stats); +#endif eError = OSLockCreateNoStats(&psProcessStats->hLock); - PVR_GOTO_IF_ERROR(eError, e0); + PVR_GOTO_IF_ERROR(eError, destroy_mem_recs); *ppsProcessStats = psProcessStats; return PVRSRV_OK; -e0: +destroy_mem_recs: 
+#if defined(PVRSRV_ENABLE_MEMORY_STATS) + HASH_Delete(psProcessStats->psMemoryRecords); +free_process_stats: +#endif OSFreeMemNoStats(psProcessStats); return PVRSRV_ERROR_OUT_OF_MEMORY; } +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +static PVRSRV_ERROR _FreeMemStatsEntry(uintptr_t k, uintptr_t v, void* pvPriv) +{ + PVRSRV_MEM_ALLOC_REC *psRecord = (PVRSRV_MEM_ALLOC_REC *)(uintptr_t)v; + + PVR_UNREFERENCED_PARAMETER(pvPriv); + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + PVR_DPF((PVR_DBG_WARNING, "Mem Stats Record not freed: 0x%" IMG_UINT64_FMTSPECx " %p, size="IMG_SIZE_FMTSPEC", %s:%d", + (IMG_UINT64)(k), psRecord, psRecord->uiBytes, + (IMG_CHAR*)psRecord->pvAllocdFromFile, psRecord->ui32AllocdFromLine)); +#else + PVR_UNREFERENCED_PARAMETER(k); +#endif + OSFreeMemNoStats(psRecord); + + return PVRSRV_OK; +} +#endif + /*************************************************************************/ /*! @Function _DestroyProcessStat @Description Frees memory and resources held by a process statistic. @@ -750,16 +674,15 @@ _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats) { PVR_ASSERT(psProcessStats != NULL); +#if defined(PVRSRV_ENABLE_MEMORY_STATS) OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); /* Free the memory statistics... */ -#if defined(PVRSRV_ENABLE_MEMORY_STATS) - while (psProcessStats->psMemoryRecords) - { - List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords); - } -#endif + HASH_Iterate(psProcessStats->psMemoryRecords, (HASH_pfnCallback)_FreeMemStatsEntry, NULL); + HASH_Delete(psProcessStats->psMemoryRecords); + OSLockRelease(psProcessStats->hLock); +#endif /*Destroy the lock */ OSLockDestroyNoStats(psProcessStats->hLock); @@ -872,6 +795,86 @@ _removeStatisticsEntries(void) } #endif +#if defined(PROC_STATS_FDINFO_ENABLED) + +#define GENERATE_DKP_PRINT(_handle, _key, _value) \ + PVRDKPOutput((_handle), \ + "%s:%-*s%"IMG_UINT64_FMTSPEC"\n", \ + (_key), \ + (int)(50 - sizeof((_key))), \ + "", \ + (IMG_UINT64)(_value)) + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#define GENERATE_PROCESS_STAT_FDINFO(_dkp_handle, _stat_type, _key, _process_stats) \ + do { \ + IMG_UINT32 ui32Value = MAX(0, (_process_stats)->i64StatValue[(_stat_type)]); \ + if (((_stat_type) == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) || \ + ((_stat_type) == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES)) \ + { \ + ui32Value = RITotalAllocProcessUnlocked((_process_stats)->pid, \ + ((_stat_type) == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA); \ + } \ + GENERATE_DKP_PRINT((_dkp_handle), (_key), ui32Value); \ + } while (0) +#else +#define GENERATE_PROCESS_STAT_FDINFO(_dkp_handle, _stat_type, _key, _process_stats) \ + GENERATE_DKP_PRINT((_dkp_handle), (_key), MAX(0, (_process_stats)->i64StatValue[(_stat_type)])); +#endif + +static void _ProcessStatsDKPShow(PVRSRV_DEVICE_NODE *psDevNode, + int pid, + IMG_HANDLE hPrivData) +{ + PVRSRV_PROCESS_STATS *psProcessStats = _FindProcessStatsInLiveList((IMG_PID)pid); + + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(hPrivData); + + if (psProcessStats == NULL) + { + /* Just return if the specified process is no longer in the process + * stats list. 
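GENERATE_DKP_PRINT above pads each fdinfo key so the values line up in a fixed column, using sizeof on the key literal for the pad width. The plain printf illustration below reproduces only that formatting trick; the keys and values are examples.

#include <stdint.h>
#include <stdio.h>

/* Pad the gap after "key:" so every value starts in the same column.  Using
 * sizeof on the key literal (length + NUL) mirrors the macro above, so this
 * only works when the key is a string literal. */
#define PRINT_FDINFO_KEY(key, value) \
    printf("%s:%-*s%llu\n", (key), (int)(40 - sizeof(key)), "", \
           (unsigned long long)(value))

int main(void)
{
    /* Example keys in the drm fdinfo style; the values are made up. */
    PRINT_FDINFO_KEY("drm-memory-kmalloc", (uint64_t)4096);
    PRINT_FDINFO_KEY("drm-total-vmalloc", (uint64_t)1048576);
    return 0;
}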
+ */ + return; + } + + #define X(_stat_type, _stat_name, _dkp_name) \ + GENERATE_PROCESS_STAT_FDINFO(ghDKPHandle, _stat_type, "drm-memory-"_dkp_name, psProcessStats); + PVRSRV_DKP_MEM_STAT_GROUP_MEMORY + #undef X + + #define X(_stat_type, _stat_name, _dkp_name) \ + GENERATE_PROCESS_STAT_FDINFO(ghDKPHandle, _stat_type, "drm-shared-"_dkp_name, psProcessStats); + PVRSRV_DKP_MEM_STAT_GROUP_SHARED + #undef X + + #define X(_stat_type, _stat_name, _dkp_name) \ + GENERATE_PROCESS_STAT_FDINFO(ghDKPHandle, _stat_type, "drm-total-"_dkp_name, psProcessStats); + PVRSRV_DKP_MEM_STAT_GROUP_TOTAL + #undef X + + #define X(_stat_type, _stat_name, _dkp_name) \ + GENERATE_PROCESS_STAT_FDINFO(ghDKPHandle, _stat_type, "drm-resident-"_dkp_name, psProcessStats); + PVRSRV_DKP_MEM_STAT_GROUP_RESIDENT + #undef X + + #define X(_stat_type, _stat_name, _dkp_name) \ + GENERATE_PROCESS_STAT_FDINFO(ghDKPHandle, _stat_type, "drm-purgeable-"_dkp_name, psProcessStats); + PVRSRV_DKP_MEM_STAT_GROUP_PURGEABLE + #undef X + + #define X(_stat_type, _stat_name, _dkp_name) \ + GENERATE_PROCESS_STAT_FDINFO(ghDKPHandle, _stat_type, "drm-active-"_dkp_name, psProcessStats); + PVRSRV_DKP_MEM_STAT_GROUP_ACTIVE + #undef X +} + +#undef GENERATE_PROCESS_STAT_FDINFO +#undef GENERATE_DKP_PRINT + +#endif + /*************************************************************************/ /*! @Function PVRSRVStatsInitialise @Description Entry point for initialising the statistics module. @@ -882,8 +885,6 @@ PVRSRVStatsInitialise(void) { PVRSRV_ERROR error; - PVR_ASSERT(g_psLiveList == NULL); - PVR_ASSERT(g_psDeadList == NULL); PVR_ASSERT(g_psLinkedListLock == NULL); PVR_ASSERT(gpsSizeTrackingHashTable == NULL); PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE); @@ -917,6 +918,9 @@ PVRSRVStatsInitialise(void) gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE); PVR_GOTO_IF_NOMEM(gpsSizeTrackingHashTable, error, destroy_stats_lock_); + dllist_init(&gsLiveList); + dllist_init(&gsDeadList); + bProcessStatsInitialised = IMG_TRUE; #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) /* Register our 'system' PID to hold driver-wide alloc stats */ @@ -943,6 +947,23 @@ PVRSRVStatsInitialise(void) PVR_LOG_IF_ERROR(error, "DICreateEntry (3)"); } +#if defined(PROC_STATS_FDINFO_ENABLED) + /* We might error here, but drm usage keys related to memory are optional. + * We'll notify that the DKP registration has failed but continue as if nothing + * happened. 
*/ + { + error = PVRSRVRegisterDKP(NULL, + "proc-stats", + &_ProcessStatsDKPShow, + DKP_CONNECTION_FLAG_SERVICES, + &ghDKPHandle); + if (error != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Failed to register process stats DKP", __func__)); + } + } +#endif + return PVRSRV_OK; destroy_stats_lock_: @@ -994,30 +1015,24 @@ static PVRSRV_ERROR _DumpAllVMallocEntries (uintptr_t k, uintptr_t v, void* pvPr void PVRSRVStatsDestroy(void) { + DLLIST_NODE *psNode, *psNext; + PVR_ASSERT(bProcessStatsInitialised); -#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) - if (psProcStatsDIEntry != NULL) - { - DIDestroyEntry(psProcStatsDIEntry); - psProcStatsDIEntry = NULL; - } +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Deregister our 'system' PID which holds driver-wide alloc stats */ + PVRSRVStatsDeregisterProcess(g_hDriverProcessStats); #endif - /* Destroy the global data entry */ - if (psGlobalMemDIEntry!=NULL) +#if defined(PROC_STATS_FDINFO_ENABLED) + if (ghDKPHandle != NULL) { - DIDestroyEntry(psGlobalMemDIEntry); - psGlobalMemDIEntry = NULL; + PVRSRV_ERROR eError = PVRSRVUnRegisterDKP(NULL, ghDKPHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to unregister process stats DKP", __func__)); + } } - -#if defined(ENABLE_DEBUGFS_PIDS) - _removeStatisticsEntries(); -#endif - -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) - /* Deregister our 'system' PID which holds driver-wide alloc stats */ - PVRSRVStatsDeregisterProcess(g_hDriverProcessStats); #endif /* Stop monitoring memory allocations... */ @@ -1035,17 +1050,17 @@ PVRSRVStatsDestroy(void) } /* Free the live and dead lists... */ - while (g_psLiveList != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { - PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; - _RemoveProcessStatsFromList(psProcessStats); + PVRSRV_PROCESS_STATS* psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); + dllist_remove_node(&psProcessStats->sNode); _DestroyProcessStat(psProcessStats); } - while (g_psDeadList != NULL) + dllist_foreach_node(&gsDeadList, psNode, psNext) { - PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; - _RemoveProcessStatsFromList(psProcessStats); + PVRSRV_PROCESS_STATS* psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); + dllist_remove_node(&psProcessStats->sNode); _DestroyProcessStat(psProcessStats); } @@ -1077,6 +1092,29 @@ PVRSRVStatsDestroy(void) } +void +PVRSRVStatsDestroyDI(void) +{ +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) + if (psProcStatsDIEntry != NULL) + { + DIDestroyEntry(psProcStatsDIEntry); + psProcStatsDIEntry = NULL; + } +#endif + + /* Destroy the global data entry */ + if (psGlobalMemDIEntry != NULL) + { + DIDestroyEntry(psGlobalMemDIEntry); + psGlobalMemDIEntry = NULL; + } + +#if defined(ENABLE_DEBUGFS_PIDS) + _removeStatisticsEntries(); +#endif +} + static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, size_t uiBytes) { @@ -1120,10 +1158,22 @@ static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA, uiBytes); + break; +#endif + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: + 
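At teardown the code above walks whatever is still recorded, warns about entries that were never freed (_FreeMemStatsEntry) and releases them. A minimal stand-in for that "warn about leaks, then free" pass is sketched here with an array in place of the hash table; record_t and the helpers are invented for the example.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned long long key;   /* address used as the lookup key */
    size_t             bytes;
} record_t;

/* Called once per surviving entry at teardown, like _FreeMemStatsEntry:
 * report the leak, then release the record. */
static void free_leaked_record(record_t *rec)
{
    fprintf(stderr, "stats record not freed: key=0x%llx size=%zu\n",
            rec->key, rec->bytes);
    free(rec);
}

/* Array stands in for the hash table being iterated and deleted. */
static void destroy_records(record_t **table, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if (table[i] != NULL) {
            free_leaked_record(table[i]);
            table[i] = NULL;
        }
    }
}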
DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA, uiBytes); + break; +#endif + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); break; @@ -1136,6 +1186,12 @@ static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); + break; +#endif + default: PVR_ASSERT(0); break; @@ -1197,10 +1253,22 @@ static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA, uiBytes); + break; +#endif + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA, uiBytes); + break; +#endif + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); break; @@ -1213,6 +1281,12 @@ static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); + break; +#endif + default: PVR_ASSERT(0); break; @@ -1249,8 +1323,7 @@ _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid) if (psProcessStats != NULL) { /* Move it back onto the live list! */ - _RemoveProcessStatsFromList(psProcessStats); - _AddProcessStatsToFrontOfLiveList(psProcessStats); + _MoveProcessToLiveList(psProcessStats); } else { @@ -1278,9 +1351,11 @@ _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid) eError = _AllocateProcessStats(&psProcessStats, ownerPid); PVR_GOTO_IF_ERROR(eError, e0); + OSStringSafeCopy(psProcessStats->processName, OSGetCurrentProcessName(), MAX_PROC_NAME_LEN); + /* Add it to the live list... 
*/ OSLockAcquire(g_psLinkedListLock); - _AddProcessStatsToFrontOfLiveList(psProcessStats); + dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); OSLockRelease(g_psLinkedListLock); /* Done */ @@ -1358,17 +1433,17 @@ PVRSRV_ERROR PVRSRVStatsDeviceConnect(PVRSRV_DEVICE_NODE *psDeviceNode) if (psProcessStats != NULL) { - if (ui32DevID < psProcessStats->ui32DevCount) + if ((ui32DevID < psProcessStats->ui32DevCount) || + (ui32DevID == 0 && psProcessStats->ui32DevCount == 0)) { psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]++; UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_MAX_CONNECTIONS], - psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]); - + psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]); } else { PVR_DPF((PVR_DBG_ERROR, "%s: Device index %d is greater than device count %d for PID %d.", - __func__, ui32DevID, psProcessStats->ui32DevCount, ownerPid)); + __func__, ui32DevID, psProcessStats->ui32DevCount, ownerPid)); } } else @@ -1411,7 +1486,8 @@ void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) if (psProcessStats != NULL) { - if (ui32DevID < psProcessStats->ui32DevCount) + if ((ui32DevID < psProcessStats->ui32DevCount) || + (ui32DevID == 0 && psProcessStats->ui32DevCount == 0)) { psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]--; } @@ -1430,38 +1506,364 @@ void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) OSLockRelease(g_psLinkedListLock); } -void -PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, - void *pvCpuVAddr, - IMG_CPU_PHYADDR sCpuPAddr, - size_t uiBytes, - IMG_PID currentPid - DEBUG_MEMSTATS_PARAMS) +/* Assumes PVRSRV_PROCESS_STATS lock (psProcessStats->hLock) has already been taken */ +static void +_IncreaseProcStatValueUnlocked(PVRSRV_PROCESS_STATS* psProcessStats, + PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes) { -#if defined(PVRSRV_ENABLE_MEMORY_STATS) - IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); - PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); - PVRSRV_MEM_ALLOC_REC* psRecord = NULL; - PVRSRV_PROCESS_STATS* psProcessStats; - enum { PVRSRV_PROC_NOTFOUND, - PVRSRV_PROC_FOUND, - PVRSRV_PROC_RESURRECTED - } eProcSearch = PVRSRV_PROC_FOUND; - #if defined(ENABLE_GPU_MEM_TRACEPOINT) - IMG_UINT64 ui64InitialSize; + IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); #endif - /* Don't do anything if we are not initialised or we are shutting down! */ - if (!bProcessStatsInitialised) + /* Update the memory watermarks... 
*/ + switch (eAllocType) { -#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) - PVR_DPF((PVR_DBG_WARNING, - "%s: Called when process statistics module is not initialised", - __func__)); -#endif - return; - } + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; +#endif + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, 
PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; +#endif + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; +#endif + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, + ui64Size); + } + } +#endif +} + +/* Assumes PVRSRV_PROCESS_STATS lock (psProcessStats->hLock) has already been taken */ +static void +_DecreaseProcStatValueUnlocked(PVRSRV_MEM_ALLOC_TYPE eAllocType, + PVRSRV_PROCESS_STATS* psProcessStats, + IMG_UINT64 uiBytes) +{ +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << 
(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; +#endif + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; +#endif + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0) + { + psProcessStats->ui32StatAllocFlags 
&= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); + if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; +#endif + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); + } + } +#endif +} + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +static INLINE IMG_BOOL +_CheckKeyPhysAddr(PVRSRV_MEM_ALLOC_TYPE eAllocType) +{ + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: +#endif + return IMG_TRUE; + default: + return IMG_FALSE; + } +} +#endif + +void +PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + void *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + size_t uiBytes, + IMG_PID currentPid + DEBUG_MEMSTATS_PARAMS) +{ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_MEM_ALLOC_REC* psRecord = NULL; + PVRSRV_PROCESS_STATS* psProcessStats; + __maybe_unused PVRSRV_PROC_SEARCH_STATE eProcSearch = PVRSRV_PROC_FOUND; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } /* * To prevent a recursive loop, we make the memory allocations for our @@ -1536,11 +1938,11 @@ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, } /* Add it to the live list... */ - _AddProcessStatsToFrontOfLiveList(psProcessStats); + dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); OSLockRelease(g_psLinkedListLock); -#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ +#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ OSLockRelease(g_psLinkedListLock); goto free_record; #endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ @@ -1562,182 +1964,200 @@ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - /* Insert the memory record... */ - if (psRecord != NULL) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) { - List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord); - } - -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -#endif + IMG_UINT64 ui64Key; - /* Update the memory watermarks... 
*/ - switch (eAllocType) - { - case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + if (_CheckKeyPhysAddr(eAllocType)) { - if (psRecord != NULL) - { - if (pvCpuVAddr == NULL) - { - break; - } - psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; - } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + ui64Key = psRecord->sCpuPAddr.uiAddr; } - break; - - case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + else { - if (psRecord != NULL) - { - if (pvCpuVAddr == NULL) - { - break; - } - psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; - } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + ui64Key = (IMG_UINT64)psRecord->pvCpuVAddr; } - break; - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + /* Insert the memory record... */ + if (!HASH_Insert(psProcessStats->psMemoryRecords, ui64Key, (uintptr_t)psRecord)) { - if (psRecord != NULL) - { - if (pvCpuVAddr == NULL) - { - break; - } - psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; - } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + PVR_DPF((PVR_DBG_ERROR, + "%s UNABLE TO CREATE mem stats record for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", + __func__, currentPid, OSGetCurrentProcessName(), uiBytes)); } - break; + } +#endif - case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: - { - if (psRecord != NULL) - { - if (pvCpuVAddr == NULL) - { - break; - } - psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; - } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; + /* Update the memory watermarks... */ + _IncreaseProcStatValueUnlocked(psProcessStats, + eAllocType, + uiBytes); - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + OSLockRelease(psProcessStats->hLock); + + return; + +free_record: + _decrease_global_stat(eAllocType, uiBytes); + if (psRecord != NULL) + { + OSFreeMemNoStats(psRecord); + } +#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + PVR_UNREFERENCED_PARAMETER(eAllocType); + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(sCpuPAddr); + PVR_UNREFERENCED_PARAMETER(uiBytes); + PVR_UNREFERENCED_PARAMETER(currentPid); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ +} /* PVRSRVStatsAddMemAllocRecord */ + +void +PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 ui64Key, + IMG_PID currentPid) +{ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + PVRSRV_MEM_ALLOC_REC* psRecord = NULL; + IMG_BOOL bFound = IMG_FALSE; + + /* Don't do anything if we are not initialised or we are shutting down! 
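_CheckKeyPhysAddr and the insertion code above choose the hash key per allocation type: page-style allocations are keyed by CPU physical address, everything else by CPU virtual address, and a failed insert is only logged. The sketch below restates that selection with simplified placeholder types; hash_insert is a stub standing in for the real table.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef enum {
    ALLOC_KMALLOC,
    ALLOC_VMALLOC,
    ALLOC_LMA_PAGES,
    ALLOC_UMA_PAGES
} alloc_type_t;

typedef struct {
    alloc_type_t type;
    void        *cpu_vaddr;
    uint64_t     cpu_paddr;
    size_t       bytes;
} mem_record_t;

/* Page-style allocations are keyed by physical address, everything else by
 * the CPU virtual address, matching the selection in the hunks above. */
static bool key_is_phys(alloc_type_t type)
{
    return type == ALLOC_LMA_PAGES || type == ALLOC_UMA_PAGES;
}

/* Stub: a real table would store the key -> record mapping. */
static bool hash_insert(uint64_t key, void *value)
{
    (void)key;
    (void)value;
    return true;
}

static void track_alloc(mem_record_t *rec)
{
    uint64_t key = key_is_phys(rec->type)
                       ? rec->cpu_paddr
                       : (uint64_t)(uintptr_t)rec->cpu_vaddr;

    if (!hash_insert(key, rec))
        fprintf(stderr, "unable to record %zu-byte allocation\n", rec->bytes);
}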
*/ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* Lock while we find the correct process and remove this record... */ + OSLockAcquire(g_psLinkedListLock); + + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) { - if (psRecord != NULL) - { - psRecord->ui64Key = sCpuPAddr.uiAddr; - } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + psProcessStats = _FindProcessStats(currentCleanupPid); } - break; - - case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + else { - if (psRecord != NULL) - { - if (pvCpuVAddr == NULL) - { - break; - } - psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; - } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + psProcessStats = _FindProcessStats(currentPid); } - break; + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); + OSLockRelease(psProcessStats->hLock); + bFound = psRecord != NULL; + } - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + /* If not found, we need to do a full search in case it was allocated to a different PID... */ + if (!bFound) + { + PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats; + DLLIST_NODE *psNode, *psNext; + + /* Search all live lists first... */ + dllist_foreach_node(&gsLiveList, psNode, psNext) { - if (psRecord != NULL) + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); + if (psProcessStats != psProcessStatsAlreadyChecked) { - psRecord->ui64Key = sCpuPAddr.uiAddr; + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); + OSLockRelease(psProcessStats->hLock); + bFound = psRecord != NULL; } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: - { - if (psRecord != NULL) + if (bFound) { - psRecord->ui64Key = sCpuPAddr.uiAddr; + break; } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); } - break; - case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + /* If not found, then search all dead lists next... 
*/ + if (!bFound) { - if (psRecord != NULL) + dllist_foreach_node(&gsDeadList, psNode, psNext) { - if (pvCpuVAddr == NULL) + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); + if (psProcessStats != psProcessStatsAlreadyChecked) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); + OSLockRelease(psProcessStats->hLock); + bFound = psRecord != NULL; + } + + if (bFound) { break; } - psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; } - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); } - break; - - default: - { - PVR_ASSERT(0); - } - break; } -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + /* Update the watermark and remove this record... */ + if (bFound) { - IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); - if (ui64Size != ui64InitialSize) + _decrease_global_stat(eAllocType, psRecord->uiBytes); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + _DecreaseProcStatValueUnlocked(eAllocType, + psProcessStats, + psRecord->uiBytes); + + OSLockRelease(psProcessStats->hLock); + OSLockRelease(g_psLinkedListLock); + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* If all stats are now zero, remove the entry for this thread */ + if (psProcessStats->ui32StatAllocFlags == 0) { - TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); + OSLockAcquire(g_psLinkedListLock); + _MoveProcessToDeadList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); } - } #endif - - OSLockRelease(psProcessStats->hLock); - - return; - -free_record: - if (psRecord != NULL) - { + /* + * Free the record outside the lock so we don't deadlock and so we + * reduce the time the lock is held. + */ OSFreeMemNoStats(psRecord); } -#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -} /* PVRSRVStatsAddMemAllocRecord */ + else + { + OSLockRelease(g_psLinkedListLock); + } + +#else + PVR_UNREFERENCED_PARAMETER(eAllocType); + PVR_UNREFERENCED_PARAMETER(ui64Key); + PVR_UNREFERENCED_PARAMETER(currentPid); +#endif +} /* PVRSRVStatsRemoveMemAllocRecord */ +#if defined(SUPPORT_PMR_DEFERRED_FREE) void -PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, - IMG_UINT64 ui64Key, - IMG_PID currentPid) +PVRSRVStatsTransferMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eCurrentType, + PVRSRV_MEM_ALLOC_TYPE eTargetType, + IMG_UINT64 ui64Key, + IMG_PID currentPid + DEBUG_MEMSTATS_PARAMS) { #if defined(PVRSRV_ENABLE_MEMORY_STATS) IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); @@ -1757,6 +2177,13 @@ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, return; } + if (_CheckKeyPhysAddr(eCurrentType) != _CheckKeyPhysAddr(eTargetType)) + { + PVR_DPF((PVR_DBG_ERROR, "Key type used for current type must match target type, " + "Record transfer failed!")); + return; + } + /* Lock while we find the correct process and remove this record... 
*/ OSLockAcquire(g_psLinkedListLock); @@ -1776,127 +2203,78 @@ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, { psProcessStats = _FindProcessStats(currentPid); } + if (psProcessStats != NULL) { - psRecord = psProcessStats->psMemoryRecords; - while (psRecord != NULL) - { - if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) - { - bFound = IMG_TRUE; - break; - } - - psRecord = psRecord->psNext; - } + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); + OSLockRelease(psProcessStats->hLock); + bFound = psRecord != NULL; } /* If not found, we need to do a full search in case it was allocated to a different PID... */ if (!bFound) { PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats; + DLLIST_NODE *psNode, *psNext; /* Search all live lists first... */ - psProcessStats = g_psLiveList; - while (psProcessStats != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); if (psProcessStats != psProcessStatsAlreadyChecked) { - psRecord = psProcessStats->psMemoryRecords; - while (psRecord != NULL) - { - if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) - { - bFound = IMG_TRUE; - break; - } - - psRecord = psRecord->psNext; - } + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); + OSLockRelease(psProcessStats->hLock); + bFound = psRecord != NULL; } if (bFound) { break; } - - psProcessStats = psProcessStats->psNext; - } - - /* If not found, then search all dead lists next... */ - if (!bFound) - { - psProcessStats = g_psDeadList; - while (psProcessStats != NULL) - { - if (psProcessStats != psProcessStatsAlreadyChecked) - { - psRecord = psProcessStats->psMemoryRecords; - while (psRecord != NULL) - { - if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) - { - bFound = IMG_TRUE; - break; - } - - psRecord = psRecord->psNext; - } - } - - if (bFound) - { - break; - } - - psProcessStats = psProcessStats->psNext; - } } } - /* Update the watermark and remove this record...*/ if (bFound) { - _decrease_global_stat(eAllocType, psRecord->uiBytes); + /* Update the current type watermark by reducing */ + _decrease_global_stat(eCurrentType, psRecord->uiBytes); OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - _DecreaseProcStatValue(eAllocType, - psProcessStats, - psRecord->uiBytes); + _DecreaseProcStatValueUnlocked(eCurrentType, + psProcessStats, + psRecord->uiBytes); - List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord); - OSLockRelease(psProcessStats->hLock); - OSLockRelease(g_psLinkedListLock); + /* Change the type of the record */ + psRecord->eAllocType = eTargetType; +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + /* Adjust where the record transfer originated */ + psRecord->pvAllocdFromFile = pvAllocFromFile; + psRecord->ui32AllocdFromLine = ui32AllocFromLine; +#endif -#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) - /* If all stats are now zero, remove the entry for this thread */ - if (psProcessStats->ui32StatAllocFlags == 0) - { - OSLockAcquire(g_psLinkedListLock); - _MoveProcessToDeadList(psProcessStats); - OSLockRelease(g_psLinkedListLock); + /* Update the target watermark by increasing */ + _IncreaseProcStatValueUnlocked(psProcessStats, + eTargetType, + psRecord->uiBytes); + 
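The hunks above replace the old per-allocation-type switch and linked list of PVRSRV_MEM_ALLOC_REC entries with a single key choice (_CheckKeyPhysAddr picks the CPU physical address for page-backed types and the CPU virtual address otherwise) followed by a hash-table insert; the remove path looks the record up again by the same key. A minimal stand-alone C sketch of that pattern, using a toy linear-scan table in place of the driver's HASH_* API; alloc_rec, key_for_record, toy_insert and toy_remove are illustrative names, not driver symbols:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct alloc_rec {
    bool     key_is_phys;   /* true for page/physically backed allocation types */
    uint64_t cpu_phys_addr; /* used as the key when key_is_phys is true */
    void    *cpu_virt_addr; /* used as the key otherwise */
    size_t   bytes;
};

/* Pick the lookup key the same way the patch does: physical address for
 * page allocations, CPU virtual address for everything else. */
static uint64_t key_for_record(const struct alloc_rec *rec)
{
    return rec->key_is_phys ? rec->cpu_phys_addr
                            : (uint64_t)(uintptr_t)rec->cpu_virt_addr;
}

/* Tiny stand-in table; the real code uses the driver's hash implementation. */
#define TOY_TABLE_SIZE 64
static struct alloc_rec *toy_table[TOY_TABLE_SIZE];

static bool toy_insert(struct alloc_rec *rec)
{
    size_t i;
    for (i = 0; i < TOY_TABLE_SIZE; i++) {
        if (toy_table[i] == NULL) {
            toy_table[i] = rec;
            return true;
        }
    }
    return false; /* insertion failed: the patch logs an error in this case */
}

static struct alloc_rec *toy_remove(uint64_t key)
{
    size_t i;
    for (i = 0; i < TOY_TABLE_SIZE; i++) {
        struct alloc_rec *rec = toy_table[i];
        if (rec != NULL && key_for_record(rec) == key) {
            toy_table[i] = NULL;
            return rec; /* caller adjusts the process stats, then frees it */
        }
    }
    return NULL;
}

The transfer path added under SUPPORT_PMR_DEFERRED_FREE follows the same idea: remove the record by key, decrease the stats for the current type, retag the record and re-account it under the target type.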
OSLockRelease(psProcessStats->hLock); - /* Check if the dead list needs to be reduced */ - _CompressMemoryUsage(); - } -#endif - /* - * Free the record outside the lock so we don't deadlock and so we - * reduce the time the lock is held. - */ - OSFreeMemNoStats(psRecord); - } - else - { - OSLockRelease(g_psLinkedListLock); + _increase_global_stat(eTargetType, psRecord->uiBytes); } + OSLockRelease(g_psLinkedListLock); + #else -PVR_UNREFERENCED_PARAMETER(eAllocType); +PVR_UNREFERENCED_PARAMETER(eCurrentType); +PVR_UNREFERENCED_PARAMETER(eTargetType); PVR_UNREFERENCED_PARAMETER(ui64Key); +PVR_UNREFERENCED_PARAMETER(currentPid); #endif -} /* PVRSRVStatsRemoveMemAllocRecord */ +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, @@ -1957,348 +2335,108 @@ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, { IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); - PVRSRV_PROCESS_STATS* psProcessStats = NULL; - enum { PVRSRV_PROC_NOTFOUND, - PVRSRV_PROC_FOUND, - PVRSRV_PROC_RESURRECTED - } eProcSearch = PVRSRV_PROC_FOUND; - -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - IMG_UINT64 ui64InitialSize; -#endif - - /* Don't do anything if we are not initialised or we are shutting down! */ - if (!bProcessStatsInitialised) - { -#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) - PVR_DPF((PVR_DBG_WARNING, - "%s: Called when process statistics module is not initialised", - __func__)); -#endif - return; - } - - _increase_global_stat(eAllocType, uiBytes); - OSLockAcquire(g_psLinkedListLock); - if (psPVRSRVData) - { - if ((currentPid == psPVRSRVData->cleanupThreadPid) && - (currentCleanupPid != 0)) - { - psProcessStats = _FindProcessStats(currentCleanupPid); - } - else - { - psProcessStats = _FindProcessStatsInLiveList(currentPid); - if (!psProcessStats) - { - psProcessStats = _FindProcessStatsInDeadList(currentPid); - eProcSearch = PVRSRV_PROC_RESURRECTED; - } - } - } - else - { - psProcessStats = _FindProcessStatsInLiveList(currentPid); - if (!psProcessStats) - { - psProcessStats = _FindProcessStatsInDeadList(currentPid); - eProcSearch = PVRSRV_PROC_RESURRECTED; - } - } - - if (psProcessStats == NULL) - { - eProcSearch = PVRSRV_PROC_NOTFOUND; - -#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Process stat increment called for 'unknown' process PID(%d)", - __func__, currentPid)); - - if (bProcessStatsInitialised) - { - if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) - { - OSLockRelease(g_psLinkedListLock); - return; - } - /* Add it to the live list... 
*/ - _AddProcessStatsToFrontOfLiveList(psProcessStats); - } -#else - OSLockRelease(g_psLinkedListLock); -#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ - - } - - if (psProcessStats != NULL) - { -#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) - if (eProcSearch == PVRSRV_PROC_RESURRECTED) - { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Process stat incremented on 'dead' process PID(%d)", - __func__, currentPid)); - - /* Move process from dead list to live list */ - _MoveProcessToLiveList(psProcessStats); - } -#endif - OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - /* Release the list lock as soon as we acquire the process lock, - * this ensures if the process is in deadlist the entry cannot be - * deleted or modified - */ - OSLockRelease(g_psLinkedListLock); - -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -#endif - - /* Update the memory watermarks... */ - switch (eAllocType) - { - case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); - INCREASE_STAT_VALUE(psProcessStats, 
PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: - { - INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); - psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - break; - - default: - { - PVR_ASSERT(0); - } - break; - } - -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - if (psProcessStats->pid != PVR_SYS_ALLOC_PID) - { - IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); - if (ui64Size != ui64InitialSize) - { - TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, - ui64Size); - } - } -#endif - - OSLockRelease(psProcessStats->hLock); - } - -} + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + __maybe_unused PVRSRV_PROC_SEARCH_STATE eProcSearch = PVRSRV_PROC_FOUND; -static void -_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, - PVRSRV_PROCESS_STATS* psProcessStats, - IMG_UINT64 uiBytes) -{ -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); #endif + return; + } - switch (eAllocType) + _increase_global_stat(eAllocType, uiBytes); + OSLockAcquire(g_psLinkedListLock); + if (psPVRSRVData) { - case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: - { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - } - break; - - case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } + psProcessStats = _FindProcessStats(currentCleanupPid); } - break; - - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + else { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0) + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + psProcessStats = 
_FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; } } - break; - - case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; } - break; + } - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: - { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - } - break; + if (psProcessStats == NULL) + { + eProcSearch = PVRSRV_PROC_NOTFOUND; - case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: - { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - } - break; +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Process stat increment called for 'unknown' process PID(%d)", + __func__, currentPid)); - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + if (bProcessStatsInitialised) { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0) + if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + OSLockRelease(g_psLinkedListLock); + return; } + /* Add it to the live list... 
*/ + dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); } - break; +#else + OSLockRelease(g_psLinkedListLock); +#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: - { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - } - break; + } - case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + if (psProcessStats != NULL) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (eProcSearch == PVRSRV_PROC_RESURRECTED) { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } - } - break; + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Process stat incremented on 'dead' process PID(%d)", + __func__, currentPid)); - case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: - { - DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); - if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0) - { - psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); - } + /* Move process from dead list to live list */ + _MoveProcessToLiveList(psProcessStats); } - break; +#endif + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Release the list lock as soon as we acquire the process lock, + * this ensures if the process is in deadlist the entry cannot be + * deleted or modified + */ + OSLockRelease(g_psLinkedListLock); - default: - { - PVR_ASSERT(0); - } - break; - } + /* Update the memory watermarks... 
*/ + _IncreaseProcStatValueUnlocked(psProcessStats, + eAllocType, + uiBytes); -#if defined(ENABLE_GPU_MEM_TRACEPOINT) - if (psProcessStats->pid != PVR_SYS_ALLOC_PID) - { - IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); - if (ui64Size != ui64InitialSize) - { - TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); - } + OSLockRelease(psProcessStats->hLock); } -#endif } #if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) { PVRSRV_PROCESS_STATS *psProcessStats; + DLLIST_NODE *psNode, *psNext; DIPrintf(psEntry, "%s,%s,%s,%s,%s,%s,%s\n", @@ -2312,10 +2450,9 @@ int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) OSLockAcquire(g_psLinkedListLock); - psProcessStats = g_psLiveList; - - while (psProcessStats != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); if (psProcessStats->pid != PVR_SYS_ALLOC_PID) { DIPrintf(psEntry, @@ -2330,8 +2467,6 @@ int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES], psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT]); } - - psProcessStats = psProcessStats->psNext; } OSLockRelease(g_psLinkedListLock); @@ -2390,9 +2525,9 @@ _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, { OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); /* Decrement the memory stat... */ - _DecreaseProcStatValue(eAllocType, - psProcessStats, - psTrackingHashEntry->uiSizeInBytes); + _DecreaseProcStatValueUnlocked(eAllocType, + psProcessStats, + psTrackingHashEntry->uiSizeInBytes); OSLockRelease(psProcessStats->hLock); } @@ -2465,9 +2600,9 @@ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, */ OSLockRelease(g_psLinkedListLock); /* Update the memory watermarks... 
*/ - _DecreaseProcStatValue(eAllocType, - psProcessStats, - uiBytes); + _DecreaseProcStatValueUnlocked(eAllocType, + psProcessStats, + uiBytes); OSLockRelease(psProcessStats->hLock); #if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) @@ -2555,6 +2690,7 @@ PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32Num3DStores, IMG_UINT32 ui32NumCDMStores, IMG_UINT32 ui32NumTDMStores, + IMG_UINT32 ui32NumRayStores, IMG_PID pidOwner) { IMG_PID pidCurrent = pidOwner; @@ -2582,6 +2718,7 @@ PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode, psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores; psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores; psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores; + psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_RAY_STORES]+= ui32NumRayStores; OSLockRelease(psProcessStats->hLock); } else @@ -2683,6 +2820,7 @@ GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData) { PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); PVRSRV_PROCESS_STATS* psProcessStats; + DLLIST_NODE *psNode, *psNext; PVR_UNREFERENCED_PARAMETER(pvData); @@ -2692,18 +2830,16 @@ GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData) OSLockAcquire(g_psLinkedListLock); - psProcessStats = g_psLiveList; - - if (psProcessStats == NULL) + if (dllist_is_empty(&gsLiveList)) { DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); } else { - while (psProcessStats != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); psStatType->pfnStatsPrintElements(psEntry, psProcessStats); - psProcessStats = psProcessStats->psNext; DIPrintf(psEntry, "%s\n", g_szSeparatorStr); } } @@ -2717,6 +2853,7 @@ GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData) { PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); PVRSRV_PROCESS_STATS* psProcessStats; + DLLIST_NODE *psNode, *psNext; PVR_UNREFERENCED_PARAMETER(pvData); @@ -2726,18 +2863,16 @@ GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData) OSLockAcquire(g_psLinkedListLock); - psProcessStats = g_psDeadList; - - if (psProcessStats == NULL) + if (dllist_is_empty(&gsDeadList)) { DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); } else { - while (psProcessStats != NULL) + dllist_foreach_node(&gsDeadList, psNode, psNext) { + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); psStatType->pfnStatsPrintElements(psEntry, psProcessStats); - psProcessStats = psProcessStats->psNext; DIPrintf(psEntry, "%s\n", g_szSeparatorStr); } } @@ -2759,9 +2894,12 @@ ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, { IMG_UINT32 ui32StatNumber; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RILockAcquireKM(); +#endif OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); - DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); + DIPrintf(psEntry, "PID %u - %s\n", psProcessStats->pid, psProcessStats->processName); /* Print device stats table PVRSRV_DEVICE_STAT_TYPE */ if (psProcessStats->ui32DevCount > 0) @@ -2808,7 +2946,7 @@ ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES)) { /* get the stat from RI */ - IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid, + IMG_INT32 ui32Total = 
RITotalAllocProcessUnlocked(psProcessStats->pid, (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA); @@ -2837,6 +2975,9 @@ ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, } OSLockRelease(psProcessStats->hLock); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RILockReleaseKM(); +#endif } /* ProcessStatsPrintElements */ #endif @@ -2919,7 +3060,7 @@ CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu\n" #endif - DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); + DIPrintf(psEntry, "PID %u - %s\n", psProcessStats->pid, psProcessStats->processName); /* File header info */ DIPrintf(psEntry, @@ -3008,63 +3149,43 @@ CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, #endif #if defined(PVRSRV_ENABLE_MEMORY_STATS) -/*************************************************************************/ /*! -@Function MemStatsPrintElements -@Description Prints all elements for the memory statistic record. -@Input pvStatPtr Pointer to statistics structure. -@Input pfnOSStatsPrintf Printf function to use for output. -*/ /**************************************************************************/ -void -MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, - PVRSRV_PROCESS_STATS *psProcessStats) +static PVRSRV_ERROR _PrintMemStatsEntry(uintptr_t k, uintptr_t v, void* pvPriv) { IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); - PVRSRV_MEM_ALLOC_REC *psRecord; IMG_UINT32 ui32ItemNumber; + PVRSRV_MEM_ALLOC_REC *psRecord = (PVRSRV_MEM_ALLOC_REC *)(uintptr_t)v; + PVRSRV_MEM_ALLOC_PRINT_DATA *psPrintData = (PVRSRV_MEM_ALLOC_PRINT_DATA *)pvPriv; + OSDI_IMPL_ENTRY *psEntry = psPrintData->psEntry; - /* Write the header... 
*/ - DIPrintf(psEntry, "PID "); - - DIPrintf(psEntry, "Type VAddress"); - for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) - { - DIPrintf(psEntry, " "); - } - - DIPrintf(psEntry, " PAddress"); - for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) - { - DIPrintf(psEntry, " "); - } - - DIPrintf(psEntry, " Size(bytes)\n"); - - psRecord = psProcessStats->psMemoryRecords; - if (psRecord == NULL) - { - DIPrintf(psEntry, "%-5d\n", psProcessStats->pid); - } - - while (psRecord != NULL) + if (psRecord != NULL) { IMG_BOOL bPrintStat = IMG_TRUE; - DIPrintf(psEntry, "%-5d ", psProcessStats->pid); + DIPrintf(psEntry, "%-5d ", psPrintData->pid); switch (psRecord->eAllocType) { - case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: DIPrintf(psEntry, "KMALLOC "); break; - case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: DIPrintf(psEntry, "VMALLOC "); break; - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA "); break; - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA "); break; - case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: DIPrintf(psEntry, "IOREMAP_PT_LMA "); break; - case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: DIPrintf(psEntry, "VMAP_PT_UMA "); break; - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: DIPrintf(psEntry, "ALLOC_LMA_PAGES "); break; - case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DIPrintf(psEntry, "ALLOC_UMA_PAGES "); break; - case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DIPrintf(psEntry, "MAP_UMA_LMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: DIPrintf(psEntry, "KMALLOC "); break; + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: DIPrintf(psEntry, "VMALLOC "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: DIPrintf(psEntry, "IOREMAP_PT_LMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: DIPrintf(psEntry, "VMAP_PT_UMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: DIPrintf(psEntry, "ALLOC_LMA_PAGES "); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: DIPrintf(psEntry, "ZOMBIE_LMA_PAGES "); break; +#endif + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DIPrintf(psEntry, "ALLOC_UMA_PAGES "); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: DIPrintf(psEntry, "ZOMBIE_UMA_PAGES "); break; +#endif + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DIPrintf(psEntry, "MAP_UMA_LMA_PAGES "); break; case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: DIPrintf(psEntry, "DMA_BUF_IMPORT "); break; - default: DIPrintf(psEntry, "INVALID "); break; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: DIPrintf(psEntry, "DMA_BUF_ZOMBIE "); break; +#endif + default: DIPrintf(psEntry, "INVALID "); break; } if (bPrintStat) @@ -3090,8 +3211,56 @@ MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes); #endif } - /* Move to next record... */ - psRecord = psRecord->psNext; + + psPrintData->ui32NumEntries++; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function MemStatsPrintElements +@Description Prints all elements for the memory statistic record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. 
+*/ /**************************************************************************/ +void +MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); + IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); + IMG_UINT32 ui32ItemNumber; + PVRSRV_MEM_ALLOC_PRINT_DATA sPrintData; + + sPrintData.psEntry = psEntry; + sPrintData.pid = psProcessStats->pid; + sPrintData.ui32NumEntries = 0; + + /* Write the header... */ + DIPrintf(psEntry, "PID "); + + DIPrintf(psEntry, "Type VAddress"); + for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, " "); + } + + DIPrintf(psEntry, " PAddress"); + for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, " "); + } + + DIPrintf(psEntry, " Size(bytes)\n"); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + HASH_Iterate(psProcessStats->psMemoryRecords, (HASH_pfnCallback)_PrintMemStatsEntry, &sPrintData); + OSLockRelease(psProcessStats->hLock); + + if (sPrintData.ui32NumEntries == 0) + { + DIPrintf(psEntry, "%-5d\n", psProcessStats->pid); } } /* MemStatsPrintElements */ #endif @@ -3143,7 +3312,7 @@ int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) { if (OSStringNCompare(pszDriverStatType[ui32StatNumber], "", 1) != 0) { - DIPrintf(psEntry, "%-34s%12llu\n", + DIPrintf(psEntry, "%-34s%12" IMG_UINT64_FMTSPEC "\n", pszDriverStatType[ui32StatNumber], GET_GLOBAL_STAT_VALUE(ui32StatNumber)); } @@ -3175,12 +3344,12 @@ int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, - IMG_UINT32 *pui32MemoryStats) + IMG_UINT64 *pui64MemoryStats) { IMG_INT i; PVRSRV_PROCESS_STATS* psProcessStats; - PVR_LOG_RETURN_IF_INVALID_PARAM(pui32MemoryStats, "pui32MemoryStats"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pui64MemoryStats, "pui64MemoryStats"); if (bAllProcessStats) { @@ -3192,7 +3361,7 @@ PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, for (i = 0; i < ui32ArrSize; i++) { - pui32MemoryStats[i] = (IMG_UINT32)GET_GLOBAL_STAT_VALUE(i); + pui64MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i); } OSLockRelease(gsGlobalStats.hGlobalStatsLock); @@ -3220,7 +3389,7 @@ PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); for (i = 0; i < ui32ArrSize; i++) { - pui32MemoryStats[i] = (IMG_UINT32)psProcessStats->i64StatValue[i]; + pui64MemoryStats[i] = psProcessStats->i64StatValue[i]; } OSLockRelease(psProcessStats->hLock); @@ -3236,7 +3405,7 @@ PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, connected processes. Memstat values provided by this API relate only to the physical memory allocated by the process and does not relate to any of the mapped or imported memory. -@Output pui32TotalMem Total memory usage for all live +@Output pui64TotalMem Total memory usage for all live PIDs connected to the driver. @Output pui32NumberOfLivePids Number of live pids currently connected to the server. @@ -3249,33 +3418,30 @@ PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate memory for ppsPerProcessMemUsageData. 
*/ /**************************************************************************/ -PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, +PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT64 *pui64TotalMem, IMG_UINT32 *pui32NumberOfLivePids, PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData) { - IMG_UINT32 ui32Counter = 0; IMG_UINT32 ui32NumberOfLivePids = 0; PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND; - PVRSRV_PROCESS_STATS* psProcessStats = NULL; PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL; + DLLIST_NODE *psNode, *psNext; OSLockAcquire(gsGlobalStats.hGlobalStatsLock); - *pui32TotalMem = (IMG_UINT32)GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) + - (IMG_UINT32)GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) + - (IMG_UINT32)GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + - (IMG_UINT32)GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + - (IMG_UINT32)GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + - (IMG_UINT32)GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA); + *pui64TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA); OSLockRelease(gsGlobalStats.hGlobalStatsLock); OSLockAcquire(g_psLinkedListLock); - psProcessStats = g_psLiveList; - while (psProcessStats != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { - psProcessStats = psProcessStats->psNext; ui32NumberOfLivePids++; } @@ -3286,26 +3452,27 @@ PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, if (psPerProcessMemUsageData) { - psProcessStats = g_psLiveList; + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + IMG_UINT32 ui32Counter = 0; - while (psProcessStats != NULL) + dllist_foreach_node(&gsLiveList, psNode, psNext) { + psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid; - psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = - (IMG_UINT32)psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] + - (IMG_UINT32)psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC]; + psPerProcessMemUsageData[ui32Counter].ui64KernelMemUsage = + psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] + + psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC]; - psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = - (IMG_UINT32)psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] + - (IMG_UINT32)psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] + - (IMG_UINT32)psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] + - (IMG_UINT32)psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]; + psPerProcessMemUsageData[ui32Counter].ui64GraphicsMemUsage = + psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] + + psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] + + psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] + + psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]; 
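The query paths above widen the reported totals from IMG_UINT32 to IMG_UINT64 (PVRSRVFindProcessMemStats and PVRSRVGetProcessMemUsage now return 64-bit values instead of truncating the internal 64-bit counters). A small stand-alone program, not driver code, showing the wrap-around that 32-bit accumulation runs into once the summed usage passes 4 GiB:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Two heaps whose combined usage exceeds 4 GiB. */
    uint64_t heap_usage[2] = { 3ull << 30, 2ull << 30 };
    uint32_t total32 = 0;
    uint64_t total64 = 0;
    int i;

    for (i = 0; i < 2; i++) {
        total32 += (uint32_t)heap_usage[i]; /* wraps modulo 2^32 */
        total64 += heap_usage[i];
    }

    /* Prints 1073741824 (wrapped) for the 32-bit sum vs 5368709120 for the 64-bit sum. */
    printf("32-bit total: %" PRIu32 " bytes\n", total32);
    printf("64-bit total: %" PRIu64 " bytes\n", total64);
    return 0;
}

The per-process counters (i64StatValue) were already 64-bit; the old interface cast them down with (IMG_UINT32), so only the reporting types needed to change.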
OSLockRelease(psProcessStats->hLock); - psProcessStats = psProcessStats->psNext; ui32Counter++; } eError = PVRSRV_OK; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvr_notifier.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvr_notifier.c index 583d4c517d2c..fd9653a11fb4 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvr_notifier.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvr_notifier.c @@ -196,7 +196,7 @@ PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) } inline void -PVRSRVSignalGlobalEO(void) +PVRSRVSignalDriverWideEO(void) { PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); @@ -204,13 +204,20 @@ PVRSRVSignalGlobalEO(void) { OSEventObjectSignal(psPVRSRVData->hGlobalEventObject); } + /* Cleanup Thread could be waiting on Cleanup event object, + * signal it as well to ensure work is processed + */ + if (psPVRSRVData->hCleanupEventObject && (OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued) != 0)) + { + OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); + } } inline void PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) { PVRSRVNotifyCommandCompletion(hCmdCompCallerHandle); - PVRSRVSignalGlobalEO(); + PVRSRVSignalDriverWideEO(); } /*************************************************************************/ /*! @@ -233,7 +240,7 @@ typedef struct DEBUG_REQUEST_ENTRY_TAG typedef struct DEBUG_REQUEST_TABLE_TAG { POSWR_LOCK hLock; - DEBUG_REQUEST_ENTRY asEntry[1]; + DEBUG_REQUEST_ENTRY asEntry[IMG_FLEX_ARRAY_MEMBER]; } DEBUG_REQUEST_TABLE; typedef struct DEBUG_REQUEST_NOTIFY_TAG @@ -275,7 +282,7 @@ _RegisterDebugTableI(DEBUG_REQUEST_TABLE **ppsDebugTable) } psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) + - (sizeof(DEBUG_REQUEST_ENTRY) * (g_ui32DebugOrderTableReqCount-1))); + IMG_FLEX_ARRAY_SIZE(sizeof(DEBUG_REQUEST_ENTRY), g_ui32DebugOrderTableReqCount)); PVR_RETURN_IF_NOMEM(psDebugTable); eError = OSWRLockCreate(&psDebugTable->hLock); @@ -529,14 +536,15 @@ PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, else { szVerbosityLevel = "unknown"; - PVR_ASSERT(!"Invalid verbosity level received"); + PVR_DPF((PVR_DBG_WARNING, + "%s: Invalid verbosity level received", __func__)); } PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------", szVerbosityLevel); #if defined(RGX_IRQ_HYPERV_HANDLER) - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) #endif { OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile); @@ -575,15 +583,18 @@ PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", (psPVRSRVData->sDriverInfo.bIsNoMatch) ? "MISMATCH" : "MATCHING"); - PVR_DUMPDEBUG_LOG("KM Arch: %s", - (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); - - if (!PVRSRV_VZ_MODE_IS(NATIVE)) + if (psPVRSRVData->sDriverInfo.ui8KMBitArch) + { + PVR_DUMPDEBUG_LOG("KM Arch: %s",(psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); + } + else { - PVR_DUMPDEBUG_LOG("Driver Mode: %s", - (PVRSRV_VZ_MODE_IS(HOST)) ? "Host":"Guest"); + PVR_DUMPDEBUG_LOG("KM Arch is undefined"); } + PVR_DUMPDEBUG_LOG("Driver Mode: %s", + PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDevNode) ? "Native" : (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDevNode)) ? 
"Host":"Guest"); + if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch) { if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) == @@ -604,6 +615,18 @@ PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, PVR_DUMPDEBUG_LOG("Window system: %s", (IS_DECLARED(WINDOW_SYSTEM)) ? (WINDOW_SYSTEM) : "Not declared"); + PVR_DUMPDEBUG_LOG("Power lock status: %s", OSLockIsLocked(psDevNode->hPowerLock) ? "Locked" : "Free"); +#if defined(DEBUG) + if (OSLockIsLocked(psDevNode->hPowerLock)) + { + /* Ensure this info is logged before the power lock is taken, in case + * it's already in use. */ + PVR_DUMPDEBUG_LOG("Power lock owner: PID = %u at timestamp %" IMG_UINT64_FMTSPEC " (%s:%u)", + psDevNode->uiPwrLockOwnerPID, psDevNode->sPowerLockOwner.ui64Timestamp, + psDevNode->sPowerLockOwner.pszFile, psDevNode->sPowerLockOwner.ui32LineNum); + } +#endif + /* Driver debug table */ OSWRLockAcquireReadNested(psDriverDebugTable->hLock, DN_LOCKCLASS_DRIVER); /* Device debug table*/ @@ -641,7 +664,8 @@ PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, if (!pfnDumpDebugPrintf) { - /* Only notify OS of an issue if the debug dump has gone there */ + /* Only notify OS of an issue if the caller requested it, + * using a NULL pointer in pfnDumpDebugPrintf. */ OSWarnOn(IMG_TRUE); } } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv.c index ffd542c5fbc0..934593ff3ae5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv.c @@ -41,15 +41,18 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ +#define CLEANUP_TYPE_STRINGS + #include "img_defs.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "handle.h" + +#include "pmr.h" #include "connection_server.h" #include "osconnection_server.h" #include "pdump_km.h" #include "ra.h" #include "allocmem.h" -#include "pmr.h" #include "pvrsrv.h" #include "srvcore.h" #include "services_km.h" @@ -73,6 +76,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "di_impl_brg.h" #include "htb_debug.h" #include "dma_km.h" +#include "pmr.h" #include "log2.h" @@ -80,6 +84,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "dllist.h" #include "syscommon.h" #include "sysvalidation.h" +#include "hash_functions.h" #include "physmem_lma.h" #include "physmem_osmem.h" @@ -90,6 +95,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "tlintern.h" #include "htbserver.h" +#include "rgxmulticore.h" + //#define MULTI_DEVICE_BRINGUP @@ -103,6 +110,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxinit.h" #include "rgxhwperf.h" #include "rgxfwutils.h" +#include "rgx_bridge_init.h" #endif #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) @@ -119,6 +127,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #if defined(SUPPORT_LINUX_DVFS) #include "pvr_dvfs_device.h" +#elif defined(SUPPORT_PDVFS) +#include "pvr_dvfs_common.h" #endif #if defined(SUPPORT_DISPLAY_CLASS) @@ -129,7 +139,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "srvinit.h" #include "rgxutils.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "pvrsrv_apphint.h" #include "pvrsrv_tlstreams.h" @@ -165,7 +175,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /*! When unloading try a few times to free everything remaining on the list */ #define CLEANUP_THREAD_UNLOAD_RETRY 4 -#define PVRSRV_TL_CTLR_STREAM_SIZE 4096 +#define PVRSRV_TL_CTRL_STREAM_SIZE 4096 static PVRSRV_DATA *gpsPVRSRVData; static IMG_UINT32 g_ui32InitFlags; @@ -173,36 +183,172 @@ static IMG_UINT32 g_ui32InitFlags; /* mark which parts of Services were initialised */ #define INIT_DATA_ENABLE_PDUMPINIT 0x1U +#define CLEANUP_QUEUE_TOTAL_DPF "QUEUED: %1.5d " +#define CLEANUP_QUEUE_TOTAL_DPF_MAX_SIZE sizeof(CLEANUP_QUEUE_TOTAL_DPF)+1 + +#define CLEANUP_STRING_SUMMARY_MAX_LEN (CLEANUP_QUEUE_TOTAL_DPF_MAX_SIZE+(PVRSRV_CLEANUP_TYPE_LAST * CLEANUP_TYPE_ITEM_DPF_MAX_SIZE)) + +static IMG_CHAR *_ConcatCleanupString(IMG_CHAR *cleanupString) +{ + IMG_UINT32 uiLoop; + IMG_UINT32 strSize = CLEANUP_STRING_SUMMARY_MAX_LEN; + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + OSSNPrintf(cleanupString, CLEANUP_QUEUE_TOTAL_DPF_MAX_SIZE, CLEANUP_QUEUE_TOTAL_DPF, + OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued)); + + PVR_ASSERT(PVRSRV_CLEANUP_TYPE_CONNECTION == 1); + + for (uiLoop=PVRSRV_CLEANUP_TYPE_CONNECTION; (strSize > CLEANUP_TYPE_ITEM_DPF_MAX_SIZE) && (uiLoop < PVRSRV_CLEANUP_TYPE_LAST); uiLoop++) + { + IMG_CHAR acTempQueued[CLEANUP_TYPE_ITEM_DPF_MAX_SIZE]; + OSSNPrintf(acTempQueued, CLEANUP_TYPE_ITEM_DPF_MAX_SIZE, CLEANUP_TYPE_ITEM_DPF, + PVRSRVGetCleanupName(uiLoop), + OSAtomicRead(&psPVRSRVData->i32CleanupItemTypes[uiLoop])); + OSStringLCat(cleanupString, acTempQueued, strSize); + } + + return cleanupString; +} + /* Callback to dump info of cleanup thread in debug_dump */ static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, void *pvDumpDebugFile) { - PVRSRV_DATA *psPVRSRVData; - psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_CHAR acCleanupString[CLEANUP_STRING_SUMMARY_MAX_LEN]; + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items: %s", _ConcatCleanupString(acCleanupString)); - PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items Queued : %u", - OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued)); PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items dropped after " - "retry limit reached : %u", - OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted)); + "retry limit reached : %d", + OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted)); +} + +static void _CleanupThreadDecrementStats(PVRSRV_DATA *psPVRSRVData, + PVRSRV_CLEANUP_TYPE eCleanupType) +{ +#if defined(DEBUG) + IMG_CHAR acCleanupString[CLEANUP_STRING_SUMMARY_MAX_LEN]; +#endif + + PVR_ASSERT(OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued) >= 0); + + PVR_DPF((PVR_DBG_MESSAGE, "BEFORE REMOVING ----- %s", _ConcatCleanupString(acCleanupString))); + + OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued); + + if ((eCleanupType <= PVRSRV_CLEANUP_TYPE_UNDEF) || ((eCleanupType >= PVRSRV_CLEANUP_TYPE_LAST))) + { + PVR_DPF((PVR_DBG_ERROR, "Incorrect cleanup item passed: %d", eCleanupType)); + } + else + { + PVR_ASSERT(OSAtomicRead(&psPVRSRVData->i32CleanupItemTypes[eCleanupType]) >= 0); + OSAtomicDecrement(&psPVRSRVData->i32CleanupItemTypes[eCleanupType]); + } + + PVR_DPF((PVR_DBG_MESSAGE, "AFTER REMOVING ----- %s", _ConcatCleanupString(acCleanupString))); } +#if defined(DEBUG) +static 
void _CleanupThreadWorkListDump(PVRSRV_DATA *psPVRSRVData) +{ + DLLIST_NODE *psNode; + DLLIST_NODE *psNextNode; + PVRSRV_DEVICE_NODE *psDeviceNode; + OS_SPINLOCK_FLAGS uiFlags; + char pszCleanupLog[128]; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + /* Iterate over all devices. */ + for (psDeviceNode = psPVRSRVData->psDeviceNodeList; + psDeviceNode != NULL; + psDeviceNode = psDeviceNode->psNext) + { + if (dllist_is_empty(&psDeviceNode->sCleanupThreadWorkList)) + { + OSSNPrintf(pszCleanupLog, 128, "Dev_%p: CLEAN", psDeviceNode); + PVR_LOG(("%s", pszCleanupLog)); + continue; + } + + OSSNPrintf(pszCleanupLog, 128, "Dev_%p: TASKS", psDeviceNode); + PVR_LOG(("%s", pszCleanupLog)); + + /* Iterate over all cleanup items. */ + dllist_foreach_node(&psDeviceNode->sCleanupThreadWorkList, psNode, psNextNode) + { + PVRSRV_CLEANUP_THREAD_WORK *psData; + + psData = IMG_CONTAINER_OF(psNode, PVRSRV_CLEANUP_THREAD_WORK, sNode); + + PVR_ASSERT(psData != NULL); + + if ((psData->eCleanupType <= PVRSRV_CLEANUP_TYPE_UNDEF) || + ((psData->eCleanupType >= PVRSRV_CLEANUP_TYPE_LAST))) + { + PVR_DPF((PVR_DBG_ERROR, "Incorrect cleanup item found: %d.", psData->eCleanupType)); + continue; + } + + OSSNPrintf(pszCleanupLog, 128, "+ %p: type %u, depends-HW %s (%s:%d)", + psData, + psData->eCleanupType, + (psData->bDependsOnHW) ? "Yes" : " No", + psData->pszFun, psData->ui32LineNum); + PVR_LOG(("%s", pszCleanupLog)); + } + } + + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); +} +#endif + /* Add work to the cleanup thread work list. * The work item will be executed by the cleanup thread */ -void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData) +#if defined(DEBUG) +void PVRSRVCleanupThreadAddWork_Debug(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_CLEANUP_THREAD_WORK *psData, + const char *pszFun, const unsigned int ui32LineNum) +#else +void PVRSRVCleanupThreadAddWork_Int(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_CLEANUP_THREAD_WORK *psData) +#endif { +#if defined(DEBUG) + IMG_CHAR acCleanupString[CLEANUP_STRING_SUMMARY_MAX_LEN]; +#endif + PVRSRV_DATA *psPVRSRVData; PVRSRV_ERROR eError; + OS_SPINLOCK_FLAGS uiFlags; + + PVR_ASSERT(psData != NULL); + + if ((psData->eCleanupType <= PVRSRV_CLEANUP_TYPE_UNDEF) || + ((psData->eCleanupType >= PVRSRV_CLEANUP_TYPE_LAST))) + { + PVR_DPF((PVR_DBG_ERROR, "Incorrect cleanup item passed: %d.", psData->eCleanupType)); + } psPVRSRVData = PVRSRVGetPVRSRVData(); - PVR_ASSERT(psData != NULL); + PVR_DPF((PVR_DBG_MESSAGE, "AFTER QUEUEING ----- %s", _ConcatCleanupString(acCleanupString))); + + + /* Cleanup thread work item added during device destructing: + 1. bDependsOnHW: Item may use device resources. Not safe to clean. + 2. !bDependsOnHW: Clean item immediately to prevent memory leak. 
*/ + if (psPVRSRVData->bUnload #if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) - if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload) -#else - if (psPVRSRVData->bUnload) + || psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK #endif + || (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING && !psData->bDependsOnHW)) { CLEANUP_THREAD_FN pfnFree = psData->pfnFree; @@ -210,6 +356,8 @@ void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData) eError = pfnFree(psData->pvData); + PVR_DPF((PVR_DBG_MESSAGE, "BEFORE ADDING ----- %s", _ConcatCleanupString(acCleanupString))); + if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " @@ -220,125 +368,338 @@ void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData) } else { - OS_SPINLOCK_FLAGS uiFlags; + /* + * Access psData before putting it in the work list. + * Cleanup thread will free psData after it is done with the cleanup. + * We should consider psData potentially freed after it was put into the worklist. + */ + + OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsQueued); + OSAtomicIncrement(&psPVRSRVData->i32CleanupItemTypes[psData->eCleanupType]); +#if defined(DEBUG) + psData->pszFun = pszFun; + psData->ui32LineNum = ui32LineNum; +#endif /* add this work item to the list */ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); - dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode); + OSAtomicIncrement(&psDeviceNode->i32NumCleanupItems); + dllist_add_to_tail(&psDeviceNode->sCleanupThreadWorkList, &psData->sNode); OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); - OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsQueued); - /* signal the cleanup thread to ensure this item gets processed */ eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); + + PVR_DPF((PVR_DBG_MESSAGE, "AFTER ADDING ----- %s", _ConcatCleanupString(acCleanupString))); PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); } } -/* Pop an item from the head of the cleanup thread work list */ -static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData) +void PVRSRVCleanupThreadWaitForDevice(PVRSRV_DEVICE_NODE *psDeviceNode) { - DLLIST_NODE *psNode; +#if defined(DEBUG) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); +#endif + IMG_INT32 i32NumCleanupItems; + + PVR_ASSERT(psDeviceNode != NULL); + + if (gpsPVRSRVData->hCleanupThread == NULL) + { + return; + } + + LOOP_UNTIL_TIMEOUT_US(OS_THREAD_DESTROY_TIMEOUT_US) + { + PVRSRV_ERROR eError; + + if (gpsPVRSRVData->hCleanupEventObject) + { + eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + i32NumCleanupItems = OSAtomicRead(&psDeviceNode->i32NumCleanupItems); + + if (i32NumCleanupItems == 0) + { +#if defined(DEBUG) + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + PVR_LOG_IF_FALSE(dllist_is_empty(&psDeviceNode->sCleanupThreadWorkList), + "Cleanup thread work list is not empty"); + + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); +#endif + break; + } + + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US / OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if (i32NumCleanupItems != 0) + { + PVR_LOG(("Failed to flush device cleanup queue. 
%d items remain.", i32NumCleanupItems)); + } + +#if defined(DEBUG) + _CleanupThreadWorkListDump(psPVRSRVData); +#endif +} + +static INLINE DLLIST_NODE *_CleanupThreadWorkListLast(PVRSRV_DATA *psPVRSRVData) +{ + DLLIST_NODE *psNode = NULL; + PVRSRV_DEVICE_NODE *psDeviceNode; OS_SPINLOCK_FLAGS uiFlags; + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); - psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList); + + /* We treat the Host device node as the last node in the list. */ + psNode = dllist_get_prev_node(&psPVRSRVData->psHostMemDeviceNode->sCleanupThreadWorkList); if (psNode != NULL) { + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + return psNode; + } + + /* Iterate over all devices and find the last node of the last device. */ + for (psDeviceNode = psPVRSRVData->psDeviceNodeList; + psDeviceNode != NULL; + psDeviceNode = psDeviceNode->psNext) + { + DLLIST_NODE *psCurrNode = dllist_get_prev_node(&psDeviceNode->sCleanupThreadWorkList); + if (psCurrNode != NULL) + { + psNode = psCurrNode; + } + } + + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + + return psNode; +} + +/* Pop an item from the head of the cleanup thread work lists. + * + * This pops an item in a round robin manner: + * + * 1. It starts from the first device and moves to next device if no cleanup + * item found. + * 2. If no items were found in the regular devices bound lists it moves to the + * Host device list. + * 3. In case there was nothing in the Host device items list but there was + * something in the devices lists, it returns first found device bound item + * + * If `*ppsDeviceNode` is not NULL the lookup of the next cleanup item will + * start from the next devices after `*ppsDeviceNode`. This makes sure that + * we always pop a first item from each device and move to the next device. + * We prevent "starvation" of some devices this way. 
+ */ +static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData, + PVRSRV_DEVICE_NODE **ppsDeviceNode) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = NULL, *psFirstDevice = NULL; + DLLIST_NODE *psNode = NULL, *psFirstNode = NULL; + OS_SPINLOCK_FLAGS uiFlags; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + /* always need to start from the beginning of the list in case some of the + * devices were already destroyed */ + for (psDeviceNode = psPVRSRVData->psDeviceNodeList; + psDeviceNode != NULL; + psDeviceNode = psDeviceNode->psNext) + { + /* not safe to run cleanup during deinit */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) + { + continue; + } + + psNode = dllist_get_next_node(&psDeviceNode->sCleanupThreadWorkList); + + /* remember first non-NULL node and device in case no items are found + * on later devices */ + if (psNode != NULL && psFirstNode == NULL) + { + psFirstNode = psNode; + psFirstDevice = psDeviceNode; + } + + /* to ensure that further device don't get starved skip the last device + * until all devices are processed and then move to non-device bound + * items */ + if (*ppsDeviceNode != NULL) { + if (psDeviceNode == *ppsDeviceNode) + { + *ppsDeviceNode = NULL; + } + + /* in case this is the last device and next iteration exits the + * loop */ + psNode = NULL; + + continue; + } + + if (psNode != NULL) + { + *ppsDeviceNode = psDeviceNode; + dllist_remove_node(psNode); + + break; + } + } + + /* if no item found in the regular devices check also the Host device */ + if (psNode == NULL) + { + psNode = dllist_get_next_node(&psPVRSRVData->psHostMemDeviceNode->sCleanupThreadWorkList); + if (psNode != NULL) + { + dllist_remove_node(psNode); + } + *ppsDeviceNode = psPVRSRVData->psHostMemDeviceNode; + } + + /* if no item found for the Host device check if there was a cleanup item + * in one of the previous devices, this starts processing items from the + * beginning without one extra empty call to "wrap around" */ + if (psNode == NULL && psFirstNode != NULL) + { + psNode = psFirstNode; + *ppsDeviceNode = psFirstDevice; dllist_remove_node(psNode); } + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); return psNode; } /* Process the cleanup thread work list */ static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData, - IMG_BOOL *pbUseGlobalEO) + IMG_BOOL *pbUseHWTimeout) { DLLIST_NODE *psNodeIter, *psNodeLast; PVRSRV_ERROR eError; IMG_BOOL bNeedRetry = IMG_FALSE; OS_SPINLOCK_FLAGS uiFlags; + PVRSRV_CLEANUP_TYPE eCleanupType; + PVRSRV_DEVICE_NODE *psDeviceNode = NULL; - /* any callback functions which return error will be - * moved to the back of the list, and additional items can be added - * to the list at any time so we ensure we only iterate from the - * head of the list to the current tail (since the tail may always - * be changing) - */ - - OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); - psNodeLast = dllist_get_prev_node(&psPVRSRVData->sCleanupThreadWorkList); - OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + /* Reset HWTimeout Flag */ + *pbUseHWTimeout = IMG_FALSE; + psNodeLast = _CleanupThreadWorkListLast(psPVRSRVData); if (psNodeLast == NULL) { /* no elements to clean up */ return IMG_FALSE; } + /* any callback functions which return error will be + * moved to the back of the list, and additional items can be added + * to 
the list at any time so we ensure we only iterate from the + * head of the list to the current tail (since the tail may always + * be changing) + */ do { - psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData); + PVRSRV_CLEANUP_THREAD_WORK *psData; + CLEANUP_THREAD_FN pfnFree; + IMG_BOOL bRetry = IMG_FALSE; - if (psNodeIter != NULL) + psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData, &psDeviceNode); + if (psNodeIter == NULL) { - PVRSRV_CLEANUP_THREAD_WORK *psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); - CLEANUP_THREAD_FN pfnFree; + return IMG_FALSE; + } - /* get the function pointer address here so we have access to it - * in order to report the error in case of failure, without having - * to depend on psData not having been freed - */ - pfnFree = psData->pfnFree; + psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); - *pbUseGlobalEO = psData->bDependsOnHW; - eError = pfnFree(psData->pvData); + /* get the function pointer address here so we have access to it + * in order to report the error in case of failure, without having + * to depend on psData not having been freed + */ + pfnFree = psData->pfnFree; + eCleanupType = psData->eCleanupType; + eError = pfnFree(psData->pvData); - if (eError != PVRSRV_OK) + if (eError != PVRSRV_OK) + { + /* Move to back of the list, if this item's + * retry count hasn't hit zero. + */ + if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData)) { - /* move to back of the list, if this item's - * retry count hasn't hit zero. - */ - if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData)) + if (CLEANUP_THREAD_RETRY_TIMEOUT_NOT_REACHED(psData)) { - if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData)) + bNeedRetry = IMG_TRUE; + bRetry = IMG_TRUE; + /* If any items require retry and are HW dependent + * use the HW timeout + */ + if (psData->bDependsOnHW) { - bNeedRetry = IMG_TRUE; + *pbUseHWTimeout = psData->bDependsOnHW; } } - else + } + else + { + if (psData->ui32RetryCount > 0) { - if (psData->ui32RetryCount-- > 0) + psData->ui32RetryCount--; + bNeedRetry = IMG_TRUE; + bRetry = IMG_TRUE; + /* If any items require retry and are HW dependent + * use the HW timeout + */ + if (psData->bDependsOnHW) { - bNeedRetry = IMG_TRUE; + *pbUseHWTimeout = psData->bDependsOnHW; } } + } - if (bNeedRetry) - { - OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); - dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter); - OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); - } - else - { - PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " - "(callback " IMG_PFN_FMTSPEC "). " - "Retry limit reached", - pfnFree)); - OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued); - OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted); - - } + /* If the work depends on HW then we should add it to the back of the list, + * the cleanup thread will sleep for longer if required and the next MISR + * from the device will wake the task again in which it might be ready. + */ + if (bRetry || psData->bDependsOnHW) + { + /* If any items on the work list depend on HW + * and didn't get cleaned up. + */ + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + dllist_add_to_tail(&psDeviceNode->sCleanupThreadWorkList, psNodeIter); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); } else { - OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued); + PVR_DPF((PVR_DBG_ERROR, "Failed to free resource (callback " IMG_PFN_FMTSPEC "). 
" + "Retry limit reached", pfnFree)); + OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted); + OSAtomicDecrement(&psDeviceNode->i32NumCleanupItems); + /* Dropping item */ + _CleanupThreadDecrementStats(psPVRSRVData, eCleanupType); } } - } while ((psNodeIter != NULL) && (psNodeIter != psNodeLast)); + else + { + /* Ok returned */ + OSAtomicDecrement(&psDeviceNode->i32NumCleanupItems); + _CleanupThreadDecrementStats(psPVRSRVData, eCleanupType); + } + } while (psNodeIter != NULL && psNodeIter != psNodeLast); return bNeedRetry; } @@ -363,8 +724,6 @@ static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData) eError = OSSpinLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock); PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Exit); - dllist_init(&psPVRSRVData->sCleanupThreadWorkList); - Exit: return eError; } @@ -373,17 +732,15 @@ static void CleanupThread(void *pvData) { PVRSRV_DATA *psPVRSRVData = pvData; IMG_BOOL bRetryWorkList = IMG_FALSE; - IMG_HANDLE hGlobalEvent; + IMG_BOOL bUseHWTimeout = IMG_FALSE; IMG_HANDLE hOSEvent; PVRSRV_ERROR eRc; - IMG_BOOL bUseGlobalEO = IMG_FALSE; IMG_UINT32 uiUnloadRetry = 0; + DLLIST_NODE *psNodeIter, *psNodeLast; /* Store the process id (pid) of the clean-up thread */ psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID(); psPVRSRVData->cleanupThreadTid = OSGetCurrentThreadID(); - OSAtomicWrite(&psPVRSRVData->i32NumCleanupItemsQueued, 0); - OSAtomicWrite(&psPVRSRVData->i32NumCleanupItemsNotCompleted, 0); PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... ")); @@ -393,20 +750,16 @@ static void CleanupThread(void *pvData) eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent); PVR_ASSERT(eRc == PVRSRV_OK); - eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent); - PVR_ASSERT(eRc == PVRSRV_OK); - /* While the driver is in a good state and is not being unloaded * try to free any deferred items when signalled */ while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) { - IMG_HANDLE hEvent; - + IMG_UINT64 ui64Timeoutus; if (psPVRSRVData->bUnload) { - if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) || - uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) + if (dllist_is_empty(&psPVRSRVData->psHostMemDeviceNode->sCleanupThreadWorkList) || + uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) { break; } @@ -419,18 +772,26 @@ static void CleanupThread(void *pvData) * Bridge lock re-acquired on our behalf before the wait call returns. */ - if (bRetryWorkList && bUseGlobalEO) + if (bRetryWorkList && bUseHWTimeout) { - hEvent = hGlobalEvent; + /* If item depends on HW we are + * waiting for GPU work to finish, so + * use MAX_HW_TIME_US as timeout (this + * will be set appropriately when + * running on systems with emulated + * hardware, etc). + */ + ui64Timeoutus = MAX_HW_TIME_US; } else { - hEvent = hOSEvent; + /* Use the default retry timeout. */ + ui64Timeoutus = CLEANUP_THREAD_WAIT_RETRY_TIMEOUT; } - eRc = OSEventObjectWaitKernel(hEvent, + eRc = OSEventObjectWaitKernel(hOSEvent, bRetryWorkList ? 
- CLEANUP_THREAD_WAIT_RETRY_TIMEOUT : + ui64Timeoutus : CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT); if (eRc == PVRSRV_ERROR_TIMEOUT) { @@ -445,7 +806,32 @@ static void CleanupThread(void *pvData) PVR_LOG_ERROR(eRc, "OSEventObjectWaitKernel"); } - bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO); + bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, + &bUseHWTimeout); + } + + psNodeLast = _CleanupThreadWorkListLast(psPVRSRVData); + if (psNodeLast != NULL) + { + do + { + PVRSRV_DEVICE_NODE *psDeviceNode = NULL; + PVRSRV_CLEANUP_THREAD_WORK *psData; + psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData, &psDeviceNode); + if (psNodeIter == NULL) + { + break; + } + + psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); + OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted); + OSAtomicDecrement(&psDeviceNode->i32NumCleanupItems); + /* Dropping item */ + _CleanupThreadDecrementStats(psPVRSRVData, psData->eCleanupType); + } + while (psNodeIter != NULL && psNodeIter != psNodeLast); + + PVR_DPF((PVR_DBG_ERROR, "Cleanup Thread Failed to free %d resources", OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted))); } OSSpinLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock); @@ -453,9 +839,6 @@ static void CleanupThread(void *pvData) eRc = OSEventObjectClose(hOSEvent); PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); - eRc = OSEventObjectClose(hGlobalEvent); - PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); - PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... ")); } @@ -469,6 +852,127 @@ uintptr_t PVRSRVCleanupThreadGetTid(void) return gpsPVRSRVData->cleanupThreadTid; } +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) +/* + * Firmware is unresponsive. + * The Host will initiate a recovery process during which the + * Firmware and GPU are reset and returned to a working state. + */ +static PVRSRV_ERROR HandleFwHostSideRecovery(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32CtxIdx = 0U; + IMG_UINT32 ui32Nodes = 0U; + + OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); + /* Get the number of nodes in a linked list */ + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + ++ui32Nodes; + } + + /* Any client contexts active at the moment? 
*/ + if (ui32Nodes > 0U) + { + /* Free the active context buffer previously allocated */ + if (psDevInfo->psRGXFWIfActiveContextBufDesc) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc); + psDevInfo->psRGXFWIfActiveContextBufDesc = NULL; + } + + /* Setup allocations to store the active contexts */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + (ui32Nodes + 1) * sizeof(RGXFWIF_ACTIVE_CONTEXT_BUF_DATA), + "FwSysActiveContextBufData", + &psDevInfo->psRGXFWIfActiveContextBufDesc, + (void *) &psDevInfo->psRGXFWIfSysInit->sActiveContextBufBase.ui32Addr, + (void **) &psDevInfo->psRGXFWIfActiveContextBuf, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation",Error); + + /* List of contexts to be rekicked by FW powering up the device */ + dllist_foreach_node_backwards(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + psDevInfo->psRGXFWIfActiveContextBuf[ui32CtxIdx].psContext = + RGXGetFWCommonContextAddrFromServerCommonCtx(psDevInfo, psNode); + ++ui32CtxIdx; + } + /* Null context as the terminator marker */ + psDevInfo->psRGXFWIfActiveContextBuf[ui32CtxIdx].psContext.ui32Addr = 0; + } + OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); + + /* Host can't expect a response on power-down request as FW is in BAD state */ + eError = PVRSRVSetDeviceCurrentPowerState(psDeviceNode->psPowerDev, PVRSRV_DEV_POWER_STATE_OFF); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVSetDeviceCurrentPowerState OFF", Error); + + /* Flag to be set to notify FW while recovering from crash */ + psDevInfo->psRGXFWIfSysInit->bFwHostRecoveryMode = IMG_TRUE; + + /* Flush here because we have setup a fw alloc addr in the structure earlier */ + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfSysInit, FLUSH); + + /* Power-on the device resetting GPU & FW */ + OSLockAcquire(psDeviceNode->hPowerLock); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVSetDevicePowerStateKM ON", Error); + OSLockRelease(psDeviceNode->hPowerLock); + +Error: + return eError; +} +#endif + + +void PVRSRVDeviceSetState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_STATE eNewDevState) +{ + if (eNewDevState == psDeviceNode->eDevState) + { + return; + } + + switch (eNewDevState) + { + case PVRSRV_DEVICE_STATE_PCI_ERROR: + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetDebugDevStateString(eNewDevState))); + psDeviceNode->eDevState = eNewDevState; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PMRFreeZombies(psDeviceNode); +#endif + break; + } + case PVRSRV_DEVICE_STATE_CREATING: + case PVRSRV_DEVICE_STATE_CREATED: + case PVRSRV_DEVICE_STATE_ACTIVE: + case PVRSRV_DEVICE_STATE_FROZEN: + case PVRSRV_DEVICE_STATE_DEINIT: + case PVRSRV_DEVICE_STATE_DESTRUCTING: + case PVRSRV_DEVICE_STATE_BAD: + { + /* PCI_ERROR is a terminal state. Reload driver to recover. 
*/ + if (eNewDevState != PVRSRV_DEVICE_STATE_PCI_ERROR) + { + psDeviceNode->eDevState = eNewDevState; + } + break; + } + case PVRSRV_DEVICE_STATE_UNDEFINED: + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown state (%d)", __func__, eNewDevState)); + break; + } + } +} + static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) { @@ -490,6 +994,9 @@ static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, return; } + /* Block here if needed */ + PVRSRVBlockIfFrozen(psDeviceNode); + if (psDeviceNode->pfnUpdateHealthStatus != NULL) { eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, bCheckAfterTimePassed); @@ -514,22 +1021,7 @@ static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); #if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) - /* - * Firmware is unresponsive. - * The Host will initiate a recovery process during which the - * Firmware and GPU are reset and returned to a working state. - */ - - /* Host can't expect response after sending power-down request */ - eError = PVRSRVSetDeviceCurrentPowerState(psDeviceNode->psPowerDev, PVRSRV_DEV_POWER_STATE_OFF); - PVR_LOG_IF_ERROR(eError, "Setting current power state failed on host-side recovery"); - - OSLockAcquire(psDeviceNode->hPowerLock); - eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, - PVRSRV_DEV_POWER_STATE_ON, - PVRSRV_POWER_FLAGS_NONE); - PVR_LOG_IF_ERROR(eError, "RGX power-on failed on host-side recovery"); - OSLockRelease(psDeviceNode->hPowerLock); + HandleFwHostSideRecovery(psDeviceNode); #endif } } @@ -599,7 +1091,7 @@ static void HWPerfPeriodicHostEventsThread(void *pvData) * indefinite sleep. */ bInfiniteSleep = IMG_FALSE; #if defined(SUPPORT_RGX) - RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM_USAGE); + RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM64_USAGE); #endif } } @@ -663,9 +1155,6 @@ static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent, eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000); -#ifdef PVR_TESTING_UTILS - psPVRSRVData->ui32DevicesWdWakeupCounter++; -#endif if (eError == PVRSRV_OK) { @@ -857,9 +1346,6 @@ static void DevicesWatchdogThread(void *pvData) to initialise) or for the event signal (shutdown or power on). 
*/ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); -#ifdef PVR_TESTING_UTILS - psPVRSRVData->ui32DevicesWdWakeupCounter++; -#endif if (eError == PVRSRV_OK) { if (psPVRSRVData->bUnload) @@ -926,11 +1412,7 @@ static void DevicesWatchdogThread(void *pvData) #if defined(SUPPORT_AUTOVZ) static void AutoVzWatchdogThread_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) { - if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) - { - return; - } - else if (psDeviceNode->pfnUpdateAutoVzWatchdog != NULL) + if (psDeviceNode->pfnUpdateAutoVzWatchdog != NULL) { psDeviceNode->pfnUpdateAutoVzWatchdog(psDeviceNode); } @@ -1003,15 +1485,9 @@ static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData) */ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_RETRIES] = 10; -#if defined(VIRTUAL_PLATFORM) - psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1200000U; -#else -#if defined(EMULATOR) - psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 20000U; -#else - psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1000U; -#endif /* EMULATOR */ -#endif + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = + MAX_HW_TIME_US / 1000U; return PVRSRV_OK; } @@ -1045,6 +1521,80 @@ static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, } } +#define PVRVZMODE_STR_SIZE_MAX (10U) +#define PVRVZMODE_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(PVRVZMODE_STR_SIZE_MAX))+1) + +static const IMG_CHAR *const asModeStrings[] = +{ + "native", + "host", + "guest", + "default" +}; + +static void _InitDefaultVzDriverMode(PVRSRV_DATA *psPVRSRVData, void *pvAppHintState) +{ + char *pszMode, pszModeAppHint[PVRVZMODE_BUFFER_SIZE]; + static const IMG_CHAR *pszDefault = ""; + PVRSRV_DRIVER_MODE aeInitModes[PVRSRV_MAX_DEVICES]; + IMG_BOOL bRet; + IMG_UINT ui32Dev; + + for (ui32Dev = 0; ui32Dev < PVRSRV_MAX_DEVICES; ui32Dev++) + { + aeInitModes[ui32Dev] = DRIVER_MODE_DEFAULT; + } + + pszMode = pszModeAppHint; + pszModeAppHint[0] = '\0'; + + bRet = (IMG_BOOL) OSGetAppHintSTRING(APPHINT_NO_DEVICE, + pvAppHintState, + DriverMode, + pszDefault, + pszModeAppHint, + PVRVZMODE_BUFFER_SIZE); + PVR_GOTO_IF_FALSE(bRet, uninit_default); + + ui32Dev = 0; + + while (*pszMode && (ui32Dev < PVRSRV_MAX_DEVICES)) + { + PVRSRV_DRIVER_MODE eMode; + + while (*pszMode == ',') + { + /* skip commas */ + pszMode++; + } + + for (eMode = DRIVER_MODE_NATIVE; eMode <= DRIVER_MODE_DEFAULT; eMode++) + { + /* if no matching strings found, device's mode remains DEFAULT */ + if (OSStringNCompare(pszMode, + asModeStrings[eMode], + OSStringLength(asModeStrings[eMode])) == 0) + { + aeInitModes[ui32Dev] = eMode; + } + } + + while ((*pszMode != '\0') && (*pszMode != ',')) + { + /* advance until the next command or string end */ + pszMode++; + } + + ui32Dev++; + } + +uninit_default: + for (ui32Dev = 0; ui32Dev < PVRSRV_MAX_DEVICES; ui32Dev++) + { + psPVRSRVData->aeModuleParamDriverMode[ui32Dev] = aeInitModes[ui32Dev]; + } +} + PVRSRV_ERROR PVRSRVCommonDriverInit(void) { @@ -1072,6 +1622,10 @@ PVRSRVCommonDriverInit(void) return PVRSRV_ERROR_ALREADY_EXISTS; } +#if defined(SUPPORT_RGX) + RGXBridgeDriverInit(); +#endif + eError = DIInit(); PVR_GOTO_IF_ERROR(eError, Error); @@ -1089,9 +1643,9 @@ PVRSRVCommonDriverInit(void) PVR_GOTO_IF_ERROR(eError, Error); /* - * Allocate the device-independent data + * Allocate the device-independent data. Do NOT track this allocation. 
*/ - psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData)); + psPVRSRVData = OSAllocZMemNoStats(sizeof(*gpsPVRSRVData)); PVR_GOTO_IF_NOMEM(psPVRSRVData, eError, Error); /* Now it is set up, point gpsPVRSRVData to the actual data */ @@ -1117,9 +1671,6 @@ PVRSRVCommonDriverInit(void) eError = ServerBridgeInit(); PVR_GOTO_IF_ERROR(eError, Error); - eError = PhysHeapInit(); - PVR_GOTO_IF_ERROR(eError, Error); - eError = DevmemIntInit(); PVR_GOTO_IF_ERROR(eError, Error); @@ -1142,10 +1693,10 @@ PVRSRVCommonDriverInit(void) #endif bAppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, &bAppHintDefault, &bEnablePageFaultDebug); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); if (bEnablePageFaultDebug) { @@ -1180,21 +1731,28 @@ PVRSRVCommonDriverInit(void) eError = PVRSRVHandleInit(); PVR_GOTO_IF_ERROR(eError, Error); - OSCreateKMAppHintState(&pvAppHintState); + OSCreateAppHintState(&pvAppHintState); ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, CleanupThreadPriority, + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, CleanupThreadPriority, &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority); ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, WatchdogThreadPriority, + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, WatchdogThreadPriority, &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority); bAppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING; - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableFullSyncTracking, + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableFullSyncTracking, &bAppHintDefault, &bEnableFullSyncTracking); - OSFreeKMAppHintState(pvAppHintState); + + _InitDefaultVzDriverMode(gpsPVRSRVData, pvAppHintState); + + + OSFreeAppHintState(pvAppHintState); pvAppHintState = NULL; + eError = HostMemDeviceCreate(&gpsPVRSRVData->psHostMemDeviceNode); + PVR_GOTO_IF_ERROR(eError, Error); + eError = _CleanupThreadPrepare(gpsPVRSRVData); PVR_LOG_GOTO_IF_ERROR(eError, "_CleanupThreadPrepare", Error); @@ -1243,9 +1801,6 @@ PVRSRVCommonDriverInit(void) PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Error); #endif - eError = HostMemDeviceCreate(&gpsPVRSRVData->psHostMemDeviceNode); - PVR_GOTO_IF_ERROR(eError, Error); - /* Initialise the Transport Layer */ eError = TLInit(); PVR_GOTO_IF_ERROR(eError, Error); @@ -1258,8 +1813,14 @@ PVRSRVCommonDriverInit(void) /* Initialise TL control stream */ eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream, - PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE, - TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL, + PVRSRV_TL_CTRL_STREAM, + PVRSRV_TL_CTRL_STREAM_SIZE, + TL_OPMODE_DROP_OLDEST, + NULL, + NULL, + NULL, + NULL, + NULL, NULL); if (eError != PVRSRV_OK) { @@ -1288,6 +1849,14 @@ PVRSRVCommonDriverInit(void) psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; } + { + IMG_UINT64 *pui64InfoPage; + + pui64InfoPage = IMG_OFFSET_ADDR_DW(psPVRSRVData->pui32InfoPage, + DEVMEM_INFO_PHYS_BUF_MAX_SIZE); + *pui64InfoPage = PMR_MAX_SUPPORTED_SIZE; + } + /* Initialise the Host Trace Buffer */ eError = HTBInit(); PVR_GOTO_IF_ERROR(eError, Error); @@ -1296,6 +1865,13 @@ PVRSRVCommonDriverInit(void) 
RGXHWPerfClientInitAppHintCallbacks(); #endif + eError = OSLockCreate(&psPVRSRVData->hClientStreamTableLock); + PVR_GOTO_IF_ERROR(eError, Error); + + psPVRSRVData->psClientStreamTable = HASH_Create_Extended(16, PRVSRVTL_MAX_STREAM_NAME_SIZE, + HASH_Djb2_Hash, HASH_Djb2_Compare); + PVR_GOTO_IF_NOMEM(psPVRSRVData->psClientStreamTable, eError, Error); + /* Late init. client cache maintenance via info. page */ eError = CacheOpInit2(); PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpInit2", Error); @@ -1341,6 +1917,16 @@ PVRSRVCommonDriverDeInit(void) return; } + if (gpsPVRSRVData->psClientStreamTable != NULL) + { + HASH_Delete(gpsPVRSRVData->psClientStreamTable); + } + + if (gpsPVRSRVData->hClientStreamTableLock != NULL) + { + OSLockDestroy(gpsPVRSRVData->hClientStreamTableLock); + } + if (gpsPVRSRVData->pui32InfoPage != NULL) { bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; @@ -1366,7 +1952,7 @@ PVRSRVCommonDriverDeInit(void) /* Stop and cleanup the devices watchdog thread */ if (gpsPVRSRVData->hAutoVzWatchdogThread) { - LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + LOOP_UNTIL_TIMEOUT_US(OS_THREAD_DESTROY_TIMEOUT_US) { if (gpsPVRSRVData->hAutoVzWatchdogEvObj) { @@ -1381,7 +1967,7 @@ PVRSRVCommonDriverDeInit(void) break; } OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); } @@ -1396,7 +1982,7 @@ PVRSRVCommonDriverDeInit(void) /* Stop and cleanup the devices watchdog thread */ if (gpsPVRSRVData->hDevicesWatchdogThread) { - LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + LOOP_UNTIL_TIMEOUT_US(OS_THREAD_DESTROY_TIMEOUT_US) { if (gpsPVRSRVData->hDevicesWatchdogEvObj) { @@ -1411,7 +1997,7 @@ PVRSRVCommonDriverDeInit(void) break; } OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); } @@ -1427,7 +2013,7 @@ PVRSRVCommonDriverDeInit(void) */ if (gpsPVRSRVData->hCleanupThread) { - LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + LOOP_UNTIL_TIMEOUT_US(OS_THREAD_DESTROY_TIMEOUT_US) { if (gpsPVRSRVData->hCleanupEventObject) { @@ -1442,7 +2028,7 @@ PVRSRVCommonDriverDeInit(void) break; } OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); } @@ -1504,10 +2090,6 @@ PVRSRVCommonDriverDeInit(void) BridgeDispatcherDeinit(); -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) - RIDeInitKM(); -#endif - #if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) CPUMappingHistoryDeInit(); #endif @@ -1526,12 +2108,10 @@ PVRSRVCommonDriverDeInit(void) ServerBridgeDeInit(); - PhysHeapDeinit(); - HTB_DestroyDIEntry(); #if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVStatsDestroy(); + PVRSRVStatsDestroyDI(); /* Stage 1 freeing */ #endif /* PVRSRV_ENABLE_PROCESS_STATS */ DebugCommonDeInitDriver(); @@ -1547,7 +2127,15 @@ PVRSRVCommonDriverDeInit(void) OSWRLockDestroy(gpsPVRSRVData->hDeviceNodeListLock); - OSFreeMem(gpsPVRSRVData); +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsDestroy(); /* Stage 2 freeing */ +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RIDeInitKM(); +#endif +#endif /* PVRSRV_ENABLE_PROCESS_STATS */ + + OSFreeMemNoStats(gpsPVRSRVData); /* Not trackable */ + gpsPVRSRVData = NULL; } @@ -1559,6 +2147,8 @@ static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, /* Only dump info once */ 
PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle; + PVR_UNREFERENCED_PARAMETER(ui32VerbLevel); + PVR_DUMPDEBUG_LOG("------[ System Summary Device ID:%d ]------", psDeviceNode->sDevId.ui32InternalID); switch (psDeviceNode->eCurrentSysPowerState) @@ -1581,14 +2171,14 @@ static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile); } -PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, - PHYS_HEAP_USAGE_FLAGS ui32Flags) +PHYS_HEAP_CONFIG* PVRSRVFindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, + PHYS_HEAP_USAGE_FLAGS ui32Flags) { IMG_UINT32 i; for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) { - if (psDevConfig->pasPhysHeaps[i].ui32UsageFlags == ui32Flags) + if (BITMASK_HAS(psDevConfig->pasPhysHeaps[i].ui32UsageFlags, ui32Flags)) { return &psDevConfig->pasPhysHeaps[i]; } @@ -1597,43 +2187,66 @@ PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, return NULL; } +PVRSRV_ERROR PVRSRVAcquireInternalID(IMG_UINT32 *pui32InternalID) +{ + IMG_UINT32 ui32InternalID = 0; + IMG_BOOL bFound = IMG_FALSE; + + for (ui32InternalID = 0; + ui32InternalID < PVRSRV_MAX_DEVICES; + ui32InternalID++) + { + if (PVRSRVGetDeviceInstance(ui32InternalID) == NULL) + { + bFound = IMG_TRUE; + break; + } + } + + if (bFound) + { + *pui32InternalID = ui32InternalID; + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE; + } +} + PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, - IMG_INT32 i32OsDeviceID, + IMG_INT32 i32KernelDeviceID, PVRSRV_DEVICE_NODE **ppsDeviceNode) { PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_ERROR eError; PVRSRV_DEVICE_CONFIG *psDevConfig; PVRSRV_DEVICE_NODE *psDeviceNode; - IMG_UINT32 ui32AppHintDefault; - IMG_UINT32 ui32AppHintDriverMode; + IMG_UINT32 ui32InternalID; -#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) - IMG_UINT32 ui32AppHintPhysMemTestPasses; -#endif void *pvAppHintState = NULL; #if defined(PVRSRV_ENABLE_PROCESS_STATS) IMG_HANDLE hProcessStats; #endif IMG_BOOL bAppHintDefault; IMG_BOOL bEnablePageFaultDebug = IMG_FALSE; +#if defined(SUPPORT_AUTOVZ) + IMG_BOOL bAutoVzGPUPowerdown = IMG_FALSE; +#endif - MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32OsDeviceID); - - /* Read driver mode (i.e. 
native, host or guest) AppHint early as it is - required by SysDevInit */ - ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DriverMode, - &ui32AppHintDefault, &ui32AppHintDriverMode); - psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode); - psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode); + MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32KernelDeviceID); /* Determine if we've got EnablePageFaultDebug set or not */ bAppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, &bAppHintDefault, &bEnablePageFaultDebug); - OSFreeKMAppHintState(pvAppHintState); + +#if defined(SUPPORT_AUTOVZ) + bAppHintDefault = PVRSRV_APPHINT_AUTOVZGPUPOWERDOWN; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, AutoVzGPUPowerdown, + &bAppHintDefault, &bAutoVzGPUPowerdown); +#endif + OSFreeAppHintState(pvAppHintState); pvAppHintState = NULL; psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode)); @@ -1645,14 +2258,26 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode); #endif + dllist_init(&psDeviceNode->sCleanupThreadWorkList); + OSAtomicWrite(&psDeviceNode->i32NumCleanupItems, 0); + /* Record setting of EnablePageFaultDebug in device-node */ psDeviceNode->bEnablePFDebug = bEnablePageFaultDebug; - psDeviceNode->sDevId.i32OsDeviceID = i32OsDeviceID; - psDeviceNode->sDevId.ui32InternalID = psPVRSRVData->ui32RegisteredDevices; +#if defined(SUPPORT_AUTOVZ) + psDeviceNode->bAutoVzAllowGPUPowerdown = bAutoVzGPUPowerdown; +#endif + psDeviceNode->sDevId.i32KernelDeviceID = i32KernelDeviceID; + eError = PVRSRVAcquireInternalID(&ui32InternalID); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireInternalID", ErrorDeregisterStats); eError = SysDevInit(pvOSDevice, &psDevConfig); PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); + /* Lock down the InternalID for this device by saving the value into + * its device node. This is done after SysDevInit to allow the system + * layer to identify the current device's the internal ID the same way. */ + psDeviceNode->sDevId.ui32InternalID = ui32InternalID; + PVR_ASSERT(psDevConfig); PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice); PVR_ASSERT(!psDevConfig->psDevNode); @@ -1664,8 +2289,14 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, "it must be GPU_LOCAL or CPU_LOCAL"); PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); } + PVR_DPF((PVR_DBG_MESSAGE, "Device PhysHeap Default: %s", + (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL) + ? 
"CPU_LOCAL" : "GPU_LOCAL")); - psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_CREATING); + + psDeviceNode->psDevConfig = psDevConfig; + psDevConfig->psDevNode = psDeviceNode; if (psDevConfig->pfnGpuDomainPower) { @@ -1674,9 +2305,7 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, else { /* If the System Layer doesn't provide a function to query the power state - * of the system hardware, use a default implementation that keeps track of - * the power state locally and assumes the system starting state */ - psDevConfig->pfnGpuDomainPower = PVRSRVDefaultDomainPower; + * of the system hardware, assume the system starting state below. */ #if defined(SUPPORT_AUTOVZ) psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; @@ -1685,37 +2314,15 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, #endif } - psDeviceNode->psDevConfig = psDevConfig; - psDevConfig->psDevNode = psDeviceNode; - -#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) - if (PVRSRV_VZ_MODE_IS(NATIVE)) - { - /* Read AppHint - Configurable memory test pass count */ - ui32AppHintDefault = 0; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, - &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses); - OSFreeKMAppHintState(pvAppHintState); - pvAppHintState = NULL; - if (ui32AppHintPhysMemTestPasses > 0) - { - eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses); - PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", ErrorSysDevDeInit); - } - } + /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6. + * Has to be set before call to PMRInitDevice(). */ + psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7U; +#if defined(SUPPORT_PMR_DEFERRED_FREE) || defined(SUPPORT_MMU_DEFERRED_FREE) + psDeviceNode->uiPowerOffCounter = 0; + psDeviceNode->uiPowerOffCounterNext = 1; #endif - /* Initialise the paravirtualised connection */ - if (!PVRSRV_VZ_MODE_IS(NATIVE)) - { - PvzConnectionInit(); - PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit); - } - - BIT_SET(psDevConfig->psDevNode->ui32VmState, RGXFW_HOST_OS); - eError = PVRSRVRegisterDeviceDbgTable(psDeviceNode); PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit); @@ -1725,6 +2332,11 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, eError = PhysHeapInitDeviceHeaps(psDeviceNode, psDevConfig); PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit); +#if defined(SUPPORT_PMR_DEFERRED_FREE) + eError = PMRInitDevice(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorPhysHeapDeInitDeviceHeaps); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + #if defined(SUPPORT_RGX) /* Requirements: * registered GPU and FW local heaps */ @@ -1734,8 +2346,29 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, { PVR_LOG_ERROR(eError, "RGXRegisterDevice"); eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED; - goto ErrorPhysHeapDeInitDeviceHeaps; + goto ErrorPMRDeInitDevice; + } + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDevConfig)) + { + IMG_UINT32 ui32AppHintDefault = 0; + IMG_UINT32 ui32AppHintPhysMemTestPasses; + + /* Read AppHint - Configurable memory test pass count */ + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, + &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses); + OSFreeAppHintState(pvAppHintState); + pvAppHintState = NULL; + + if 
(ui32AppHintPhysMemTestPasses > 0) + { + eError = PhysMemTest(psDeviceNode, psDevConfig, ui32AppHintPhysMemTestPasses); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", ErrorDeInitRgx); + } } +#endif #endif /* Inform the device layer PhysHeaps are now initialised so that device @@ -1743,7 +2376,7 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL) { eError = psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode); - PVR_GOTO_IF_ERROR(eError, ErrorPhysHeapDeInitDeviceHeaps); + PVR_GOTO_IF_ERROR(eError, ErrorPMRDeInitDevice); } /* Carry out initialisation of a dedicated FW MMU data, if the FW CPU has @@ -1774,16 +2407,15 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, #if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) eError = InitDVFS(psDeviceNode); PVR_LOG_GOTO_IF_ERROR(eError, "InitDVFS", ErrorDVFSInitFail); +#elif defined(SUPPORT_PDVFS) && !defined(NO_HARDWARE) + eError = InitPDVFS(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "InitPDVFS", ErrorPDVFSInitFail); #endif OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0); -#if defined(PVR_TESTING_UTILS) - TUtilsInit(psDeviceNode); -#endif - - OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock); - if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL) + OSWRLockCreate(&psDeviceNode->hPageFaultNotifyLock); + if (psDeviceNode->hPageFaultNotifyLock == NULL) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list", __func__)); @@ -1849,6 +2481,15 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, psPVRSRVData->ui32RegisteredDevices++; OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); + /* Initialise the paravirtualised connection */ + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDevConfig)) + { + PvzConnectionInit(psDevConfig); + PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit); + } + + BIT_SET(psDevConfig->psDevNode->ui32VmState, RGXFW_HOST_DRIVER_ID); + *ppsDeviceNode = psDeviceNode; #if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) @@ -1862,9 +2503,8 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, PVRSRVStatsDeregisterProcess(hProcessStats); #endif -#if defined(SUPPORT_VALIDATION) - OSLockCreateNoStats(&psDeviceNode->hValidationLock); -#endif + + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_CREATED); return PVRSRV_OK; @@ -1891,16 +2531,15 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, DebugCommonDeInitDevice(psDeviceNode); ErrorDestroyMemoryContextPageFaultNotifyListLock: - OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); - psDeviceNode->hMemoryContextPageFaultNotifyListLock = NULL; + OSWRLockDestroy(psDeviceNode->hPageFaultNotifyLock); + psDeviceNode->hPageFaultNotifyLock = NULL; ErrorPageFaultLockFailCreate: -#if defined(PVR_TESTING_UTILS) - TUtilsDeinit(psDeviceNode); -#endif #if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ErrorDVFSInitFail: +#elif defined(SUPPORT_PDVFS) && !defined(NO_HARDWARE) +ErrorPDVFSInitFail: #endif if (psDeviceNode->hDbgReqNotify) @@ -1919,7 +2558,11 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, DevDeInitRGX(psDeviceNode); #endif ErrorFwMMUDeinit: +ErrorPMRDeInitDevice: +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PMRDeInitDevice(psDeviceNode); ErrorPhysHeapDeInitDeviceHeaps: +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ PhysHeapDeInitDeviceHeaps(psDeviceNode); ErrorPowerLockDeInit: PVRSRVPowerLockDeInit(psDeviceNode); @@ -1927,9 +2570,9 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, 
PVRSRVUnregisterDeviceDbgTable(psDeviceNode); ErrorPvzConnectionDeInit: psDevConfig->psDevNode = NULL; - if (!PVRSRV_VZ_MODE_IS(NATIVE)) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDevConfig)) { - PvzConnectionDeInit(); + PvzConnectionDeInit(psDevConfig); } ErrorSysDevDeInit: SysDevDeInit(psDevConfig); @@ -2010,6 +2653,9 @@ static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice, PVRSRV_ERROR_INVALID_PARAMS); psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice; + + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags, + INVALIDATE); ui32State = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; if (pbValue) @@ -2023,20 +2669,47 @@ static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice, PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) { - IMG_BOOL bInitSuccesful = IMG_FALSE; + IMG_BOOL bInitSuccessful = IMG_FALSE; #if defined(PVRSRV_ENABLE_PROCESS_STATS) IMG_HANDLE hProcessStats; #endif PVRSRV_ERROR eError; - MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32OsDeviceID); + PDUMPCOMMENT(psDeviceNode, "Common Device Initialisation"); + + MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32KernelDeviceID); - if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT) + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_CREATED) { PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__)); return PVRSRV_ERROR_INIT_FAILURE; } + /* Allocate an OSEventObject to use in the Freeze / Thaw transitioning to + * allow LBIST requests to be serviced. This object is held for the life of + * this device-node. + */ + eError = OSEventObjectCreate("PVRSRV_DEVICE_THREAD_EVENT_OBJECT", + &psDeviceNode->hDeviceThreadEvObj); + PVR_LOG_RETURN_IF_ERROR(eError, "OSEventObjectCreate"); + + eError = OSEventObjectOpen(psDeviceNode->hDeviceThreadEvObj, + &psDeviceNode->hDeviceFreezeThaw); + + if (PVRSRV_OK != eError) + { + OSEventObjectDestroy(psDeviceNode->hDeviceThreadEvObj); + + PVR_LOG_RETURN_IF_ERROR(eError, "OSEventObjectOpen"); + } + + /* Initial zero-set for the number of Frozen threads */ + OSAtomicWrite(&psDeviceNode->iFreezeCount, 0); + OSAtomicWrite(&psDeviceNode->iTotalFreezes, 0); + + /* Initial zero-set for number of active threads on this device */ + OSAtomicWrite(&psDeviceNode->iThreadsActive, 0); + /* Allocate devmem_history backing store for the device if we have * EnablePageFaultDebug set */ @@ -2071,67 +2744,64 @@ PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess"); #endif + eError = MMU_InitDevice(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "MMU_InitDevice"); + #if defined(SUPPORT_RGX) eError = RGXInit(psDeviceNode); PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit); #endif -#if defined(SUPPORT_DMA_TRANSFER) - PVRSRVInitialiseDMA(psDeviceNode); - PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitialiseDMA", Exit); -#endif - - bInitSuccesful = IMG_TRUE; + bInitSuccessful = IMG_TRUE; #if defined(SUPPORT_RGX) Exit: #endif - eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccesful); + eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccessful); PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceFinalise"); #if defined(SUPPORT_RGX) - if (!PVRSRV_VZ_MODE_IS(GUEST)) - { - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating, - _ReadStateFlag, _SetStateFlag, - APPHINT_OF_DRIVER_NO_DEVICE, - (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN)); - 
PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap, - _ReadStateFlag, _SetStateFlag, - APPHINT_OF_DRIVER_NO_DEVICE, - (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP)); - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger, - _ReadStateFlag, _SetStateFlag, + if (bInitSuccessful && (eError == PVRSRV_OK)) + { + RGXInitMultiCoreInfo(psDeviceNode); + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating, + _ReadStateFlag, _SetStateFlag, + APPHINT_OF_DRIVER_NO_DEVICE, + (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap, + _ReadStateFlag, _SetStateFlag, + APPHINT_OF_DRIVER_NO_DEVICE, + (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN)); + } + + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging, + _ReadDeviceFlag, _SetDeviceFlag, psDeviceNode, - (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER)); - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory, - _ReadStateFlag, _SetStateFlag, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist, + _ReadDeviceFlag, _SetDeviceFlag, psDeviceNode, - (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY)); - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList, - _ReadStateFlag, _SetStateFlag, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic, + RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable, psDeviceNode, - (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN)); + NULL); } - - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging, - _ReadDeviceFlag, _SetDeviceFlag, - psDeviceNode, - (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)); - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist, - _ReadDeviceFlag, _SetDeviceFlag, - psDeviceNode, - (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST)); -#if defined(SUPPORT_VALIDATION) - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_GPUUnitsPowerChange, - _ReadDeviceFlag, _SetDeviceFlag, - psDeviceNode, - (void*)((uintptr_t)RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN)); -#endif - PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic, - RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable, - psDeviceNode, - NULL); #endif #if defined(PVRSRV_ENABLE_PROCESS_STATS) @@ -2142,14 +2812,17 @@ PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) return eError; } -PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) +void PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_ERROR eError; #if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) IMG_BOOL bForceUnload = IMG_FALSE; + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); - if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + if 
((PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) || + (eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_DEAD) || + (eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_FAULT)) { bForceUnload = IMG_TRUE; } @@ -2165,34 +2838,29 @@ PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) DevicememHistoryDeviceDestroy(psDeviceNode); } - MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32OsDeviceID); + MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32KernelDeviceID); - psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT; + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_DEINIT); -#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) - UnregisterDVFSDevice(psDeviceNode); -#endif + PVRSRVCleanupThreadWaitForDevice(psDeviceNode); OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); psPVRSRVData->ui32RegisteredDevices--; OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_DESTRUCTING); + +#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) + UnregisterDVFSDevice(psDeviceNode); +#endif + #if defined(__linux__) pvr_apphint_device_unregister(psDeviceNode); #endif /* defined(__linux__) */ DebugCommonDeInitDevice(psDeviceNode); - if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL) - { - OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); - } - -#if defined(SUPPORT_VALIDATION) - OSLockDestroyNoStats(psDeviceNode->hValidationLock); - psDeviceNode->hValidationLock = NULL; -#endif #if defined(SUPPORT_FALLBACK_FENCE_SYNC) SyncFbDeregisterDevice(psDeviceNode); @@ -2214,8 +2882,9 @@ PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) psSync->pui32LinAddr, psDeviceNode->ui32NextMMUInvalidateUpdate-1, 0xFFFFFFFF, - POLL_FLAG_LOG_ERROR); - PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPollForValueKM"); + POLL_FLAG_LOG_ERROR, + NULL); + PVR_LOG_IF_ERROR(eError, "PVRSRVPollForValueKM"); /* Important to set the device node pointer to NULL * before we free the sync-prim to make sure we don't @@ -2235,6 +2904,8 @@ PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) eError = PVRSRVPowerLock(psDeviceNode); if (eError == PVRSRV_OK) { + IMG_BOOL bHasPowerLock = IMG_TRUE; + #if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) /* * Firmware probably not responding if bForceUnload is set, but we still want to unload the @@ -2245,52 +2916,34 @@ PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) { /* Force idle device */ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); - if (eError != PVRSRV_OK) + PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); + if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) { - PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); - if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) - { - PVRSRVPowerUnlock(psDeviceNode); - } - return eError; + bHasPowerLock = IMG_FALSE; } } - /* Power down the device if necessary */ - eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, - PVRSRV_DEV_POWER_STATE_OFF, - PVRSRV_POWER_FLAGS_FORCED); - PVRSRVPowerUnlock(psDeviceNode); - - if (eError != PVRSRV_OK) + if (bHasPowerLock) { - PVR_LOG_ERROR(eError, "PVRSRVSetDevicePowerStateKM"); - - PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + /* Power down the device if necessary */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + 
PVRSRV_DEV_POWER_STATE_OFF, + PVRSRV_POWER_FLAGS_FORCED); + PVRSRVPowerUnlock(psDeviceNode); - /* - * If the driver is okay then return the error, otherwise we can ignore - * this error. - */ - if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) - { - return eError; - } - else + if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: Will continue to unregister as driver status is not OK", - __func__)); + PVR_LOG_ERROR(eError, "PVRSRVSetDevicePowerStateKM"); + + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); } } } -#if defined(PVR_TESTING_UTILS) - TUtilsDeinit(psDeviceNode); -#endif - #if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) DeinitDVFS(psDeviceNode); +#elif defined(SUPPORT_PDVFS) && !defined(NO_HARDWARE) + DeinitPDVFS(psDeviceNode); #endif if (psDeviceNode->hDbgReqNotify) @@ -2302,32 +2955,52 @@ PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) SyncServerDeinit(psDeviceNode); + MMU_DeInitDevice(psDeviceNode); + #if defined(SUPPORT_RGX) DevDeInitRGX(psDeviceNode); #endif +#if defined(SUPPORT_PMR_DEFERRED_FREE) + /* must be called before PhysHeapDeInitDeviceHeaps() */ + PMRDeInitDevice(psDeviceNode); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + PhysHeapDeInitDeviceHeaps(psDeviceNode); PVRSRVPowerLockDeInit(psDeviceNode); PVRSRVUnregisterDeviceDbgTable(psDeviceNode); + if (psDeviceNode->hPageFaultNotifyLock != NULL) + { + OSWRLockDestroy(psDeviceNode->hPageFaultNotifyLock); + } + /* Release the Connection-Data lock as late as possible. */ if (psDeviceNode->hConnectionsLock) { OSLockDestroy(psDeviceNode->hConnectionsLock); } + /* Release the hDeviceThreadEvObj as late as possible. */ + if (psDeviceNode->hDeviceThreadEvObj) + { + OSEventObjectClose(psDeviceNode->hDeviceFreezeThaw); + + OSEventObjectDestroy(psDeviceNode->hDeviceThreadEvObj); + } + + PVR_ASSERT(OSAtomicRead(&psDeviceNode->iFreezeCount) == 0); + psDeviceNode->psDevConfig->psDevNode = NULL; - if (!PVRSRV_VZ_MODE_IS(NATIVE)) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) { - PvzConnectionDeInit(); + PvzConnectionDeInit(psDeviceNode->psDevConfig); } SysDevDeInit(psDeviceNode->psDevConfig); OSFreeMemNoStats(psDeviceNode); - - return PVRSRV_OK; } /**************************************************************************/ /*! @@ -2375,9 +3048,6 @@ PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, */ SyncPrimSet(psDeviceNode->psMMUCacheSyncPrim, 0xFFFFFFF6UL); - /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6 */ - psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7UL; - eError = PVRSRVPowerLock(psDeviceNode); PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorExit); @@ -2408,15 +3078,22 @@ PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, } #endif - eError = PVRSRVDevInitCompatCheck(psDeviceNode); - if (eError != PVRSRV_OK) + /* + * Guest driver must do a runtime compatibility check against the + * data provided by the Firmware. 
+ */ + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed compatibility check for device %p (%s)", - __func__, psDeviceNode, PVRSRVGetErrorString(eError))); - PVRSRVPowerUnlock(psDeviceNode); - PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); - goto ErrorExit; + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed compatibility check for device %p (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto ErrorExit; + } } PDUMPPOWCMDSTART(psDeviceNode); @@ -2448,6 +3125,7 @@ PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, __func__, psDeviceNode, PVRSRVGetErrorString(eError))); PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); goto ErrorExit; } @@ -2466,19 +3144,19 @@ PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitHWPerfCounters", ErrorExit); eError = RGXPdumpDrainKCCB(psDevInfo, - psDevInfo->psKernelCCBCtl->ui32WriteOffset); + psDevInfo->psKernelCCBCtlLocal->ui32WriteOffset); PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", ErrorExit); } #endif #endif /* defined(SUPPORT_RGX) */ /* Now that the device(s) are fully initialised set them as active */ - psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE; + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_ACTIVE); eError = PVRSRV_OK; } else { /* Initialisation failed so set the device(s) into a bad state */ - psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_BAD); eError = PVRSRV_ERROR_NOT_INITIALISED; } @@ -2488,7 +3166,7 @@ PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, ErrorExit: /* Initialisation failed so set the device(s) into a bad state */ - psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; + PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_BAD); return eError; } @@ -2506,12 +3184,13 @@ PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) PollForValueKM */ static -PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, - IMG_UINT32 ui32Value, - IMG_UINT32 ui32Mask, - IMG_UINT32 ui32Timeoutus, - IMG_UINT32 ui32PollPeriodus, - POLL_FLAGS ePollFlags) +PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Timeoutus, + IMG_UINT32 ui32PollPeriodus, + POLL_FLAGS ePollFlags, + PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate) { #if defined(NO_HARDWARE) PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); @@ -2524,8 +3203,15 @@ PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, #else IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ - LOOP_UNTIL_TIMEOUT(ui32Timeoutus) + LOOP_UNTIL_TIMEOUT_US(ui32Timeoutus) { + if (pfnFwInvalidate) + { + pfnFwInvalidate((const volatile void __force *)pui32LinMemAddr, + sizeof(*pui32LinMemAddr), + PVRSRV_CACHE_OP_INVALIDATE); + } + ui32ActualValue = OSReadHWReg32((void __iomem *)pui32LinMemAddr, 0) & ui32Mask; if (ui32ActualValue == ui32Value) @@ -2538,8 +3224,17 @@ PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, return PVRSRV_ERROR_TIMEOUT; } - OSWaitus(ui32PollPeriodus); - } END_LOOP_UNTIL_TIMEOUT(); +#define 
ONE_MSEC_IN_USECS (1000U) + + if (ui32PollPeriodus <= ONE_MSEC_IN_USECS) + { + OSWaitus(ui32PollPeriodus); + } + else + { + OSSleepms(ui32PollPeriodus / ONE_MSEC_IN_USECS); + } + } END_LOOP_UNTIL_TIMEOUT_US(); if (BITMASK_HAS(ePollFlags, POLL_FLAG_LOG_ERROR)) { @@ -2557,17 +3252,19 @@ PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, PVRSRVPollForValueKM */ PVRSRV_ERROR PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE *psDevNode, - volatile IMG_UINT32 __iomem *pui32LinMemAddr, - IMG_UINT32 ui32Value, - IMG_UINT32 ui32Mask, - POLL_FLAGS ePollFlags) + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + POLL_FLAGS ePollFlags, + PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate) { PVRSRV_ERROR eError; eError = PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, MAX_HW_TIME_US, MAX_HW_TIME_US/WAIT_TRY_COUNT, - ePollFlags); + ePollFlags, + pfnFwInvalidate); if (eError != PVRSRV_OK && BITMASK_HAS(ePollFlags, POLL_FLAG_DEBUG_DUMP)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", @@ -2580,9 +3277,10 @@ PVRSRV_ERROR PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE *psDevNode, } PVRSRV_ERROR -PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, - IMG_UINT32 ui32Value, - IMG_UINT32 ui32Mask) +PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate) { #if defined(NO_HARDWARE) PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); @@ -2601,8 +3299,15 @@ PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectOpen", EventObjectOpenError); eError = PVRSRV_ERROR_TIMEOUT; /* Initialiser for following loop */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { + if (pfnFwInvalidate) + { + pfnFwInvalidate((const volatile void __force *)pui32LinMemAddr, + sizeof(*pui32LinMemAddr), + PVRSRV_CACHE_OP_INVALIDATE); + } + ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask); if (ui32ActualValue == ui32Value) @@ -2632,7 +3337,7 @@ PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, ui32Mask)); } } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); OSEventObjectClose(hOSEvent); @@ -2697,16 +3402,6 @@ IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig) return IMG_FALSE; } -IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig) -{ - if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) || - (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) - { - return IMG_TRUE; - } - return IMG_FALSE; -} - IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig) { return psDevConfig->bHasNonMappableLocalMemory; @@ -2788,14 +3483,14 @@ PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData) #if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) /* functions only used on rogue, but header defining them is common */ -void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState) +void SetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState) { - SysSetAxiProtOSid(ui32OSid, bState); + SysSetAxiProtOSid(hSysData, ui32OSid, bState); } -void SetTrustedDeviceAceEnabled(void) +void SetTrustedDeviceAceEnabled(IMG_HANDLE hSysData) { - SysSetTrustedDeviceAceEnabled(); + SysSetTrustedDeviceAceEnabled(hSysData); } #endif @@ -2855,7 +3550,7 @@ PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void) eError = 
OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); } - LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + LOOP_UNTIL_TIMEOUT_US(OS_THREAD_DESTROY_TIMEOUT_US) { eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread); if (PVRSRV_OK == eError) @@ -2864,7 +3559,7 @@ PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void) break; } OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) @@ -2906,7 +3601,7 @@ PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstance(IMG_UINT32 uiInstance) return psDevNode; } -PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance) +PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByKernelDevID(IMG_INT32 i32OSInstance) { PVRSRV_DEVICE_NODE *psDevNode; @@ -2914,14 +3609,14 @@ PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance) for (psDevNode = gpsPVRSRVData->psDeviceNodeList; psDevNode != NULL; psDevNode = psDevNode->psNext) { - if (i32OSInstance == psDevNode->sDevId.i32OsDeviceID) + if (i32OSInstance == psDevNode->sDevId.i32KernelDeviceID) { MULTI_DEVICE_BRINGUP_DPF("%s: Found DevId %d. Retrieving node.", __func__, i32OSInstance); break; } else { - MULTI_DEVICE_BRINGUP_DPF("%s: Searching for DevId %d: Id %d not matching", __func__, i32OSInstance, psDevNode->sDevId.i32OsDeviceID); + MULTI_DEVICE_BRINGUP_DPF("%s: Searching for DevId %d: Id %d not matching", __func__, i32OSInstance, psDevNode->sDevId.i32KernelDeviceID); } } OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); @@ -2933,11 +3628,244 @@ PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance) return psDevNode; } -/* Default function for querying the power state of the system */ -PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode) +#define _FROZEN 1 /* Device is already frozen */ +#define _NOT_FROZEN 0 /* Device is not frozen */ + +/* Freeze the specified device if not already frozen */ +PVRSRV_ERROR PVRSRVDeviceFreeze(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + IMG_BOOL bHasPowerLock = IMG_FALSE; + + /* Verify that given argument *IS* a recognised device node */ + PVRSRV_DEVICE_NODE *lpsDevNode; + + OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); + for (lpsDevNode = gpsPVRSRVData->psDeviceNodeList; + lpsDevNode != NULL && lpsDevNode != psDevNode; + lpsDevNode = lpsDevNode->psNext) + ; + OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); + + PVR_LOG_RETURN_IF_FALSE((lpsDevNode == psDevNode), "Device node not known", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Mark the device as 'frozen' if not already marked */ + if (OSAtomicCompareExchange(&psDevNode->eFrozen, _NOT_FROZEN, + _FROZEN) == _FROZEN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device %p already frozen", __func__, + psDevNode)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure there are no other threads active within this device. */ + while (OSAtomicRead(&psDevNode->iThreadsActive) > 0) + { + OSReleaseThreadQuanta(); /* Let other threads execute */ + } + + /* Attempt to idle the GPU without power-down. If this fails, we try with + * a potential power-down. + */ + eError = PVRSRVPowerLock(psDevNode); + if (eError != PVRSRV_OK) + { + /* Device is powered down, we can continue + * as there are no in-flight requests in the GPU. 
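The PollForValueKM/PVRSRVWaitForValueKM changes above add an optional firmware-cache invalidate callback before each read and stop busy-waiting once the poll period exceeds one millisecond; the thread-destroy path uses the same bounded-retry idea. A standalone sketch of that polling pattern, assuming POSIX clock_gettime/nanosleep in place of the OSWaitus/OSSleepms wrappers; all names here (poll_for_value, invalidate_fn) are illustrative only:

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
}

/* Mirrors the role of PFN_INVALIDATE_CACHEFUNC: force a fresh read of memory
 * the firmware may have written behind the CPU cache. */
typedef void (*invalidate_fn)(const volatile void *addr, size_t len);

/* Poll *reg until (value & mask) == expected, or until timeout_us elapses. */
static bool poll_for_value(const volatile uint32_t *reg, uint32_t expected,
                           uint32_t mask, uint64_t timeout_us,
                           uint64_t period_us, invalidate_fn invalidate)
{
    const uint64_t deadline = now_us() + timeout_us;

    do {
        if (invalidate != NULL)
            invalidate(reg, sizeof(*reg));

        if ((*reg & mask) == expected)
            return true;

        if (period_us <= 1000u) {
            /* Short period: spin, as the driver does with OSWaitus(). */
            const uint64_t until = now_us() + period_us;
            while (now_us() < until)
                ;
        } else {
            /* Long period: sleep and give up the CPU, like OSSleepms(). */
            struct timespec ts;
            ts.tv_sec  = (time_t)(period_us / 1000000u);
            ts.tv_nsec = (long)((period_us % 1000000u) * 1000u);
            nanosleep(&ts, NULL);
        }
    } while (now_us() < deadline);

    return (*reg & mask) == expected;
}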
+ */ + PVR_DPF((PVR_DBG_WARNING, "%s: Unable to Idle Device (%p) [%u/%d]" + " Device powered down", + __func__, psDevNode, psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID)); + eError = PVRSRV_OK; + } + else + { + bHasPowerLock = IMG_TRUE; + + eError = PVRSRVDeviceIdleRequestKM(psDevNode, + &PVRSRVDeviceIsDefaultStateOFF, + IMG_TRUE); + } + + if (eError != PVRSRV_OK) + { + /* Failed to Idle. Need to remove the pending _FROZEN state. */ + (void) OSAtomicExchange(&psDevNode->eFrozen, _NOT_FROZEN); + PVR_DPF((PVR_DBG_ERROR, "%s: Unable to Idle Device (%p) [%u/%d]", + __func__, psDevNode, psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID)); + + if (bHasPowerLock) + { + PVRSRVPowerUnlock(psDevNode); + } + return eError; + } + + /* Now change the device-state to not be ACTIVE until we unfreeze */ + if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected Device State %u for device %p", + __func__, psDevNode->eDevState, psDevNode)); + + /* Reset device to UNFROZEN */ + (void) OSAtomicExchange(&psDevNode->eFrozen, _NOT_FROZEN); + + if (bHasPowerLock) + { + (void) PVRSRVDeviceIdleCancelRequestKM(psDevNode); + PVRSRVPowerUnlock(psDevNode); + } + else + { + (void) PVRSRVSetDeviceSystemPowerState(psDevNode, + PVRSRV_SYS_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); + } + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_FROZEN); + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Device (%p) [%u/%d] FROZEN", __func__, + psDevNode, + psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID)); + + OSAtomicIncrement(&psDevNode->iTotalFreezes); + + if (bHasPowerLock) + { + PVRSRVPowerUnlock(psDevNode); + } + + return PVRSRV_OK; +} + +/* Unfreeze / Thaw the specified device if frozen */ +PVRSRV_ERROR PVRSRVDeviceThaw(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *lpsDevNode; + IMG_BOOL bHasPowerLock = IMG_FALSE; + + /* Verify that the given argument *IS* a recognised device node */ + OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); + for (lpsDevNode = gpsPVRSRVData->psDeviceNodeList; + lpsDevNode != NULL && lpsDevNode != psDevNode; + lpsDevNode = lpsDevNode->psNext) + ; + OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); + + PVR_LOG_RETURN_IF_FALSE((lpsDevNode == psDevNode), "Device node not known", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Unfreeze the device */ + if (OSAtomicCompareExchange(&psDevNode->eFrozen, _FROZEN, + _NOT_FROZEN) == _NOT_FROZEN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device (%p) [%u/%d] already unfrozen", + __func__, + psDevNode, psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_FROZEN) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected Device state %u for device (%p) [%u/%d]", + __func__, psDevNode->eDevState, psDevNode, + psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID)); + } + else + { + PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_ACTIVE); + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Device (%p) [%u/%d] UNFROZEN", __func__, + psDevNode, + psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID)); + } + + /* Now unblock the device by clearing any ForcedIdle state and/or + * powering-on the device if needed. + */ + eError = PVRSRVPowerLock(psDevNode); + + bHasPowerLock = (eError == PVRSRV_OK) ? 
IMG_TRUE : IMG_FALSE; + + if (bHasPowerLock) + { + eError = PVRSRVDeviceIdleCancelRequestKM(psDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Could not cancel Idle state: Device (%p) [%u/%d] '%s'", + __func__, + psDevNode, + psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID, + PVRSRVGetErrorString(eError))); + } + + PVRSRVPowerUnlock(psDevNode); + } + else + { + /* Force device back into POWER_STATE_ON as it must already be OFF. */ + (void) PVRSRVSetDeviceSystemPowerState(psDevNode, + PVRSRV_SYS_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); + } + + /* Unblock any waiting threads */ + while (OSAtomicRead(&psDevNode->iFreezeCount) > 0) + { + eError = OSEventObjectSignal(psDevNode->hDeviceThreadEvObj); + if (OSAtomicRead(&psDevNode->iFreezeCount) > 0) + { + /* Sleep for 1ms to allow waiter to receive signal */ + OSSleepms(1U); + } + } + + /* Ensure that any blocked queues get rescheduled if we've woken up a + * waiter. + */ + PVRSRVCheckStatus(NULL); + + return PVRSRV_OK; +} + +PVRSRV_DRIVER_MODE PVRSRVGetVzModeByDevNum(IMG_UINT32 ui32DevNum) { - return psDevNode->eCurrentSysPowerState; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DRIVER_MODE eRetMode = DRIVER_MODE_NATIVE; + PVRSRV_DEVICE_NODE *psDevNode; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + + /* Iterate over all devices. */ + for (psDevNode = psPVRSRVData->psDeviceNodeList; + psDevNode != NULL; + psDevNode = psDevNode->psNext) + { + if (psDevNode->sDevId.ui32InternalID == ui32DevNum) + { + eRetMode = psDevNode->psDevConfig->eDriverMode; + break; + } + } + + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + return eRetMode; } + /***************************************************************************** End of file (pvrsrv.c) *****************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_bridge_init.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_bridge_init.c index 2ce4ae0a858a..13beb3f5c454 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_bridge_init.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_bridge_init.c @@ -94,8 +94,10 @@ void DeinitRGXHWPERFBridge(void); PVRSRV_ERROR InitRGXREGCONFIGBridge(void); void DeinitRGXREGCONFIGBridge(void); #endif +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) PVRSRV_ERROR InitRGXKICKSYNCBridge(void); void DeinitRGXKICKSYNCBridge(void); +#endif #endif /* SUPPORT_RGX */ PVRSRV_ERROR InitCACHEBridge(void); void DeinitCACHEBridge(void); @@ -103,7 +105,7 @@ void DeinitCACHEBridge(void); PVRSRV_ERROR InitSMMBridge(void); void DeinitSMMBridge(void); #endif -#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#if defined(PVRSRV_ENABLE_HTB) PVRSRV_ERROR InitHTBUFFERBridge(void); void DeinitHTBUFFERBridge(void); #endif @@ -119,10 +121,6 @@ void DeinitDEVICEMEMHISTORYBridge(void); PVRSRV_ERROR InitVALIDATIONBridge(void); void DeinitVALIDATIONBridge(void); #endif -#if defined(PVR_TESTING_UTILS) -PVRSRV_ERROR InitTUTILSBridge(void); -void DeinitTUTILSBridge(void); -#endif PVRSRV_ERROR InitSYNCTRACKINGBridge(void); void DeinitSYNCTRACKINGBridge(void); #if defined(SUPPORT_WRAP_EXTMEM) @@ -187,7 +185,7 @@ ServerBridgeInit(void) PVR_LOG_IF_ERROR(eError, "InitSMMBridge"); #endif -#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#if defined(PVRSRV_ENABLE_HTB) eError = InitHTBUFFERBridge(); PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge"); #endif @@ -205,10 +203,6 @@ ServerBridgeInit(void) 
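PVRSRVDeviceFreeze/PVRSRVDeviceThaw above rely on an atomic compare-exchange so that only one caller can flip the frozen flag, and on repeatedly signalling an event object until every blocked waiter (iFreezeCount) has woken up. A standalone sketch of that handshake using C11 atomics and pthreads in place of the OSAtomic and OSEventObject primitives; the struct and function names are illustrative only:

#define _POSIX_C_SOURCE 199309L
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct dev {
    atomic_int      frozen;    /* 0 = running, 1 = frozen */
    atomic_int      waiters;   /* threads currently blocked on the freeze */
    pthread_mutex_t lock;
    pthread_cond_t  cond;
};

static bool dev_freeze(struct dev *d)
{
    int expected = 0;
    /* Only the caller that wins the compare-exchange performs the freeze. */
    return atomic_compare_exchange_strong(&d->frozen, &expected, 1);
}

static void dev_wait_if_frozen(struct dev *d)
{
    pthread_mutex_lock(&d->lock);
    while (atomic_load(&d->frozen)) {
        atomic_fetch_add(&d->waiters, 1);
        pthread_cond_wait(&d->cond, &d->lock);
        atomic_fetch_sub(&d->waiters, 1);
    }
    pthread_mutex_unlock(&d->lock);
}

static void dev_thaw(struct dev *d)
{
    int expected = 1;
    if (!atomic_compare_exchange_strong(&d->frozen, &expected, 0))
        return;                        /* it was not frozen: nothing to do */

    /* Keep signalling until every waiter has observed the thaw. */
    while (atomic_load(&d->waiters) > 0) {
        struct timespec one_ms = { 0, 1000000L };   /* like OSSleepms(1U) */

        pthread_mutex_lock(&d->lock);
        pthread_cond_broadcast(&d->cond);
        pthread_mutex_unlock(&d->lock);
        nanosleep(&one_ms, NULL);
    }
}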
PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge"); #endif -#if defined(PVR_TESTING_UTILS) - eError = InitTUTILSBridge(); - PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge"); -#endif eError = InitDEVICEMEMHISTORYBridge(); PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge"); @@ -252,8 +246,10 @@ ServerBridgeInit(void) PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge"); #endif +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) eError = InitRGXKICKSYNCBridge(); PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge"); +#endif eError = InitRGXTIMERQUERYBridge(); PVR_LOG_IF_ERROR(eError, "InitRGXTIMERQUERYBridge"); @@ -317,9 +313,6 @@ void ServerBridgeDeInit(void) DeinitPDUMPBridge(); #endif -#if defined(PVR_TESTING_UTILS) - DeinitTUTILSBridge(); -#endif #if defined(SUPPORT_DISPLAY_CLASS) DeinitDCBridge(); @@ -331,7 +324,7 @@ void ServerBridgeDeInit(void) DeinitSMMBridge(); #endif -#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#if defined(PVRSRV_ENABLE_HTB) DeinitHTBUFFERBridge(); #endif @@ -377,9 +370,10 @@ void ServerBridgeDeInit(void) DeinitRGXREGCONFIGBridge(); #endif +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) DeinitRGXKICKSYNCBridge(); +#endif DeinitRGXTIMERQUERYBridge(); - #endif /* SUPPORT_RGX */ } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_pool.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_pool.c index d62a062a944c..9fa95ea98756 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_pool.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/pvrsrv_pool.c @@ -213,7 +213,7 @@ PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, psPool->uiNumFree--; } -#if defined(DEBUG) || defined(SUPPORT_VALIDATION) +#if defined(DEBUG) /* Don't poison the IN buffer as that is copied from client and would be * waste of cycles. */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/ri_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/ri_server.c index cc30b2866875..370e41dd4553 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/ri_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/ri_server.c @@ -41,7 +41,7 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ -#if defined(__linux__) +#if defined(__linux__) && defined(__KERNEL__) #include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) #include @@ -72,6 +72,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "dllist.h" #include "pmr.h" +#include "physheap.h" /* include/device.h */ #include "device.h" @@ -90,15 +91,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define _RI_INITIAL_HASH_TABLE_SIZE 64 -/* - * Values written to the 'valid' field of RI structures when created and - * cleared prior to being destroyed. The code can then check this value - * before accessing the provided pointer contents as a valid RI structure. - */ -#define _VALID_RI_LIST_ENTRY 0x66bccb66 -#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77 -#define _INVALID 0x00000000 - /* * If this define is set to 1, details of the linked lists (addresses, * prev/next ptrs, etc) are also output when function RIDumpList() is called. 
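The bridge changes above keep each optional bridge's Init/Deinit pair behind one feature macro (SUPPORT_RGXKICKSYNC_BRIDGE, PVRSRV_ENABLE_HTB) so the registration and teardown sides cannot drift apart. A tiny standalone sketch of the same guard pattern; the FOO bridge, its macro, and the lower-case function names are hypothetical:

#include <stdio.h>

#define SUPPORT_FOO_BRIDGE   /* comment out to compile the bridge away */

#if defined(SUPPORT_FOO_BRIDGE)
static int  init_foo_bridge(void)   { puts("foo bridge registered");   return 0; }
static void deinit_foo_bridge(void) { puts("foo bridge unregistered"); }
#endif

static int server_bridge_init(void)
{
#if defined(SUPPORT_FOO_BRIDGE)
    if (init_foo_bridge() != 0)
        return -1;
#endif
    return 0;
}

static void server_bridge_deinit(void)
{
#if defined(SUPPORT_FOO_BRIDGE)
    deinit_foo_bridge();
#endif
}

int main(void)
{
    if (server_bridge_init() == 0)
        server_bridge_deinit();
    return 0;
}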
@@ -112,21 +104,31 @@ typedef IMG_UINT64 _RI_BASE_T; /* No +1 in SIZE macros since sizeof includes \0 byte in size */ #define RI_PROC_BUF_SIZE 16 +#define RI_ANNO_BUF_SIZE 80 +#define RI_ANNO_FRMT_SIZE (sizeof(RI_ANNO_FRMT)) #define RI_DEV_ID_BUF_SIZE 4 -#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\ +#define RI_MEMDESC_SUM_FRMT "PID:%d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\ "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\ "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n" #define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+30+60) -#define RI_PMR_SUM_FRMT "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\ +#define RI_PMR_SUM_FRMT "PID:%d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\ "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n" #define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(20+40)) -#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c" -#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+16+PVR_ANNOTATION_MAX_LEN+10+10)) +#define RI_FREED_BY_DRIVER "{Freed by KM}" + +#define RI_PMR_ENTRY_IMPORTED_FRMT_BEGIN "{Imported to DEV:" +#define RI_PMR_ENTRY_IMPORTED_FRMT_END "\b}" +#define RI_PMR_ENTRY_IMPORTED_DEV_FRMT "%2u," +#define RI_PMR_ENTRY_IMPORTED_DEV_SIZE 3 +#define RI_PMR_ENTRY_IMPORTED_BUF_SIZE (sizeof(RI_PMR_ENTRY_IMPORTED_FRMT_BEGIN)+sizeof(RI_PMR_ENTRY_IMPORTED_FRMT_END) + PVRSRV_MAX_DEVICES * RI_PMR_ENTRY_IMPORTED_DEV_SIZE) + +#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s <%%p>\t%%-%ds\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%s%%s%%c" +#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+16+(PVR_ANNOTATION_MAX_LEN/2)+PHYS_HEAP_NAME_SIZE+10+10)+ sizeof(RI_FREED_BY_DRIVER)) #define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT)) /* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */ @@ -140,9 +142,9 @@ static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE]; #define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}" #define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5) -#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%c" +#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s %%s%%c" #define RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\ - RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE)) + RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE)) + sizeof(RI_FREED_BY_DRIVER) #define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)) @@ -151,25 +153,64 @@ static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE]; MAX(RI_MEMDESC_SUM_BUF_SIZE,\ RI_PMR_SUM_BUF_SIZE)))) +typedef struct _RI_PMR_INFO_ +{ + uintptr_t uiAddr; + PHYS_HEAP *psHeap; + PVRSRV_DEVICE_NODE *psDeviceNode; + size_t uiLogicalSize; + size_t uiPhysicalSize; + IMG_CHAR *pszAnnotation; +#if defined(PVRSRV_ENABLE_XD_MEM) + IMG_UINT64 uiXDevices; +#endif +} RI_PMR_INFO; + + +#define 
RI_DEVID_MASK 0xFF /* Highest value is PVRSRV_HOST_DEVICE_ID[255] */ + +/* Flags for Memdescs(RI_SUBLIST) */ +#define RI_IMPORT_FLAG (8) /* Indicates the PMR is an import */ +#define RI_SUBALLOC_FLAG (9) /* Indicates the PMR is sub-allocatable */ + +/* Flags for PMRs(RI_LIST) */ +#define RI_SYSALLOC_PMR_FLAG (10) /* Indicates the PMR belongs to the 'system' process */ +#define RI_PMR_PHYS_COUNTED_BY_DEBUGFS_FLAG (11) /* Indicates size counted prior, to prevent double reads */ + +/* Shared flags */ +#define RI_RACC_FLAG (12) /* Freed by Driver after disconnect */ +#define RI_VALID_FLAG (13) /* Valid on creation, Only invalid after delete */ +#define RI_HAS_PMR_INFO (14) /* Entry has RI_PMR_INFO object rather than reference to a PMR */ +#define GET_DEVICE_ID(entry) ((entry)->ui16Flags & RI_DEVID_MASK) +#define SET_DEVICE_ID(entry,id) BITMASK_SET((entry)->ui16Flags, (id & RI_DEVID_MASK)) +#define IS_HOST_DEVICE(entry) (((entry)->ui16Flags & RI_DEVID_MASK) == PVRSRV_HOST_DEVICE_ID) +#define IS_IMPORT(entry) BIT_ISSET((entry)->ui16Flags, RI_IMPORT_FLAG) +#define IS_SUBALLOC(entry) BIT_ISSET((entry)->ui16Flags, RI_SUBALLOC_FLAG) + +#define IS_SYSPMR(entry) BIT_ISSET((entry)->ui16Flags, RI_SYSALLOC_PMR_FLAG) +#define IS_COUNTED_BY_DEBUGFS(entry) BIT_ISSET((entry)->ui16Flags, RI_PMR_PHYS_COUNTED_BY_DEBUGFS_FLAG) + +#define IS_RACC(entry) BIT_ISSET((entry)->ui16Flags, RI_RACC_FLAG) +#define IS_VALID(entry) BIT_ISSET((entry)->ui16Flags, RI_VALID_FLAG) +#define HAS_PMR_INFO(entry) BIT_ISSET((entry)->ui16Flags, RI_HAS_PMR_INFO) /* Structure used to make linked sublist of memory allocations (MEMDESC) */ struct _RI_SUBLIST_ENTRY_ { DLLIST_NODE sListNode; + DLLIST_NODE sProcListNode; /* Sublist entries that share the same PID. + IMPORTANT: This list does not contain a head node! */ + CONNECTION_DATA *psConnection; struct _RI_LIST_ENTRY_ *psRI; - IMG_UINT32 valid; - IMG_BOOL bIsImport; - IMG_BOOL bIsSuballoc; - IMG_PID pid; - IMG_UINT32 ui32DevID; - IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; + IMG_CHAR *pszTextB; /* Annotation+(NUL)+ProcName+(NUL) */ IMG_DEV_VIRTADDR sVAddr; IMG_UINT64 ui64Offset; IMG_UINT64 ui64Size; - IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1]; - DLLIST_NODE sProcListNode; + IMG_PID pid; + IMG_UINT16 ui16TextBLength; /* Total length of Annotation+(NUL)+ProcName+(NUL) */ + IMG_UINT16 ui16Flags; /* Refer to above */ }; /* @@ -179,26 +220,67 @@ struct _RI_SUBLIST_ENTRY_ struct _RI_LIST_ENTRY_ { DLLIST_NODE sListNode; - DLLIST_NODE sSysAllocListNode; DLLIST_NODE sSubListFirst; - IMG_UINT32 valid; - PMR *psPMR; + union { + PMR *psPMR; + RI_PMR_INFO *psPmrInfo; + } pmr_info; IMG_PID pid; - IMG_UINT32 ui32DevID; - IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; IMG_UINT16 ui16SubListCount; - IMG_UINT16 ui16MaxSubListCount; - IMG_UINT32 ui32RIPMRFlags; /* Flags used to indicate the type of allocation */ - IMG_UINT32 ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */ + IMG_UINT16 ui16Flags; /* Refer to above */ }; +#define GET_KEY(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (void*)(entry)->pmr_info.psPmrInfo \ + : (void*)(entry)->pmr_info.psPMR \ + ) + +#define GET_ADDR(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (void*)(entry)->pmr_info.psPmrInfo->uiAddr \ + : (void*)(entry)->pmr_info.psPMR \ + ) +#define GET_HEAP(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (entry)->pmr_info.psPmrInfo->psHeap \ + : PMR_PhysHeap((entry)->pmr_info.psPMR) \ + ) +#define GET_DEVNODE(entry) ( \ + HAS_PMR_INFO(entry) \ + ? 
(entry)->pmr_info.psPmrInfo->psDeviceNode \ + : (PVRSRV_DEVICE_NODE *) PMR_DeviceNode((entry)->pmr_info.psPMR) \ + ) +#define GET_LOGICAL_SIZE(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (entry)->pmr_info.psPmrInfo->uiLogicalSize \ + : PMR_LogicalSize((entry)->pmr_info.psPMR) \ + ) +#define GET_PHYSICAL_SIZE(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (entry)->pmr_info.psPmrInfo->uiPhysicalSize \ + : PMR_PhysicalSize((entry)->pmr_info.psPMR) \ + ) +#define GET_NAME(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (entry)->pmr_info.psPmrInfo->pszAnnotation \ + : PMR_GetAnnotation((entry)->pmr_info.psPMR) \ + ) +#define GET_XDEVICES(entry) ( \ + HAS_PMR_INFO(entry) \ + ? (entry)->pmr_info.psPmrInfo->uiXDevices \ + : PMR_ImportedDevicesMask((entry)->pmr_info.psPMR) \ + ) + +/* pszTextB = ,0,,0 */ +/* Retrieve the string pointer to the ProcName */ +#define GET_PROC(entry) ((IMG_CHAR*) (strchr((entry)->pszTextB, '\0') + 1)) + typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY; typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY; -static IMG_UINT16 g_ui16RICount; -static HASH_TABLE *g_pRIHashTable; -static IMG_UINT16 g_ui16ProcCount; -static HASH_TABLE *g_pProcHashTable; +static HASH_TABLE *g_pPMR2RIListHashTable; +static HASH_TABLE *g_PID2RISublistHashTable; static POS_LOCK g_hRILock; @@ -219,7 +301,7 @@ static IMG_BOOL bRIDeInitDeferred = IMG_FALSE; * Used as head of linked-list of PMR RI entries - this is useful when we wish * to iterate all PMR list entries (when we don't have a PMR ref) */ -static DLLIST_NODE sListFirst; +static DLLIST_NODE g_sClientsListHead; /* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); @@ -232,9 +314,6 @@ static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPri static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid); #define _RIOutput(x) PVR_LOG(x) -#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS 0x1 -#define RI_FLAG_SYSALLOC_PMR 0x2 - static IMG_UINT32 _ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); @@ -307,6 +386,66 @@ static void _RIUnlock(void) */ static IMG_UINT32 g_ui32SysAllocPMRCount; +static IMG_BOOL RICheckListHandle(RI_HANDLE hRIHandle) +{ + RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *) hRIHandle, *psTableEntry; + void *pPMRHashKey; + IMG_BOOL bValid = IMG_FALSE; + + if ((GET_DEVICE_ID(psRIEntry) > PVRSRV_MAX_DEVICES && + !IS_HOST_DEVICE(psRIEntry)) || + !g_pPMR2RIListHashTable) + { + //Shortcut check + return IMG_FALSE; + } + + pPMRHashKey = GET_KEY(psRIEntry); + + _RILock(); + + /* Look-up psPMR in Hash Table */ + psTableEntry = (RI_LIST_ENTRY *) HASH_Retrieve_Extended(g_pPMR2RIListHashTable, + &pPMRHashKey); + if (psTableEntry != NULL) + { + bValid = IS_VALID(psTableEntry); + } + + _RIUnlock(); + + return bValid; +} + +static IMG_BOOL RICheckSubListHandle(RI_HANDLE hRIHandle) +{ + RI_SUBLIST_ENTRY *psRISubEntry, *psTableEntry; + uintptr_t hashData; + IMG_PID pid; + IMG_BOOL bValid = IMG_FALSE; + + + psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; + if ((GET_DEVICE_ID(psRISubEntry) > PVRSRV_MAX_DEVICES) || !g_PID2RISublistHashTable) + { + //Shortcut check + return bValid; + } + pid = psRISubEntry->pid; + + _RILock(); + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_PID2RISublistHashTable, (void *)&pid); + if (hashData) + { + psTableEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, 
sProcListNode); + bValid = IS_VALID(psTableEntry); + } + + /* Release RI Lock */ + _RIUnlock(); + return bValid; +} PVRSRV_ERROR RIInitKM(void) { @@ -319,7 +458,7 @@ PVRSRV_ERROR RIInitKM(void) RI_SYS_ALLOC_IMPORT_FRMT_SIZE, RI_SYS_ALLOC_IMPORT_FRMT, PVR_SYS_ALLOC_PID); - PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \ + PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), "OSSNPrintf failed to initialise g_szSysAllocImport"); eError = OSLockCreate(&g_hSysAllocPidListLock); @@ -343,15 +482,16 @@ PVRSRV_ERROR RIInitKM(void) #endif return eError; } + void RIDeInitKM(void) { #if (USE_RI_LOCK == 1) - if (g_ui16RICount > 0) + if (g_pPMR2RIListHashTable != NULL && HASH_Count(g_pPMR2RIListHashTable) > 0) { PVR_DPF((PVR_DBG_WARNING, "%s: called with %d entries remaining - deferring OSLockDestroy()", __func__, - g_ui16RICount)); + HASH_Count(g_pPMR2RIListHashTable))); bRIDeInitDeferred = IMG_TRUE; } else @@ -421,28 +561,29 @@ void RILockReleaseKM(void) PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, IMG_PID ui32Owner) { + PVRSRV_ERROR eError = PVRSRV_OK; PMR *pPMRHashKey = psPMR; RI_LIST_ENTRY *psRIEntry; uintptr_t hashData; /* if Hash table has not been created, create it now */ - if (!g_pRIHashTable) + if (!g_pPMR2RIListHashTable) { - g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); - g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); + g_pPMR2RIListHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); + g_PID2RISublistHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); } - PVR_RETURN_IF_NOMEM(g_pRIHashTable); - PVR_RETURN_IF_NOMEM(g_pProcHashTable); + PVR_LOG_RETURN_IF_NOMEM(g_pPMR2RIListHashTable, "HASH_Create_Extended"); + PVR_LOG_RETURN_IF_NOMEM(g_PID2RISublistHashTable, "HASH_Create_Extended"); - PVR_RETURN_IF_INVALID_PARAM(psPMR); + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR, "psPMR"); /* Acquire RI Lock */ _RILock(); /* Look-up psPMR in Hash Table */ - hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + hashData = HASH_Retrieve_Extended (g_pPMR2RIListHashTable, (void *)&pPMRHashKey); psRIEntry = (RI_LIST_ENTRY *)hashData; - if (!psRIEntry) + if (psRIEntry == NULL) { PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR); @@ -450,32 +591,27 @@ PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, * If failed to find a matching existing entry, create a new one */ psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY)); - if (!psRIEntry) - { - /* Release RI Lock */ - _RIUnlock(); - /* Error - no memory to allocate for new RI entry */ - return PVRSRV_ERROR_OUT_OF_MEMORY; - } - else + PVR_LOG_GOTO_IF_NOMEM(psRIEntry, eError, exit_); { PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR); + IMG_UINT32 ui32RICount = HASH_Count(g_pPMR2RIListHashTable); /* * Add new RI Entry */ - if (g_ui16RICount == 0) + if (ui32RICount == 0) { /* Initialise PMR entry linked-list head */ - dllist_init(&sListFirst); + dllist_init(&g_sClientsListHead); + } + else if (ui32RICount == IMG_UINT32_MAX) + { + PVR_LOG_GOTO_WITH_ERROR("ui32RICount", eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, exit_); } - g_ui16RICount++; - dllist_init (&(psRIEntry->sSysAllocListNode)); + dllist_init (&(psRIEntry->sListNode)); dllist_init 
(&(psRIEntry->sSubListFirst)); - psRIEntry->ui16SubListCount = 0; - psRIEntry->ui16MaxSubListCount = 0; - psRIEntry->valid = _VALID_RI_LIST_ENTRY; + BIT_SET(psRIEntry->ui16Flags, RI_VALID_FLAG); /* Check if this PMR should be accounted for under the * PVR_SYS_ALLOC_PID debugFS entry. This should happen if @@ -484,51 +620,40 @@ PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, * or the owner PID is PVR_SYS_ALLOC_PID. * Also record host dev node allocs on the system PID. */ - if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE || PVRSRV_CHECK_FW_MAIN(uiPMRFlags) || ui32Owner == PVR_SYS_ALLOC_PID || psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) { - psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR; - OSSNPrintf(psRIEntry->ai8ProcName, - RI_PROC_BUF_SIZE, - "SysProc"); + BIT_SET(psRIEntry->ui16Flags, RI_SYSALLOC_PMR_FLAG); psRIEntry->pid = PVR_SYS_ALLOC_PID; OSLockAcquire(g_hSysAllocPidListLock); /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */ - dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); + dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sListNode)); OSLockRelease(g_hSysAllocPidListLock); g_ui32SysAllocPMRCount++; } else { - psRIEntry->ui32RIPMRFlags = 0; psRIEntry->pid = ui32Owner; + dllist_add_to_tail(&g_sClientsListHead, (PDLLIST_NODE)&(psRIEntry->sListNode)); } - - OSSNPrintf(psRIEntry->ai8ProcName, - RI_PROC_BUF_SIZE, - "%s", - OSGetCurrentClientProcessNameKM()); - /* Add PMR entry to linked-list of all PMR entries */ - dllist_init (&(psRIEntry->sListNode)); - dllist_add_to_tail(&sListFirst, (PDLLIST_NODE)&(psRIEntry->sListNode)); } - psRIEntry->psPMR = psPMR; - psRIEntry->ui32Flags = 0; - psRIEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID; + psRIEntry->pmr_info.psPMR = psPMR; + SET_DEVICE_ID(psRIEntry, psDeviceNode->sDevId.ui32InternalID); /* Create index entry in Hash Table */ - HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry); + HASH_Insert_Extended (g_pPMR2RIListHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry); /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */ PMRStoreRIHandle(psPMR, psRIEntry); } +exit_: /* Release RI Lock */ _RIUnlock(); - return PVRSRV_OK; + return eError; } /*! @@ -562,25 +687,27 @@ PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR) The new entry will be inserted at the head of the sublist of the indicated PMR list entry, and assigned the values provided. 
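The new ui16Flags field above packs the owning device ID into the low byte (RI_DEVID_MASK) and uses the upper bits as booleans such as RI_IMPORT_FLAG and RI_VALID_FLAG, replacing the old magic 'valid' words and separate ID members. A standalone sketch of that packing with generic macro names; note the driver's SET_DEVICE_ID simply ORs the masked ID in, whereas this sketch clears the old ID first:

#include <stdint.h>
#include <stdio.h>

#define DEVID_MASK        0xFFu          /* device ID lives in bits 0..7 */
#define FLAG_IMPORT       8              /* bit positions, not masks */
#define FLAG_VALID        13

#define BIT_SET(f, b)     ((f) |=  (1u << (b)))
#define BIT_UNSET(f, b)   ((f) &= ~(1u << (b)))
#define BIT_ISSET(f, b)   (((f) & (1u << (b))) != 0u)

#define GET_DEVICE_ID(f)     ((f) & DEVID_MASK)
#define SET_DEVICE_ID(f, id) ((f) = (uint16_t)(((f) & ~DEVID_MASK) | ((id) & DEVID_MASK)))

int main(void)
{
    uint16_t flags = 0;

    SET_DEVICE_ID(flags, 3);
    BIT_SET(flags, FLAG_VALID);
    BIT_SET(flags, FLAG_IMPORT);

    printf("dev=%u import=%d valid=%d\n",
           GET_DEVICE_ID(flags),
           BIT_ISSET(flags, FLAG_IMPORT),
           BIT_ISSET(flags, FLAG_VALID));
    return 0;
}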
+ @input psConnection - Reference to the Services connection + @input psDeviceNode - Reference to the device node @input psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates @input ui32TextBSize - Length of string provided in psz8TextB parameter @input psz8TextB - String describing this secondary reference (may be null) @input ui64Offset - Offset from the start of the PMR at which this allocation begins @input ui64Size - Size of this allocation - @input bIsImport - Flag indicating if this is an allocation or an import - @input bIsSuballoc - Flag indicating if this is a sub-allocation + @input uiFlags - Flags indicating if this is a sub-allocation or an import @output phRIHandle - Handle to the created RI entry @Return PVRSRV_ERROR ******************************************************************************/ -PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, +PVRSRV_ERROR RIWriteMEMDESCEntryKM(void* psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + PMR *psPMR, IMG_UINT32 ui32TextBSize, const IMG_CHAR *psz8TextB, IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size, - IMG_BOOL bIsImport, - IMG_BOOL bIsSuballoc, + PVRSRV_MEMALLOCFLAGS_T uiFlags, RI_HANDLE *phRIHandle) { RI_SUBLIST_ENTRY *psRISubEntry; @@ -589,12 +716,21 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, uintptr_t hashData; IMG_PID pid; + IMG_CHAR szProcName[RI_PROC_BUF_SIZE]; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + /* Check Hash tables have been created (meaning at least one PMR has been defined) */ - PVR_RETURN_IF_INVALID_PARAM(g_pRIHashTable); - PVR_RETURN_IF_INVALID_PARAM(g_pProcHashTable); + PVR_LOG_RETURN_IF_INVALID_PARAM(g_pPMR2RIListHashTable, "g_pPMR2RIListHashTable"); + PVR_LOG_RETURN_IF_INVALID_PARAM(g_PID2RISublistHashTable, "g_PID2RISublistHashTable"); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR, "psPMR"); + PVR_LOG_RETURN_IF_INVALID_PARAM(phRIHandle, "phRIHandle"); - PVR_RETURN_IF_INVALID_PARAM(psPMR); - PVR_RETURN_IF_INVALID_PARAM(phRIHandle); + /* Only allow request for a firmware context that comes from a direct bridge + * (psConnection == NULL). 
*/ + PVR_LOG_RETURN_IF_INVALID_PARAM(!(psConnection != NULL && PVRSRV_CHECK_RI_FWKMD_ALLOC(uiFlags)), + "psConnection for firmware context"); /* Acquire RI Lock */ _RILock(); @@ -602,12 +738,13 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, *phRIHandle = NULL; /* Look-up psPMR in Hash Table */ - hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + hashData = HASH_Retrieve_Extended (g_pPMR2RIListHashTable, (void *)&pPMRHashKey); psRIEntry = (RI_LIST_ENTRY *)hashData; if (!psRIEntry) { /* Release RI Lock */ _RIUnlock(); + PVR_DPF((PVR_DBG_ERROR, "RI Entry not found for PMR: '%s'", PMR_GetAnnotation(psPMR))); return PVRSRV_ERROR_INVALID_PARAMS; } @@ -621,66 +758,134 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, } else { - /* - * Insert new entry in sublist - */ - PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst)); - - /* - * Insert new entry before currentNode - */ - if (!currentNode) - { - currentNode = &(psRIEntry->sSubListFirst); - } - dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode)); - psRISubEntry->psRI = psRIEntry; + dllist_add_to_head(&(psRIEntry->sSubListFirst), &(psRISubEntry->sListNode)); /* Increment number of entries in sublist */ psRIEntry->ui16SubListCount++; - if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount) - { - psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount; - } - psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; + BIT_SET(psRISubEntry->ui16Flags, RI_VALID_FLAG); + } + + if (PVRSRV_CHECK_RI_IMPORT(uiFlags)) + { + BIT_SET(psRISubEntry->ui16Flags, RI_IMPORT_FLAG); + } + else + { + BIT_UNSET(psRISubEntry->ui16Flags, RI_IMPORT_FLAG); + } + + if (PVRSRV_CHECK_RI_SUBALLOC(uiFlags)) + { + BIT_SET(psRISubEntry->ui16Flags, RI_SUBALLOC_FLAG); + } + else + { + BIT_UNSET(psRISubEntry->ui16Flags, RI_SUBALLOC_FLAG); } /* If allocation is made during device or driver initialisation, * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use - * the current PID. - * Record host dev node allocations on the system PID. - */ + * the current PID. */ { - PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR); + PVRSRV_DEVICE_NODE *psRIDeviceNode = GET_DEVNODE(psRISubEntry->psRI); + IMG_INT iRet; + + /* HostMemDevice doesn't update eDevState hence there's no need to test + * for it. */ + if (psRIDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) + { + /* Imports on HostMemDev should be attributed to the importing + * process. This way if an import is not freed before disconnect + * it will be outlined in the gpu_mem_area for the given process. + * Otherwise attribute the rest of the records to PVR_SYS_ALLOC_PID. */ + if (IS_IMPORT(psRISubEntry)) + { + psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + iRet = OSStringSafeCopy(szProcName, OSGetCurrentClientProcessNameKM(), + RI_PROC_BUF_SIZE); + } + else + { + psRISubEntry->pid = PVR_SYS_ALLOC_PID; + iRet = OSStringSafeCopy(szProcName, "SysProc", RI_PROC_BUF_SIZE); - if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || - psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) + if (psRISubEntry->pid != psRISubEntry->psRI->pid) + { + PVR_LOG(("%s(1): current PID = %u, RI entry PID = %u, RI sub-entry PID = %u", + __func__, OSGetCurrentClientProcessIDKM(), psRISubEntry->psRI->pid, + psRISubEntry->pid)); + } + } + } + else + { + /* All allocations done during device initialisation or belonging + * to the Firmware should be attributed to PVR_SYS_ALLOC_PID. 
*/ + if (psRIDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE || + PVRSRV_CHECK_RI_FWKMD_ALLOC(uiFlags)) + { + psRISubEntry->pid = PVR_SYS_ALLOC_PID; + iRet = OSStringSafeCopy(szProcName, "SysProc", RI_PROC_BUF_SIZE); + + if (psRISubEntry->pid != psRISubEntry->psRI->pid) + { + PVR_LOG(("%s(2): current PID = %u, RI entry PID = %u, RI sub-entry PID = %u", + __func__, OSGetCurrentClientProcessIDKM(), psRISubEntry->psRI->pid, + psRISubEntry->pid)); + } + } + else + { + psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + iRet = OSStringSafeCopy(szProcName, OSGetCurrentClientProcessNameKM(), + RI_PROC_BUF_SIZE); + } + } + if (iRet < 0) { - psRISubEntry->pid = psRISubEntry->psRI->pid; + PVR_DPF((PVR_DBG_WARNING, "%s: process name has been truncated from '%s' to '%s'", + __func__, OSGetCurrentClientProcessNameKM(), szProcName)); + psRISubEntry->ui16TextBLength = RI_PROC_BUF_SIZE - 1; } else { - psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + psRISubEntry->ui16TextBLength = iRet; } - if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) + if (psz8TextB == NULL) + { + psz8TextB = ""; + ui32TextBSize = 0; + } + if (ui32TextBSize > (RI_ANNO_BUF_SIZE)) { PVR_DPF((PVR_DBG_WARNING, "%s: TextBSize too long (%u). Text will be truncated " - "to %zu characters", __func__, - ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); + "to %d characters", __func__, + ui32TextBSize, RI_ANNO_BUF_SIZE)); + ui32TextBSize = RI_ANNO_BUF_SIZE; } - /* copy ai8TextB field data */ - OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); + /* copy TextB field data plus separator char and terminator */ + psRISubEntry->ui16TextBLength += ui32TextBSize + 2; + psRISubEntry->pszTextB = OSAllocZMemNoStats(psRISubEntry->ui16TextBLength); + if (!psRISubEntry->pszTextB) + { + OSFreeMemNoStats(psRISubEntry); + _RIUnlock(); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* TextB is formatted as Annotation+NullTerm+ProcName so we can still + * print annotation without proc name. + * If any of the strings is too long it will be truncated. 
*/ + (void) OSStringSafeCopy(psRISubEntry->pszTextB, psz8TextB, ui32TextBSize + 1); + (void) OSStringSafeCopy(psRISubEntry->pszTextB + ui32TextBSize + 1, szProcName, + psRISubEntry->ui16TextBLength - ui32TextBSize - 1); - psRISubEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID; psRISubEntry->ui64Offset = ui64Offset; psRISubEntry->ui64Size = ui64Size; - psRISubEntry->bIsImport = bIsImport; - psRISubEntry->bIsSuballoc = bIsSuballoc; - OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); + psRISubEntry->psConnection = psConnection; dllist_init (&(psRISubEntry->sProcListNode)); } @@ -689,15 +894,24 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, */ /* look-up pid in Hash Table */ pid = psRISubEntry->pid; - hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + hashData = HASH_Retrieve_Extended (g_PID2RISublistHashTable, (void *)&pid); if (!hashData) { /* * No allocations for this pid yet */ - HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); - /* Increment number of entries in proc hash table */ - g_ui16ProcCount++; + if (!HASH_Insert_Extended(g_PID2RISublistHashTable, + (void *) &pid, + (uintptr_t) &psRISubEntry->sProcListNode)) + { + dllist_remove_node(&psRISubEntry->sListNode); + psRIEntry->ui16SubListCount--; + OSFreeMemNoStats(psRISubEntry->pszTextB); + OSFreeMemNoStats(psRISubEntry); + + _RIUnlock(); + return PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED; + } } else { @@ -711,6 +925,7 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, */ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); } + *phRIHandle = (RI_HANDLE)psRISubEntry; /* Release RI Lock */ _RIUnlock(); @@ -740,6 +955,8 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, To remove entries from the per-process list, just use RIDeleteMEMDESCEntryKM(). 
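pszTextB above packs the allocation annotation and the process name into one buffer laid out as annotation, NUL, procname, NUL, and GET_PROC() recovers the second string by stepping past the first terminator with strchr(). A standalone sketch of that layout; the example strings are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Second string starts one past the first terminator. */
#define SECOND_STR(buf)  (strchr((buf), '\0') + 1)

static char *pack_two_strings(const char *a, const char *b)
{
    size_t la = strlen(a), lb = strlen(b);
    char *buf = malloc(la + 1 + lb + 1);   /* a, NUL, b, NUL */

    if (buf != NULL) {
        memcpy(buf, a, la + 1);
        memcpy(buf + la + 1, b, lb + 1);
    }
    return buf;
}

int main(void)
{
    char *text = pack_two_strings("FWCodeRegion", "my_process");

    if (text != NULL) {
        printf("annotation='%s' proc='%s'\n", text, SECOND_STR(text));
        free(text);
    }
    return 0;
}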
+ @input psConnection - Reference to the Services connection + @input psDeviceNode - Reference to the device node @input psz8TextB - String describing this secondary reference (may be null) @input ui64Size - Size of this allocation @input ui64DevVAddr - Virtual address of this entry @@ -748,7 +965,7 @@ PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, @Return PVRSRV_ERROR ******************************************************************************/ -PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, +PVRSRV_ERROR RIWriteProcListEntryKM(void* psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32TextBSize, const IMG_CHAR *psz8TextB, @@ -759,15 +976,14 @@ PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, uintptr_t hashData = 0; IMG_PID pid; RI_SUBLIST_ENTRY *psRISubEntry = NULL; + IMG_INT iRet; - PVR_UNREFERENCED_PARAMETER(psConnection); - - if (!g_pRIHashTable) + if (!g_pPMR2RIListHashTable) { - g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); - g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); + g_pPMR2RIListHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); + g_PID2RISublistHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); - if (!g_pRIHashTable || !g_pProcHashTable) + if (!g_pPMR2RIListHashTable || !g_PID2RISublistHashTable) { /* Error - no memory to allocate for Hash table(s) */ return PVRSRV_ERROR_OUT_OF_MEMORY; @@ -788,28 +1004,50 @@ PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, return PVRSRV_ERROR_OUT_OF_MEMORY; } - psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; + BIT_SET(psRISubEntry->ui16Flags, RI_VALID_FLAG); psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); - psRISubEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID; + SET_DEVICE_ID(psRISubEntry, psDeviceNode->sDevId.ui32InternalID); - if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) + if (psz8TextB == NULL) + { + psz8TextB = ""; + ui32TextBSize = 0; + } + if (ui32TextBSize > (RI_ANNO_BUF_SIZE)) { PVR_DPF((PVR_DBG_WARNING, - "%s: TextBSize too long (%u). Text will be truncated " - "to %zu characters", __func__, - ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); + "%s: TextBSize too long (%u). Text will be truncated " + "to %d characters", __func__, + ui32TextBSize, RI_ANNO_BUF_SIZE)); + ui32TextBSize = RI_ANNO_BUF_SIZE; } - /* copy ai8TextB field data */ - OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); - - psRISubEntry->ui64Offset = 0; + /* copy TextB field data */ + psRISubEntry->ui16TextBLength = ui32TextBSize + RI_PROC_BUF_SIZE; + psRISubEntry->pszTextB = OSAllocZMemNoStats(psRISubEntry->ui16TextBLength); + if (!psRISubEntry->pszTextB) + { + OSFreeMemNoStats(psRISubEntry); + _RIUnlock(); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* TextB is formatted as Annotation+NullTerm+ProcName so we can still + * print annotation without proc name. 
+ */ + (void) OSStringSafeCopy(psRISubEntry->pszTextB, psz8TextB, ui32TextBSize + 1); + iRet = OSStringSafeCopy(psRISubEntry->pszTextB + ui32TextBSize + 1, + OSGetCurrentClientProcessNameKM(), + psRISubEntry->ui16TextBLength - ui32TextBSize - 1); + if (iRet < 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s: process name has been truncated from '%s' to '%s'", + __func__, OSGetCurrentClientProcessNameKM(), + psRISubEntry->pszTextB + ui32TextBSize + 1)); + } psRISubEntry->ui64Size = ui64Size; psRISubEntry->sVAddr.uiAddr = ui64DevVAddr; - psRISubEntry->bIsImport = IMG_FALSE; - psRISubEntry->bIsSuballoc = IMG_FALSE; - OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); + psRISubEntry->psConnection = psConnection; dllist_init (&(psRISubEntry->sProcListNode)); /* @@ -817,15 +1055,21 @@ PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, */ /* look-up pid in Hash Table */ pid = psRISubEntry->pid; - hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + hashData = HASH_Retrieve_Extended (g_PID2RISublistHashTable, (void *)&pid); if (!hashData) { /* * No allocations for this pid yet */ - HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); - /* Increment number of entries in proc hash table */ - g_ui16ProcCount++; + if (!HASH_Insert_Extended(g_PID2RISublistHashTable, + (void *) &pid, + (uintptr_t) &psRISubEntry->sProcListNode)) + { + OSFreeMemNoStats(psRISubEntry->pszTextB); + OSFreeMemNoStats(psRISubEntry); + _RIUnlock(); + return PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED; + } } else { @@ -865,10 +1109,10 @@ PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, { RI_SUBLIST_ENTRY *psRISubEntry; - PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + PVR_LOG_RETURN_IF_INVALID_PARAM(hRIHandle, "hRIHandle"); psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; - if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + if (!IS_VALID(psRISubEntry)) { /* Pointer does not point to valid structure */ return PVRSRV_ERROR_INVALID_PARAMS; @@ -885,127 +1129,157 @@ PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, return PVRSRV_OK; } -/*! -******************************************************************************* - - @Function RIDeletePMREntryKM +static PVRSRV_ERROR RIDeletePMREntryUnlocked(RI_LIST_ENTRY *psRIEntry) +{ + PMR *pPMRHashKey; + uintptr_t hashValue = 0; - @Description - Delete a Resource Information entry. 
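RIDeletePMREntryUnlocked above (and RIDeleteMemdescEntryUnlocked further down) split the actual list and hash surgery out of the public entry points, which take the RI lock themselves, so the same code can be reused by cleanup paths that already hold the lock. A standalone sketch of that locked-wrapper/unlocked-worker split using a pthread mutex and a toy list; all names are illustrative and the entries are only unlinked, not freed:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry { struct entry *next; };
static struct entry *g_list;

/* Caller must hold g_lock. */
static void entry_delete_unlocked(struct entry *e)
{
    struct entry **pp;

    for (pp = &g_list; *pp != NULL; pp = &(*pp)->next) {
        if (*pp == e) {
            *pp = e->next;
            break;
        }
    }
}

/* Public entry point: take the lock, then reuse the unlocked worker. */
void entry_delete(struct entry *e)
{
    pthread_mutex_lock(&g_lock);
    entry_delete_unlocked(e);
    pthread_mutex_unlock(&g_lock);
}

/* Bulk cleanup holds the lock once and calls the worker directly. */
void entry_delete_all(void)
{
    pthread_mutex_lock(&g_lock);
    while (g_list != NULL)
        entry_delete_unlocked(g_list);
    pthread_mutex_unlock(&g_lock);
}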
+ /* Remove the HASH table index entry */ + pPMRHashKey = GET_KEY(psRIEntry); + hashValue = HASH_Remove_Extended(g_pPMR2RIListHashTable, (void *)&pPMRHashKey); + PVR_LOG_RETURN_IF_INVALID_PARAM(hashValue, "RI"); - @input hRIHandle - Handle of object whose reference info is to be deleted + BIT_UNSET(psRIEntry->ui16Flags, RI_VALID_FLAG); - @Return PVRSRV_ERROR + /* Remove PMR entry from linked-list of PMR entries */ + dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode)); -******************************************************************************/ -PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle) -{ - RI_LIST_ENTRY *psRIEntry; - PMR *pPMRHashKey; - PVRSRV_ERROR eResult = PVRSRV_OK; + if (IS_SYSPMR(psRIEntry)) + { + g_ui32SysAllocPMRCount--; + } - PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + if (IS_RACC(psRIEntry)) + { + OSFreeMemNoStats(psRIEntry->pmr_info.psPmrInfo); + } - psRIEntry = (RI_LIST_ENTRY *)hRIHandle; + /* Now, free the memory used to store the RI entry */ + OSFreeMemNoStats(psRIEntry); + psRIEntry = NULL; - if (psRIEntry->valid != _VALID_RI_LIST_ENTRY) + /* If the hash table is now empty we can delete the RI hash table */ + if (HASH_Count(g_pPMR2RIListHashTable) == 0) { - /* Pointer does not point to valid structure */ - return PVRSRV_ERROR_INVALID_PARAMS; + HASH_Delete(g_pPMR2RIListHashTable); + g_pPMR2RIListHashTable = NULL; } + return PVRSRV_OK; +} - if (psRIEntry->ui16SubListCount == 0) - { - /* Acquire RI lock*/ - _RILock(); +static PVRSRV_ERROR _RICreateAndSetPmrInfo(RI_LIST_ENTRY *const psRIEntry) +{ + uintptr_t uiEntry; + void* pPMRHashKey; + const IMG_CHAR *const pszAnnotation = PMR_GetAnnotation(psRIEntry->pmr_info.psPMR); + const IMG_UINT32 uiLength = OSStringNLength(pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN) + 1; - /* Remove the HASH table index entry */ - pPMRHashKey = psRIEntry->psPMR; - HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey); + RI_PMR_INFO *psPmrInfo = OSAllocZMemNoStats(sizeof(*psPmrInfo) + uiLength); + PVR_LOG_RETURN_IF_NOMEM(psPmrInfo, "OSAllocZMemNoStats"); - psRIEntry->valid = _INVALID; + psPmrInfo->uiAddr = (uintptr_t) psRIEntry->pmr_info.psPMR; + psPmrInfo->psHeap = PMR_PhysHeap(psRIEntry->pmr_info.psPMR); + psPmrInfo->psDeviceNode = (PVRSRV_DEVICE_NODE *) PMR_DeviceNode(psRIEntry->pmr_info.psPMR); + psPmrInfo->uiLogicalSize = PMR_LogicalSize(psRIEntry->pmr_info.psPMR); + psPmrInfo->uiPhysicalSize = PMR_PhysicalSize(psRIEntry->pmr_info.psPMR); - /* Remove PMR entry from linked-list of PMR entries */ - dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode)); + psPmrInfo->pszAnnotation = IMG_OFFSET_ADDR(psPmrInfo, sizeof(*psPmrInfo)); + OSStringSafeCopy(psPmrInfo->pszAnnotation, pszAnnotation, uiLength); - if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) - { - dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); - g_ui32SysAllocPMRCount--; - } +#if defined(PVRSRV_ENABLE_XD_MEM) + psPmrInfo->uiXDevices = PMR_ImportedDevicesMask(psRIEntry->pmr_info.psPMR); +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ - /* Now, free the memory used to store the RI entry */ - OSFreeMemNoStats(psRIEntry); - psRIEntry = NULL; + /* Swap the key from the PMR address to the psPmrInfo's address. 
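_RICreateAndSetPmrInfo above re-keys the hash entry: because the table is keyed by the PMR pointer, the entry has to be removed under the old key and re-inserted under the new RI_PMR_INFO address. A standalone sketch of that remove-then-reinsert re-keying against a toy pointer-keyed table; the map_* helpers are not part of the driver:

#include <assert.h>
#include <stddef.h>

#define NSLOTS 8

struct slot { void *key; void *value; };
static struct slot g_table[NSLOTS];

static int map_insert(void *key, void *value)
{
    for (size_t i = 0; i < NSLOTS; i++) {
        if (g_table[i].key == NULL) {
            g_table[i].key = key;
            g_table[i].value = value;
            return 1;
        }
    }
    return 0;
}

static void *map_remove(void *key)
{
    for (size_t i = 0; i < NSLOTS; i++) {
        if (g_table[i].key == key) {
            void *value = g_table[i].value;
            g_table[i].key = NULL;
            g_table[i].value = NULL;
            return value;
        }
    }
    return NULL;
}

/* Re-key an entry: remove it under the old key and re-insert it under the
 * new one, the same shape as the HASH_Remove_Extended/HASH_Insert_Extended
 * pair used above. */
static void map_rekey(void *old_key, void *new_key)
{
    void *value = map_remove(old_key);
    assert(value != NULL);
    map_insert(new_key, value);
}

int main(void)
{
    int pmr = 0, info = 0;            /* stand-ins for PMR / RI_PMR_INFO */

    map_insert(&pmr, &pmr);
    map_rekey(&pmr, &info);
    assert(map_remove(&info) == (void *)&pmr);
    return 0;
}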
*/ - /* - * Decrement number of RI entries - if this is now zero, - * we can delete the RI hash table - */ - if (--g_ui16RICount == 0) - { - HASH_Delete(g_pRIHashTable); - g_pRIHashTable = NULL; + pPMRHashKey = GET_KEY(psRIEntry); + uiEntry = HASH_Remove_Extended(g_pPMR2RIListHashTable, (void*) &pPMRHashKey); + /* Should be removing itself to change the key. */ + PVR_ASSERT(((void*) uiEntry) == ((void*) psRIEntry)); - _RIUnlock(); + psRIEntry->pmr_info.psPmrInfo = psPmrInfo; + BIT_SET(psRIEntry->ui16Flags, RI_HAS_PMR_INFO); - /* If deInit has been deferred, we can now destroy the RI Lock */ - if (bRIDeInitDeferred) - { - OSLockDestroy(g_hRILock); - } - } - else - { - /* Release RI lock*/ - _RIUnlock(); - } - /* - * Make the handle NULL once PMR RI entry is deleted - */ - hRIHandle = NULL; - } - else - { - eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; - } + pPMRHashKey = GET_KEY(psRIEntry); + HASH_Insert_Extended(g_pPMR2RIListHashTable, (void*) &pPMRHashKey, (uintptr_t) psRIEntry); - return eResult; + return PVRSRV_OK; } /*! ******************************************************************************* - @Function RIDeleteMEMDESCEntryKM + @Function RIDeletePMREntryKM @Description Delete a Resource Information entry. - Entry can be from RIEntry list or ProcList. @input hRIHandle - Handle of object whose reference info is to be deleted @Return PVRSRV_ERROR ******************************************************************************/ -PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) +PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle) { - RI_LIST_ENTRY *psRIEntry = NULL; - RI_SUBLIST_ENTRY *psRISubEntry; - uintptr_t hashData; - IMG_PID pid; + PVRSRV_ERROR eError = PVRSRV_OK; + RI_LIST_ENTRY *psRIEntry; - PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + psRIEntry = (RI_LIST_ENTRY *) hRIHandle; - psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; - if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + if (!RICheckListHandle(hRIHandle)) { - /* Pointer does not point to valid structure */ - return PVRSRV_ERROR_INVALID_PARAMS; + /* Pointer does not point to valid structure. */ + PVR_LOG_GOTO_WITH_ERROR("RICheckListHandle", eError, PVRSRV_ERROR_INVALID_PARAMS, ErrReturn); + } + else if (IS_RACC(psRIEntry)) + { + /* Keep this entry so that it can be inspected as a memory leak in the + * gpu_mem_area stats. */ + return PVRSRV_OK; + } + + if (psRIEntry->ui16SubListCount != 0) + { + PVR_DPF((PVR_DBG_WARNING, "%p not deleted. 
RIEntry(%s) still has %u allocation(s)", + psRIEntry, GET_NAME(psRIEntry), psRIEntry->ui16SubListCount)); + + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP, + ErrCreateAndAssignPmrInfo); } - /* Acquire RI lock*/ _RILock(); + eError = RIDeletePMREntryUnlocked(psRIEntry); + + _RIUnlock(); + + /* If deInit has been deferred, we can now destroy the RI Lock */ + if (bRIDeInitDeferred && (g_pPMR2RIListHashTable == NULL || HASH_Count(g_pPMR2RIListHashTable) == 0)) + { + OSLockDestroy(g_hRILock); + } + + return PVRSRV_OK; + +ErrCreateAndAssignPmrInfo: + { + PVRSRV_ERROR eError2; + _RILock(); + eError2 = _RICreateAndSetPmrInfo(psRIEntry); + _RIUnlock(); + PVR_LOG_RETURN_IF_ERROR(eError2, "_RICreateAndSetPmrInfo"); + } +ErrReturn: + return eError; +} + +static PVRSRV_ERROR RIDeleteMemdescEntryUnlocked(RI_SUBLIST_ENTRY *psRISubEntry) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + uintptr_t hashData; + uintptr_t hashValue = 0; + IMG_PID pid; + /* For entries which do have a parent PMR remove the node from the sublist */ if (psRISubEntry->psRI) { @@ -1015,33 +1289,36 @@ PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) dllist_remove_node(&(psRISubEntry->sListNode)); } - psRISubEntry->valid = _INVALID; + BIT_UNSET(psRISubEntry->ui16Flags, RI_VALID_FLAG); /* Remove the entry from the proc allocations linked list */ pid = psRISubEntry->pid; /* If this is the only allocation for this pid, just remove it from the hash table */ if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) { - HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); - /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */ - if (--g_ui16ProcCount == 0) + hashValue = HASH_Remove_Extended(g_PID2RISublistHashTable, (void *)&pid); + PVR_LOG_RETURN_IF_INVALID_PARAM(hashValue, "PID"); + /* Delete the hash table if there are now no entries */ + if (HASH_Count(g_PID2RISublistHashTable) == 0) { - HASH_Delete(g_pProcHashTable); - g_pProcHashTable = NULL; + HASH_Delete(g_PID2RISublistHashTable); + g_PID2RISublistHashTable = NULL; } } else { - hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + hashData = HASH_Retrieve_Extended (g_PID2RISublistHashTable, (void *)&pid); if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode)) { - HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); - HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode))); + hashValue = HASH_Remove_Extended(g_PID2RISublistHashTable, (void *)&pid); + PVR_LOG_RETURN_IF_INVALID_PARAM(hashValue, "PID"); + HASH_Insert_Extended (g_PID2RISublistHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode))); } } dllist_remove_node(&(psRISubEntry->sProcListNode)); /* Now, free the memory used to store the sublist entry */ + OSFreeMemNoStats(psRISubEntry->pszTextB); OSFreeMemNoStats(psRISubEntry); psRISubEntry = NULL; @@ -1052,6 +1329,42 @@ PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) { psRIEntry->ui16SubListCount--; } + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDeleteMEMDESCEntryKM + + @Description + Delete a Resource Information entry. + Entry can be from RIEntry list or ProcList. 
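When a PMR is torn down while RI sub-entries still reference it, the code above preserves what it still needs to report (sizes, annotation, device) in an RI_PMR_INFO snapshot and switches the entry's union from the live PMR pointer to that snapshot, so the record can still be shown later as a leak. A standalone sketch of the same pointer-or-snapshot idea with hypothetical types; strdup() is assumed to be available (POSIX):

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct pmr {                     /* stand-in for the live object */
    size_t size;
    const char *annotation;
};

struct pmr_info {                /* owned snapshot that outlives the PMR */
    size_t size;
    char  *annotation;
};

struct record {
    bool has_snapshot;
    union {
        struct pmr      *live;
        struct pmr_info *snap;
    } u;
};

/* Before the PMR goes away, copy what we still want to report. */
static int record_detach_from_pmr(struct record *r)
{
    struct pmr_info *snap;

    if (r->has_snapshot)
        return 0;

    snap = calloc(1, sizeof(*snap));
    if (snap == NULL)
        return -1;

    snap->size = r->u.live->size;
    snap->annotation = strdup(r->u.live->annotation);
    if (snap->annotation == NULL) {
        free(snap);
        return -1;
    }

    r->u.snap = snap;
    r->has_snapshot = true;
    return 0;
}

/* Readers pick the live object or the snapshot via the flag, just as the
 * GET_* macros above do with HAS_PMR_INFO(). */
static size_t record_size(const struct record *r)
{
    return r->has_snapshot ? r->u.snap->size : r->u.live->size;
}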
+ + @input hRIHandle - Handle of object whose reference info is to be deleted + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RI_SUBLIST_ENTRY *psRISubEntry; + + psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; + if (!RICheckSubListHandle(hRIHandle)) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + else if (IS_RACC(psRISubEntry)) + { + return PVRSRV_OK; + } + /* Acquire RI lock*/ + _RILock(); + + eError = RIDeleteMemdescEntryUnlocked(psRISubEntry); /* Release RI lock*/ _RIUnlock(); @@ -1061,7 +1374,7 @@ PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) */ hRIHandle = NULL; - return PVRSRV_OK; + return eError; } /*! @@ -1082,9 +1395,9 @@ PVRSRV_ERROR RIDeleteListKM(void) _RILock(); - if (g_pRIHashTable) + if (g_pPMR2RIListHashTable) { - eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries, NULL); + eResult = HASH_Iterate(g_pPMR2RIListHashTable, (HASH_pfnCallback)_DeleteAllEntries, NULL); if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) { /* @@ -1100,9 +1413,9 @@ PVRSRV_ERROR RIDeleteListKM(void) * still entries left in the per-process hash table because they were added with * RIWriteProcListEntryKM() and have no PMR parent associated. */ - if (g_pProcHashTable) + if (g_PID2RISublistHashTable) { - eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries, NULL); + eResult = HASH_Iterate(g_PID2RISublistHashTable, (HASH_pfnCallback) _DeleteAllProcEntries, NULL); if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) { /* @@ -1206,13 +1519,13 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, { case RI_GET_STATE_MEMDESCS_LIST_START: /* look-up pid in Hash Table, to obtain first entry for pid */ - hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey); + hashData = HASH_Retrieve_Extended(g_PID2RISublistHashTable, (void *)&hashKey); if (hashData) { if (*ppHandle) { psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle; - if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + if (!IS_VALID(psRISubEntry)) { psRISubEntry = NULL; } @@ -1220,7 +1533,7 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, else { psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); - if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + if (!IS_VALID(psRISubEntry)) { psRISubEntry = NULL; } @@ -1231,7 +1544,7 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, { PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode); - if (psRISubEntry->bIsImport) + if (IS_IMPORT(psRISubEntry)) { ui64TotalImport += psRISubEntry->ui64Size; } @@ -1247,8 +1560,8 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, if (szProcName[0] == '\0') { - OSStringLCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ? - PVRSRV_MODNAME : psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE); + OSStringSafeCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ? 
+ PVRSRV_MODNAME : GET_PROC(psRISubEntry), RI_PROC_BUF_SIZE); } @@ -1309,22 +1622,17 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, if (szProcName[0] == '\0') { - OSStringLCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE); + OSStringSafeCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE); } if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead) { - IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; - - psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); + psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sListNode); _GeneratePMREntryString(psRIEntry, IMG_TRUE, RI_PMR_ENTRY_BUF_SIZE, acStringBuffer); - PMR_LogicalSize(psRIEntry->psPMR, - &uiPMRLogicalSize); - ui64TotalPMRAlloc += uiPMRLogicalSize; - PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); - ui64TotalPMRBacked += uiPMRPhysicalBacking; + ui64TotalPMRAlloc += GET_LOGICAL_SIZE(psRIEntry); + ui64TotalPMRBacked += GET_PHYSICAL_SIZE(psRIEntry); ui32ProcessedSysAllocPMRCount++; if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1) @@ -1347,35 +1655,32 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, /* Iterate through the 'touched' PMRs and display details */ if (!psNode) { - psNode = dllist_get_next_node(&sListFirst); + psNode = dllist_get_next_node(&g_sClientsListHead); } else { psNode = dllist_get_next_node(psNode); } - while ((psNode != NULL && psNode != &sListFirst) && + while ((psNode != NULL && psNode != &g_sClientsListHead) && !bPMRToDisplay) { psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode); if (psRIEntry->pid == pid) { - IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; - /* This PMR was 'touched', so display details and unflag it*/ _GeneratePMREntryString(psRIEntry, IMG_TRUE, RI_PMR_ENTRY_BUF_SIZE, acStringBuffer); - PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize); - ui64TotalPMRAlloc += uiPMRLogicalSize; - PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); - ui64TotalPMRBacked += uiPMRPhysicalBacking; + ui64TotalPMRAlloc += GET_LOGICAL_SIZE(psRIEntry); + ui64TotalPMRBacked += GET_PHYSICAL_SIZE(psRIEntry); /* Remember the name of the process for 1 PMR for the summary */ if (szProcName[0] == '\0') { - OSStringLCopy(szProcName, psRIEntry->ai8ProcName, RI_PROC_BUF_SIZE); + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode); + OSStringSafeCopy(szProcName, GET_PROC(psRISubEntry), RI_PROC_BUF_SIZE); } bPMRToDisplay = IMG_TRUE; } @@ -1385,7 +1690,7 @@ IMG_BOOL RIGetListEntryKM(IMG_PID pid, } } - if (psNode == NULL || (psNode == &sListFirst)) + if (psNode == NULL || (psNode == &g_sClientsListHead)) { g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; } @@ -1444,15 +1749,15 @@ static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, const IMG_CHAR *pszAnnotationText; IMG_PID uiRIPid = 0; PMR* psRIPMR = NULL; - IMG_UINT32 ui32RIPMRFlags = 0; - IMG_BOOL bHostDevice = psRISubEntry->ui32DevID == PVRSRV_HOST_DEVICE_ID; + IMG_BOOL bSysPMR = IMG_FALSE; + IMG_BOOL bHostDevice = IS_HOST_DEVICE(psRISubEntry); IMG_CHAR szDeviceID[RI_DEV_ID_BUF_SIZE]; if (psRISubEntry->psRI != NULL) { uiRIPid = psRISubEntry->psRI->pid; - psRIPMR = psRISubEntry->psRI->psPMR; - ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags; + psRIPMR = (PMR*)GET_ADDR(psRISubEntry->psRI); + bSysPMR = IS_SYSPMR(psRISubEntry->psRI); } OSSNPrintf(szEntryFormat, @@ -1467,7 +1772,7 @@ static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, 
RI_MEMDESC_ENTRY_PROC_BUF_SIZE, RI_MEMDESC_ENTRY_PROC_FRMT, psRISubEntry->pid, - psRISubEntry->ai8ProcName); + GET_PROC(psRISubEntry)); } if (!bHostDevice) @@ -1475,27 +1780,27 @@ static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, OSSNPrintf(szDeviceID, sizeof(szDeviceID), "%-3d", - psRISubEntry->ui32DevID); + GET_DEVICE_ID(psRISubEntry)); } - if (psRISubEntry->bIsImport && psRIPMR) + if (IS_IMPORT(psRISubEntry) && psRIPMR) { OSSNPrintf((IMG_CHAR *)&szImport, RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE, RI_MEMDESC_ENTRY_IMPORT_FRMT, uiRIPid); /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ - pszAnnotationText = PMR_GetAnnotation(psRIPMR); + pszAnnotationText = GET_NAME(psRISubEntry->psRI); } - else if (!psRISubEntry->bIsSuballoc && psRIPMR) + else if (!IS_SUBALLOC(psRISubEntry) && psRIPMR) { /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ - pszAnnotationText = PMR_GetAnnotation(psRIPMR); + pszAnnotationText = GET_NAME(psRISubEntry->psRI); } else { /* Set pszAnnotationText to that of the MEMDESC RI entry */ - pszAnnotationText = psRISubEntry->ai8TextB; + pszAnnotationText = psRISubEntry->pszTextB; } /* Don't print memdescs if they are local imports @@ -1503,7 +1808,7 @@ static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, */ if (bDebugFs && ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) && - (psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid) + (IS_IMPORT(psRISubEntry) && ((psRISubEntry->pid == uiRIPid) || (psRISubEntry->pid == PVR_SYS_ALLOC_PID)))) { /* Don't print this entry */ @@ -1522,8 +1827,11 @@ static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, (bDebugFs ? "" : (char *)szProc), psRISubEntry->ui64Size, psRIPMR, - (psRISubEntry->bIsImport ? (char *)&szImport : ""), - (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "", + (IS_IMPORT(psRISubEntry) ? (char *)&szImport : ""), + (!IS_IMPORT(psRISubEntry) && + (bSysPMR) && + (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "", + (IS_RACC(psRISubEntry) ? RI_FREED_BY_DRIVER : ""), (bDebugFs ? 
'\n' : ' ')); } } @@ -1535,31 +1843,64 @@ static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_CHAR *pszEntryString) { const IMG_CHAR* pszAnnotationText; - IMG_DEVMEM_SIZE_T uiLogicalSize = 0; - IMG_DEVMEM_SIZE_T uiPhysicalSize = 0; + const IMG_CHAR* pszHeapText; + IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE]; - IMG_BOOL bHostDevice = psRIEntry->ui32DevID == PVRSRV_HOST_DEVICE_ID; + IMG_BOOL bHostDevice = IS_HOST_DEVICE(psRIEntry); IMG_CHAR szDeviceID[RI_DEV_ID_BUF_SIZE]; - - PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize); - - PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize); +#if defined(PVRSRV_ENABLE_XD_MEM) + IMG_CHAR szXDevices[RI_PMR_ENTRY_IMPORTED_BUF_SIZE]; +#else + IMG_CHAR szXDevices[] = ""; +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ OSSNPrintf(szEntryFormat, RI_PMR_ENTRY_FRMT_SIZE, RI_PMR_ENTRY_FRMT, - DEVMEM_ANNOTATION_MAX_LEN); + (DEVMEM_ANNOTATION_MAX_LEN/2), + PHYS_HEAP_NAME_SIZE); /* Set pszAnnotationText to that PMR RI entry */ - pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR); + pszAnnotationText = GET_NAME(psRIEntry); + + /* Acquire PhysHeap Name to that PMR RI entry */ + pszHeapText = PhysHeapName(GET_HEAP(psRIEntry)); if (!bHostDevice) { OSSNPrintf(szDeviceID, sizeof(szDeviceID), "%-3d", - psRIEntry->ui32DevID); + GET_DEVICE_ID(psRIEntry)); + } + +#if defined(PVRSRV_ENABLE_XD_MEM) + if (GET_XDEVICES(psRIEntry) != 0) + { + IMG_UINT64 uiXDevices = GET_XDEVICES(psRIEntry); + IMG_UINT8 i; + IMG_CHAR *pszHead = szXDevices; + + pszHead += OSSNPrintf(pszHead, + sizeof(szXDevices) - (pszHead - szXDevices), + RI_PMR_ENTRY_IMPORTED_FRMT_BEGIN); + for (i = 0; i < PVRSRV_MAX_DEVICES; i++) + { + if (uiXDevices & (IMG_UINT64_C(1) << i)) + { + pszHead += OSSNPrintf(pszHead, + sizeof(szXDevices) - (pszHead - szXDevices), + RI_PMR_ENTRY_IMPORTED_DEV_FRMT, + i); + } + } + pszHead += OSSNPrintf(pszHead, + sizeof(szXDevices) - (pszHead - szXDevices), + RI_PMR_ENTRY_IMPORTED_FRMT_END); + } else { + szXDevices[0] = '\0'; } +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ OSSNPrintf(pszEntryString, ui16MaxStrLen, @@ -1567,10 +1908,13 @@ static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, (bDebugFs ? "" : " "), psRIEntry->pid, (bHostDevice ? "- " : szDeviceID), - (void*)psRIEntry->psPMR, + GET_ADDR(psRIEntry), pszAnnotationText, - uiLogicalSize, - uiPhysicalSize, + pszHeapText, + GET_LOGICAL_SIZE(psRIEntry), + GET_PHYSICAL_SIZE(psRIEntry), + (IS_RACC(psRIEntry) ? RI_FREED_BY_DRIVER : ""), + szXDevices, (bDebugFs ? 
'\n' : ' ')); } @@ -1600,17 +1944,16 @@ static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) IMG_PID hashKey; PMR *pPMRHashKey = psPMR; IMG_BOOL bDisplayedThisPMR = IMG_FALSE; - IMG_UINT64 ui64LogicalSize = 0; - PVR_RETURN_IF_INVALID_PARAM(psPMR); + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR, "psPMR"); - if (g_pRIHashTable && g_pProcHashTable) + if (g_pPMR2RIListHashTable && g_PID2RISublistHashTable) { if (pid != 0) { /* look-up pid in Hash Table */ hashKey = pid; - hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); + hashData = HASH_Retrieve_Extended (g_PID2RISublistHashTable, (void *)&hashKey); if (hashData) { psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); @@ -1623,7 +1966,7 @@ static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) else { /* Look-up psPMR in Hash Table */ - hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + hashData = HASH_Retrieve_Extended (g_pPMR2RIListHashTable, (void *)&pPMRHashKey); psRIEntry = (RI_LIST_ENTRY *)hashData; } if (!psRIEntry) @@ -1637,13 +1980,11 @@ static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) /* Output details for RI entry */ if (!pid) { - PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); - _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, - PMR_GetAnnotation(psRIEntry->psPMR), - psRIEntry->psPMR, + GET_NAME(psRIEntry), + GET_ADDR(psRIEntry), (IMG_UINT)psRIEntry->ui16SubListCount, - ui64LogicalSize)); + GET_LOGICAL_SIZE(psRIEntry))); bDisplayedThisPMR = IMG_TRUE; } ui16SubEntriesParsed = 0; @@ -1669,13 +2010,11 @@ static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) } if (!bDisplayedThisPMR) { - PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); - _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, - PMR_GetAnnotation(psRIEntry->psPMR), - psRIEntry->psPMR, + GET_NAME(psRIEntry), + GET_ADDR(psRIEntry), (IMG_UINT)psRIEntry->ui16SubListCount, - ui64LogicalSize)); + GET_LOGICAL_SIZE(psRIEntry))); bDisplayedThisPMR = IMG_TRUE; } } @@ -1769,9 +2108,9 @@ static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) ******************************************************************************/ PVRSRV_ERROR RIDumpAllKM(void) { - if (g_pRIHashTable) + if (g_pPMR2RIListHashTable) { - return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries, NULL); + return HASH_Iterate(g_pPMR2RIListHashTable, (HASH_pfnCallback)_DumpAllEntries, NULL); } return PVRSRV_OK; } @@ -1795,7 +2134,7 @@ PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid) PVRSRV_ERROR eError; IMG_UINT32 dummyPMR; - if (!g_pProcHashTable) + if (!g_PID2RISublistHashTable) { return PVRSRV_OK; } @@ -1827,112 +2166,129 @@ PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid) specified heap type (in bytes). 
******************************************************************************/ -static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) +static IMG_INT32 _TotalAllocsForProcess(const IMG_PID pid, const PHYS_HEAP_TYPE ePhysHeapType) { - RI_LIST_ENTRY *psRIEntry = NULL; - RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL; - RI_SUBLIST_ENTRY *psRISubEntry = NULL; - uintptr_t hashData = 0; - IMG_PID hashKey; IMG_INT32 i32TotalPhysical = 0; - if (g_pRIHashTable && g_pProcHashTable) + if (g_pPMR2RIListHashTable && g_PID2RISublistHashTable) { if (pid == PVR_SYS_ALLOC_PID) { - IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0; DLLIST_NODE *psSysAllocNode = NULL; OSLockAcquire(g_hSysAllocPidListLock); - psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead); - while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead) + + for (psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead); + psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead; + psSysAllocNode = dllist_get_next_node(psSysAllocNode)) { - psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); - ui32ProcessedSysAllocPMRCount++; - if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType) + const RI_LIST_ENTRY *const psRIEntry = + IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sListNode); + + /* Exclude RACC entries from the stats. This memory should have + * been already freed during connection destruction so they should + * not affect figure showing memory usage. Also RACC entries existence + * is to show unfreed memory in `gpu_mem_area`, not to affect other + * process stats. Finally if the RI entry exists only due to sub-entries + * referencing it but the underlying PMR has been freed don't include + * it in the total stats. 
*/ + if (PhysHeapGetType(GET_HEAP(psRIEntry)) == ePhysHeapType && + !IS_RACC(psRIEntry) && !HAS_PMR_INFO(psRIEntry)) { - IMG_UINT64 ui64PhysicalSize; + IMG_UINT64 ui64PhysicalSize = GET_PHYSICAL_SIZE(psRIEntry); - PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); - if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) + if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > IMG_INT32_MAX)) { - PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); + PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32", + __func__)); } i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); } - psSysAllocNode = dllist_get_next_node(psSysAllocNode); } + OSLockRelease(g_hSysAllocPidListLock); } else { + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL, *psRISubEntry = NULL; + uintptr_t hashData = 0; + if (pid != 0) { /* look-up pid in Hash Table */ - hashKey = pid; - hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); + IMG_PID hashKey = pid; + hashData = HASH_Retrieve_Extended(g_PID2RISublistHashTable, (void *)&hashKey); if (hashData) { psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); - psRISubEntry = psInitialRISubEntry; - if (psRISubEntry) + if (psInitialRISubEntry != NULL) { - psRIEntry = psRISubEntry->psRI; + psRISubEntry = psInitialRISubEntry; + psRIEntry = psInitialRISubEntry->psRI; } } } - while (psRISubEntry && psRIEntry) + while (psRISubEntry != NULL && psRIEntry != NULL) { - if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) && - (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)) && - (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)) - { - IMG_UINT64 ui64PhysicalSize; + DLLIST_NODE *psNextNode; + if (!IS_IMPORT(psRISubEntry) && + !IS_RACC(psRISubEntry) && + !IS_COUNTED_BY_DEBUGFS(psRIEntry) && + !IS_SYSPMR(psRIEntry) && + (PhysHeapGetType(GET_HEAP(psRIEntry)) == ePhysHeapType)) + { + IMG_UINT64 ui64PhysicalSize = GET_PHYSICAL_SIZE(psRIEntry); - PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); - if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) + if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > IMG_INT32_MAX)) { - PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); + PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32", + __func__)); } + i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); - psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; + BIT_SET(psRIEntry->ui16Flags, RI_PMR_PHYS_COUNTED_BY_DEBUGFS_FLAG); } - if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || - (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + + psNextNode = dllist_get_next_node(&(psRISubEntry->sProcListNode)); + if (psNextNode == NULL || psNextNode == (PDLLIST_NODE)hashData) { psRISubEntry = NULL; psRIEntry = NULL; } else { - psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), - RI_SUBLIST_ENTRY, sProcListNode); + psRISubEntry = IMG_CONTAINER_OF(psNextNode, RI_SUBLIST_ENTRY, sProcListNode); if (psRISubEntry) { psRIEntry = psRISubEntry->psRI; } } } + psRISubEntry = psInitialRISubEntry; if (psRISubEntry) { psRIEntry = psRISubEntry->psRI; } - while (psRISubEntry && psRIEntry) + + while (psRISubEntry != NULL && 
psRIEntry != NULL) { - psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; - if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || - (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + const DLLIST_NODE *const psNextNode = + dllist_get_next_node(&(psRISubEntry->sProcListNode)); + + BIT_UNSET(psRIEntry->ui16Flags, RI_PMR_PHYS_COUNTED_BY_DEBUGFS_FLAG); + + if (psNextNode == NULL || psNextNode == (PDLLIST_NODE)hashData) { psRISubEntry = NULL; psRIEntry = NULL; } else { - psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), - RI_SUBLIST_ENTRY, sProcListNode); + psRISubEntry = IMG_CONTAINER_OF(psNextNode, RI_SUBLIST_ENTRY, sProcListNode); if (psRISubEntry) { psRIEntry = psRISubEntry->psRI; @@ -1941,6 +2297,7 @@ static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapTyp } } } + return i32TotalPhysical; } @@ -1957,19 +2314,13 @@ static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapTyp @Return Amount of physical backing allocated (in bytes) ******************************************************************************/ -IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) +IMG_INT32 RITotalAllocProcessUnlocked(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) { IMG_INT32 i32BackingTotal = 0; - if (g_pProcHashTable) + if (g_PID2RISublistHashTable) { - /* Acquire RI lock*/ - _RILock(); - i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType); - - /* Release RI lock*/ - _RIUnlock(); } return i32BackingTotal; } @@ -2005,14 +2356,14 @@ static PVRSRV_ERROR _DumpProcessList(PMR *psPMR, psDevVAddr->uiAddr = 0; - PVR_RETURN_IF_INVALID_PARAM(psPMR); + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR, "psPMR"); - if (g_pRIHashTable && g_pProcHashTable) + if (g_pPMR2RIListHashTable && g_PID2RISublistHashTable) { PVR_ASSERT(psPMR && pid); /* Look-up psPMR in Hash Table */ - hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + hashData = HASH_Retrieve_Extended (g_pPMR2RIListHashTable, (void *)&pPMRHashKey); psRIEntry = (RI_LIST_ENTRY *)hashData; if (!psRIEntry) @@ -2070,7 +2421,7 @@ PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, { PVRSRV_ERROR eError; - if (!g_pProcHashTable) + if (!g_PID2RISublistHashTable) { return PVRSRV_OK; } @@ -2090,6 +2441,140 @@ PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, } #endif +static PVRSRV_ERROR _MarkRACCEntries(uintptr_t k, uintptr_t v, void *psConnection) +{ + /* This is not a head node. See sProcListNode. */ + DLLIST_NODE *psListStart = (DLLIST_NODE *) v; + DLLIST_NODE *psNextNode = psListStart; + + PVR_UNREFERENCED_PARAMETER(k); + + do + { + RI_SUBLIST_ENTRY *psRISubEntry = IMG_CONTAINER_OF(psNextNode, RI_SUBLIST_ENTRY, + sProcListNode); + + if (psRISubEntry->psConnection == psConnection) + { + RI_LIST_ENTRY *psRIEntry = psRISubEntry->psRI; + + BIT_SET(psRISubEntry->ui16Flags, RI_RACC_FLAG); + + /* RI sub-entry may not have an RI entry associated with it. If that's the + * case just skip processing it. */ + if (psRIEntry != NULL) + { + if (!IS_RACC(psRIEntry) && + !HAS_PMR_INFO(psRIEntry) && + /* Mark as RACC only if the entry doesn't belong to the system process. + * System process allocations are alive for the whole driver lifetime + * hence they will always exist at a connection closed for every process + * that references them. 
*/ + psRIEntry->pid != PVR_SYS_ALLOC_PID) + { + PVRSRV_ERROR eError; + + eError = _RICreateAndSetPmrInfo(psRIEntry); + PVR_LOG_RETURN_IF_ERROR(eError, "_RICreateAndSetPmrInfo"); + + BIT_SET(psRIEntry->ui16Flags, RI_RACC_FLAG); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RIEntry(%s) is already RACC", GET_NAME(psRIEntry))); + } + } + } + + psNextNode = dllist_get_next_node(psNextNode); + } while (psNextNode != NULL && psNextNode != psListStart); + + return PVRSRV_OK; +} + +void RIConnectionClosed(void* psConnection) +{ + PVRSRV_ERROR eError; + if (g_PID2RISublistHashTable) + { + _RILock(); + eError = HASH_Iterate(g_PID2RISublistHashTable, (HASH_pfnCallback)_MarkRACCEntries, psConnection); + PVR_LOG_IF_FALSE(eError == PVRSRV_OK, "_MarkRACCEntries"); + _RIUnlock(); + } +} + +static PVRSRV_ERROR DeleteRACCEntry(RI_SUBLIST_ENTRY *psRISubEntry) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RI_LIST_ENTRY *psRIEntry = psRISubEntry->psRI; + + PVR_LOG_IF_FALSE_VA(PVR_DBG_WARNING, IS_RACC(psRISubEntry), "Non-RACC entry(%s)", + psRISubEntry->pszTextB); + + eError = RIDeleteMemdescEntryUnlocked(psRISubEntry); + PVR_LOG_IF_ERROR(eError, "RIDeleteMemdescEntryUnlocked"); + + /* RI sub-entry may not have an RI entry associated with it. If that's the + * case just skip processing it. */ + if (psRIEntry != NULL) + { + if (psRIEntry->ui16SubListCount) + { + PVR_DPF((PVR_DBG_MESSAGE, "%p: More than 1 sublist present! (%s)", + psRIEntry, __func__)); + } + else + { + eError = RIDeletePMREntryUnlocked(psRIEntry); + PVR_LOG_IF_ERROR(eError, "RIDeletePMREntryUnlocked"); + } + } + + return eError; +} + +PVRSRV_ERROR RIDeleteEntriesForPID(IMG_PID pid) +{ + /* This is not a head node. See sProcListNode. */ + DLLIST_NODE *psListStart; + DLLIST_NODE *psNode; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_PID hashKey = pid; + + _RILock(); + + PVR_LOG_GOTO_IF_FALSE(g_PID2RISublistHashTable != NULL, "g_PID2RISublistHashTable is NULL", ErrHashError); + + psListStart = (DLLIST_NODE *) HASH_Retrieve_Extended(g_PID2RISublistHashTable, + (void *) &hashKey); + PVR_GOTO_IF_FALSE(psListStart != NULL, ExitUnlockAndReturn); + psNode = psListStart; + + do + { + DLLIST_NODE *psNext = dllist_get_next_node(psNode); + RI_SUBLIST_ENTRY *psRISubEntry = IMG_CONTAINER_OF(psNode, + RI_SUBLIST_ENTRY, + sProcListNode); + + eError = DeleteRACCEntry(psRISubEntry); + PVR_LOG_GOTO_IF_ERROR(eError, "DeleteRACCEntry", ExitUnlockAndReturn); + + psNode = psNext; + } while (psNode != NULL && psNode != psListStart); + + goto ExitUnlockAndReturn; + +ErrHashError: + eError = PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE; + +ExitUnlockAndReturn: + _RIUnlock(); + + return eError; +} + static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) { RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; @@ -2097,7 +2582,7 @@ static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) PVR_UNREFERENCED_PARAMETER (k); PVR_UNREFERENCED_PARAMETER (pvPriv); - return RIDumpListKM(psRIEntry->psPMR); + return RIDumpListKM(GET_ADDR(psRIEntry)); } static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) @@ -2121,7 +2606,7 @@ static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) * If we've deleted the Hash table, return * an error to stop the iterator... 
*/ - if (!g_pRIHashTable) + if (!g_pPMR2RIListHashTable) { eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; } @@ -2138,7 +2623,7 @@ static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPri PVR_UNREFERENCED_PARAMETER (pvPriv); eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry); - if (eResult == PVRSRV_OK && !g_pProcHashTable) + if (eResult == PVRSRV_OK && !g_PID2RISublistHashTable) { /* * If we've deleted the Hash table, return diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/scp.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/scp.c deleted file mode 100644 index f752ff8e1e5a..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/scp.c +++ /dev/null @@ -1,687 +0,0 @@ -/*************************************************************************/ /*! -@File scp.c -@Title Software Command Processor -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description The software command processor allows commands queued and - deferred until their synchronisation requirements have been meet. -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ /**************************************************************************/ - -#include "scp.h" -#include "allocmem.h" -#include "pvr_notifier.h" -#include "pvrsrv.h" -#include "pvr_debug.h" -#include "osfunc.h" -#include "lock.h" -#include "sync_server.h" -#include "sync_internal.h" -#include "rgxhwperf.h" - -#include "pvrsrv_sync_server.h" - -struct _SCP_CONTEXT_ -{ - PVRSRV_DEVICE_NODE *psDevNode; /*ui32WriteOffset, - psContext->ui32ReadOffset, - psContext->ui32CCBSize); - if (ui32FreeSpace >= ui32Size) - { - *ppvBufferSpace = (void *)((IMG_UINT8 *)psContext->pvCCB + - psContext->ui32WriteOffset); - return PVRSRV_OK; - } - else - { - return PVRSRV_ERROR_RETRY; - } -} - -/*************************************************************************/ /*! -@Function _SCPAlloc - -@Description Allocate space in the software command processor, handling the - case where we wrap around the CCB. - -@Input psContext Context to allocate from - -@Input ui32Size Size to allocate - -@Output ppvBufferSpace Pointer to space allocated - -@Return PVRSRV_OK if the allocation was successful -*/ -/*****************************************************************************/ -static -PVRSRV_ERROR _SCPAlloc(SCP_CONTEXT *psContext, - IMG_UINT32 ui32Size, - void **ppvBufferSpace) -{ - if ((ui32Size + PADDING_COMMAND_SIZE) > psContext->ui32CCBSize) - { - PVR_DPF((PVR_DBG_WARNING, "Command size (%d) too big for CCB\n", ui32Size)); - return PVRSRV_ERROR_CMD_TOO_BIG; - } - - /* - Check we don't overflow the end of the buffer and make sure we have - enough for the padding command - */ - if ((psContext->ui32WriteOffset + ui32Size + PADDING_COMMAND_SIZE) > psContext->ui32CCBSize) - { - SCP_COMMAND *psCommand; - void *pvCommand; - PVRSRV_ERROR eError; - IMG_UINT32 ui32Remain = psContext->ui32CCBSize - psContext->ui32WriteOffset; - - /* We're at the end of the buffer without enough contiguous space */ - eError = __SCPAlloc(psContext, ui32Remain, &pvCommand); - if (eError != PVRSRV_OK) - { - PVR_ASSERT(eError == PVRSRV_ERROR_RETRY); - return eError; - } - psCommand = pvCommand; - psCommand->ui32CmdType = SCP_COMMAND_PADDING; - psCommand->ui32CmdSize = ui32Remain; - - UPDATE_CCB_OFFSET(psContext->ui32WriteOffset, ui32Remain, psContext->ui32CCBSize); - } - - return __SCPAlloc(psContext, ui32Size, ppvBufferSpace); -} - -/*************************************************************************/ /*! -@Function _SCPInsert - -@Description Insert the a finished command that was written into the CCB - space allocated in a previous call to _SCPAlloc. - This makes the command ready to be processed. - -@Input psContext Context to allocate from - -@Input ui32Size Size to allocate - -@Return None -*/ -/*****************************************************************************/ -static -void _SCPInsert(SCP_CONTEXT *psContext, - IMG_UINT32 ui32Size) -{ - /* - * Update the write offset. - */ - UPDATE_CCB_OFFSET(psContext->ui32WriteOffset, - ui32Size, - psContext->ui32CCBSize); -} - -/*************************************************************************/ /*! -@Function _SCPCommandReady - -@Description Check if a command is ready. Checks to see if the command - has had its fences met and is ready to go. 
- -@Input psCommand Command to check - -@Return PVRSRV_OK if the command is ready -*/ -/*****************************************************************************/ -static -PVRSRV_ERROR _SCPCommandReady(PVRSRV_DEVICE_NODE *psDevNode, SCP_COMMAND *psCommand) -{ - PVR_ASSERT(psCommand->ui32CmdType != SCP_COMMAND_INVALID); - - if (psCommand->ui32CmdType == SCP_COMMAND_PADDING) - { - return PVRSRV_OK; - } - - /* Check for the provided acquire fence */ - if (SyncIsFenceObjValid(&psCommand->sAcquireFenceObj)) - { - PVRSRV_ERROR eErr; - - eErr = SyncFenceWaitKM(psDevNode, &psCommand->sAcquireFenceObj, 0); - /* PVRSRV_ERROR_TIMEOUT means active. In this case we will retry later again. If the - * return value is an error we will release this fence and proceed. - * This makes sure that we are not getting stuck here when a fence transitions into - * an error state for whatever reason. */ - if (eErr == PVRSRV_ERROR_TIMEOUT) - { - return PVRSRV_ERROR_FAILED_DEPENDENCIES; - } - else - { - PVR_LOG_IF_ERROR(eErr, "SyncFenceWaitKM"); - /* Release the fence. */ - SyncFenceReleaseKM(&psCommand->sAcquireFenceObj); - SyncClearFenceObj(&psCommand->sAcquireFenceObj); - } - } - /* Command is ready */ - if (psCommand->pfnReady(psCommand->pvReadyData)) - { - return PVRSRV_OK; - } - - /* - If we got here it means the command is ready to go, but the SCP client - isn't ready for the command - */ - return PVRSRV_ERROR_NOT_READY; -} - -/*************************************************************************/ /*! -@Function _SCPCommandDo - -@Description Run a command - -@Input psCommand Command to run - -@Return PVRSRV_OK if the command is ready -*/ -/*****************************************************************************/ -static -void _SCPCommandDo(SCP_COMMAND *psCommand) -{ - if (psCommand->ui32CmdType == SCP_COMMAND_CALLBACK) - { - psCommand->pfnDo(psCommand->pvReadyData, psCommand->pvCompleteData); - } -} - -/*************************************************************************/ /*! 
-@Function _SCPDumpCommand - -@Description Dump a SCP command - -@Input psCommand Command to dump - -@Return None -*/ -/*****************************************************************************/ -static void _SCPDumpCommand(SCP_COMMAND *psCommand, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - PVR_DUMPDEBUG_LOG("\tCommand type = %d (@%p)", psCommand->ui32CmdType, psCommand); - - if (psCommand->ui32CmdType == SCP_COMMAND_CALLBACK) - { - if (SyncIsFenceObjValid(&psCommand->sAcquireFenceObj)) - { - SyncDumpFence(&psCommand->sAcquireFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile); - } - - if (SyncIsTimelineObjValid(&psCommand->sSWTimelineObj)) - { - SyncSWDumpTimeline(&psCommand->sSWTimelineObj, pfnDumpDebugPrintf, pvDumpDebugFile); - } - } -} - -/***************************************************************************** - * Public interface functions * - *****************************************************************************/ - -/* - SCPCreate -*/ -PVRSRV_ERROR SCPCreate(PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32CCBSizeLog2, - SCP_CONTEXT **ppsContext) -{ - SCP_CONTEXT *psContext; - IMG_UINT32 ui32Power2QueueSize = 1 << ui32CCBSizeLog2; - PVRSRV_ERROR eError; - - /* allocate an internal queue info structure */ - psContext = OSAllocZMem(sizeof(SCP_CONTEXT)); - PVR_LOG_RETURN_IF_NOMEM(psContext, "OSAllocZMem"); - - psContext->psDevNode = psDevNode; - - /* allocate the command queue buffer - allow for overrun */ - psContext->pvCCB = OSAllocMem(ui32Power2QueueSize); - PVR_LOG_GOTO_IF_NOMEM(psContext->pvCCB, eError, ErrorExitNoCCB); - - psContext->ui32CCBSize = ui32Power2QueueSize; - - eError = OSLockCreate(&psContext->hLock); - PVR_GOTO_IF_ERROR(eError, ErrorExit); - - SCP_DEBUG_PRINT("%s: New SCP %p of size %d", - __func__, psContext, ui32Power2QueueSize); - - *ppsContext = psContext; - - return PVRSRV_OK; - -ErrorExit: - OSFreeMem(psContext->pvCCB); - psContext->pvCCB = NULL; - -ErrorExitNoCCB: - OSFreeMem(psContext); - - return eError; -} - -/* - SCPAllocCommand -*/ -PVRSRV_ERROR SCPAllocCommand(SCP_CONTEXT *psContext, - PVRSRV_FENCE iAcquireFence, - SCPReady pfnCommandReady, - SCPDo pfnCommandDo, - size_t ui32ReadyDataByteSize, - size_t ui32CompleteDataByteSize, - void **ppvReadyData, - void **ppvCompleteData, - PVRSRV_TIMELINE iReleaseFenceTimeline, - PVRSRV_FENCE *piReleaseFence) -{ - PVRSRV_ERROR eError; - SCP_COMMAND *psCommand; - IMG_UINT32 ui32CommandSize; - - SCP_DEBUG_PRINT("%s: iAcquireFence=%d, iReleaseFenceTimeline=%d, piReleaseFence=<%p>", - __func__, iAcquireFence, iReleaseFenceTimeline, piReleaseFence); - - /* Round up the incoming data sizes to be pointer granular */ - ui32ReadyDataByteSize = (ui32ReadyDataByteSize & (~(sizeof(void *)-1))) + sizeof(void *); - ui32CompleteDataByteSize = (ui32CompleteDataByteSize & (~(sizeof(void *)-1))) + sizeof(void *); - - /* Total command size */ - ui32CommandSize = sizeof(SCP_COMMAND) + - ui32ReadyDataByteSize + - ui32CompleteDataByteSize; - - eError = _SCPAlloc(psContext, ui32CommandSize, (void **) &psCommand); - if (eError != PVRSRV_OK) - { - SCP_DEBUG_PRINT("%s: Failed to allocate command of size %d for ctx %p (%s)", __func__, ui32CommandSize, psContext, PVRSRVGetErrorString(eError)); - return eError; - } - - if (piReleaseFence && iReleaseFenceTimeline != PVRSRV_NO_TIMELINE) - { - /* Create a release fence for the caller. 
*/ - eError = SyncSWTimelineFenceCreateKM(psContext->psDevNode, - iReleaseFenceTimeline, - "pvr_scp_retire", - piReleaseFence); - if (eError != PVRSRV_OK) - { - SCP_DEBUG_PRINT("%s: SyncSWTimelineFenceCreateKM() returned %s", __func__, PVRSRVGetErrorString(eError)); - return eError; - } - } - - SCP_DEBUG_PRINT("%s: New Command %p for ctx %p of size %d", - __func__, psCommand, psContext, ui32CommandSize); - - /* setup the command */ - psCommand->ui32CmdSize = ui32CommandSize; - psCommand->ui32CmdType = SCP_COMMAND_CALLBACK; - - psCommand->pfnReady = pfnCommandReady; - psCommand->pfnDo = pfnCommandDo; - - psCommand->pvReadyData = IMG_OFFSET_ADDR(psCommand, - sizeof(SCP_COMMAND)); - - psCommand->pvCompleteData = IMG_OFFSET_ADDR(psCommand, - (sizeof(SCP_COMMAND) + - ui32ReadyDataByteSize)); - - /* Copy over the fences */ - if (iAcquireFence != PVRSRV_NO_FENCE) - { - SyncGetFenceObj(iAcquireFence, &psCommand->sAcquireFenceObj); - } - else - { - SyncClearFenceObj(&psCommand->sAcquireFenceObj); - } - - if (piReleaseFence && - *piReleaseFence != PVRSRV_NO_FENCE && - iReleaseFenceTimeline != PVRSRV_NO_TIMELINE) - { - eError = SyncSWGetTimelineObj(iReleaseFenceTimeline, &psCommand->sSWTimelineObj); - PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWGetTimelineObj"); - eError = SyncGetFenceObj(*piReleaseFence, &psCommand->sReleaseFenceObj); - PVR_LOG_RETURN_IF_ERROR(eError, "SyncGetFenceObj"); - } - else - { - SyncClearTimelineObj(&psCommand->sSWTimelineObj); - SyncClearFenceObj(&psCommand->sReleaseFenceObj); - } - - *ppvReadyData = psCommand->pvReadyData; - *ppvCompleteData = psCommand->pvCompleteData; - - return PVRSRV_OK; -} - -/* - SCPSubmitCommand -*/ -PVRSRV_ERROR SCPSubmitCommand(SCP_CONTEXT *psContext) -{ - SCP_COMMAND *psCommand; - - PVR_RETURN_IF_INVALID_PARAM(psContext); - - psCommand = IMG_OFFSET_ADDR(psContext->pvCCB, psContext->ui32WriteOffset); - - SCP_DEBUG_PRINT("%s: Submit command %p for ctx %p", - __func__, psCommand, psContext); - - _SCPInsert(psContext, psCommand->ui32CmdSize); - - return PVRSRV_OK; -} - -/* - SCPRun -*/ -PVRSRV_ERROR SCPRun(SCP_CONTEXT *psContext) -{ - SCP_COMMAND *psCommand; - PVRSRV_ERROR eError = PVRSRV_OK; - - PVR_RETURN_IF_INVALID_PARAM(psContext); - - OSLockAcquire(psContext->hLock); - while (psContext->ui32DepOffset != psContext->ui32WriteOffset) - { - psCommand = IMG_OFFSET_ADDR(psContext->pvCCB, psContext->ui32DepOffset); - - /* See if the command is ready to go */ - eError = _SCPCommandReady(psContext->psDevNode, psCommand); - - SCP_DEBUG_PRINT("%s: Processes command %p for ctx %p (%d)", - __func__, psCommand, psContext, eError); - - if (eError == PVRSRV_OK) - { - /* processed cmd so update queue */ - UPDATE_CCB_OFFSET(psContext->ui32DepOffset, - psCommand->ui32CmdSize, - psContext->ui32CCBSize); - } - else - { - /* As soon as we hit a command that can't run break out */ - break; - } - - /* Run the command */ - _SCPCommandDo(psCommand); - } - OSLockRelease(psContext->hLock); - - return eError; -} - -PVRSRV_ERROR SCPFlush(SCP_CONTEXT *psContext) -{ - if (psContext->ui32ReadOffset != psContext->ui32WriteOffset) - { - return PVRSRV_ERROR_RETRY; - } - - return PVRSRV_OK; -} - -/* This looks like a reasonable value. Number of traced syncs should - * not exceed 20. 
*/ -#define MAX_TRACED_UFOS 20 - -/* - SCPCommandComplete -*/ -void SCPCommandComplete(SCP_CONTEXT *psContext, - IMG_BOOL bIgnoreFences) -{ - SCP_COMMAND *psCommand; - IMG_BOOL bContinue = IMG_TRUE; - - if (psContext == NULL) - { - return; - } - - if (psContext->ui32ReadOffset == psContext->ui32DepOffset) - { - PVR_DPF((PVR_DBG_ERROR, "SCPCommandComplete: Called with nothing to do!")); - return; - } - - while (bContinue) - { - psCommand = IMG_OFFSET_ADDR(psContext->pvCCB, psContext->ui32ReadOffset); - - if (psCommand->ui32CmdType == SCP_COMMAND_CALLBACK) - { - - - if (SyncIsFenceObjValid(&psCommand->sReleaseFenceObj)) - { - SyncSWTimelineAdvanceKM(psContext->psDevNode, &psCommand->sSWTimelineObj); - SyncSWTimelineReleaseKM(&psCommand->sSWTimelineObj); - SyncClearTimelineObj(&psCommand->sSWTimelineObj); - - /* Destroy the release fence */ - SyncFenceReleaseKM(&psCommand->sReleaseFenceObj); - SyncClearFenceObj(&psCommand->sReleaseFenceObj); - } - bContinue = IMG_FALSE; - } - - /* processed cmd so update queue */ - UPDATE_CCB_OFFSET(psContext->ui32ReadOffset, - psCommand->ui32CmdSize, - psContext->ui32CCBSize); - - SCP_DEBUG_PRINT("%s: Complete command %p for ctx %p (continue: %d)", - __func__, psCommand, psContext, bContinue); - - } -} - -IMG_BOOL SCPHasPendingCommand(SCP_CONTEXT *psContext) -{ - return psContext->ui32DepOffset != psContext->ui32WriteOffset; -} - -void SCPDumpStatus(SCP_CONTEXT *psContext, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - PVR_ASSERT(psContext != NULL); - - /* - Acquire the lock to ensure that the SCP isn't run while - while we're dumping info - */ - OSLockAcquire(psContext->hLock); - - PVR_DUMPDEBUG_LOG("Pending command:"); - if (psContext->ui32DepOffset == psContext->ui32WriteOffset) - { - PVR_DUMPDEBUG_LOG("\tNone"); - } - else - { - SCP_COMMAND *psCommand; - IMG_UINT32 ui32DepOffset = psContext->ui32DepOffset; - - while (ui32DepOffset != psContext->ui32WriteOffset) - { - /* Dump the command we're pending on */ - psCommand = IMG_OFFSET_ADDR(psContext->pvCCB, ui32DepOffset); - - _SCPDumpCommand(psCommand, pfnDumpDebugPrintf, pvDumpDebugFile); - - /* processed cmd so update queue */ - UPDATE_CCB_OFFSET(ui32DepOffset, - psCommand->ui32CmdSize, - psContext->ui32CCBSize); - } - } - - PVR_DUMPDEBUG_LOG("Active command(s):"); - if (psContext->ui32DepOffset == psContext->ui32ReadOffset) - { - PVR_DUMPDEBUG_LOG("\tNone"); - } - else - { - SCP_COMMAND *psCommand; - IMG_UINT32 ui32ReadOffset = psContext->ui32ReadOffset; - - while (ui32ReadOffset != psContext->ui32DepOffset) - { - psCommand = IMG_OFFSET_ADDR(psContext->pvCCB, ui32ReadOffset); - - _SCPDumpCommand(psCommand, pfnDumpDebugPrintf, pvDumpDebugFile); - - /* processed cmd so update queue */ - UPDATE_CCB_OFFSET(ui32ReadOffset, - psCommand->ui32CmdSize, - psContext->ui32CCBSize); - } - } - - OSLockRelease(psContext->hLock); -} - - -/* - SCPDestroy -*/ -void SCPDestroy(SCP_CONTEXT *psContext) -{ - /* - The caller must ensure that they completed all queued operations - before calling this function - */ - - PVR_ASSERT(psContext->ui32ReadOffset == psContext->ui32WriteOffset); - - OSLockDestroy(psContext->hLock); - psContext->hLock = NULL; - OSFreeMem(psContext->pvCCB); - psContext->pvCCB = NULL; - OSFreeMem(psContext); -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/srvcore.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/srvcore.c index af083adedc6b..203c89adda15 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/srvcore.c +++ 
b/drivers/gpu/drm/img/img-volcanic/services/server/common/srvcore.c @@ -48,7 +48,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_bridge.h" #include "connection_server.h" #include "device.h" -#include "htbuffer.h" +#include "htbserver.h" #include "pdump_km.h" @@ -61,6 +61,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv.h" #include "power.h" +#include "os_apphint.h" + #if defined(SUPPORT_RGX) #include "rgxdevice.h" #include "rgxinit.h" @@ -215,7 +217,7 @@ void BridgeDispatchTableStartOffsetsInit(void) #if defined(DEBUG_BRIDGE_KM) #if defined(INTEGRITY_OS) -PVRSRV_ERROR PVRSRVPrintBridgeStats() +void PVRSRVPrintBridgeStats(void) { IMG_UINT32 ui32Index; IMG_UINT32 ui32Remainder; @@ -289,28 +291,100 @@ CopyToUserWrapper(CONNECTION_DATA *psConnection, return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); } -#else -INLINE PVRSRV_ERROR -CopyFromUserWrapper(CONNECTION_DATA *psConnection, - IMG_UINT32 ui32DispatchTableEntry, - void *pvDest, - void __user *pvSrc, - IMG_UINT32 ui32Size) +#endif + +/**************************************************************************/ /*! +@Function DeviceDefaultPhysHeapFreeMemCheck + +@Description Check if the required amount of free space is available in the + Default PhysHeap for a connection to be made. + +@Input psDeviceNode The device the connection is being + made on. +@Input ui32MinMemInMBs The minimum memory required to be + available in the Default PhysHeap. + +@Return PVRSRV_OK if successful else a PVRSRV_ERROR. +*/ /***************************************************************************/ +static PVRSRV_ERROR DeviceDefaultPhysHeapFreeMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MinMemInMBs) { - PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); - return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); + PHYS_HEAP *psDefaultHeap = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL, "psDeviceNode"); + + psDefaultHeap = psDeviceNode->apsPhysHeap[psDeviceNode->psDevConfig->eDefaultHeap]; + if (psDefaultHeap == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to get device's default PhysHeap")); + return PVRSRV_ERROR_INVALID_HEAP; + } + + if (PhysHeapGetType(psDefaultHeap) == PHYS_HEAP_TYPE_LMA) + { + IMG_UINT64 ui64FreePhysHeapMem; + + eError = PhysHeapFreeMemCheck(psDefaultHeap, + MB2B(ui32MinMemInMBs), + &ui64FreePhysHeapMem); + if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) + { + PVR_DPF((PVR_DBG_ERROR, "Default PhysHeap contains less than the " + "minimum free space required to acquire a connection. " + "Free space: %"IMG_UINT64_FMTSPEC"MB " + "Minimum required: %uMB", + B2MB(ui64FreePhysHeapMem), + ui32MinMemInMBs)); + } + } + + return eError; } -INLINE PVRSRV_ERROR -CopyToUserWrapper(CONNECTION_DATA *psConnection, - IMG_UINT32 ui32DispatchTableEntry, - void __user *pvDest, - void *pvSrc, - IMG_UINT32 ui32Size) + +/**************************************************************************/ /*! +@Function CheckConnectionPhysHeapMem + +@Description Check if there is enough memory in the PhysHeaps to allow a + connection to be made. + +@Input psConnection The connection being made. + +@Return PVRSRV_OK if successful else a PVRSRV_ERROR. 
+*/ /***************************************************************************/ +static PVRSRV_ERROR CheckConnectionPhysHeapMem(CONNECTION_DATA *psConnection) { - PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); - return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION; + IMG_UINT32 ui32AppHintPhysHeapMinMemOnConnection = 0; + void *pvAppHintState = NULL; + PVRSRV_DEVICE_NODE *psDeviceNode = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_RETURN_IF_INVALID_PARAM(psConnection); + + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysHeapMinMemOnConnection, + &ui32AppHintDefault, &ui32AppHintPhysHeapMinMemOnConnection); + OSFreeAppHintState(pvAppHintState); + + psDeviceNode = OSGetDevNode(psConnection); + + if (ui32AppHintPhysHeapMinMemOnConnection != 0) + { + eError = DeviceDefaultPhysHeapFreeMemCheck(psDeviceNode, + ui32AppHintPhysHeapMinMemOnConnection); + PVR_LOG_RETURN_IF_ERROR(eError, "DeviceDefaultPhysHeapFreeMemCheck"); + + if (psDeviceNode->pfnCheckForSufficientFWPhysMem != NULL + && RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION > 0) + { + eError = psDeviceNode->pfnCheckForSufficientFWPhysMem(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnCheckForSufficientFWPhysMem"); + } + } + + return eError; } -#endif PVRSRV_ERROR PVRSRVConnectKM(CONNECTION_DATA *psConnection, @@ -329,10 +403,16 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, PVRSRV_DATA *psSRVData = NULL; IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize(); static IMG_BOOL bIsFirstConnection=IMG_FALSE; - #if defined(SUPPORT_RGX) PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + + /* Check the minimum free PhysHeap memory is available before allowing + * the connection to succeed */ + eError = CheckConnectionPhysHeapMem(psConnection); + PVR_RETURN_IF_ERROR(eError); +#if defined(SUPPORT_RGX) /* Gather BVNC information to output to UM */ *ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, @@ -364,11 +444,6 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, { *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG; } - /*Is the system device cache coherent?*/ - if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) - { - *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG; - } } /* Has the system device non-mappable local memory?*/ @@ -403,7 +478,7 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, } /* Is the system DMA capable? 
*/ - if (psDeviceNode->bHasSystemDMA) + if (psDeviceNode->psDevConfig->bHasDma) { *pui32CapabilityFlags |= PVRSRV_SYSTEM_DMA_USED; } @@ -419,7 +494,7 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, } #endif -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) { IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; IMG_BOOL bOSidAxiProtReg = IMG_FALSE; @@ -459,7 +534,7 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, ui32OSidReg, bOSidAxiProtReg?"TRUE":"FALSE")); - SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD); + SetAxiProtOSid(psDeviceNode->psDevConfig->hSysData, ui32OSidReg, ui32OSidAxiProtTD); } } #endif /* defined(EMULATOR) */ @@ -478,7 +553,7 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, ui32OSid, ui32OSidReg)); } -#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ +#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) /* Only enabled if enabled in the UM */ @@ -625,7 +700,7 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, - psDeviceNode->sDevId.i32OsDeviceID, + psDeviceNode->sDevId.i32KernelDeviceID, psConnection->pid); eError = TLStreamCreate(&psConnection->hClientTLStream, @@ -633,7 +708,7 @@ PVRSRVConnectKM(CONNECTION_DATA *psConnection, PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT, TL_OPMODE_DROP_NEWER | TL_FLAG_ALLOCATE_ON_FIRST_OPEN, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL, NULL); if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS) { PVR_LOG_ERROR(eError, "TLStreamCreate"); @@ -792,6 +867,16 @@ PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject) return PVRSRV_OK; } +static void _DumpDebugUMReqPrintWrapper(void *pvPriv, const IMG_CHAR *pszFmt, ...) 
+{ + va_list pvArgs; + PVR_UNREFERENCED_PARAMETER(pvPriv); + + va_start(pvArgs, pszFmt); + PVRSRVReleasePrintfVArgs(pszFmt, pvArgs); + va_end(pvArgs); +} + /* PVRSRVDumpDebugInfoKM */ @@ -800,13 +885,15 @@ PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32VerbLevel) { + PVR_UNREFERENCED_PARAMETER(psConnection); + if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX) { return PVRSRV_ERROR_INVALID_PARAMS; } PVR_LOG(("User requested PVR debug info")); - PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL); + PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, _DumpDebugUMReqPrintWrapper, NULL); return PVRSRV_OK; } @@ -838,6 +925,8 @@ PVRSRV_ERROR PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode) { + PVR_UNREFERENCED_PARAMETER(psConnection); + #if defined(PVRSRV_RESET_ON_HWTIMEOUT) PVR_LOG(("User requested OS reset")); OSPanic(); @@ -1000,6 +1089,10 @@ UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index) * @param pszIOCName * @param pfFunction * @param pszFunctionName + * @param hBridgeLock + * @param pszBridgeLockName + * @param ui32InBufferSize + * @param ui32OutBufferSize * * @return ********************************************************************************/ @@ -1010,7 +1103,9 @@ _SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, BridgeWrapperFunction pfFunction, const IMG_CHAR *pszFunctionName, POS_LOCK hBridgeLock, - const IMG_CHAR *pszBridgeLockName) + const IMG_CHAR *pszBridgeLockName, + IMG_UINT32 ui32InBufferSize, + IMG_UINT32 ui32OutBufferSize) { static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */ @@ -1127,6 +1222,8 @@ _SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, { g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock; + g_BridgeDispatchTable[ui32Index].ui32InBufferSize = ui32InBufferSize; + g_BridgeDispatchTable[ui32Index].ui32OutBufferSize = ui32OutBufferSize; #if defined(DEBUG_BRIDGE_KM) g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; @@ -1228,6 +1325,10 @@ PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, PVR_DBG_BREAK; #endif +#if !defined(PVRSRV_ENABLE_HTB) + PVR_UNREFERENCED_PARAMETER(ui32Timestamp); +#endif + if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT) { PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d", @@ -1283,18 +1384,26 @@ PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, #if defined(DEBUG_BRIDGE_KM) BridgeGlobalStatsLock(); - PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)", - __func__, + PVR_DPF((PVR_DBG_MESSAGE, "%s: %s idx:%d mod:%d, func:%d", + __func__, g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName, ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID)); - PVR_DPF((PVR_DBG_MESSAGE, "%s: %s", - __func__, - g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName)); g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++; g_BridgeGlobalStats.ui32IOCTLCount++; BridgeGlobalStatsUnlock(); #endif + if (psBridgePackageKM->ui32InBufferSize != g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32InBufferSize || + psBridgePackageKM->ui32OutBufferSize != g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32OutBufferSize) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Bridge buffer sizes mismatch! 
" + "In: User(%u), Kernel(%u) - Out: User(%u), Kernel(%u)", + __func__, + psBridgePackageKM->ui32InBufferSize, g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32InBufferSize, + psBridgePackageKM->ui32OutBufferSize, g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32OutBufferSize)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) { OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); @@ -1314,23 +1423,7 @@ PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, ui64TimeStart = OSClockns64(); #endif - if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small " - "(data size %u, buffer size %u)!", __func__, - psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE)); - PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); - } - #if !defined(INTEGRITY_OS) - if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small " - "(data size %u, buffer size %u)!", __func__, - psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE)); - PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); - } - if ((CopyFromUserWrapper (psConnection, ui32DispatchTableEntryIndex, psBridgeIn, @@ -1454,13 +1547,13 @@ PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, return err; } -PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray) +PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT64 *pui64MemStatArray) { #if !defined(__QNXNTO__) return PVRSRVFindProcessMemStats(pid, ui32ArrSize, bAllProcessStats, - pui32MemStatArray); + pui64MemStatArray); #else PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); @@ -1468,3 +1561,61 @@ PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IM #endif } + +void PVRSRVBlockIfFrozen(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Short-circuit if we're not marked as frozen */ + if (OSAtomicRead(&psDevNode->eFrozen) == 0) + { + return; + } + + while ((psDevNode->eDevState == PVRSRV_DEVICE_STATE_FROZEN) && + ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_TIMEOUT))) + { + IMG_HANDLE hEvent; + + eError = OSEventObjectOpen(psDevNode->hDeviceThreadEvObj, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to open event object (%d)", __func__, + eError)); + + /* Continue with the loop by resetting to PVRSRV_OK. This makes it + * a busy wait (with a 10ms delay) if we cannot grab an EvObj + * reference. + */ + eError = PVRSRV_OK; + OSSleepms(10U); /* Allow failure (OOM etc.) to resolve */ + continue; + } + + if (hEvent != NULL) + { + /* Register that we've got an interest in the device */ + (void) OSAtomicIncrement(&psDevNode->iFreezeCount); + + eError = OSEventObjectWait(hEvent); + + OSEventObjectClose(hEvent); + + (void) OSAtomicDecrement(&psDevNode->iFreezeCount); + } + } + + /* We expect to exit the above loop only when the device is no longer + * FROZEN. If we are still marked as frozen that is an unexpected error + * so log it. 
+ */ + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_FROZEN) + { + PVR_LOG_IF_ERROR(eError, "OSEventObjectWait"); + if (eError != PVRSRV_OK) + { + OSDumpStack(); + } + } +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_checkpoint.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_checkpoint.c index 3154155d8616..a72ec7dc1809 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_checkpoint.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_checkpoint.c @@ -60,13 +60,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv.h" #include "pdump_km.h" #include "info_page.h" +#include "os_apphint.h" +#include "rgxfwutils.h" #include "pvrsrv_sync_km.h" #include "rgxhwperf.h" -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -#include "rgxsoctimer.h" -#endif + #if defined(PVRSRV_NEED_PVR_DPF) @@ -116,29 +116,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* Set the size of the sync checkpoint pool (not used if 0). * A pool will be maintained for each sync checkpoint context. + * SYNC_CHECKPOINT_POOL_LIMIT must be a power of 2 (POT), + * as the pool wrap mask is calculated using it. */ -#if defined(PDUMP) -#define SYNC_CHECKPOINT_POOL_SIZE 0 -#else -#define SYNC_CHECKPOINT_POOL_SIZE 128 -#define SYNC_CHECKPOINT_POOL_MASK (SYNC_CHECKPOINT_POOL_SIZE - 1) -#endif - -/* The 'sediment' value represents the minimum number of - * sync checkpoints which must be in the pool before one - * will be allocated from the pool rather than from memory. - * This effectively helps avoid re-use of a sync checkpoint - * just after it has been returned to the pool, making - * debugging somewhat easier to understand. - */ -#define SYNC_CHECKPOINT_POOL_SEDIMENT 20 +#define SYNC_CHECKPOINT_POOL_LIMIT 1024 -#if (SYNC_CHECKPOINT_POOL_SIZE & (SYNC_CHECKPOINT_POOL_SIZE - 1)) != 0 -#error "SYNC_CHECKPOINT_POOL_SIZE must be power of 2." +#if (SYNC_CHECKPOINT_POOL_LIMIT & (SYNC_CHECKPOINT_POOL_LIMIT - 1)) != 0 +#error "SYNC_CHECKPOINT_POOL_LIMIT must be power of 2." #endif -#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10 - /* This defines the maximum amount of synchronisation memory that can be allocated per sync checkpoint context. @@ -148,19 +134,26 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
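SYNC_CHECKPOINT_POOL_LIMIT has to be a power of two because the pool is handled as a ring whose read and write indices wrap with a size-minus-one mask rather than a modulo, as the _GetCheckpointFromPool/_PutCheckpointInPool hunks later in this file show. A self-contained sketch of that indexing scheme, using a deliberately small illustrative limit:

    #include <stdint.h>
    #include <stdio.h>

    #define POOL_LIMIT 8u

    /* Compile-time guard, same idea as the #error check in the patch:
     * (x & (x - 1)) == 0 holds only for powers of two (and zero). */
    #if (POOL_LIMIT & (POOL_LIMIT - 1)) != 0
    #error "POOL_LIMIT must be a power of two"
    #endif

    typedef struct {
        int      slots[POOL_LIMIT];
        uint32_t wp;      /* write pointer               */
        uint32_t rp;      /* read pointer                */
        uint32_t count;   /* number of valid entries     */
    } ring_pool;

    static int ring_put(ring_pool *p, int v)
    {
        if (p->count == POOL_LIMIT)
            return 0;                               /* pool full */
        p->slots[p->wp] = v;
        p->wp = (p->wp + 1) & (POOL_LIMIT - 1);     /* wrap via mask */
        p->count++;
        return 1;
    }

    static int ring_get(ring_pool *p, int *v)
    {
        if (p->count == 0)
            return 0;                               /* pool empty */
        *v = p->slots[p->rp];
        p->rp = (p->rp + 1) & (POOL_LIMIT - 1);     /* wrap via mask */
        p->count--;
        return 1;
    }

    int main(void)
    {
        ring_pool pool = { { 0 }, 0, 0, 0 };
        int v;

        for (int i = 0; i < 10; i++)                /* only 8 fit */
            ring_put(&pool, i);
        while (ring_get(&pool, &v))
            printf("%d ", v);                       /* 0..7 in FIFO order */
        printf("\n");
        return 0;
    }

With a power-of-two size, (i + 1) & (size - 1) and (i + 1) % size are equivalent, but the mask avoids a division and lets the requirement be enforced at compile time as above.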
*/ #define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024) - -typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_ -{ - IMG_UINT32 ui32BlockCount; /*!< Number of contexts in the list */ - IMG_UINT32 ui32BlockListSize; /*!< Size of the array contexts */ - SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */ -} SYNC_CHECKPOINT_BLOCK_LIST; - +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) +/* Flags used to indicate state of pool */ +#define SYNC_CHECKPOINT_POOL_FULL (1) +#define SYNC_CHECKPOINT_POOL_VALID (1 << 7) +#define CHECKPOINT_POOL_FULL(ctxctl) \ + ctxctl->ui8PoolStateFlags & SYNC_CHECKPOINT_POOL_FULL +#define CHECKPOINT_POOL_VALID(ctxctl) \ + ctxctl->ui8PoolStateFlags & SYNC_CHECKPOINT_POOL_VALID +#define SET_CHECKPOINT_POOL_FULL(ctxctl) \ + ctxctl->ui8PoolStateFlags |= SYNC_CHECKPOINT_POOL_FULL +#define SET_CHECKPOINT_POOL_VALID(ctxctl) \ + ctxctl->ui8PoolStateFlags |= SYNC_CHECKPOINT_POOL_VALID +#define CLEAR_CHECKPOINT_POOL_FULL(ctxctl) \ + ctxctl->ui8PoolStateFlags &= ~SYNC_CHECKPOINT_POOL_FULL +#define CLEAR_CHECKPOINT_POOL_VALID(ctxctl) \ + ctxctl->ui8PoolStateFlags &= ~SYNC_CHECKPOINT_POOL_VALID +#endif struct _SYNC_CHECKPOINT_CONTEXT_CTL_ { SHARED_DEV_CONNECTION psDeviceNode; - PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; - PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; /* * Used as head of linked-list of sync checkpoints for which * SyncCheckpointFree() has been called, but have outstanding @@ -172,17 +165,43 @@ struct _SYNC_CHECKPOINT_CONTEXT_CTL_ /* Lock to protect the deferred cleanup list */ POS_SPINLOCK hDeferredCleanupListLock; -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) - SYNC_CHECKPOINT *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE]; - IMG_BOOL bSyncCheckpointPoolFull; - IMG_BOOL bSyncCheckpointPoolValid; - IMG_UINT32 ui32SyncCheckpointPoolCount; - IMG_UINT32 ui32SyncCheckpointPoolWp; - IMG_UINT32 ui32SyncCheckpointPoolRp; - POS_SPINLOCK hSyncCheckpointPoolLock; /*! Protects access to the checkpoint pool control data. */ + /* Counters to provide stats for number of checkpoints used at any one time */ + IMG_UINT32 ui32CurrentInUseSyncCheckpoints; + IMG_UINT32 ui32MaxInUseSyncCheckpoints; + IMG_UINT32 ui32CurrentInUseMirroredSyncCPs; + IMG_UINT32 ui32MaxInUseMirroredSyncCPs; + /* Lock to protect the checkpoint stats */ + POS_SPINLOCK hSyncCheckpointStatsLock; +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) + IMG_UINT32 ui32SyncCheckpointPoolSize; /*! Allocated size of the pool */ + IMG_UINT32 ui32SyncCheckpointPoolCount; /*! Number of checkpoints currently in pool */ + IMG_UINT32 ui32SyncCheckpointPoolWp; /*! Pool write pointer */ + IMG_UINT32 ui32SyncCheckpointPoolRp; /*! Pool read pointer */ +#if defined(PDUMP) + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointPoolBlock; /*! Block that all pool checkpoints come from */ + IMG_BOOL bAllocateFromCheckpointPool; /*! PDumps only use the pool once it has allocated all its checkpoints */ +#endif + POS_SPINLOCK hSyncCheckpointPoolLock; /*! Lock to protect access to pool control data */ + IMG_UINT8 ui8PoolStateFlags; /*! Flags to indicate state of pool */ + /*! Array of SYNC_CHECKPOINTs. Must be last member in structure */ + SYNC_CHECKPOINT *apsSyncCheckpointPool[IMG_FLEX_ARRAY_MEMBER]; /*! 
The allocated checkpoint pool */ #endif }; /*_SYNC_CHECKPOINT_CONTEXT_CTL is already typedef-ed in sync_checkpoint_internal.h */ +struct SYNC_CHECKPOINT_CONTEXT_TAG +{ +#if defined(PDUMP) + DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/ + DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/ + POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/ +#endif + RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ + RA_ARENA *psSubAllocRA; /*!< RA context */ + _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl; + ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */ + ATOMIC_T hRefCount; /*!< Ref count for this context */ +}; /*_SYNC_CHECKPOINT_CONTEXT is already typedef-ed in sync_checkpoint_internal.h */ + /* this is the max number of sync checkpoint records we will search or dump * at any time. */ @@ -192,7 +211,6 @@ struct _SYNC_CHECKPOINT_CONTEXT_CTL_ struct SYNC_CHECKPOINT_RECORD { - PVRSRV_DEVICE_NODE *psDevNode; SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ IMG_UINT32 ui32FwBlockAddr; @@ -206,7 +224,7 @@ struct SYNC_CHECKPOINT_RECORD static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct = NULL; -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext); static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint); static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext); @@ -234,17 +252,19 @@ static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO * if (psSyncCheckpointInt) { - if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || - (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)) + IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + + if ((ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || + (ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)) { sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); - sSyncData.sCheckSuccess.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sCheckSuccess.ui32Value = ui32State; eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS; } else { sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); - sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sCheckFail.ui32Value = ui32State; sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL; } @@ -320,10 +340,17 @@ void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext) } else if (OSAtomicDecrement(&psContextInt->hRefCount) == 0) { +#if defined(PDUMP) + PPVRSRV_DEVICE_NODE psDeviceNode = psCtxCtl->psDeviceNode; +#endif + /* SyncCheckpointContextDestroy only when no longer referenced */ OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock); psCtxCtl->hDeferredCleanupListLock = NULL; -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + + OSSpinLockDestroy(psCtxCtl->hSyncCheckpointStatsLock); + psCtxCtl->hSyncCheckpointStatsLock = NULL; +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) if (psCtxCtl->ui32SyncCheckpointPoolCount) { PVR_DPF((PVR_DBG_WARNING, @@ -333,15 +360,24 @@ void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext) 
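The two IMG_BOOL fields that previously tracked the pool state are folded into the single ui8PoolStateFlags byte driven by the CHECKPOINT_POOL_* macros above. A standalone sketch of the same bit-flag idiom; the extra parentheses around the macro bodies are a defensive convention of this sketch, not something taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define POOL_FULL   (1u)
    #define POOL_VALID  (1u << 7)

    /* Test / set / clear helpers over a single flags byte. */
    #define POOL_IS_FULL(f)     (((f) & POOL_FULL) != 0)
    #define POOL_IS_VALID(f)    (((f) & POOL_VALID) != 0)
    #define POOL_SET_FULL(f)    ((f) |= POOL_FULL)
    #define POOL_SET_VALID(f)   ((f) |= POOL_VALID)
    #define POOL_CLEAR_FULL(f)  ((f) &= (uint8_t)~POOL_FULL)
    #define POOL_CLEAR_VALID(f) ((f) &= (uint8_t)~POOL_VALID)

    int main(void)
    {
        uint8_t flags = 0;

        POOL_SET_VALID(flags);
        POOL_SET_FULL(flags);
        printf("valid=%d full=%d\n", POOL_IS_VALID(flags), POOL_IS_FULL(flags));

        POOL_CLEAR_FULL(flags);          /* e.g. after taking an entry out */
        printf("valid=%d full=%d\n", POOL_IS_VALID(flags), POOL_IS_FULL(flags));
        return 0;
    }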
(void *) psContext, psCtxCtl->ui32SyncCheckpointPoolCount)); } - psCtxCtl->bSyncCheckpointPoolValid = IMG_FALSE; + CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl); OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock); psCtxCtl->hSyncCheckpointPoolLock = NULL; #endif OSFreeMem(psContextInt->psContextCtl); RA_Delete(psContextInt->psSpanRA); RA_Delete(psContextInt->psSubAllocRA); - OSLockDestroy(psContextInt->hLock); - psContextInt->hLock = NULL; + +#if defined(PDUMP) + PVR_ASSERT(dllist_is_empty(&psContext->sSyncCheckpointBlockListHead)); + + OSLockAcquire(psDeviceNode->hSyncCheckpointContextListLock); + dllist_remove_node(&psContext->sListNode); + OSLockRelease(psDeviceNode->hSyncCheckpointContextListLock); + + OSLockDestroy(psContext->hSyncCheckpointBlockListLock); +#endif + OSFreeMem(psContext); } } @@ -379,12 +415,11 @@ _AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext, psSyncBlk->psContext = psContext; /* Allocate sync checkpoint block */ - psDevNode = psContext->psDevNode; + psDevNode = psContext->psContextCtl->psDeviceNode; PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block); - psSyncBlk->psDevNode = psDevNode; - eError = psDevNode->pfnAllocUFOBlock(psDevNode, + SYNC_CHECKPOINT_POOL_LIMIT * sizeof(SYNC_CHECKPOINT_FW_OBJ), &psSyncBlk->hMemDesc, &psSyncBlk->ui32FirmwareAddr, &psSyncBlk->ui32SyncBlockSize); @@ -424,7 +459,8 @@ _FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk) OSLockAcquire(psSyncBlk->hLock); if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount)) { - PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncBlk->psContext; + PVRSRV_DEVICE_NODE *psDevNode = psContext->psContextCtl->psDeviceNode; #if defined(PDUMP) OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock); @@ -450,9 +486,7 @@ _SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena, RA_FLAGS_T uFlags, RA_LENGTH_T uBaseAlignment, const IMG_CHAR *pszAnnotation, - RA_BASE_T *puiBase, - RA_LENGTH_T *puiActualSize, - RA_PERISPAN_HANDLE *phImport) + RA_IMPORT *psImport) { _SYNC_CHECKPOINT_CONTEXT *psContext = hArena; SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL; @@ -495,9 +529,9 @@ _SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena, PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize), "uiSpanSize invalid"); - *puiBase = psSyncBlock->uiSpanBase; - *puiActualSize = psSyncBlock->ui32SyncBlockSize; - *phImport = psSyncBlock; + psImport->base = psSyncBlock->uiSpanBase; + psImport->uSize = psSyncBlock->ui32SyncBlockSize; + psImport->hPriv = psSyncBlock; return PVRSRV_OK; fail_spanalloc: @@ -534,7 +568,8 @@ static INLINE IMG_UINT32 _SyncCheckpointGetOffset(SYNC_CHECKPOINT *psSyncInt) { IMG_UINT64 ui64Temp; - ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase; + ui64Temp = (IMG_UINT64)psSyncInt->uiAllocatedAddr - + (IMG_UINT64)psSyncInt->psSyncCheckpointBlock->uiSpanBase; PVR_ASSERT(ui64TemppfnFenceResolve)) { @@ -589,6 +627,16 @@ SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, return eError; } +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + psContextCtl = psSyncCheckpointContext->psContextCtl; + + PVR_DPF((PVR_DBG_WARNING, + "%s: Checkpoint %p DeviceID = [%u/%d]\n", __func__, + psSyncCheckpointContext, + psContextCtl->psDeviceNode->sDevId.ui32InternalID, + psContextCtl->psDeviceNode->sDevId.i32KernelDeviceID)); +#endif + if (papsSyncCheckpoints) { eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve( @@ -614,6 +662,8 @@ 
SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; } } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); #endif if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE) @@ -660,6 +710,65 @@ SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, return eError; } +PVRSRV_ERROR +SyncCheckpointResolveExportFence(PVRSRV_FENCE hExportFence, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PSYNC_CHECKPOINT *ppsSyncCheckpoint, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnExportFenceResolve)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "pfnExportFenceResolve is NULL"); + return eError; + } + + if (ppsSyncCheckpoint) + { + eError = g_psSyncCheckpointPfnStruct->pfnExportFenceResolve( + hExportFence, + psSyncCheckpointContext, + ppsSyncCheckpoint); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnExportFenceResolve"); + +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: g_psSyncCheckpointPfnStruct->pfnExportFenceResolve() for fence %d returned the following checkpoint:", + __func__, + hExportFence)); + + PVR_DPF((PVR_DBG_WARNING, + "%s: psSyncCheckpoint:<%p>", + __func__, + (void*)*ppsSyncCheckpoint)); + } +#endif + +#if defined(PDUMP) + if (*ppsSyncCheckpoint) + { + SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT *)(*ppsSyncCheckpoint); + psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; + } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + + return eError; +} + PVRSRV_ERROR SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, const IMG_CHAR *pszFenceName, @@ -714,9 +823,12 @@ SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, else { PVR_DPF((PVR_DBG_WARNING, - "%s created new fence<%d> for timeline<%d> using " - "sync checkpoint context<%p>, new sync_checkpoint=<%p>", + "%s [%u/%d] created new fence<%d> for timeline<%d> " + "using sync checkpoint context<%p>, " + "new sync_checkpoint=<%p>", __func__, + psDevNode->sDevId.ui32InternalID, + psDevNode->sDevId.i32KernelDeviceID, *phNewFence, hTimeline, (void*)psSyncCheckpointContext, @@ -733,6 +845,8 @@ SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; } } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); #endif } return eError; @@ -828,7 +942,26 @@ SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData) g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData); } return eError; +} + +PVRSRV_ERROR +SyncCheckpointNoHWSignalExportFence(PVRSRV_FENCE iExportFenceToSignal) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWSignalExpFence) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWSignalExpFence is NULL"); + } + else + { + g_psSyncCheckpointPfnStruct->pfnNoHWSignalExpFence(iExportFenceToSignal); + } + return eError; } PVRSRV_ERROR @@ -857,6 +990,124 @@ SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vad return eError; 
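Like the existing fence helpers, the new SyncCheckpointResolveExportFence and SyncCheckpointNoHWSignalExportFence only call through g_psSyncCheckpointPfnStruct after checking that both the struct and the specific callback have been registered, returning PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED otherwise. A compact sketch of that guard-then-dispatch pattern with hypothetical names:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct {
        int (*resolve)(int fence);
        int (*signal)(int fence);
    } sync_ops;

    static const sync_ops *g_ops;   /* NULL until a backend registers */

    static int demo_resolve(int fence) { printf("resolve %d\n", fence); return 0; }

    static void register_ops(const sync_ops *ops) { g_ops = ops; }

    static int resolve_export_fence(int fence)
    {
        /* Guard both the table and the individual callback: a backend may
         * register only a subset of the operations. */
        if (g_ops == NULL || g_ops->resolve == NULL) {
            fprintf(stderr, "no native sync backend registered\n");
            return -1;
        }
        return g_ops->resolve(fence);
    }

    int main(void)
    {
        static const sync_ops ops = { demo_resolve, NULL };

        resolve_export_fence(7);      /* rejected: nothing registered yet */
        register_ops(&ops);
        resolve_export_fence(7);      /* now dispatched to demo_resolve() */
        return 0;
    }

Checking the individual pointer as well as the table matters because a backend may legitimately register only some of the callbacks.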
} +static PVRSRV_ERROR +_AllocSyncCheckpoint(_SYNC_CHECKPOINT_CONTEXT *psContext, + SYNC_CHECKPOINT **ppsSyncCheckpoint) +{ + SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; + PVRSRV_ERROR eError; + + /* Allocate sync checkpoint */ + psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint)); + PVR_LOG_RETURN_IF_NOMEM(psNewSyncCheckpoint, "OSAllocMem"); /* Sets OOM error code */ + + eError = RA_Alloc(psContext->psSubAllocRA, + sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj), + RA_NO_IMPORT_MULTIPLIER, + 0, + sizeof(IMG_UINT32), + NULL, + &psNewSyncCheckpoint->uiAllocatedAddr, + NULL, + (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock); + + PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_ra_alloc); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + (void*)psContext->psSubAllocRA, + psNewSyncCheckpoint->uiAllocatedAddr)); +#endif + psNewSyncCheckpoint->psSyncCheckpointFwObj = + (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr + + (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32))); + psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1; + OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount); + +#if defined(DEBUG) + psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; +#endif + psNewSyncCheckpoint->sListNode.psPrevNode = NULL; + psNewSyncCheckpoint->sListNode.psNextNode = NULL; +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called to allocate new sync checkpoint<%p> for context<%p>", + __func__, (void*)psNewSyncCheckpoint, (void*)psContext)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpointFwObj<%p>", + __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint FwAddr=0x%x", + __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint))); +#endif + *ppsSyncCheckpoint = psNewSyncCheckpoint; + return PVRSRV_OK; + +fail_ra_alloc: + OSFreeMem(psNewSyncCheckpoint); + return eError; +} + +/* Poisons and frees the checkpoint + * Decrements context refcount. 
*/ +static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; + + psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0; + psSyncCheckpoint->psSyncCheckpointFwObj = NULL; +#if defined(DEBUG) + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED; +#endif + + RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, + psSyncCheckpoint->uiAllocatedAddr); + psSyncCheckpoint->psSyncCheckpointBlock = NULL; + + OSFreeMem(psSyncCheckpoint); + + OSAtomicDecrement(&psContext->hCheckpointCount); +} + +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) +static PVRSRV_ERROR +_PrepopulateSyncCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext, + IMG_UINT32 ui32InitPoolSize) +{ + IMG_UINT32 ui32SyncCheckpoint; + SYNC_CHECKPOINT *psNewSyncCheckpoint; + PVRSRV_ERROR eError; + + /* Allocate sync checkpoints and place in the pool */ + for (ui32SyncCheckpoint=0; ui32SyncCheckpointpsContextCtl->psSyncCheckpointPoolBlock = psNewSyncCheckpoint->psSyncCheckpointBlock; + } +#endif + + if (!_PutCheckpointInPool(psNewSyncCheckpoint)) + { + _FreeSyncCheckpoint(psNewSyncCheckpoint); + } + } +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING,"%s: Sync checkpoint pool [%d/%d]", + __func__, psContext->psContextCtl->ui32SyncCheckpointPoolCount, + psContext->psContextCtl->ui32SyncCheckpointPoolSize)); +#endif + return PVRSRV_OK; +} +#endif /* if (SYNC_CHECKPOINT_POOL_LIMIT > 0) */ + PVRSRV_ERROR SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext) @@ -864,40 +1115,84 @@ SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, _SYNC_CHECKPOINT_CONTEXT *psContext = NULL; _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL; PVRSRV_ERROR eError = PVRSRV_OK; + IMG_CHAR azTempName[PVRSRV_SYNC_NAME_LENGTH] = {0}; +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) +#if !defined(PDUMP) + void *pvAppHintState = NULL; + const IMG_UINT32 ui32DefaultMaxPoolLog2Size = PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2; + const IMG_UINT32 ui32DefaultInitPoolLog2Size = PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2; +#endif + IMG_UINT32 ui32MaxPoolLog2Size; + IMG_UINT32 ui32InitPoolLog2Size; + IMG_UINT32 ui32InitPoolSize = 0; +#endif + IMG_UINT32 ui32MaxPoolSize = 0; PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL), "ppsSyncCheckpointContext invalid", PVRSRV_ERROR_INVALID_PARAMS); +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) +#if defined(PDUMP) + /* + Pdumps use the maximum pool size to limit checkpoint address reuse and start with + a minimum pool to avoid extra work allocating checkpoints which might not be used. + Since during a pdump capture the pool is setup once during the pdump init phase + and not emptied / de-initialised between captures we need to map at least one + checkpoint at creation time, so that the pdump records the CPU mapping of the + pool. Not doing this would lead to errors on subsequent pdump captures because + only the first pdump would contain the mapping. 
+ */ + ui32MaxPoolLog2Size = 10; + ui32InitPoolLog2Size = 0; + ui32InitPoolSize = 1; +#else + /* Read AppHints to determine the size of the sync checkpoint pool, if specified */ + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SyncCheckpointPoolMaxLog2, + &ui32DefaultMaxPoolLog2Size, &ui32MaxPoolLog2Size); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SyncCheckpointPoolInitLog2, + &ui32DefaultInitPoolLog2Size, &ui32InitPoolLog2Size); + OSFreeAppHintState(pvAppHintState); +#endif + + if (ui32MaxPoolLog2Size > 0) + { + ui32MaxPoolSize = 1 << ui32MaxPoolLog2Size; + if (ui32MaxPoolSize > SYNC_CHECKPOINT_POOL_LIMIT) + { + ui32MaxPoolSize = SYNC_CHECKPOINT_POOL_LIMIT; + } + } +#endif + psContext = OSAllocMem(sizeof(*psContext)); PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */ - psContextCtl = OSAllocMem(sizeof(*psContextCtl)); + /* psContextCtl includes allocation for the sync checkpoint pool) */ + psContextCtl = OSAllocMem(sizeof(*psContextCtl) + IMG_FLEX_ARRAY_SIZE(sizeof(SYNC_CHECKPOINT*), ui32MaxPoolSize)); PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */ - eError = OSLockCreate(&psContext->hLock); - PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", fail_create_context_lock); - eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock); PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock); -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock); PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock); #endif dllist_init(&psContextCtl->sDeferredCleanupListHead); -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) psContextCtl->ui32SyncCheckpointPoolCount = 0; psContextCtl->ui32SyncCheckpointPoolWp = 0; psContextCtl->ui32SyncCheckpointPoolRp = 0; - psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE; - psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE; +#if defined(PDUMP) + psContextCtl->psSyncCheckpointPoolBlock = NULL; + psContextCtl->bAllocateFromCheckpointPool = IMG_FALSE; #endif - psContext->psDevNode = psDevNode; - - OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext); - OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext); + psContextCtl->ui8PoolStateFlags = SYNC_CHECKPOINT_POOL_VALID; +#endif + psContextCtl->psDeviceNode = (SHARED_DEV_CONNECTION)psDevNode; /* Create the RA for sub-allocations of the sync checkpoints @@ -907,10 +1202,11 @@ SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, back the blocksize when it does the import which overrides what we specify here. 
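The control block is now allocated in one go as sizeof(*psContextCtl) plus IMG_FLEX_ARRAY_SIZE(...) for the pool, with the pool size derived from the SyncCheckpointPoolMaxLog2/SyncCheckpointPoolInitLog2 AppHints and clamped to SYNC_CHECKPOINT_POOL_LIMIT. A self-contained sketch of that flexible-array sizing, with illustrative names and the same 1024 limit used purely for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define POOL_LIMIT 1024u            /* 2^10 */

    typedef struct {
        uint32_t pool_size;             /* number of usable slots            */
        uint32_t count;                 /* slots currently occupied          */
        void    *slots[];               /* C99 flexible array member: last!  */
    } pool_ctl;

    /* Turn a log2 hint into a slot count, clamped to the compile-time limit. */
    static uint32_t pool_size_from_log2(uint32_t log2_size)
    {
        if (log2_size == 0)
            return 0;
        if (log2_size >= 10)            /* 2^10 == POOL_LIMIT */
            return POOL_LIMIT;
        return 1u << log2_size;
    }

    static pool_ctl *pool_create(uint32_t log2_size)
    {
        uint32_t n = pool_size_from_log2(log2_size);
        pool_ctl *ctl = malloc(sizeof(*ctl) + (size_t)n * sizeof(void *));

        if (ctl != NULL) {
            ctl->pool_size = n;
            ctl->count = 0;
        }
        return ctl;
    }

    int main(void)
    {
        pool_ctl *ctl = pool_create(12);                 /* 4096 requested...   */

        if (ctl != NULL) {
            printf("pool size = %u\n", ctl->pool_size);  /* ...clamped to 1024  */
            free(ctl);
        }
        return 0;
    }

Because the array is the last member, a single allocation keeps the bookkeeping and the slots contiguous; C requires a flexible array member to be last, which is why the patch comments the field as "Must be last member in structure".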
*/ - psContext->psSubAllocRA = RA_Create(psContext->azName, + OSSNPrintf(azTempName, PVRSRV_SYNC_NAME_LENGTH,"Sync Prim RA-%p", psContext); + psContext->psSubAllocRA = RA_Create(azTempName, /* Params for imports */ _Log2(sizeof(IMG_UINT32)), - RA_LOCKCLASS_2, + RA_LOCKCLASS_3, _SyncCheckpointBlockImport, _SyncCheckpointBlockUnimport, psContext, @@ -927,7 +1223,8 @@ SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, ensures that all are imports are added to the RA in a linear fashion */ - psContext->psSpanRA = RA_Create(psContext->azSpanName, + OSSNPrintf(azTempName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext); + psContext->psSpanRA = RA_Create(azTempName, /* Params for imports */ 0, RA_LOCKCLASS_1, @@ -967,17 +1264,64 @@ SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); dllist_add_to_tail(&psDevNode->sSyncCheckpointContextListHead, &psContext->sListNode); OSLockRelease(psDevNode->hSyncCheckpointContextListLock); +#endif + + psContextCtl->ui32CurrentInUseSyncCheckpoints = 0; + psContextCtl->ui32MaxInUseSyncCheckpoints = 0; + psContextCtl->ui32CurrentInUseMirroredSyncCPs = 0; + psContextCtl->ui32MaxInUseMirroredSyncCPs = 0; + eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointStatsLock); + PVR_GOTO_IF_ERROR(eError, fail_span_stat); +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) + /* Pre-populate the sync checkpoint pool, if specified */ + psContextCtl->ui32SyncCheckpointPoolSize = ui32MaxPoolSize; + + /* Ensure ui32MaxPoolSize is a POT and does not exceed SYNC_CHECKPOINT_POOL_LIMIT, + * and ui32InitPoolSize does not exceed ui32MaxPoolSize. + */ + if (psContextCtl->ui32SyncCheckpointPoolSize > SYNC_CHECKPOINT_POOL_LIMIT) + { + PVR_DPF((PVR_DBG_WARNING, "%s: AppHint SyncCheckpointPoolMaxLog2(%d) would exceed " + "SYNC_CHECKPOINT_POOL_LIMIT(%d) - limiting to %d", + __func__, ui32MaxPoolLog2Size, + SYNC_CHECKPOINT_POOL_LIMIT, SYNC_CHECKPOINT_POOL_LIMIT)); + psContextCtl->ui32SyncCheckpointPoolSize = SYNC_CHECKPOINT_POOL_LIMIT; + } + + if (ui32InitPoolLog2Size > 0) + { + ui32InitPoolSize = 1 << ui32InitPoolLog2Size; + } + if (ui32InitPoolSize > psContextCtl->ui32SyncCheckpointPoolSize) + { + PVR_DPF((PVR_DBG_WARNING, "%s: AppHint SyncCheckpointPoolInitLog2(%d) would exceed " + "ui32SyncCheckpointPoolSize(%d) - limiting to %d", + __func__, ui32InitPoolLog2Size, + psContextCtl->ui32SyncCheckpointPoolSize, + psContextCtl->ui32SyncCheckpointPoolSize)); + ui32InitPoolSize = psContextCtl->ui32SyncCheckpointPoolSize; + } + + if (ui32InitPoolSize > 0) + { + eError = _PrepopulateSyncCheckpointPool(psContext, ui32InitPoolSize); + PVR_LOG_RETURN_IF_ERROR_VA(eError, "_PrepopulateSyncCheckpointPool(%d)", ui32InitPoolSize); + } #endif return PVRSRV_OK; - +fail_span_stat: +#if defined(PDUMP) + OSLockDestroy(psContext->hSyncCheckpointBlockListLock); + psContext->hSyncCheckpointBlockListLock = NULL; +#endif fail_span_add: RA_Delete(psContext->psSpanRA); fail_span: RA_Delete(psContext->psSubAllocRA); fail_suballoc: -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock); psContextCtl->hSyncCheckpointPoolLock = NULL; fail_create_pool_lock: @@ -985,9 +1329,6 @@ SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock); psContextCtl->hDeferredCleanupListLock = NULL; fail_create_deferred_cleanup_lock: - OSLockDestroy(psContext->hLock); - psContext->hLock = NULL; -fail_create_context_lock: 
OSFreeMem(psContextCtl); fail_alloc2: OSFreeMem(psContext); @@ -995,25 +1336,6 @@ SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, return eError; } -/* Poisons and frees the checkpoint - * Decrements context refcount. */ -static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint) -{ - _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; - - psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0; - psSyncCheckpoint->psSyncCheckpointFwObj = NULL; - psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED; - - RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, - psSyncCheckpoint->uiSpanAddr); - psSyncCheckpoint->psSyncCheckpointBlock = NULL; - - OSFreeMem(psSyncCheckpoint); - - OSAtomicDecrement(&psContext->hCheckpointCount); -} - PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) { PVRSRV_ERROR eError = PVRSRV_OK; @@ -1025,7 +1347,7 @@ PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpo "psSyncCheckpointContext invalid", PVRSRV_ERROR_INVALID_PARAMS); - psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode; + psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psContextCtl->psDeviceNode; #if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, @@ -1037,7 +1359,7 @@ PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpo _CheckDeferredCleanupList(psContext); -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) { IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext); @@ -1078,6 +1400,9 @@ PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpo { SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); bool bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode); +#if defined(DEBUG) + IMG_UINT32 ui32State = psSyncCheckpoint->psSyncCheckpointFwObj->ui32State; +#endif /* Line below avoids build error in release builds (where PVR_DPF is not defined) */ PVR_UNREFERENCED_PARAMETER(bDeferredFree); @@ -1088,9 +1413,9 @@ PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpo psSyncCheckpoint->ui32UID, psSyncCheckpoint->azName, OSAtomicRead(&psSyncCheckpoint->hRefCount), - psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ? + ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ? "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" : - psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ? + ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ? 
"PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED", psSyncCheckpoint->ui32FWAddr, OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), @@ -1106,24 +1431,9 @@ PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpo } else { - IMG_INT iRf2 = 0; - - iRf2 = OSAtomicRead(&psContext->hRefCount); SyncCheckpointContextUnref(psSyncCheckpointContext); } -#if defined(PDUMP) - if (dllist_is_empty(&psContext->sSyncCheckpointBlockListHead)) - { - OSLockDestroy(psContext->hSyncCheckpointBlockListLock); - psContext->hSyncCheckpointBlockListLock = NULL; - - OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); - dllist_remove_node(&psContext->sListNode); - OSLockRelease(psDevNode->hSyncCheckpointContextListLock); - } -#endif - return eError; } @@ -1137,14 +1447,15 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext; PVRSRV_DEVICE_NODE *psDevNode; + OS_SPINLOCK_FLAGS uiFlags; PVRSRV_ERROR eError; PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS); PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS); - psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode; + psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psContextCtl->psDeviceNode; -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) #if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool", __func__)); @@ -1162,53 +1473,37 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, /* If pool is empty (or not defined) alloc the new sync checkpoint */ if (!psNewSyncCheckpoint) { - psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint)); + eError = _AllocSyncCheckpoint(psSyncContextInt, &psNewSyncCheckpoint); PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */ - eError = RA_Alloc(psSyncContextInt->psSubAllocRA, - sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj), - RA_NO_IMPORT_MULTIPLIER, - 0, - sizeof(IMG_UINT32), - (IMG_CHAR*)pszCheckpointName, - &psNewSyncCheckpoint->uiSpanAddr, - NULL, - (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock); - PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_raalloc); +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) && defined(PDUMP) + /* If the pool is not yet enabled and we allocated all the block, then the pool is ready to use. 
*/ + if (!psSyncContextInt->psContextCtl->bAllocateFromCheckpointPool) + { + RA_USAGE_STATS sRAUsageStats; -#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) - PVR_DPF((PVR_DBG_WARNING, - "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", - __func__, - (void*)psSyncContextInt->psSubAllocRA, - psNewSyncCheckpoint->uiSpanAddr)); -#endif - psNewSyncCheckpoint->psSyncCheckpointFwObj = - (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr + - (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32))); - psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + - _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1; - OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount); - psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; -#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) - PVR_DPF((PVR_DBG_WARNING, - "%s called to allocate new sync checkpoint<%p> for context<%p>", - __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext)); - PVR_DPF((PVR_DBG_WARNING, - "%s psSyncCheckpointFwObj<%p>", - __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj)); - PVR_DPF((PVR_DBG_WARNING, - "%s psSyncCheckpoint FwAddr=0x%x", - __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint))); - PVR_DPF((PVR_DBG_WARNING, - "%s pszCheckpointName = %s", - __func__, pszCheckpointName)); - PVR_DPF((PVR_DBG_WARNING, - "%s psSyncCheckpoint Timeline=%d", - __func__, hTimeline)); + RA_Get_Usage_Stats(psSyncContextInt->psSubAllocRA, &sRAUsageStats); + + if (sRAUsageStats.ui64FreeArenaSize < sizeof(SYNC_CHECKPOINT_FW_OBJ)) + { + psSyncContextInt->psContextCtl->bAllocateFromCheckpointPool = IMG_TRUE; + } + } #endif } + OSSpinLockAcquire(psSyncContextInt->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + if (++psSyncContextInt->psContextCtl->ui32CurrentInUseSyncCheckpoints > psSyncContextInt->psContextCtl->ui32MaxInUseSyncCheckpoints) + { + psSyncContextInt->psContextCtl->ui32MaxInUseSyncCheckpoints = psSyncContextInt->psContextCtl->ui32CurrentInUseSyncCheckpoints; + } + if ((hTimeline == SYNC_CHECKPOINT_MIRRORED_CHECKPOINT) && + ++psSyncContextInt->psContextCtl->ui32CurrentInUseMirroredSyncCPs > psSyncContextInt->psContextCtl->ui32MaxInUseMirroredSyncCPs) + { + psSyncContextInt->psContextCtl->ui32MaxInUseMirroredSyncCPs = psSyncContextInt->psContextCtl->ui32CurrentInUseMirroredSyncCPs; + } + OSSpinLockRelease(psSyncContextInt->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + psNewSyncCheckpoint->hTimeline = hTimeline; OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1); OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0); @@ -1220,7 +1515,7 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, if (pszCheckpointName) { /* Copy over the checkpoint name annotation */ - OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); + OSStringSafeCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); } else { @@ -1246,7 +1541,7 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, hFence, psNewSyncCheckpoint->ui32FWAddr, psNewSyncCheckpoint->azName, - sizeof(psNewSyncCheckpoint->azName)); + OSStringLength(psNewSyncCheckpoint->azName)); if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) { @@ -1255,7 +1550,7 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, if (pszCheckpointName) { /* Copy the checkpoint name annotation 
into a fixed-size array */ - OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); + OSStringSafeCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); } else { @@ -1281,14 +1576,11 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, } } - { - OS_SPINLOCK_FLAGS uiFlags; - /* Add the sync checkpoint to the device list */ - OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); - dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList, - &psNewSyncCheckpoint->sListNode); - OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); - } + /* Add the sync checkpoint to the device list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList, + &psNewSyncCheckpoint->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint; @@ -1301,8 +1593,6 @@ SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, #endif return PVRSRV_OK; -fail_raalloc: - OSFreeMem(psNewSyncCheckpoint); fail_alloc: return eError; } @@ -1311,9 +1601,10 @@ static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) { _SYNC_CHECKPOINT_CONTEXT *psContext; PVRSRV_DEVICE_NODE *psDevNode; + OS_SPINLOCK_FLAGS uiFlags; psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext; - psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode; + psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psContextCtl->psDeviceNode; /* * Without this reference, the context may be destroyed as soon @@ -1323,7 +1614,9 @@ static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) */ SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); +#if defined(DEBUG) PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE); +#endif if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount)) { PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed")); @@ -1334,31 +1627,38 @@ static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) { + PVRSRV_ERROR eError; + #if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..", __func__)); #endif + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) && psSyncCheckpointInt->hRecord) { - PVRSRV_ERROR eError; /* remove this sync record */ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); } - { - OS_SPINLOCK_FLAGS uiFlags; - /* Remove the sync checkpoint from the global list */ - OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); - dllist_remove_node(&psSyncCheckpointInt->sListNode); - OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); - } + /* Remove the sync checkpoint from the global list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_remove_node(&psSyncCheckpointInt->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + OSSpinLockAcquire(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + psContext->psContextCtl->ui32CurrentInUseSyncCheckpoints--; + if (psSyncCheckpointInt->hTimeline == SYNC_CHECKPOINT_MIRRORED_CHECKPOINT) + { 
+ psContext->psContextCtl->ui32CurrentInUseMirroredSyncCPs--; + } + OSSpinLockRelease(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) #if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) PVR_DPF((PVR_DBG_WARNING, "%s attempting to return sync checkpoint to the pool", @@ -1367,7 +1667,7 @@ static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) if (!_PutCheckpointInPool(psSyncCheckpointInt)) #endif { -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) #if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", @@ -1376,12 +1676,12 @@ static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) #endif #if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, - "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32AllocatedAddr=0x%llx", __func__, psSyncCheckpointInt->ui32UID, (void*)psSyncCheckpointInt, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA, - psSyncCheckpointInt->uiSpanAddr)); + psSyncCheckpointInt->uiAllocatedAddr)); #endif _FreeSyncCheckpoint(psSyncCheckpointInt); } @@ -1431,6 +1731,7 @@ void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint) PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); #if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) +#if defined(DEBUG) PVR_DPF((PVR_DBG_WARNING, "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x", __func__, @@ -1438,6 +1739,14 @@ void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint) (void*)psSyncCheckpoint, (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)), psSyncCheckpointInt->ui32ValidationCheck)); +#else + PVR_DPF((PVR_DBG_WARNING, + "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpoint, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)))); +#endif #endif SyncCheckpointUnref(psSyncCheckpointInt); } @@ -1451,20 +1760,23 @@ SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSync if (psSyncCheckpointInt) { - PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + + PVR_LOG_IF_FALSE((ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), "psSyncCheckpoint already signalled"); - if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + if (ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) { #if defined(SUPPORT_RGX) - PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; + PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); #endif psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -#if defined(PDUMP) - _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags); +#if 
defined(SUPPORT_RGX) && defined(PDUMP) + _SyncCheckpointUpdatePDump(psContext->psContextCtl->psDeviceNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags); #endif } else @@ -1475,7 +1787,7 @@ SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSync __func__, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, psSyncCheckpointInt->ui32UID, - psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); + ui32State)); } } } @@ -1489,13 +1801,16 @@ SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint) if (psSyncCheckpointInt) { - PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + + PVR_LOG_IF_FALSE((ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), "psSyncCheckpoint already signalled"); - if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + if (ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) { #if defined(SUPPORT_RGX) - PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; + PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE); #endif @@ -1510,7 +1825,7 @@ SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint) __func__, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, psSyncCheckpointInt->ui32UID, - psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); + ui32State)); #endif } } @@ -1525,19 +1840,22 @@ SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncF if (psSyncCheckpointInt) { - PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + + PVR_LOG_IF_FALSE((ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), "psSyncCheckpoint already signalled"); - if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + if (ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) { #if defined(SUPPORT_RGX) - PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; + PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint); - sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sUpdate.ui32OldValue = ui32State; sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED; RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, @@ -1547,8 +1865,8 @@ SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncF psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED; -#if defined(PDUMP) - _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags); +#if defined(SUPPORT_RGX) && defined(PDUMP) + _SyncCheckpointUpdatePDump(psContext->psContextCtl->psDeviceNode, psSyncCheckpointInt, 
PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags); #endif } } @@ -1563,13 +1881,52 @@ IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 if (psSyncCheckpointInt) { + IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; #if defined(SUPPORT_RGX) - PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); #endif - bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || - (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)); + + switch (ui32State) + { + case PVRSRV_SYNC_CHECKPOINT_UNDEF: + case PVRSRV_SYNC_CHECKPOINT_ACTIVE: + { + break; + } + case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: + case PVRSRV_SYNC_CHECKPOINT_ERRORED: + { + bRet = IMG_TRUE; + break; + } + default: + { + + PVRSRV_DEVICE_NODE *psDeviceNode = psContext->psContextCtl->psDeviceNode; + +#if defined(SUPPORT_RGX) + RGXUpdateHealthStatus(psDeviceNode, IMG_FALSE); +#endif + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PCI error - state=0x%x, ID=%d, %s, fwaddr=%#08x", + __func__, ui32State, psSyncCheckpoint->ui32UID, + psSyncCheckpoint->azName, psSyncCheckpoint->ui32FWAddr)); + bRet = IMG_TRUE; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: unknown state (0x%x) - ID=%d, %s, fwaddr=%#08x", + __func__, ui32State, psSyncCheckpoint->ui32UID, + psSyncCheckpoint->azName, psSyncCheckpoint->ui32FWAddr)); + } + break; + } + + } #if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, @@ -1593,7 +1950,8 @@ SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceS if (psSyncCheckpointInt) { #if defined(SUPPORT_RGX) - PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; + PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); #endif @@ -1703,10 +2061,13 @@ SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint) PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt); +#if defined(DEBUG) if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) +#endif { return &psSyncCheckpointInt->sCheckpointUFOAddr; } +#if defined(DEBUG) else { PVR_DPF((PVR_DBG_ERROR, @@ -1715,6 +2076,7 @@ SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint) (void*)psSyncCheckpointInt, psSyncCheckpointInt->ui32ValidationCheck)); } +#endif invalid_chkpt: return NULL; @@ -1728,10 +2090,13 @@ SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint) PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt); +#if defined(DEBUG) if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) +#endif { ui32Ret = psSyncCheckpointInt->ui32FWAddr; } +#if defined(DEBUG) else { PVR_DPF((PVR_DBG_ERROR, @@ -1740,6 +2105,7 @@ SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint) 
(void*)psSyncCheckpointInt, psSyncCheckpointInt->ui32ValidationCheck)); } +#endif invalid_chkpt: return ui32Ret; @@ -1758,10 +2124,12 @@ SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint) "%s returning ID for sync checkpoint<%p>", __func__, (void*)psSyncCheckpointInt)); +#if defined(DEBUG) PVR_DPF((PVR_DBG_WARNING, "%s (validationCheck=0x%x)", __func__, psSyncCheckpointInt->ui32ValidationCheck)); +#endif #endif ui32Ret = psSyncCheckpointInt->ui32UID; #if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) @@ -1909,6 +2277,34 @@ void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui3 #endif } +PVRSRV_ERROR SyncCheckpointRollbackExportFence(PVRSRV_FENCE hExportFence) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnExportFenceRollback)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "pfnExportFenceRollback is NULL"); + return eError; + } + + if (hExportFence != PVRSRV_NO_FENCE) + { + eError = g_psSyncCheckpointPfnStruct->pfnExportFenceRollback(hExportFence); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnExportFenceRollback"); + + return eError; +} + static void _SyncCheckpointState(PDLLIST_NODE psNode, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile) @@ -1937,9 +2333,21 @@ static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle DLLIST_NODE *psNode, *psNext; OS_SPINLOCK_FLAGS uiFlags; + if (psDevNode->hSyncCheckpointContext == NULL) return; + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------"); + + OSSpinLockAcquire(psDevNode->hSyncCheckpointContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + PVR_DUMPDEBUG_LOG("(SyncCP Counts: InUse:%d Max:%d)", + psDevNode->hSyncCheckpointContext->psContextCtl->ui32CurrentInUseSyncCheckpoints, + psDevNode->hSyncCheckpointContext->psContextCtl->ui32MaxInUseSyncCheckpoints); + PVR_DUMPDEBUG_LOG("(SyncCP Mirrored Counts: InUse:%d Max:%d)", + psDevNode->hSyncCheckpointContext->psContextCtl->ui32CurrentInUseMirroredSyncCPs, + psDevNode->hSyncCheckpointContext->psContextCtl->ui32MaxInUseMirroredSyncCPs); + OSSpinLockRelease(psDevNode->hSyncCheckpointContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) { @@ -2166,8 +2574,8 @@ _SyncCheckpointRecordAdd( const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt) { struct SYNC_CHECKPOINT_RECORD * psSyncRec; - _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext; - PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)hSyncCheckpointBlock->psContext; + PVRSRV_DEVICE_NODE *psDevNode = psContext->psContextCtl->psDeviceNode; PVRSRV_ERROR eError = PVRSRV_OK; PVR_RETURN_IF_INVALID_PARAM(phRecord); @@ -2177,7 +2585,6 @@ _SyncCheckpointRecordAdd( psSyncRec = OSAllocMem(sizeof(*psSyncRec)); PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */ - psSyncRec->psDevNode = psDevNode; psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock; psSyncRec->ui32SyncOffset = ui32SyncOffset; psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; @@ -2190,7 
+2597,7 @@ _SyncCheckpointRecordAdd( if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; /* Copy over the class name annotation */ - OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); + OSStringSafeCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); } else { @@ -2232,11 +2639,12 @@ _SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord) { struct SYNC_CHECKPOINT_RECORD **ppFreedSync; struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord; + _SYNC_CHECKPOINT_CONTEXT *psContext = pSync->psSyncCheckpointBlock->psContext; PVRSRV_DEVICE_NODE *psDevNode; PVR_RETURN_IF_INVALID_PARAM(hRecord); - psDevNode = pSync->psDevNode; + psDevNode = psContext->psContextCtl->psDeviceNode; OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); @@ -2287,7 +2695,8 @@ static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncChec pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, psSyncCheckpointRec->ui32SyncOffset); - PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)", + PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u " + "FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)", psSyncCheckpointRec->uiPID, ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), @@ -2562,15 +2971,14 @@ MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData) OSLockRelease(psDevNode->hSyncCheckpointSignalLock); } +#if defined(PDUMP) PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) { PVRSRV_ERROR eError; PSYNC_CHECKPOINT *apsCheckpoints = NULL; SYNC_CHECKPOINT *psSyncCheckpoint = NULL; IMG_UINT32 i, uiNumCheckpoints = 0; -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) - PVRSRV_RGXDEV_INFO *psDevInfo; -#endif + _SYNC_CHECKPOINT_CONTEXT *psContext; if (hFence != PVRSRV_NO_FENCE) { @@ -2587,7 +2995,8 @@ PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) { /* Flushing deferred fence signals to pdump */ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0]; - MISRHandler_PdumpDeferredSyncSignalPoster(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode); + psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext; + MISRHandler_PdumpDeferredSyncSignalPoster(psContext->psContextCtl->psDeviceNode); } for (i=0; i < uiNumCheckpoints; i++) @@ -2595,7 +3004,8 @@ PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[i]; if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) { - PDUMPCOMMENTWITHFLAGS(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode, + psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext; + PDUMPCOMMENTWITHFLAGS(psContext->psContextCtl->psDeviceNode, psSyncCheckpoint->ui32PDumpFlags, "Wait for Fence %s (ID:%d)", psSyncCheckpoint->azName, @@ -2611,18 +3021,7 @@ PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) } } -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) - /* Sampling of USC timers can only be done after synchronisation for a 3D kick is over */ - if (uiNumCheckpoints) - { - psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0]; - psDevInfo = 
psSyncCheckpoint->psSyncCheckpointBlock->psDevNode->pvDevice; - if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) - { - RGXValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL); - } - } -#endif + /* Free the memory that was allocated for the sync checkpoint list returned */ if (apsCheckpoints) @@ -2632,6 +3031,7 @@ PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) return PVRSRV_OK; } +#endif /* #if defined(PDUMP) */ static PVRSRV_ERROR _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent) @@ -2670,10 +3070,11 @@ _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent) static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) { _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; - PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode; + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psCtxCtl->psDeviceNode; DECLARE_DLLIST(sCleanupList); DLLIST_NODE *psNode, *psNext; OS_SPINLOCK_FLAGS uiFlags; + PVRSRV_ERROR eError; #if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, "%s called", __func__)); @@ -2703,7 +3104,6 @@ static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) && psSyncCheckpointInt->hRecord) { - PVRSRV_ERROR eError; /* remove this sync record */ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); @@ -2741,7 +3141,15 @@ static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + /* Unref the checkpoint in use */ + OSSpinLockAcquire(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + psContext->psContextCtl->ui32CurrentInUseSyncCheckpoints--; + if (psSyncCheckpointInt->hTimeline == SYNC_CHECKPOINT_MIRRORED_CHECKPOINT) + { + psContext->psContextCtl->ui32CurrentInUseMirroredSyncCPs--; + } + OSSpinLockRelease(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) #if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, "%s attempting to return sync(ID:%d),%p> to pool", @@ -2752,15 +3160,18 @@ static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) if (!_PutCheckpointInPool(psSyncCheckpointInt)) #endif { -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) #if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", __func__)); #endif #endif #if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) + } else { +#endif PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), still pending (enq=%d,FWRef=%d)", __func__, @@ -2775,27 +3186,37 @@ static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) } } -#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext) { _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; SYNC_CHECKPOINT *psSyncCheckpoint = NULL; OS_SPINLOCK_FLAGS uiFlags; + if (psCtxCtl->ui32SyncCheckpointPoolSize == 0) + { + goto pool_not_used; + } + /* Acquire sync checkpoint pool lock */ 
OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); /* Check if we can allocate from the pool */ - if (psCtxCtl->bSyncCheckpointPoolValid && - (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT)) + if (CHECKPOINT_POOL_VALID(psCtxCtl) && +#if defined(PDUMP) + psCtxCtl->bAllocateFromCheckpointPool && +#endif + (psCtxCtl->ui32SyncCheckpointPoolCount > 0)) { /* Get the next sync checkpoint from the pool */ - psSyncCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; + psSyncCheckpoint = psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; psCtxCtl->ui32SyncCheckpointPoolRp = - (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; + (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1); psCtxCtl->ui32SyncCheckpointPoolCount--; - psCtxCtl->bSyncCheckpointPoolFull = IMG_FALSE; + CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl); +#if defined(DEBUG) psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; +#endif #if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, "%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, " @@ -2803,7 +3224,7 @@ static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psConte __func__, psSyncCheckpoint->ui32UID, psCtxCtl->ui32SyncCheckpointPoolCount, - SYNC_CHECKPOINT_POOL_SIZE, + psCtxCtl->ui32SyncCheckpointPoolSize, (void *) psContext, psCtxCtl->ui32SyncCheckpointPoolRp, psCtxCtl->ui32SyncCheckpointPoolWp)); @@ -2812,6 +3233,7 @@ static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psConte /* Release sync checkpoint pool lock */ OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); +pool_not_used: return psSyncCheckpoint; } @@ -2822,30 +3244,47 @@ static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint) IMG_BOOL bReturnedToPool = IMG_FALSE; OS_SPINLOCK_FLAGS uiFlags; + if (psCtxCtl->ui32SyncCheckpointPoolSize == 0) + { + return IMG_FALSE; + } + /* Acquire sync checkpoint pool lock */ OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); /* Check if pool has space */ - if (psCtxCtl->bSyncCheckpointPoolValid && !psCtxCtl->bSyncCheckpointPoolFull) + if (CHECKPOINT_POOL_VALID(psCtxCtl) && +#if defined(PDUMP) + (psSyncCheckpoint->psSyncCheckpointBlock == psCtxCtl->psSyncCheckpointPoolBlock) && +#endif + !(CHECKPOINT_POOL_FULL(psCtxCtl))) { /* Put the sync checkpoint into the next write slot in the pool */ - psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint; + psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint; psCtxCtl->ui32SyncCheckpointPoolWp = - (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & SYNC_CHECKPOINT_POOL_MASK; + (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1); psCtxCtl->ui32SyncCheckpointPoolCount++; /* Update if the checkpoint that was just added filled up the pool */ - psCtxCtl->bSyncCheckpointPoolFull = - (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp); + if (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp) + { + SET_CHECKPOINT_POOL_FULL(psCtxCtl); +#if defined(PDUMP) + /* The pool is now fully allocated and ready to use. 
*/ + psCtxCtl->bAllocateFromCheckpointPool = IMG_TRUE; +#endif + } bReturnedToPool = IMG_TRUE; psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF; +#if defined(DEBUG) psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL; +#endif #if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) PVR_DPF((PVR_DBG_WARNING, "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d", __func__, psSyncCheckpoint->ui32UID, psCtxCtl->ui32SyncCheckpointPoolCount, - SYNC_CHECKPOINT_POOL_SIZE, + psCtxCtl->ui32SyncCheckpointPoolSize, psCtxCtl->ui32SyncCheckpointPoolRp, psCtxCtl->ui32SyncCheckpointPoolWp)); #endif @@ -2863,26 +3302,21 @@ static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) DECLARE_DLLIST(sCleanupList); DLLIST_NODE *psThis, *psNext; OS_SPINLOCK_FLAGS uiFlags; - IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount; - IMG_BOOL bPoolValid; + IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0; /* Acquire sync checkpoint pool lock */ OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); - bPoolValid = psCtxCtl->bSyncCheckpointPoolValid; - ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount; - /* While the pool still contains sync checkpoints, free them */ - while (bPoolValid && psCtxCtl->ui32SyncCheckpointPoolCount > 0) + while (CHECKPOINT_POOL_VALID(psCtxCtl) && psCtxCtl->ui32SyncCheckpointPoolCount > 0) { /* Get the sync checkpoint from the next read slot in the pool */ - psCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; + psCheckpoint = psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; psCtxCtl->ui32SyncCheckpointPoolRp = - (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; + (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1); psCtxCtl->ui32SyncCheckpointPoolCount--; - psCtxCtl->bSyncCheckpointPoolFull = - ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) && - (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)); + + CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl); if (psCheckpoint) { @@ -2903,9 +3337,9 @@ static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) /* go through the local list and free all of the sync checkpoints */ #if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) - PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, " + PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, ui8PoolStateFlags=0x%x, " "uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext, - bPoolValid, ui32PoolCount)); + psCtxCtl->ui8PoolStateFlags, psCtxCtl->ui32SyncCheckpointPoolCount)); if (ui32NullScpCount > 0) { @@ -2919,22 +3353,25 @@ static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) psCheckpoint = IMG_CONTAINER_OF(psThis, SYNC_CHECKPOINT, sListNode); #if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) +#if defined(DEBUG) if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL) { PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry " "(ui32ValidationCheck=0x%x)", __func__, psCheckpoint->ui32ValidationCheck)); } - +#endif PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint(ID:%d)", __func__, psCheckpoint->ui32UID)); +#if defined(DEBUG) PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", __func__, psCheckpoint->ui32ValidationCheck)); +#endif PVR_DPF((PVR_DBG_WARNING, - "%s psSyncCheckpoint->uiSpanAddr=0x%llx", - __func__, psCheckpoint->uiSpanAddr)); + "%s psSyncCheckpoint->uiAllocatedAddr=0x%llx", + __func__, 
psCheckpoint->uiAllocatedAddr)); PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", __func__, (void *) psCheckpoint->psSyncCheckpointBlock)); @@ -2952,7 +3389,7 @@ static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) psCheckpoint->ui32UID, (void *) psCheckpoint, (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, - psCheckpoint->uiSpanAddr)); + psCheckpoint->uiAllocatedAddr)); #endif dllist_remove_node(psThis); @@ -2963,4 +3400,55 @@ static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) return ui32ItemsFreed; } -#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */ +#endif /* (SYNC_CHECKPOINT_POOL_LIMIT > 0) */ + +IMG_BOOL SyncCheckpointCommonDeviceIDs(PSYNC_CHECKPOINT_CONTEXT psContext, + IMG_HANDLE hDevRef) +{ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevRef; + + if (unlikely((hDevRef == NULL) || (psContext == NULL))) + { + return IMG_TRUE; + } + + return (psContext->psContextCtl->psDeviceNode->sDevId.i32KernelDeviceID == + psDevNode->sDevId.i32KernelDeviceID) ? IMG_TRUE : IMG_FALSE; +} + +PVRSRV_ERROR SyncCheckpointGetCounters(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 *puiInUse, + IMG_UINT32 *puiMax, + IMG_UINT32 *puiXDInUse, + IMG_UINT32 *puiXDMax) +{ + PSYNC_CHECKPOINT_CONTEXT psSyncContext; + OS_SPINLOCK_FLAGS uiFlags; + + PVR_LOG_RETURN_IF_FALSE((psDevNode != NULL), "psDevNode invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + PVR_RETURN_IF_FALSE((psDevNode->hSyncCheckpointContext != NULL), PVRSRV_OK); + + psSyncContext = (PSYNC_CHECKPOINT_CONTEXT)psDevNode->hSyncCheckpointContext; + + PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((puiInUse != NULL), "puiInUse invalid", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((puiMax != NULL), "puiMax invalid", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((puiXDInUse != NULL), "puiXDInUse invalid", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((puiXDMax != NULL), "puiXDMax invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + OSSpinLockAcquire(psSyncContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + *puiInUse = psSyncContext->psContextCtl->ui32CurrentInUseSyncCheckpoints; + *puiMax = psSyncContext->psContextCtl->ui32MaxInUseSyncCheckpoints; + *puiXDInUse = psSyncContext->psContextCtl->ui32CurrentInUseMirroredSyncCPs; + *puiXDMax = psSyncContext->psContextCtl->ui32MaxInUseMirroredSyncCPs; + OSSpinLockRelease(psSyncContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_fallback_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_fallback_server.c index c1916e2367f4..47abd65c83ab 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_fallback_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_fallback_server.c @@ -64,9 +64,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
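/*
 * Editor's note -- illustrative sketch only, not part of this patch.
 * The sync-checkpoint pool changes above replace the compile-time
 * SYNC_CHECKPOINT_POOL_SIZE/MASK pair with a runtime pool size that is
 * assumed to be a power of two, so the read/write indices can wrap with
 * "& (size - 1)" and "full" is detected when a put makes the write index
 * catch up with the read index. The standalone C below shows that scheme
 * with simplified, hypothetical names (pool_t, pool_get, pool_put); it is
 * not the driver's API and omits the spinlock the driver holds around
 * these operations.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    void    **slots;   /* array of 'size' entries; size is a power of two */
    uint32_t  size;
    uint32_t  rp;      /* read index  */
    uint32_t  wp;      /* write index */
    uint32_t  count;
    bool      full;
} pool_t;

/* Take one entry from the pool, or NULL if it is empty (or unused). */
static void *pool_get(pool_t *p)
{
    void *item;

    if (p->size == 0 || p->count == 0)
        return NULL;

    item    = p->slots[p->rp];
    p->rp   = (p->rp + 1) & (p->size - 1);  /* wrap: size is a power of two */
    p->count--;
    p->full = false;                        /* a get always clears "full" */
    return item;
}

/* Return an entry to the pool; false if the pool is full or unused. */
static bool pool_put(pool_t *p, void *item)
{
    if (p->size == 0 || p->full)
        return false;

    p->slots[p->wp] = item;
    p->wp = (p->wp + 1) & (p->size - 1);
    p->count++;
    /* The pool is full exactly when the write index catches the read index. */
    if (p->wp == p->rp)
        p->full = true;
    return true;
}

int main(void)
{
    void  *slot_storage[4] = { 0 };
    int    token = 42;
    pool_t p = { slot_storage, 4, 0, 0, 0, false };

    pool_put(&p, &token);                       /* count = 1 */
    return pool_get(&p) == &token ? 0 : 1;      /* 0 on success */
}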
#include "pdump_km.h" #include "allocmem.h" -#if defined(PVR_TESTING_UTILS) -#include "tutils_km.h" -#endif #include "ossecure_export.h" @@ -290,7 +287,6 @@ static IMG_BOOL _SyncFbFenceAddPt(PVRSRV_FENCE_SERVER *psFence, PVRSRV_SYNC_PT *psSyncPt); static PVRSRV_ERROR _SyncFbSWTimelineFenceCreate(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_TIMELINE_SERVER *psTl, - IMG_UINT32 uiFenceNameSize, const IMG_CHAR *pszFenceName, PVRSRV_FENCE_SERVER **ppsOutputFence, IMG_UINT64 *pui64SyncPtIdx); @@ -555,6 +551,8 @@ static void _SyncFbDebugRequest(IMG_HANDLE hDebugRequestHandle, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile) { + PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle); + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { IMG_UINT32 i; @@ -648,6 +646,8 @@ static void _SyncFbTimelineUpdate_NotifyCMD(void *psSyncFbContext) PDLLIST_NODE psCurrentPt, psNextPt; IMG_BOOL bSignalled = IMG_FALSE, bSignal = IMG_FALSE; + PVR_UNREFERENCED_PARAMETER(psSyncFbContext); + PVR_DPF_ENTERED; /* Outer loop over all timelines */ @@ -822,7 +822,7 @@ PVRSRV_ERROR SyncFbRegisterSyncFunctions(void) gsSyncFbContext.sSyncCheckpointReg.pfnNoHWUpdateTimelines = &_SyncFbTimelineUpdate_NotifyCMD; gsSyncFbContext.sSyncCheckpointReg.pfnFreeCheckpointListMem = OSFreeMem; gsSyncFbContext.sSyncCheckpointReg.pfnDumpInfoOnStalledUFOs = &SyncFbDumpInfoOnStalledUFOs; - OSStringLCopy(gsSyncFbContext.sSyncCheckpointReg.pszImplName, "SyncFb", SYNC_CHECKPOINT_IMPL_MAX_STRLEN); + OSStringSafeCopy(gsSyncFbContext.sSyncCheckpointReg.pszImplName, "SyncFb", SYNC_CHECKPOINT_IMPL_MAX_STRLEN); #if defined(PDUMP) gsSyncFbContext.sSyncCheckpointReg.pfnSyncFenceGetCheckpoints = &SyncFbFenceGetCheckpoints; #endif @@ -861,12 +861,12 @@ PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psNewDeviceEntry = OSAllocMem(sizeof(*psNewDeviceEntry)); PVR_GOTO_IF_NOMEM(psNewDeviceEntry, eError, e4); + psNewDeviceEntry->psDevice = psDeviceNode; + OSLockAcquire(gsSyncFbContext.hFbContextLock); dllist_add_to_tail(&gsSyncFbContext.sDeviceList, &psNewDeviceEntry->sDeviceListNode); OSLockRelease(gsSyncFbContext.hFbContextLock); - psNewDeviceEntry->psDevice = psDeviceNode; - eError = PVRSRVRegisterDeviceDbgRequestNotify(&psNewDeviceEntry->hDBGNotify, psDeviceNode, _SyncFbDebugRequest, @@ -887,6 +887,7 @@ PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) e3: OSLockDestroy(gsSyncFbContext.hFbContextLock); e2: + HASH_Delete_Extended(gsSyncFbContext.sCheckpointHashTable, IMG_TRUE); OSEventObjectDestroy(gsSyncFbContext.hSyncEventObject); e1: PVR_DPF_RETURN_RC(eError); @@ -932,7 +933,7 @@ PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRVUnregisterCmdCompleteNotify(gsSyncFbContext.hCMDNotify); - + HASH_Delete_Extended(gsSyncFbContext.sCheckpointHashTable, IMG_TRUE); eError = OSEventObjectDestroy(gsSyncFbContext.hSyncEventObject); PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); @@ -1346,7 +1347,7 @@ PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1, { if (pszFenceName) { - OSStringLCopy(psNewFence->pszName, + OSStringSafeCopy(psNewFence->pszName, pszFenceName, SYNC_FB_FENCE_MAX_LENGTH); } @@ -1540,6 +1541,9 @@ PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence, PVRSRV_ERROR eError = PVRSRV_OK; IMG_UINT32 i; + PVR_UNREFERENCED_PARAMETER(uiFileNameLength); + PVR_UNREFERENCED_PARAMETER(uiModuleLength); + PVR_UNREFERENCED_PARAMETER(uiDescLength); PVR_DPF_ENTERED1(psFence); @@ -1548,6 +1552,8 @@ PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence, 
PVR_LOG((" %s (%s:%u)", pszModule, pszFile, uiLine)); #else PVR_LOG((" %s (location only available in debug build)", pszModule)); + PVR_UNREFERENCED_PARAMETER(pszFile); + PVR_UNREFERENCED_PARAMETER(uiLine); #endif PVR_LOG((" Desc: %s", pszDesc)); PVR_LOG(("---------------- FENCE ----------------")); @@ -1604,7 +1610,7 @@ static PVRSRV_ERROR _SyncFbTimelineCreate(PFN_SYNC_PT_HAS_SIGNALLED pfnHasPtSign { if (pszTimelineName) { - OSStringLCopy((IMG_CHAR*) psNewTl->pszName, + OSStringSafeCopy((IMG_CHAR*) psNewTl->pszName, pszTimelineName, SYNC_FB_TIMELINE_MAX_LENGTH); } @@ -1618,8 +1624,6 @@ static PVRSRV_ERROR _SyncFbTimelineCreate(PFN_SYNC_PT_HAS_SIGNALLED pfnHasPtSign dllist_init(&psNewTl->sSyncActiveList); dllist_init(&psNewTl->sTlList); - _SyncFbFTimelineListAdd(psNewTl); - psNewTl->sTlOps.pfnSyncPtHasSignalled = pfnHasPtSignalled; psNewTl->iUID = (IMG_INT64)(uintptr_t) psNewTl; /* Not unique throughout the driver lifetime */ OSAtomicWrite(&psNewTl->iSeqNum, 0); @@ -1629,6 +1633,8 @@ static PVRSRV_ERROR _SyncFbTimelineCreate(PFN_SYNC_PT_HAS_SIGNALLED pfnHasPtSign /* Set initial refcount value */ TL_REF_SET(&psNewTl->iRef, 1, psNewTl); + _SyncFbFTimelineListAdd(psNewTl); + *ppsTimeline = psNewTl; PVR_DPF_RETURN_RC1(PVRSRV_OK, psNewTl); @@ -1858,7 +1864,7 @@ PVRSRV_ERROR SyncFbFenceCreatePVR(PPVRSRV_DEVICE_NODE psDeviceNode, OSLockRelease(gsSyncFbContext.hFbContextLock); /* Init Fence */ - OSStringLCopy(psNewFence->pszName, + OSStringSafeCopy(psNewFence->pszName, pszName, SYNC_FB_FENCE_MAX_LENGTH); @@ -2371,7 +2377,6 @@ PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize, /*****************************************************************************/ static PVRSRV_ERROR _SyncFbSWTimelineFenceCreate(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_TIMELINE_SERVER *psTl, - IMG_UINT32 uiFenceNameSize, const IMG_CHAR *pszFenceName, PVRSRV_FENCE_SERVER **ppsOutputFence, IMG_UINT64 *pui64SyncPtIdx) @@ -2456,7 +2461,7 @@ static PVRSRV_ERROR _SyncFbSWTimelineFenceCreate(PVRSRV_DEVICE_NODE *psDeviceNod if (pszFenceName) { /* Init Fence */ - OSStringLCopy(psNewFence->pszName, + OSStringSafeCopy(psNewFence->pszName, pszFenceName, SYNC_FB_FENCE_MAX_LENGTH); } @@ -2531,7 +2536,6 @@ PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_GOTO_IF_ERROR(eError, e0); eError = _SyncFbSWTimelineFenceCreate(psDeviceNode, psTl, - OSStringLength(pszFenceName), pszFenceName, &psNewFence, pui64SyncPtIdx); @@ -2581,10 +2585,10 @@ PVRSRV_ERROR SyncFbFenceCreateSW(CONNECTION_DATA *psConnection, PVRSRV_ERROR eError; PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(uiFenceNameSize); eError = _SyncFbSWTimelineFenceCreate(psDeviceNode, psTimeline, - 0, pszFenceName, ppsOutputFence, pui64SyncPtIdx); @@ -2904,6 +2908,8 @@ PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection, PVR_DPF_ENTERED1(psFence); + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); PVR_UNREFERENCED_PARAMETER(ppsSecureConnection); eError = _SyncFbFenceExport(psFence, &psExport); @@ -2942,7 +2948,6 @@ PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection, eError = _SyncFbFenceImport(psImport, ppsFence); - PVR_DPF_RETURN_OK; err_out: PVR_DPF_RETURN_RC(eError); } @@ -2952,187 +2957,3 @@ PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection, /* TESTING FUNCTIONS */ /* */ /*****************************************************************************/ -#if defined(PVR_TESTING_UTILS) - -static void _GetCheckContext(PVRSRV_DEVICE_NODE 
*psDevNode, - PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext) -{ - *ppsSyncCheckpointContext = psDevNode->hSyncCheckpointContext; -} - -PVRSRV_ERROR TestIOCTLSyncFbFenceSignalPVR(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - void *psFenceIn) -{ - PVRSRV_ERROR eError; - PVRSRV_SYNC_PT *psSyncPt; - IMG_UINT32 i; - PVRSRV_FENCE_SERVER *psFence = psFenceIn; - - PVR_DPF_ENTERED; - - for (i = 0; i < psFence->uiNumSyncs; i++) - { - psSyncPt = psFence->apsFenceSyncList[i]; - OSAtomicWrite(&psSyncPt->iStatus, PVRSRV_SYNC_SIGNALLED); - - OSLockAcquire(psSyncPt->psTl->hTlLock); - eError = _SyncFbSyncPtSignalAttached(psSyncPt, PVRSRV_SYNC_SIGNALLED); - PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbSyncPtSignalAttached", eSignal); - - OSLockRelease(psSyncPt->psTl->hTlLock); - } - - eError = _SyncFbSignalEO(); - PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbSignalEO", eExit); - - PVR_DPF_RETURN_OK; - -eSignal: - OSLockRelease(psSyncPt->psTl->hTlLock); -eExit: - PVR_DPF_RETURN_RC(eError); -} - - -PVRSRV_ERROR TestIOCTLSyncFbFenceCreatePVR(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 uiNameLength, - const IMG_CHAR *pszName, - PVRSRV_TIMELINE iTL, - PVRSRV_FENCE *piOutFence) -{ - PSYNC_CHECKPOINT_CONTEXT psContext = NULL; - PSYNC_CHECKPOINT psCheckpoint; - PVRSRV_FENCE iFence; - PVRSRV_ERROR eError; - IMG_UINT64 uiFenceUID; - - PVR_DPF_ENTERED; - - if (iTL == PVRSRV_NO_TIMELINE) - { - WRN("Supplied invalid timeline, returning invalid fence!"); - *piOutFence = PVRSRV_NO_FENCE; - - eError = PVRSRV_OK; - goto e1; - } - - _GetCheckContext(psDevNode, - &psContext); - - eError = SyncFbFenceCreatePVR(psDevNode, - pszName, - iTL, - psContext, - &iFence, - &uiFenceUID, - NULL, - &psCheckpoint, - NULL, - NULL); - PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbFenceCreatePVR", e1); - - *piOutFence = iFence; - - PVR_DPF_RETURN_OK; - -e1: - PVR_DPF_RETURN_RC(eError); -} - -PVRSRV_ERROR TestIOCTLSyncFbFenceResolvePVR(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_FENCE iFence) -{ - PSYNC_CHECKPOINT_CONTEXT psContext = NULL; - PVRSRV_ERROR eError; - PSYNC_CHECKPOINT *apsChecks = NULL; - IMG_UINT32 uiNumChecks, i; - IMG_UINT64 uiFenceUID; - - PVR_DPF_ENTERED; - - _GetCheckContext(psDevNode, - &psContext); - - eError = SyncFbFenceResolvePVR(psContext, - iFence, - &uiNumChecks, - &apsChecks, - &uiFenceUID); - PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbFenceResolvePVR", eExit); - - /* Close Checkpoints */ - for (i = 0; i < uiNumChecks; i++) - { - SyncCheckpointFree(apsChecks[i]); - } - - OSFreeMem(apsChecks); - - PVR_DPF_RETURN_OK; - -eExit: - PVR_DPF_RETURN_RC(eError); -} - -PVRSRV_ERROR TestIOCTLSyncFbSWTimelineAdvance(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_TIMELINE iSWTl) -{ - PVRSRV_ERROR eError; - PVRSRV_TIMELINE_SERVER *psSWTl; - PVRSRV_HANDLE_BASE *psHB; - SYNC_TIMELINE_OBJ sSWTimelineObj; - - PVR_DPF_ENTERED; - - PVR_UNREFERENCED_PARAMETER(psConnection); - - eError = _SyncFbLookupProcHandle((IMG_HANDLE) (uintptr_t) iSWTl, - PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, - IMG_FALSE, - (void**) &psSWTl, - &psHB); - PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbLookupProcHandle", e0); - - sSWTimelineObj.pvTlObj = psSWTl; - sSWTimelineObj.hTimeline = iSWTl; - - eError = SyncSWTimelineAdvanceKM(psDevNode, &sSWTimelineObj); - PVR_LOG_GOTO_IF_ERROR(eError, "SyncSWTimelineAdvanceKM", e0); - - PVR_DPF_RETURN_OK; - -e0: - PVR_DPF_RETURN_RC(eError); -} - -PVRSRV_ERROR TestIOCTLSyncFbSWFenceCreate(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE 
*psDevNode, - PVRSRV_TIMELINE iTl, - IMG_UINT32 uiFenceNameLength, - const IMG_CHAR *pszFenceName, - PVRSRV_FENCE *piFence) -{ - PVRSRV_ERROR eError; - - PVR_DPF_ENTERED; - - PVR_UNREFERENCED_PARAMETER(psConnection); - - eError = SyncSWTimelineFenceCreateKM(psDevNode, - iTl, - pszFenceName, - piFence); - PVR_LOG_GOTO_IF_ERROR(eError, "SyncSWTimelineFenceCreateKM", e0); - - PVR_DPF_RETURN_OK; - -e0: - PVR_DPF_RETURN_RC(eError); -} - -#endif /* PVR_TESTING_UTILS */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_server.c index 41379100ac74..1746d946aca1 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/sync_server.c @@ -55,7 +55,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "sync.h" #include "sync_internal.h" #include "connection_server.h" -#include "htbuffer.h" +#include "htbserver.h" #include "rgxhwperf.h" #include "info_page.h" @@ -571,7 +571,7 @@ PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; /* Copy over the class name annotation */ - OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); + OSStringSafeCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); } else { @@ -781,6 +781,7 @@ PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block"); eError = psDevNode->pfnAllocUFOBlock(psDevNode, + sizeof(IMG_UINT32), &psNewSyncBlk->psMemDesc, &psNewSyncBlk->uiFWAddr.ui32Addr, &psNewSyncBlk->ui32BlockSize); @@ -833,15 +834,23 @@ PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk) } static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk, - IMG_UINT32 ui32Index) + IMG_UINT32 ui32Index) { - return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize); + if (psSyncBlk->ui32BlockSize == 0) + { + return IMG_FALSE; + } + + return (ui32Index < psSyncBlk->ui32BlockSize / sizeof(IMG_UINT32)); } PVRSRV_ERROR -PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, - IMG_UINT32 ui32Value) +PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, + IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value) { + PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncBlk != NULL, "psSyncBlk"); + if (_CheckSyncIndex(psSyncBlk, ui32Index)) { psSyncBlk->pui32LinAddr[ui32Index] = ui32Value; @@ -849,11 +858,12 @@ PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, } else { - PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for " - "0x%08X byte sync block (value 0x%08X)", - ui32Index, - psSyncBlk->ui32BlockSize, - ui32Value)); + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSyncPrimSetKM: Index %u out of range for " + "0x%08X byte sync block (value 0x%08X)", + ui32Index, + psSyncBlk->ui32BlockSize, + ui32Value)); return PVRSRV_ERROR_INVALID_PARAMS; } } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/tlintern.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/tlintern.c index 70d8b09d6e40..3bd6b0af1718 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/tlintern.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/tlintern.c @@ -307,37 +307,6 @@ PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc) PVR_DPF_RETURN_VAL(NULL); } -static inline IMG_BOOL IsDigit(IMG_CHAR c) 
-{ - return c >= '0' && c <= '9'; -} - -static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer, - IMG_UINT32 *pui32Number) -{ - IMG_CHAR acTmp[11] = {0}; /* max 10 digits */ - IMG_UINT32 ui32Result; - IMG_UINT i; - - for (i = 0; i < sizeof(acTmp) - 1; i++) - { - if (!IsDigit(*pszBuffer)) - break; - acTmp[i] = *pszBuffer++; - } - - /* if there are no digits or there is something after the number */ - if (i == 0 || *pszBuffer != '\0') - return IMG_FALSE; - - if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK) - return IMG_FALSE; - - *pui32Number = ui32Result; - - return IMG_TRUE; -} - IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], IMG_UINT32 ui32Max) @@ -366,7 +335,7 @@ IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, break; /* all of names are shorter than MAX and null terminated */ - OSStringLCopy(aaszStreams[ui32Count], psn->psStream->szName, + OSStringSafeCopy(aaszStreams[ui32Count], psn->psStream->szName, PRVSRVTL_MAX_STREAM_NAME_SIZE); } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/tlserver.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/tlserver.c index c250dd3dc618..5c8359271508 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/tlserver.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/tlserver.c @@ -270,6 +270,8 @@ TLServerCloseStreamKM(PTL_STREAM_DESC psSD) IMG_BOOL bDestroyStream; IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ? IMG_TRUE : IMG_FALSE; + IMG_BOOL bNoOpenCB = psSD->ui32Flags & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ? + IMG_TRUE : IMG_FALSE; PVR_DPF_ENTERED; @@ -329,6 +331,12 @@ TLServerCloseStreamKM(PTL_STREAM_DESC psSD) PVR_ASSERT(psGD->uiClientCnt > 0); psGD->uiClientCnt--; + /* This callback is executed only on reader close. */ + if (!bIsWriteOnly && psStream->pfOnReaderCloseCallback != NULL && !bNoOpenCB) + { + psStream->pfOnReaderCloseCallback(psStream->pvOnReaderCloseUserData); + } + OSLockRelease (psGD->hTLGDLock); /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */ @@ -503,7 +511,7 @@ TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE); /* Find "tlctrl" stream and reset it */ - psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM); + psNode = TLFindStreamNodeByName(PVRSRV_TL_CTRL_STREAM); if (psNode != NULL) TLStreamReset(psNode->psStream); @@ -596,7 +604,7 @@ TLServerAcquireDataKM(PTL_STREAM_DESC psSD, TL_COUNTER_INC(psSD->ui32NoDataSleep); - LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US) + LOOP_UNTIL_TIMEOUT_US(NO_DATA_WAIT_PERIOD_US) { eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs); if (eError == PVRSRV_OK) @@ -625,7 +633,7 @@ TLServerAcquireDataKM(PTL_STREAM_DESC psSD, { /* Some other system error with event objects */ PVR_DPF_RETURN_RC(eError); } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (bDataFound) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/tlstream.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/tlstream.c index a80792e7bffe..29cc42a802fe 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/tlstream.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/tlstream.c @@ -49,6 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
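/*
 * Editor's note -- illustrative sketch only, not part of this patch.
 * The _CheckSyncIndex() change in sync_server.c above rewrites the bounds
 * test from "(index * sizeof(u32)) < block_size" to
 * "index < block_size / sizeof(u32)" with an explicit empty-block guard,
 * presumably so the check can never wrap on a 32-bit multiply and a
 * zero-sized block is always rejected. The standalone helper below shows
 * the same pattern with a simplified, hypothetical name
 * (sync_index_in_range); it is not the driver's API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when 'index' addresses a valid 32-bit word inside a block of
 * 'block_size' bytes; divides instead of multiplying, so it cannot wrap. */
static bool sync_index_in_range(uint32_t index, uint32_t block_size)
{
    if (block_size == 0)
        return false;

    return index < block_size / sizeof(uint32_t);
}

int main(void)
{
    /* A 16-byte block holds words 0..3; word 4 and a huge index are rejected. */
    printf("%d %d %d\n",
           sync_index_in_range(3, 16),           /* 1 */
           sync_index_in_range(4, 16),           /* 0 */
           sync_index_in_range(UINT32_MAX, 16)); /* 0 */
    return 0;
}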
#include "allocmem.h" #include "devicemem.h" #include "pvrsrv_error.h" +#include "sysinfo.h" #include "osfunc.h" #include "log2.h" @@ -57,7 +58,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv.h" -#define EVENT_OBJECT_TIMEOUT_US 1000000ULL +#if !defined(EVENT_OBJECT_TIMEOUT_US) +#error EVENT_OBJECT_TIMEOUT_US should be defined sysinfo.h +#endif + #define READ_PENDING_TIMEOUT_US 100000ULL /*! Compute maximum TL packet size for this stream. Max packet size will be @@ -155,7 +159,8 @@ PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream) PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); /* TL for now is only used by host driver, so cpulocal mem suffices */ + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL) | /* TL for now is only used by host driver, so cpulocal mem suffices */ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; /* Exit if memory has already been allocated. */ if (psStream->pbyBuffer != NULL) @@ -164,7 +169,6 @@ PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream) OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", psStream->szName); - /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster * accesses to CPU local memory. When the framework to access CPU_LOCAL device * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for @@ -225,6 +229,8 @@ TLStreamCreate(IMG_HANDLE *phStream, IMG_UINT32 ui32StreamFlags, TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, void *pvOnReaderOpenUD, + TL_STREAM_ONREADERCLOSECB pfOnReaderCloseCB, + void *pvOnReaderCloseUD, TL_STREAM_SOURCECB pfProducerCB, void *pvProducerUD) { @@ -273,7 +279,7 @@ TLStreamCreate(IMG_HANDLE *phStream, goto e0; } - OSStringLCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE); + OSStringSafeCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE); if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH) { @@ -306,6 +312,8 @@ TLStreamCreate(IMG_HANDLE *phStream, psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB; psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD; + psTmp->pfOnReaderCloseCallback = pfOnReaderCloseCB; + psTmp->pvOnReaderCloseUserData = pvOnReaderCloseUD; /* Remember producer supplied CB and data for later */ psTmp->pfProducerCallback = (void(*)(void))pfProducerCB; psTmp->pvProducerUserData = pvProducerUD; @@ -394,6 +402,8 @@ TLStreamCreate(IMG_HANDLE *phStream, void TLStreamReset(IMG_HANDLE hStream) { PTL_STREAM psStream = (PTL_STREAM) hStream; + IMG_HANDLE hEventWaitForWriterToComplete; + PVRSRV_ERROR eError; PVR_ASSERT(psStream != NULL); @@ -401,21 +411,26 @@ void TLStreamReset(IMG_HANDLE hStream) while (psStream->ui32Pending != NOTHING_PENDING) { - PVRSRV_ERROR eError; - /* We're in the middle of a write so we cannot reset the stream. * We are going to wait until the data is committed. Release lock while * we're here. */ OSLockRelease(psStream->hStreamWLock); + eError = OSEventObjectOpen(psStream->psNode->hReadEventObj, + &hEventWaitForWriterToComplete); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + /* Event when psStream->bNoSignalOnCommit is set we can still use * the timeout capability of event object API (time in us). 
*/ - eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100); + eError = OSEventObjectWaitTimeout(hEventWaitForWriterToComplete, 100); if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK) { - PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectWaitTimeout"); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectWaitTimeout", TimeoutError); } + eError = OSEventObjectClose(hEventWaitForWriterToComplete); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectClose"); + OSLockAcquire(psStream->hStreamWLock); /* Either timeout occurred or the stream has been signalled. @@ -431,6 +446,12 @@ void TLStreamReset(IMG_HANDLE hStream) /* we know that ui32Pending already has correct value (no need to set) */ OSLockRelease(psStream->hStreamWLock); + + return; + +TimeoutError: + eError = OSEventObjectClose(hEventWaitForWriterToComplete); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); } PVRSRV_ERROR @@ -1240,7 +1261,7 @@ TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld) } else { - eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL); + eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL); } if (PVRSRV_OK != eError) @@ -1623,3 +1644,43 @@ TLStreamGetBufferPointer(PTL_STREAM psStream) PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc); } + +/* + * Determine the maximum transfer size which will fit into the specified + * L2 consumer stream. + * This is a point-in-time snapshot which compares the given requested transfer + * size and returns the minimum of that vs the available buffer space within + * the L2 stream. This is the largest amount of data that can be successfully + * copied in the stream without truncation occurring. + * Worst-case scenario is that we copy fewer bytes than the maximum + * actually available, but we will never copy too much data. + */ +IMG_UINT32 +TLStreamGetMaxTransfer(IMG_UINT32 uiXferSize, IMG_HANDLE hConsumerStream) +{ + PTL_STREAM psConsumer = (PTL_STREAM)hConsumerStream; + IMG_UINT32 ui32MaxTransfer = 0U; + + IMG_UINT32 ui32Xfer; + + /* Local copies */ + IMG_UINT32 ui32Read = psConsumer->ui32Read; + IMG_UINT32 ui32Write = psConsumer->ui32Write; + + if (ui32Write >= ui32Read) + { + /* Can transfer Write .. 
End-of-buffer + Read bytes at start */ + ui32Xfer = psConsumer->ui32Size - ui32Write + ui32Read; + } + else + { + /* Can transfer Read - Write bytes maximum */ + ui32Xfer = ui32Read - ui32Write; + } + + PVR_ASSERT(ui32Xfer <= psConsumer->ui32Size); + + ui32MaxTransfer = MIN(ui32Xfer, uiXferSize); + + return ui32MaxTransfer; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_client.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_client.c index 966400c080ed..c989cea789ea 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_client.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_client.c @@ -79,18 +79,18 @@ PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) PVRSRV_ERROR eError; IMG_DEV_PHYADDR sDevPAddr; VMM_PVZ_CONNECTION *psVmmPvz; - PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; + PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]; eError = PhysHeapGetDevPAddr(psFwPhysHeap, &sDevPAddr); #if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) -{ - IMG_DEV_PHYADDR sDevPAddrTranslated; + { + IMG_DEV_PHYADDR sDevPAddrTranslated; - /* If required, perform a software translation between CPU and Device physical addresses. */ - PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr); - sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr; -} + /* If required, perform a software translation between CPU and Device physical addresses. */ + PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr); + sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr; + } #endif PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapGetDevPAddr"); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_server.c index 313b0b9b10f5..7e67d397a908 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/vmm_pvz_server.c @@ -55,29 +55,34 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. static inline void PvzServerLockAcquire(void) { +#if !defined(FPGA) PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); OSLockAcquire(psPVRSRVData->hPvzConnectionLock); +#endif } static inline void PvzServerLockRelease(void) { +#if !defined(FPGA) PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); OSLockRelease(psPVRSRVData->hPvzConnectionLock); +#endif } -#define VALIDATE_OSID_DEVID(ui32OSID, ui32DevID) do { \ - if ((ui32OSID >= RGX_NUM_OS_SUPPORTED) || (ui32OSID < RGXFW_GUEST_OSID_START)) \ +#define VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID) do { \ + if ((ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) || \ + (ui32DriverID < RGXFW_GUEST_DRIVER_ID_START)) \ { \ PVR_DPF((PVR_DBG_ERROR, \ "%s: Invalid OSID %u. 
Supported Guest OSID range: %u - %u", \ __func__, \ - ui32OSID, \ - RGXFW_GUEST_OSID_START, \ - RGX_NUM_OS_SUPPORTED-1)); \ + ui32DriverID, \ + RGXFW_GUEST_DRIVER_ID_START, \ + RGX_NUM_DRIVERS_SUPPORTED-1)); \ return PVRSRV_ERROR_INVALID_PARAMS; \ } \ - if (PVRSRVGetDeviceInstanceByOSId(ui32DevID) == NULL) \ + if (PVRSRVGetDeviceInstance(ui32DevID) == NULL) \ { \ PVR_DPF((PVR_DBG_ERROR, \ "%s: Invalid Device ID %u.", \ @@ -97,12 +102,12 @@ PvzServerLockRelease(void) */ PVRSRV_ERROR -PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, +PvzServerMapDevPhysHeap(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID, IMG_UINT64 ui64Size, IMG_UINT64 ui64PAddr) { -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) && !defined(FPGA) /* * Reject hypercall if called on a system configured at build time to * preallocate the Guest's firmware heaps from static carveout memory. @@ -114,18 +119,18 @@ PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, #else PVRSRV_ERROR eError = PVRSRV_OK; - VALIDATE_OSID_DEVID(ui32OSID, ui32DevID); + VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); PvzServerLockAcquire(); #if defined(SUPPORT_RGX) - if (IsVmOnline(ui32OSID, ui32DevID)) + if (IsVmOnline(ui32DriverID, ui32DevID)) { - PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(ui32DevID); + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstance(ui32DevID); IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr}; IMG_UINT32 sync; - eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32OSID, sDevPAddr, ui64Size); + eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32DriverID, sDevPAddr, ui64Size); PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0); /* Invalidate MMU cache in preparation for a kick from this Guest */ @@ -133,7 +138,13 @@ PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0); /* Everything is ready for the firmware to start interacting with this OS */ - eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE); + eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32DriverID, RGXFWIF_OS_ONLINE); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Paravirtualized request to map Guest%u's Firmware heap" + " rejected: Guest not allowed to run.", __func__, ui32DriverID)); + eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; } e0: #endif /* defined(SUPPORT_RGX) */ @@ -144,36 +155,36 @@ PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, } PVRSRV_ERROR -PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, +PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) { -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) && !defined(FPGA) /* * Reject hypercall if called on a system configured at built time to * preallocate the Guest's firmware heaps from static carveout memory. */ PVR_DPF((PVR_DBG_ERROR, - "%s: Host PVZ config: Does not match with Guest PVZ config\n" - " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); + "%s: Host PVZ config: Does not match with Guest PVZ config." 
+ " Host preallocates the Guest's FW physheap from static memory carveouts at startup.", __func__)); return PVRSRV_ERROR_INVALID_PVZ_CONFIG; #else PVRSRV_ERROR eError = PVRSRV_OK; - VALIDATE_OSID_DEVID(ui32OSID, ui32DevID); + VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); PvzServerLockAcquire(); #if defined(SUPPORT_RGX) - if (IsVmOnline(ui32OSID, ui32DevID)) + if (IsVmOnline(ui32DriverID, ui32DevID)) { - PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(ui32DevID); + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstance(ui32DevID); /* Order firmware to offload this OS' data and stop accepting commands from it */ - eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_OFFLINE); + eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32DriverID, RGXFWIF_OS_OFFLINE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0); - /* it is now safe to remove the Guest's memory mappings */ - RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + /* it is now safe to remove the Guest's memory mappings */ + RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); } e0: #endif @@ -193,28 +204,28 @@ PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, */ PVRSRV_ERROR -PvzServerOnVmOnline(IMG_UINT32 ui32OSID, +PvzServerOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) { PVRSRV_ERROR eError; - VALIDATE_OSID_DEVID(ui32OSID, ui32DevID); + VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); PvzServerLockAcquire(); - eError = PvzOnVmOnline(ui32OSID, ui32DevID); + eError = PvzOnVmOnline(ui32DriverID, ui32DevID); PvzServerLockRelease(); return eError; } PVRSRV_ERROR -PvzServerOnVmOffline(IMG_UINT32 ui32OSID, +PvzServerOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) { PVRSRV_ERROR eError; - VALIDATE_OSID_DEVID(ui32OSID, ui32DevID); + VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); PvzServerLockAcquire(); - eError = PvzOnVmOffline(ui32OSID, ui32DevID); + eError = PvzOnVmOffline(ui32DriverID, ui32DevID); PvzServerLockRelease(); return eError; @@ -227,7 +238,7 @@ PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, { PVRSRV_ERROR eError; - VALIDATE_OSID_DEVID(RGXFW_GUEST_OSID_START, ui32DevID); + VALIDATE_DRVID_DEVID(RGXFW_GUEST_DRIVER_ID_START, ui32DevID); PvzServerLockAcquire(); eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue, ui32DevID); PvzServerLockRelease(); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_pvz.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_pvz.c index 8a8b6a7ccd00..bb7c8f6426c6 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_pvz.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_pvz.c @@ -49,7 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv.h" #include "vz_vmm_pvz.h" -#if (RGX_NUM_OS_SUPPORTED > 1) +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) static PVRSRV_ERROR PvzConnectionValidate(void) { @@ -63,8 +63,7 @@ PvzConnectionValidate(void) if (psVmmPvz == NULL) { PVR_DPF((PVR_DBG_ERROR, - "%s: %s PVZ config: Unable to acquire PVZ connection", - __func__, PVRSRV_VZ_MODE_IS(GUEST) ? "Guest" : "Host")); + "%s: Unable to acquire PVZ connection", __func__)); eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; goto e0; } @@ -76,8 +75,8 @@ PvzConnectionValidate(void) * * This setup uses carve-out memory, has no hypercall mechanism & does not support * out-of-order initialisation of host/guest VMs/drivers. 
The host driver has all - * the information needed to initialize all OSIDs firmware state when it's loaded - * and its PVZ layer must mark all guest OSIDs as being online as part of its PVZ + * the information needed to initialize all Drivers firmware state when it's loaded + * and its PVZ layer must mark all guest Drivers as being online as part of its PVZ * initialisation. Having no out-of-order initialisation support, the guest driver * can only submit a workload to the device after the host driver has completely * initialized the firmware, the VZ hypervisor/VM setup must guarantee this. @@ -89,44 +88,32 @@ PvzConnectionValidate(void) * * This setup uses guest memory, has PVZ hypercall mechanism & supports out-of-order * initialisation of host/guest VMs/drivers. The host driver initializes only its - * own OSID-0 firmware state when its loaded and each guest driver will use its PVZ + * own Driver-0 firmware state when its loaded and each guest driver will use its PVZ * interface to hypercall to the host driver to both synchronise its initialisation * so it does not submit any workload to the firmware before the host driver has - * had a chance to initialize the firmware and to also initialize its own OSID-x + * had a chance to initialize the firmware and to also initialize its own Driver-x * firmware state. */ PVR_LOG(("Using dynamic PVZ bootstrap setup")); - - if (!PVRSRV_VZ_MODE_IS(GUEST) && - (psVmmPvz->sServerFuncTab.pfnMapDevPhysHeap == NULL || - psVmmPvz->sServerFuncTab.pfnUnmapDevPhysHeap == NULL)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Host PVZ config: Functions for mapping a Guest's heaps not implemented\n", __func__)); - eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; - } #endif PvzConnectionRelease(psVmmPvz); e0: return eError; } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -PVRSRV_ERROR PvzConnectionInit(void) +PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig) { PVRSRV_ERROR eError; - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -#if (RGX_NUM_OS_SUPPORTED == 1) -#if !defined(PVRSRV_NEED_PVR_DPF) - PVR_UNREFERENCED_PARAMETER(psPVRSRVData); -# endif - PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_OS_SUPPORTED > 1")); - PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode", - psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest")); +#if (RGX_NUM_DRIVERS_SUPPORTED == 1) + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. 
Please rebuild with RGX_NUM_DRIVERS_SUPPORTED > 1")); eError = PVRSRV_ERROR_NOT_SUPPORTED; goto e0; #else + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); if ((psPVRSRVData->hPvzConnection != NULL) && (psPVRSRVData->hPvzConnectionLock != NULL)) @@ -141,7 +128,7 @@ PVRSRV_ERROR PvzConnectionInit(void) PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); /* Create VM manager para-virtualization connection */ - eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection); + eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection, psDevConfig); if (eError != PVRSRV_OK) { OSLockDestroy(psPVRSRVData->hPvzConnectionLock); @@ -159,22 +146,40 @@ PVRSRV_ERROR PvzConnectionInit(void) return eError; } -void PvzConnectionDeInit(void) +void PvzConnectionDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) { PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDN; + IMG_BOOL bCanDestroyPvzData = IMG_TRUE; - if ((psPVRSRVData->hPvzConnection == NULL) && - (psPVRSRVData->hPvzConnectionLock == NULL)) + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + for (psDN = psPVRSRVData->psDeviceNodeList; psDN != NULL; psDN = psDN->psNext) { - PVR_DPF((PVR_DBG_MESSAGE, "PVzConnection already deinitialised.")); - return; + if ((psDN->psDevConfig != psDevConfig) && + (!PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDN))) + { + /* if any other virtual devices are present keep the pvz data */ + bCanDestroyPvzData = IMG_FALSE; + break; + } } + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); - VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection); - psPVRSRVData->hPvzConnection = NULL; + if (bCanDestroyPvzData) + { + if ((psPVRSRVData->hPvzConnection == NULL) && + (psPVRSRVData->hPvzConnectionLock == NULL)) + { + PVR_DPF((PVR_DBG_MESSAGE, "PVzConnection already deinitialised.")); + return; + } + + VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection, psDevConfig); + psPVRSRVData->hPvzConnection = NULL; - OSLockDestroy(psPVRSRVData->hPvzConnectionLock); - psPVRSRVData->hPvzConnectionLock = NULL; + OSLockDestroy(psPVRSRVData->hPvzConnectionLock); + psPVRSRVData->hPvzConnectionLock = NULL; + } } VMM_PVZ_CONNECTION* PvzConnectionAcquire(void) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_vm.c b/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_vm.c index 3a39a8ad98a0..bc737f5acd81 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_vm.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/common/vz_vmm_vm.c @@ -48,34 +48,47 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
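/*
 * Editor's note -- illustrative sketch only, not part of this patch.
 * The TLStreamGetMaxTransfer() routine added further above (tlstream.c)
 * takes a snapshot of the consumer stream's read/write offsets and clamps
 * a requested copy size to the free space in the circular buffer: when
 * write >= read the free bytes run from the write offset to the end of
 * the buffer plus the read offset's worth at the start, otherwise they
 * are simply read - write. The standalone helper below reproduces that
 * arithmetic with a simplified, hypothetical name (ring_max_transfer);
 * it is not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Largest number of bytes that can be copied into a circular buffer of
 * 'size' bytes whose current offsets are 'read' and 'write', without
 * exceeding the 'wanted' request. */
static uint32_t ring_max_transfer(uint32_t wanted, uint32_t size,
                                  uint32_t read, uint32_t write)
{
    uint32_t free_bytes;

    if (write >= read)
        free_bytes = size - write + read;  /* tail of buffer + wrapped head */
    else
        free_bytes = read - write;         /* gap between write and read */

    return min_u32(free_bytes, wanted);
}

int main(void)
{
    /* 1 KiB buffer, writer at 900, reader at 100: 224 bytes free,
     * so a 512-byte request is clamped to 224. */
    printf("%u\n", ring_max_transfer(512, 1024, 100, 900));
    return 0;
}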
#include "pvrsrv_error.h" #include "vz_vm.h" #include "rgxfwutils.h" +#include "rgxfwdbg.h" -bool IsVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID) +bool IsVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) { - PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstanceByOSId(ui32DevID); + PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstance(ui32DevID); - return BIT_ISSET(psDevNode->ui32VmState, ui32OSID); + if (psDevNode == NULL) + { + return false; + } + else + { + return BIT_ISSET(psDevNode->ui32VmState, ui32DriverID); + } } -PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID) +PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) { -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) +#if !defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1) PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; #else PVRSRV_ERROR eError = PVRSRV_OK; PVRSRV_DEVICE_NODE *psDevNode; - psDevNode = PVRSRVGetDeviceInstanceByOSId(ui32DevID); + psDevNode = PVRSRVGetDeviceInstance(ui32DevID); - if (BIT_ISSET(psDevNode->ui32VmState, ui32OSID)) + if (psDevNode == NULL) + { + eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND; + goto e0; + } + else if (BIT_ISSET(psDevNode->ui32VmState, ui32DriverID)) { PVR_DPF((PVR_DBG_ERROR, - "%s: OSID %u on Device %u is already enabled.", - __func__, ui32OSID, ui32DevID)); + "%s: DriverID %u on Device %u is already enabled.", + __func__, ui32DriverID, ui32DevID)); eError = PVRSRV_ERROR_INVALID_PARAMS; goto e0; } - if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT) + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_CREATED) { /* Firmware not initialized yet, do it here */ eError = PVRSRVCommonDeviceInitialise(psDevNode); @@ -94,11 +107,11 @@ PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID) goto e0; } - BIT_SET(psDevNode->ui32VmState, ui32OSID); + BIT_SET(psDevNode->ui32VmState, ui32DriverID); #if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) /* Everything is ready for the firmware to start interacting with this OS */ - eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE); + eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32DriverID, RGXFWIF_OS_ONLINE); #endif e0: @@ -106,32 +119,42 @@ PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID) return eError; } -PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID) +PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) { -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) +#if !defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1) PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; #else PVRSRV_ERROR eError = PVRSRV_OK; PVRSRV_DEVICE_NODE *psDevNode; PVRSRV_RGXDEV_INFO *psDevInfo; - psDevNode = PVRSRVGetDeviceInstanceByOSId(ui32DevID); + psDevNode = PVRSRVGetDeviceInstance(ui32DevID); - if (!BIT_ISSET(psDevNode->ui32VmState, ui32OSID)) + if (psDevNode == NULL) + { + eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND; + goto e0; + } + else if (!BIT_ISSET(psDevNode->ui32VmState, ui32DriverID)) { PVR_DPF((PVR_DBG_ERROR, - "%s: OSID %u on Device %u is already disabled.", - __func__, ui32OSID, ui32DevID)); + "%s: DriverID %u on Device %u is already disabled.", + __func__, ui32DriverID, ui32DevID)); eError = PVRSRV_ERROR_INVALID_PARAMS; goto e0; } psDevInfo = psDevNode->pvDevice; + if (psDevInfo == NULL) + { + eError = PVRSRV_ERROR_INVALID_DEVINFO; + goto e0; + } - eError = RGXFWSetFwOsState(psDevInfo, ui32OSID, RGXFWIF_OS_OFFLINE); + eError = 
RGXFWSetFwOsState(psDevInfo, ui32DriverID, RGXFWIF_OS_OFFLINE); if (eError == PVRSRV_OK) { - BIT_UNSET(psDevNode->ui32VmState, ui32OSID); + BIT_UNSET(psDevNode->ui32VmState, ui32DriverID); } e0: @@ -144,27 +167,41 @@ PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32DevID) { #if defined(SUPPORT_RGX) - PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstanceByOSId(ui32DevID); - PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_ERROR eError; + psDevNode = PVRSRVGetDeviceInstance(ui32DevID); + if (psDevNode == NULL) + { + eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND; + goto e0; + } + + psDevInfo = psDevNode->pvDevice; + if (psDevInfo == NULL) + { + eError = PVRSRV_ERROR_INVALID_DEVINFO; + goto e0; + } + switch (eVMMParamType) { - case VMM_CONF_PRIO_OSID0: - case VMM_CONF_PRIO_OSID1: - case VMM_CONF_PRIO_OSID2: - case VMM_CONF_PRIO_OSID3: - case VMM_CONF_PRIO_OSID4: - case VMM_CONF_PRIO_OSID5: - case VMM_CONF_PRIO_OSID6: - case VMM_CONF_PRIO_OSID7: + case VMM_CONF_PRIO_DRV0: + case VMM_CONF_PRIO_DRV1: + case VMM_CONF_PRIO_DRV2: + case VMM_CONF_PRIO_DRV3: + case VMM_CONF_PRIO_DRV4: + case VMM_CONF_PRIO_DRV5: + case VMM_CONF_PRIO_DRV6: + case VMM_CONF_PRIO_DRV7: { - IMG_UINT32 ui32OSID = eVMMParamType; + IMG_UINT32 ui32DriverID = eVMMParamType; IMG_UINT32 ui32Prio = ui32ParamValue; - if (ui32OSID < RGX_NUM_OS_SUPPORTED) + if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED) { - eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSID, ui32Prio); + eError = PVRSRVRGXFWDebugSetDriverPriorityKM(NULL, psDevNode, ui32DriverID, ui32Prio); } else { @@ -178,12 +215,70 @@ PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline); break; } + case VMM_CONF_ISOLATION_GROUP_DRV0: + case VMM_CONF_ISOLATION_GROUP_DRV1: + case VMM_CONF_ISOLATION_GROUP_DRV2: + case VMM_CONF_ISOLATION_GROUP_DRV3: + case VMM_CONF_ISOLATION_GROUP_DRV4: + case VMM_CONF_ISOLATION_GROUP_DRV5: + case VMM_CONF_ISOLATION_GROUP_DRV6: + case VMM_CONF_ISOLATION_GROUP_DRV7: + { + IMG_UINT32 ui32DriverID = eVMMParamType - VMM_CONF_ISOLATION_GROUP_DRV0; + IMG_UINT32 ui32IsolationGroup = ui32ParamValue; + + if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED) + { + eError = PVRSRVRGXFWDebugSetDriverIsolationGroupKM(NULL, psDevNode, ui32DriverID, ui32IsolationGroup); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + break; + } + case VMM_CONF_TIME_SLICE_DRV0: + case VMM_CONF_TIME_SLICE_DRV1: + case VMM_CONF_TIME_SLICE_DRV2: + case VMM_CONF_TIME_SLICE_DRV3: + case VMM_CONF_TIME_SLICE_DRV4: + case VMM_CONF_TIME_SLICE_DRV5: + case VMM_CONF_TIME_SLICE_DRV6: + case VMM_CONF_TIME_SLICE_DRV7: + { + IMG_UINT32 ui32DriverID = eVMMParamType - VMM_CONF_TIME_SLICE_DRV0; + IMG_UINT32 ui32TSPercentage = ui32ParamValue; + + if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED) + { + eError = PVRSRVRGXFWDebugSetDriverTimeSliceKM(NULL, psDevNode, ui32DriverID, ui32TSPercentage); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + break; + } + case VMM_CONF_TIME_SLICE_INTERVAL: + { + IMG_UINT32 ui32TSIntervalMs = ui32ParamValue; + + eError = PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(NULL, psDevNode, ui32TSIntervalMs); + break; + } + case VMM_CONF_VZ_CONNECTION_COOLDOWN_PERIOD: + { + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec = ui32ParamValue; + eError = RGXFWSetVzConnectionCooldownPeriod(psDevInfo, ui32VzConnectionCooldownPeriodInSec); + break; + } default: { eError = PVRSRV_ERROR_INVALID_PARAMS; } } +e0: 
return eError; #else PVR_UNREFERENCED_PARAMETER(eVMMParamType); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.c index 1b1f81d788bc..1de4f08dd2b6 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.c @@ -46,10 +46,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_bridge_init.h" #include "rgxdevice.h" -#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) PVRSRV_ERROR InitRGXTQ2Bridge(void); void DeinitRGXTQ2Bridge(void); -#endif PVRSRV_ERROR InitRGXCMPBridge(void); void DeinitRGXCMPBridge(void); #if defined(SUPPORT_RGXRAY_BRIDGE) @@ -57,55 +55,112 @@ PVRSRV_ERROR InitRGXRAYBridge(void); void DeinitRGXRAYBridge(void); #endif -PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo) +/* Reference counts for device-conditional + * bridges. This ensures that bridges remain + * valid while there are still devices using + * them. + */ +static ATOMIC_T i32RGXCMPBridgeRefCt; +static ATOMIC_T i32RGXTQ2BridgeRefCt; +#if defined(SUPPORT_RGXRAY_BRIDGE) +static ATOMIC_T i32RGXRayBridgeRefCt; +#endif +static IMG_BOOL bAtomicsInitialised = IMG_FALSE; + +void RGXBridgeDriverInit(void) +{ + if (!bAtomicsInitialised) + { + bAtomicsInitialised = IMG_TRUE; + OSAtomicWrite(&i32RGXCMPBridgeRefCt, 0); + OSAtomicWrite(&i32RGXTQ2BridgeRefCt, 0); +#if defined(SUPPORT_RGXRAY_BRIDGE) + OSAtomicWrite(&i32RGXRayBridgeRefCt, 0); +#endif + } +} + +PVRSRV_ERROR RGXRegisterBridges(PVRSRV_RGXDEV_INFO *psDevInfo) { PVRSRV_ERROR eError; + if (!bAtomicsInitialised) + { + eError = PVRSRV_ERROR_NOT_INITIALISED; + PVR_LOG_RETURN_IF_ERROR(eError, "RGXBridgeRefCts"); + } + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) { - eError = InitRGXCMPBridge(); - PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge"); + if (OSAtomicIncrement(&i32RGXCMPBridgeRefCt) == 1) + { + eError = InitRGXCMPBridge(); + if (eError != PVRSRV_OK) + { + OSAtomicDecrement(&i32RGXCMPBridgeRefCt); + } + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge"); + } } -#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) { - eError = InitRGXTQ2Bridge(); - PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge"); + if (OSAtomicIncrement(&i32RGXTQ2BridgeRefCt) == 1) + { + eError = InitRGXTQ2Bridge(); + if (eError != PVRSRV_OK) + { + OSAtomicDecrement(&i32RGXTQ2BridgeRefCt); + } + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge"); + } } -#endif #if defined(SUPPORT_RGXRAY_BRIDGE) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0) { - eError = InitRGXRAYBridge(); - PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXRAYBridge"); + if (OSAtomicIncrement(&i32RGXRayBridgeRefCt) == 1) + { + eError = InitRGXRAYBridge(); + if (eError != PVRSRV_OK) + { + OSAtomicDecrement(&i32RGXRayBridgeRefCt); + } + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXRAYBridge"); + } } #endif return PVRSRV_OK; } -void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo) +void RGXUnregisterBridges(PVRSRV_RGXDEV_INFO *psDevInfo) { if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) { - DeinitRGXCMPBridge(); + if (OSAtomicDecrement(&i32RGXCMPBridgeRefCt) == 0) + { + DeinitRGXCMPBridge(); + } } -#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) { - DeinitRGXTQ2Bridge(); + if 
(OSAtomicDecrement(&i32RGXTQ2BridgeRefCt) == 0) + { + DeinitRGXTQ2Bridge(); + } } -#endif #if defined(SUPPORT_RGXRAY_BRIDGE) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0) { - DeinitRGXRAYBridge(); + if (OSAtomicDecrement(&i32RGXRayBridgeRefCt) == 0) + { + DeinitRGXRAYBridge(); + } } #endif } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.h index 10e8e72ca095..8622848b51b1 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_bridge_init.h @@ -49,7 +49,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "device.h" #include "rgxdevice.h" -PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo); -void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo); +void RGXBridgeDriverInit(void); +PVRSRV_ERROR RGXRegisterBridges(PVRSRV_RGXDEV_INFO *psDevInfo); +void RGXUnregisterBridges(PVRSRV_RGXDEV_INFO *psDevInfo); #endif /* RGX_BRIDGE_INIT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_heaps_server.h similarity index 64% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_heaps_server.h index cfac0a9befd5..a1dd31113fa3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgx_heaps_server.h @@ -1,8 +1,7 @@ /*************************************************************************/ /*! @File -@Title RGX HW Performance header file +@Title RGX heap (server) definitions @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Header for the RGX HWPerf functions @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -40,32 +39,48 @@ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
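The reference counting added to RGXRegisterBridges()/RGXUnregisterBridges() above follows a first-user/last-user pattern: a bridge is initialised when its counter goes from 0 to 1, the increment is rolled back if initialisation fails, and the bridge is torn down when the last device drops its reference. A minimal self-contained sketch of that pattern, using C11 atomics and hypothetical bridge_init()/bridge_deinit() hooks rather than the driver's OSAtomic*/bridge APIs:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for InitRGXCMPBridge()/DeinitRGXCMPBridge(). */
static bool bridge_init(void)   { return true; }
static void bridge_deinit(void) { }

static atomic_int bridge_refcnt;

/* First caller performs the real init; later callers only take a reference. */
static bool bridge_get(void)
{
    if (atomic_fetch_add(&bridge_refcnt, 1) == 0)
    {
        if (!bridge_init())
        {
            atomic_fetch_sub(&bridge_refcnt, 1); /* roll back on failure */
            return false;
        }
    }
    return true;
}

/* Last caller to drop its reference tears the bridge down. */
static void bridge_put(void)
{
    if (atomic_fetch_sub(&bridge_refcnt, 1) == 1)
    {
        bridge_deinit();
    }
}

As in the patch, this assumes registration and unregistration are serialised by device create/destroy, so a second device cannot observe a non-zero count before initialisation has completed.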
*/ /**************************************************************************/ +#if !defined RGX_HEAPS_SERVER_H +#define RGX_HEAPS_SERVER_H -#ifndef RGXHWPERF_H_ -#define RGXHWPERF_H_ +#include "img_types.h" +#include "rgx_heaps.h" -#include "rgxhwperf_common.h" +/* + * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID + */ +#define RGX_HEAP_PAGE_SHIFTS_DEF \ + X(4KB, 12U) \ + X(16KB, 14U) \ + X(64KB, 16U) \ + X(256KB, 18U) \ + X(1MB, 20U) \ + X(2MB, 21U) -/****************************************************************************** - * RGX HW Performance Profiling API(s) - Volcanic specific - *****************************************************************************/ +typedef enum RGX_HEAP_PAGE_SHIFTS_TAG +{ +#define X(_name, _shift) RGX_HEAP_ ## _name ## _PAGE_SHIFT = _shift, + RGX_HEAP_PAGE_SHIFTS_DEF +#undef X +} RGX_HEAP_PAGE_SHIFTS; -PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( - CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT32 ui32CtrlWord, - IMG_UINT32 ui32ArrayLen, - RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs); +/* Base and size alignment 2MB */ +#define RGX_HEAP_BASE_SIZE_ALIGN 0x200000UL +#define RGX_GENERAL_SVM_BASE_SIZE_ALIGNMENT 0x8000UL -PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode, - RGXFWIF_HWPERF_CTL *psHWPerfCtl, - IMG_UINT32 ui32BlockID, - RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters); +/*************************************************************************/ /*! +@Function RGXGetValidHeapPageSizeMask +@Description Returns a bitmask indicating all supported virtual heap page sizes. -PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, - RGXFWIF_HWPERF_CTL *psHWPerfCtl, - IMG_UINT32 ui32ArrayLength, - IMG_UINT32 *pui32BlockCount, - IMG_UINT32 *pui32EnabledBlockIDs); +@Return IMG_UINT32 A 32-bit mask with enabled bits indicating valid + page sizes. +*/ /**************************************************************************/ +static inline IMG_UINT32 RGXGetValidHeapPageSizeMask(void) +{ + /* Generates a bit mask with the values of RGX_HEAP_PAGE_SHIFTS_DEF. + * 0 is required for the first shift to properly bitwise OR. 
*/ +#define X(_name, _shift) | (1 << _shift) + return 0 RGX_HEAP_PAGE_SHIFTS_DEF; +#undef X +} -#endif /* RGXHWPERF_H_ */ +#endif /* RGX_HEAPS_SERVER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.c index bc6891d6b4dc..902cbe0318d9 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.c @@ -55,7 +55,7 @@ PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_HANDLE hMemCtxPrivData, RGXFWIF_DM eFWDataMaster, - IMG_UINT32 ui32TempSpillingAddr, + IMG_UINT64 ui64TempSpillingAddr, IMG_UINT32 ui32BPAddr, IMG_UINT32 ui32HandlerAddr, IMG_UINT32 ui32DataMaster) @@ -67,7 +67,6 @@ PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, IMG_UINT32 ui32kCCBCommandSlot; PVR_UNREFERENCED_PARAMETER(psConnection); - PVR_UNREFERENCED_PARAMETER(ui32TempSpillingAddr); OSLockAcquire(psDevInfo->hBPLock); @@ -81,6 +80,7 @@ PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr; sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr; sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster; + sBPCmd.uCmdData.sBPData.ui64SpillAddr = ui64TempSpillingAddr; sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE; sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.h index 01962191d8c3..2470de250914 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbreakpoint.h @@ -69,7 +69,7 @@ PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_HANDLE hMemCtxPrivData, RGXFWIF_DM eFWDataMaster, - IMG_UINT32 ui32TempSpillingAddr, + IMG_UINT64 ui64TempSpillingAddr, IMG_UINT32 ui32BPAddr, IMG_UINT32 ui32HandlerAddr, IMG_UINT32 ui32DataMaster); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbvnc.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbvnc.c index dccc716ad636..9f82ff1260bb 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbvnc.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxbvnc.c @@ -2,7 +2,7 @@ @File @Title BVNC handling specific routines @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Functions used for BNVC related work +@Description Functions used for BVNC related work @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -46,11 +46,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
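The new rgx_heaps_server.h above builds its page-size mask with an X-macro: each X(name, shift) entry contributes both an enum constant and a (1 << shift) term, and the leading 0 in "return 0 RGX_HEAP_PAGE_SHIFTS_DEF;" gives the first | something to combine with. A stripped-down sketch of the same expansion with hypothetical sizes (not the driver's full list):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical shift list in the style of RGX_HEAP_PAGE_SHIFTS_DEF. */
#define PAGE_SHIFTS_DEF \
    X(4KB,  12U)        \
    X(64KB, 16U)        \
    X(2MB,  21U)

/* First expansion: one enum constant per entry. */
typedef enum
{
#define X(_name, _shift) PAGE_ ## _name ## _SHIFT = _shift,
    PAGE_SHIFTS_DEF
#undef X
} PAGE_SHIFTS;

/* Second expansion: OR the shifts into a bitmask. Expands to
 *   return 0 | (1u << 12U) | (1u << 16U) | (1u << 21U);
 */
static inline uint32_t valid_page_size_mask(void)
{
#define X(_name, _shift) | (1u << _shift)
    return 0 PAGE_SHIFTS_DEF;
#undef X
}

int main(void)
{
    printf("mask = 0x%08x\n", (unsigned int)valid_page_size_mask()); /* 0x00211000 */
    return 0;
}

RGXGetValidHeapPageSizeMask() in the header is the same construction applied to the real RGX_HEAP_PAGE_SHIFTS_DEF entries.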
#define RGXBVNC_C #include "rgx_bvnc_table_km.h" #undef RGXBVNC_C -#include "oskm_apphint.h" +#include "os_apphint.h" #include "pvrsrv.h" #include "pdump_km.h" #include "rgx_compat_bvnc.h" +#include "allocmem.h" +#define RGX_FEATURE_TRUE_VALUE_TYPE_UINT16 (RGX_FEATURE_VALUE_TYPE_UINT16 >> RGX_FEATURE_TYPE_BIT_SHIFT) +#define RGX_FEATURE_TRUE_VALUE_TYPE_UINT32 (RGX_FEATURE_VALUE_TYPE_UINT32 >> RGX_FEATURE_TYPE_BIT_SHIFT) #define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1) /* This function searches the given array for a given search value */ @@ -88,7 +91,76 @@ static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array, sizeof((t)[0])/sizeof(IMG_UINT64)) ) -#if defined(DEBUG) +#if !defined(NO_HARDWARE) +/*************************************************************************/ /*! +@brief This function reads the (P)BVNC core_ID register and extracts + the BVNC configuration. Supports the old scheme and the newer + PBVNC scheme. +@param psDeviceNode - Device Node pointer +@param ui32CoreNum - Core/bank number (0 for single core) +@param pB - Address of branch value (output) +@param pV - Address of version value (output) +@param pN - Address of number of clusters/scalable shading units value (output) +@param pC - Address of configuration value (output) +@return BVNC encoded in 64-bit value, 16-bits per field +*/ /**************************************************************************/ +static +IMG_UINT64 _RGXReadBVNCFromReg(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32CoreNum, + IMG_UINT32 *pB, IMG_UINT32 *pV, IMG_UINT32 *pN, IMG_UINT32 *pC) +{ + IMG_UINT64 ui64BVNC; + IMG_UINT32 B=0, V=0, N=0, C=0; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + +#if defined(RGX_CR_CORE_ID__PBVNC) + /* Core ID reading code for Rogue */ + + /* Read the BVNC, in to new way first, if B not set, use old scheme */ + ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC + (ui32CoreNum << 16)); + + if (GET_B(ui64BVNC)) + { + B = GET_PBVNC_B(ui64BVNC); + V = GET_PBVNC_V(ui64BVNC); + N = GET_PBVNC_N(ui64BVNC); + C = GET_PBVNC_C(ui64BVNC); + } + else + { + IMG_UINT64 ui32CoreID, ui32CoreRev; + ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION + (ui32CoreNum << 16)); + ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (ui32CoreNum << 16)); + B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >> + RGX_CR_CORE_REVISION_MAJOR_SHIFT; + V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >> + RGX_CR_CORE_REVISION_MINOR_SHIFT; + N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >> + RGX_CR_CORE_ID_CONFIG_N_SHIFT; + C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >> + RGX_CR_CORE_ID_CONFIG_C_SHIFT; + ui64BVNC = rgx_bvnc_pack(B, V, N, C); + } +#else + /* Core ID reading code for Volcanic */ + + ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (ui32CoreNum << 16)); + + B = (ui64BVNC & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >> + RGX_CR_CORE_ID_BRANCH_ID_SHIFT; + V = (ui64BVNC & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >> + RGX_CR_CORE_ID_VERSION_ID_SHIFT; + N = (ui64BVNC & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> + RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT; + C = (ui64BVNC & ~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >> + RGX_CR_CORE_ID_CONFIG_ID_SHIFT; +#endif + + *pB = B; *pV = V; *pN = N; *pC = C; + return ui64BVNC; +} +#endif + +#if defined(DEBUG) || defined(SUPPORT_PERFORMANCE_RUN) #define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \ if ( 
psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \ @@ -104,10 +176,12 @@ static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS); PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF: ", CDM_CONTROL_STREAM_FORMAT); PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE); -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META); +#if defined(RGX_FEATURE_META_COREMEM_BANKS_IDX) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS); +#endif PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE); +#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT); #endif PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES); @@ -119,8 +193,10 @@ static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NSPU: ", NUM_SPU); #endif PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH); -#if defined(RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX) +#if defined(RGX_FEATURE_SCALABLE_TE_ARCH_IDX) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH); +#endif +#if defined(RGX_FEATURE_SCALABLE_VCE_IDX) PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE); #endif PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS); @@ -175,40 +251,66 @@ static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) } #endif +#if !defined(ERNSBRNS_IDS_MAX_IDX) && !defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) + PVR_UNREFERENCED_PARAMETER(ui64Mask); + PVR_UNREFERENCED_PARAMETER(ui32IdOrNameIdx); +#endif + } #endif -static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg) +static PVRSRV_ERROR _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 *pui64Cfg) { IMG_UINT32 ui32Index; /* Read the feature values for the runtime BVNC */ for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++) { - IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; - IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64]; - IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64); + IMG_UINT16 ui16BitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; + IMG_UINT64 ui64PackedValues = pui64Cfg[2 + ui16BitPosition / 64]; + IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (ui16BitPosition % 64); - if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]) - { - if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) - { - psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED; - } - else - { - psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex]; - } - } - else + if (ui16ValueIndex >= gaFeaturesValuesMaxIndexes[ui32Index]) { /* This case should never be reached */ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID; PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex)); PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]); + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + + switch (ui16BitPosition >> RGX_FEATURE_TYPE_BIT_SHIFT) + { + case 
RGX_FEATURE_TRUE_VALUE_TYPE_UINT16: + { + IMG_UINT16 *pui16FeatureValues = (IMG_UINT16*)gaFeaturesValues[ui32Index]; + if (pui16FeatureValues[ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = + RGX_FEATURE_VALUE_DISABLED; + } + else + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = + pui16FeatureValues[ui16ValueIndex]; + } + break; + } + case RGX_FEATURE_TRUE_VALUE_TYPE_UINT32: + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = + ((IMG_UINT32*)gaFeaturesValues[ui32Index])[ui16ValueIndex]; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Feature with index %d has invalid feature type", + __func__, + ui32Index)); + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; } } + #if defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) /* Code path for Volcanic */ @@ -246,6 +348,7 @@ static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__)); PVR_ASSERT(0); + return PVRSRV_ERROR_FEATURE_DISABLED; } if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && @@ -267,74 +370,82 @@ static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); PVR_ASSERT(0); + return PVRSRV_ERROR_FEATURE_DISABLED; } #else /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */ - /* Code path for Rogue and Oceanic */ + /* Code path for Rogue */ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1; #if defined(SUPPORT_AGP) psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1); #endif - /* Meta feature not present in oceanic */ -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED; } -#endif /* Get the max number of dusts in the core */ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) { - psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); } else { /* This case should never be reached as all cores have clusters */ - psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID; + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); PVR_ASSERT(0); + return PVRSRV_ERROR_FEATURE_DISABLED; } #endif /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */ - /* Meta feature not present in oceanic */ -#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX) /* Transform the META coremem size info in bytes */ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) { psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024; } -#endif + + return PVRSRV_OK; } -static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) +static PVRSRV_ERROR _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) { const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC; void *pvAppHintState = NULL; IMG_UINT32 
ui32BVNCCount = 0; IMG_BOOL bRet; - IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE]; - IMG_CHAR *pszCurrentBVNC = szBVNCAppHint; - szBVNCAppHint[0] = '\0'; + IMG_CHAR *pszBVNCAppHint; + IMG_CHAR *pszCurrentBVNC; + pszBVNCAppHint = (IMG_CHAR *)OSAllocMem(RGXBVNC_BUFFER_SIZE); + if (pszBVNCAppHint == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + pszBVNCAppHint[0] = '\0'; - OSCreateKMAppHintState(&pvAppHintState); + pszCurrentBVNC = pszBVNCAppHint; - bRet = (IMG_BOOL)OSGetKMAppHintSTRING(APPHINT_NO_DEVICE, + OSCreateAppHintState(&pvAppHintState); + + bRet = (IMG_BOOL)OSGetAppHintSTRING(APPHINT_NO_DEVICE, pvAppHintState, RGXBVNC, pszAppHintDefault, - szBVNCAppHint, - sizeof(szBVNCAppHint)); + pszBVNCAppHint, + RGXBVNC_BUFFER_SIZE + ); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); - if (!bRet || (szBVNCAppHint[0] == '\0')) + if (!bRet || (pszBVNCAppHint[0] == '\0')) { - return; + OSFreeMem(pszBVNCAppHint); + return PVRSRV_OK; } - PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, pszBVNCAppHint)); while (*pszCurrentBVNC != '\0') { @@ -361,8 +472,9 @@ static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDe if (ui32BVNCCount == ui32RGXDevCount) { - OSStringLCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); - return; + OSStringSafeCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); + OSFreeMem(pszBVNCAppHint); + return PVRSRV_OK; } ui32BVNCCount++; @@ -376,8 +488,12 @@ static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDe * devices detected */ if (1 == ui32BVNCCount) { - OSStringLCopy(pszBVNC, szBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); + OSStringSafeCopy(pszBVNC, pszBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); } + + OSFreeMem(pszBVNCAppHint); + + return PVRSRV_OK; } /* Function that parses the BVNC List passed as module parameter */ @@ -389,13 +505,19 @@ static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB, { IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX]; IMG_CHAR *pcTemp, *pcNext; + PVRSRV_ERROR eError; aszBVNCString[0] = '\0'; /* 4 components of a BVNC string is B, V, N & C */ #define RGX_BVNC_INFO_PARAMS (4) - _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); + eError = _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); + + if (eError != PVRSRV_OK) + { + return eError; + } if ('\0' == aszBVNCString[0]) { @@ -475,7 +597,7 @@ static IMG_UINT32 _RGXBvncReadSLCSize(PVRSRV_DEVICE_NODE *psDeviceNode) IMG_UINT64 ui64SLCSize = 0ULL; #if defined(RGX_CR_SLC_SIZE_IN_KB) - /* Rogue and Oceanic hardware */ + /* Rogue hardware */ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_CONFIGURABLE)) { ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_SIZE_IN_KB); @@ -545,13 +667,13 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) void *pvAppHintState = NULL; const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, IgnoreHWReportedBVNC, &bAppHintDefault, &psDevInfo->bIgnoreHWReportedBVNC); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); } #if !defined(NO_HARDWARE) @@ -559,67 +681,22 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) /* Try to detect the RGX BVNC from the HW device */ if ((NULL == pui64Cfg) && 
!psDevInfo->bIgnoreHWReportedBVNC) { - IMG_UINT64 ui32ID; - IMG_BOOL bPowerDown = (psDeviceNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF); + IMG_BOOL bPowerDown = ! PVRSRVIsSystemPowered(psDeviceNode); /* Power-up the device as required to read the registers */ - if (bPowerDown) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && bPowerDown) { eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); } -#if defined(RGX_CR_CORE_ID__PBVNC) - /* Core ID reading code for Rogue */ - - /* Read the BVNC, in to new way first, if B not set, use old scheme */ - ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC); - - if (GET_B(ui32ID)) - { - B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >> - RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT; - V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >> - RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT; - N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> - RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT; - C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >> - RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT; - - } - else - { - IMG_UINT64 ui32CoreID, ui32CoreRev; - ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION); - ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID); - B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >> - RGX_CR_CORE_REVISION_MAJOR_SHIFT; - V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >> - RGX_CR_CORE_REVISION_MINOR_SHIFT; - N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >> - RGX_CR_CORE_ID_CONFIG_N_SHIFT; - C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >> - RGX_CR_CORE_ID_CONFIG_C_SHIFT; - } -#else - /* Core ID reading code for Volcanic */ - - ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID); - - B = (ui32ID & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >> - RGX_CR_CORE_ID_BRANCH_ID_SHIFT; - V = (ui32ID & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >> - RGX_CR_CORE_ID_VERSION_ID_SHIFT; - N = (ui32ID & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> - RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT; - C = (ui32ID & ~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >> - RGX_CR_CORE_ID_CONFIG_ID_SHIFT; -#endif + /* Read the BVNC from HW */ + _RGXReadBVNCFromReg(psDeviceNode, 0 /*core0*/, &B, &V, &N, &C); PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC " from HW device registers", B, V, N, C)); - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* Read the number of cores in the system for newer BVNC (Branch ID > 20) */ if (B > 20) @@ -632,7 +709,7 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) ui32SLCSize = _RGXBvncReadSLCSize(psDeviceNode); PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC Size reported as %u", __func__, ui32SLCSize)); - if (bPowerDown) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && bPowerDown) { eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); @@ -645,7 +722,7 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!"); } - else if (!PVRSRV_VZ_MODE_IS(GUEST)) + else if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* * On host OS we should not get here as CORE_ID should not be zero, so 
flag an error. @@ -707,7 +784,8 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) /* Parsing feature config depends on available features on the core * hence this parsing should always follow the above feature assignment */ psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1]; - _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg); + eError = _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg); + PVR_RETURN_IF_ERROR(eError); /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */ ui64BVNC = BVNC_PACK(B,V,N,C); @@ -744,7 +822,8 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes = ui32SLCSize; /* Message to confirm configuration look up was a success */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT) && + !PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { #if defined(NO_HARDWARE) { @@ -768,7 +847,7 @@ PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) ui32RGXDevCnt++; -#if defined(DEBUG) +#if defined(DEBUG) || defined(SUPPORT_PERFORMANCE_RUN) _RGXBvncDumpParsedConfig(psDeviceNode); #endif return PVRSRV_OK; @@ -818,6 +897,9 @@ PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64Give PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_ERROR eError = PVRSRV_OK; IMG_UINT64 ui64MatchBVNC; +#if !defined(NO_HARDWARE) + IMG_UINT32 B=0, V=0, N=0, C=0; +#endif IMG_UINT32 i; PVR_ASSERT(psDeviceNode != NULL); @@ -839,7 +921,12 @@ PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64Give else { /* use the value in CORE_ID for any zero elements in the BVNC */ - ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID) & ui64CoreIdMask); +#if !defined(NO_HARDWARE) + IMG_UINT64 ui64BVNC = _RGXReadBVNCFromReg(psDeviceNode, 0, &B, &V, &N, &C); + ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (ui64BVNC & ui64CoreIdMask); +#else + ui64MatchBVNC = 0; +#endif } PVR_LOG(("matchBVNC %d.%d.%d.%d", (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), @@ -851,22 +938,14 @@ PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64Give for (i = 0; i < NUM_RGX_CORE_IDS; ++i) { #if !defined(NO_HARDWARE) - IMG_UINT64 ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (i << 16)); - - PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i, - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); + IMG_UINT64 ui64BVNC = _RGXReadBVNCFromReg(psDeviceNode, i, &B, &V, &N, &C); + PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i, B, V, N, C)); if (ui64BVNC != ui64MatchBVNC) { eError = PVRSRV_ERROR_BVNC_MISMATCH; PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i, - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), - (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff), + B, V, N, C, (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), @@ -875,25 +954,6 @@ PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64Give } #endif -#if 
defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) - /* check upper DWORD */ - eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, - (RGX_CR_CORE_ID + 4) + (i << 16), - (IMG_UINT32)(ui64MatchBVNC >> 32), - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - if (eError == PVRSRV_OK) - { - /* check lower DWORD */ - eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, - RGX_CR_CORE_ID + (i << 16), - (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF), - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - } -#endif } return eError; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.c index 0146a6c20aeb..8c45398a97bb 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.c @@ -47,6 +47,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "allocmem.h" #include "devicemem.h" #include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "osfunc.h" #include "rgxccb.h" @@ -125,7 +126,6 @@ struct _RGX_CLIENT_CCB_ { DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */ IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */ IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */ - IMG_UINT32 ui32FinishedPDumpWriteOffset; /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */ IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */ IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */ IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */ @@ -176,36 +176,17 @@ PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); #endif - if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) - { - /* Draining CCB on a command that hasn't finished, and FW isn't expected - * to have updated Roff up to Woff. Only drain to the first - * finished command prior to this. The Roff for this - * is stored in ui32FinishedPDumpWriteOffset. - */ - ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset; - - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - ui32PDumpFlags, - "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)", - psClientCCB->szName, - psClientCCB, - ui32PollOffset); - } - else - { - /* Command to a finished CCB stream and FW is drained to empty - * out remaining commands until R==W. - */ - ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset; + /* Command to a finished CCB stream and FW is drained to empty + * out remaining commands until R==W. 
+ */ + ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset; - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - ui32PDumpFlags, - "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)", - psClientCCB->szName, - psClientCCB, - ui32PollOffset); - } + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, + ui32PDumpFlags, + "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)", + psClientCCB->szName, + psClientCCB, + ui32PollOffset); return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc, offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), @@ -296,13 +277,10 @@ static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset; -#if defined(SUPPORT_AGP) + psCCBCtl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset; -#if defined(SUPPORT_AGP4) psCCBCtl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset; psCCBCtl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset; -#endif -#endif PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, @@ -329,10 +307,12 @@ static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) { RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData; + PVRSRV_ERROR eError; #if defined(PDUMP) PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice; +#else + PVR_UNREFERENCED_PARAMETER(pvDevice); #endif - PVRSRV_ERROR eError; /* Block mode: * Here is block structure at transition (ui32BlockLength=N frames): @@ -395,6 +375,12 @@ static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_T } break; } + case PDUMP_TRANSITION_EVENT_RANGE_APPEND: + { + /* Capture range already entered. Skip dump sync and fast forward. + */ + break; + } case PDUMP_TRANSITION_EVENT_RANGE_EXITED: { /* Nothing to do */ @@ -523,6 +509,9 @@ static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) { IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage; + RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, + INVALIDATE); + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, psClientCCB->psClientCCBCtrl->ui32ReadOffset, psClientCCB->ui32Size); @@ -616,7 +605,9 @@ PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | +#if defined(DEBUG) + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | +#endif PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); @@ -645,31 +636,16 @@ PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN)) { PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; - PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap); + PHYS_HEAP_POLICY uiHeapPolicy = PhysHeapGetPolicy(psPhysHeap); psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize; - /* - * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks - * because another ui32NumVirtChunks/2 is already allocated. - * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. 
- */ - psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32)); - if (psClientCCB->pui32MappingTable == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto fail_alloc_mtable; - } - for (i = 0; i < ui32NumChunks; i++) - { - psClientCCB->pui32MappingTable[i] = i; - } - - if (eHeapType == PHYS_HEAP_TYPE_LMA || - eHeapType == PHYS_HEAP_TYPE_DMA) + if (uiHeapPolicy != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) { + psClientCCB->pui32MappingTable = NULL; /* - * On LMA sparse memory can't be mapped to kernel. + * On LMA sparse memory can't be mapped to kernel without support for non physically + * sparse allocations. * To work around this whole ccb memory is allocated at once as contiguous. */ eError = DevmemFwAllocate(psDevInfo, @@ -680,6 +656,23 @@ PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, } else { + /* + * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks + * because another ui32NumVirtChunks/2 is already allocated. + * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. + */ + psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32)); + if (psClientCCB->pui32MappingTable == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_mtable; + } + + for (i = 0; i < ui32NumChunks; i++) + { + psClientCCB->pui32MappingTable[i] = i; + } + eError = DevmemFwAllocateSparse(psDevInfo, ui32VirtualAllocSize, ui32NumChunks, @@ -789,16 +782,18 @@ PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, /* psClientCCBCtrlMemDesc was zero alloc'd so no need to initialise offsets. */ psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1; + /* Flush the whole struct since other parts are implicitly init (zero'd) */ + RGXFwSharedMemCacheOpPtr(psClientCCB->psClientCCBCtrl, + FLUSH); + PDUMPCOMMENT(psDevInfo->psDeviceNode, "cCCB control"); DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, 0, sizeof(RGXFWIF_CCCB_CTL), PDUMP_FLAGS_CONTINUOUS); - PVR_ASSERT(eError == PVRSRV_OK); psClientCCB->ui32HostWriteOffset = 0; psClientCCB->ui32LastPDumpWriteOffset = 0; - psClientCCB->ui32FinishedPDumpWriteOffset = 0; psClientCCB->ui32Size = ui32AllocSize; psClientCCB->ui32LastROff = ui32AllocSize - 1; psClientCCB->ui32ByteCount = 0; @@ -864,7 +859,7 @@ PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); #if defined(PVRSRV_ENABLE_CCCB_GROW) fail_alloc_ccb: - if ( psClientCCB->ui32VirtualAllocSize > 0) + if (psClientCCB->pui32MappingTable) { OSFreeMem(psClientCCB->pui32MappingTable); } @@ -914,9 +909,7 @@ static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, PVRSRV_ERROR eError; IMG_UINT32 i; -#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); -#endif for (i = 0; i < ui32AllocPageCount; i++) { @@ -929,28 +922,22 @@ static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, psClientCCB->pui32MappingTable, 0, NULL, -#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE) - SPARSE_MAP_CPU_ADDR | -#endif SPARSE_RESIZE_ALLOC); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)", PVRSRVGetErrorString(eError))); -#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, &psClientCCB->pvClientCCB) != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB 
mapping")); psClientCCB->pvClientCCB = NULL; } -#endif return eError; } -#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, &psClientCCB->pvClientCCB); if (eError != PVRSRV_OK) @@ -959,7 +946,6 @@ static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, PVRSRVGetErrorString(eError))); return eError; } -#endif return PVRSRV_OK; } @@ -984,6 +970,10 @@ PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSiz sure we insert a padding command now and wrap before adding the main command. */ + + /* Invalidate read offset */ + RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, + INVALIDATE); if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) { ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, @@ -1020,7 +1010,7 @@ PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSiz _RGXCCBUtilisationEvent(psClientCCB, PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB, ui32CmdSize); -#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ return PVRSRV_ERROR_RETRY; } @@ -1066,15 +1056,20 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, bPDumpEnabled && !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE)) { + PDUMP_TRANSITION_EVENT eTransitionEvent = BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN) ? + PDUMP_TRANSITION_EVENT_RANGE_APPEND : PDUMP_TRANSITION_EVENT_RANGE_ENTERED; + eError = PDumpTransition(psDeviceNode, psClientCCB->psPDumpConnectionData, - PDUMP_TRANSITION_EVENT_RANGE_ENTERED, + eTransitionEvent, ui32PDumpFlags); if (eError != PVRSRV_OK) { return eError; } } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); #endif /* Check that the CCB can hold this command + padding */ @@ -1099,6 +1094,7 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, minimum amount for the padding command) we will need to make sure we insert a padding command now and wrap before adding the main command. */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) { /* The command can fit without wrapping... */ @@ -1117,6 +1113,8 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, psClientCCB->ui32Size); #endif + RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, + INVALIDATE); ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, psClientCCB->psClientCCBCtrl->ui32ReadOffset, psClientCCB->ui32Size); @@ -1145,6 +1143,8 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; #if defined(PVRSRV_ENABLE_CCCB_GROW) + RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, + INVALIDATE); /* Check this is a growable CCB */ if (psClientCCB->ui32VirtualAllocSize > 0) { @@ -1186,7 +1186,7 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, { /* Grow CCB */ PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; - PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap); + PHYS_HEAP_POLICY uiHeapPolicy = PhysHeapGetPolicy(psPhysHeap); PVRSRV_ERROR eErr = PVRSRV_OK; /* Something went wrong if we are here a second time */ @@ -1194,12 +1194,12 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, OSLockAcquire(psClientCCB->hCCBGrowLock); /* - * On LMA sparse memory can't be mapped to kernel. 
+ * On LMA sparse memory can't be mapped to kernel without support for non physically + * sparse allocations. * To work around this whole ccb memory was allocated at once as contiguous. * In such case below sparse change is not needed because memory is already allocated. */ - if (eHeapType != PHYS_HEAP_TYPE_LMA && - eHeapType != PHYS_HEAP_TYPE_DMA) + if (uiHeapPolicy == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) { IMG_UINT32 ui32AllocChunkCount = psClientCCB->ui32Size / psClientCCB->ui32ChunkSize; @@ -1295,6 +1295,8 @@ PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, ui32CmdSize, psClientCCB->ui32Size); #endif + RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, + INVALIDATE); ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, psClientCCB->psClientCCBCtrl->ui32ReadOffset, psClientCCB->ui32Size); @@ -1347,11 +1349,15 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize, IMG_UINT32 ui32PDumpFlags) { + IMG_UINT32 ui32NewWriteOffset; + #if defined(PDUMP) PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags); IMG_BOOL bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags); +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); #endif #if defined(PVRSRV_ENABLE_CCCB_GROW) @@ -1404,6 +1410,8 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + PVR_ASSERT(ui32NumUFOs != 0); + psClientCCB->ui32UpdateEntries = 0; while (ui32NumUFOs-- > 0) { @@ -1420,6 +1428,8 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + PVR_ASSERT(ui32NumUFOs != 0); + while (ui32NumUFOs-- > 0) { PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); @@ -1459,13 +1469,14 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, psUFOPtr++; } } - else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR || - psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE) + else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR) { /* For all other UFO ops check the UFO address is not NULL. */ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + PVR_ASSERT(ui32NumUFOs != 0); + while (ui32NumUFOs-- > 0) { PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); @@ -1499,14 +1510,23 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, } #endif + + ui32NewWriteOffset = psClientCCB->ui32HostWriteOffset; /* * Update the CCB write offset. 
*/ - UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, - ui32CmdSize, - psClientCCB->ui32Size); + UPDATE_CCB_OFFSET(ui32NewWriteOffset, ui32CmdSize, + psClientCCB->ui32Size); psClientCCB->ui32ByteCount += ui32CmdSize; + /* Flush the CCB data */ + RGXFwSharedMemFlushCCB(psClientCCB->pvClientCCB, + psClientCCB->ui32HostWriteOffset, + ui32NewWriteOffset, + psClientCCB->psClientCCBCtrl->ui32WrapMask + 1); + + psClientCCB->ui32HostWriteOffset = ui32NewWriteOffset; + #if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) _RGXUpdateCCBUtilisation(psClientCCB); #endif @@ -1536,14 +1556,6 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, if (bPDumpEnabled) { - if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) - { - /* Store offset to last finished CCB command. This offset can - * be needed when appending commands to a non finished CCB. - */ - psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset; - } - /* Update the PDump write offset to show we PDumped this command */ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; } @@ -1555,14 +1567,10 @@ void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, */ psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; -#if defined(SUPPORT_AGP) psClientCCB->psClientCCBCtrl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset; -#if defined(SUPPORT_AGP4) psClientCCB->psClientCCBCtrl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset; psClientCCB->psClientCCBCtrl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset; #endif -#endif -#endif #if defined(PVRSRV_ENABLE_CCCB_GROW) OSLockRelease(psClientCCB->hCCBGrowLock); @@ -1602,43 +1610,23 @@ void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) { - PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - IMG_BOOL bCacheInval = IMG_TRUE; + IMG_UINT32 ui32FenceCmdSize = 0; + IMG_UINT32 ui32UpdateCmdSize = 0; + /* Init the generated data members */ psCmdHelperData->ui32FBSCInvalCmdSize = 0; psCmdHelperData->ui64FBSCEntryMask = 0; - psCmdHelperData->ui32FenceCmdSize = 0; - psCmdHelperData->ui32UpdateCmdSize = 0; psCmdHelperData->ui32PreTimeStampCmdSize = 0; psCmdHelperData->ui32PostTimeStampCmdSize = 0; psCmdHelperData->ui32RMWUFOCmdSize = 0; - /* Only compile if RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE is defined to avoid - * compilation errors on rogue cores. 
- */ -#if defined(RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) - bCacheInval = !(PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, USC_INSTRUCTION_CACHE_AUTO_INVALIDATE) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, TDM_SLC_MMU_AUTO_CACHE_OPS) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GEOM_SLC_MMU_AUTO_CACHE_OPS) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, FRAG_SLC_MMU_AUTO_CACHE_OPS) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, COMPUTE_SLC_MMU_AUTO_CACHE_OPS)) || - RGX_IS_BRN_SUPPORTED(psDevInfo, 71960) || - RGX_IS_BRN_SUPPORTED(psDevInfo, 72143); -#else - PVR_UNREFERENCED_PARAMETER(psDeviceNode); -#endif - /* Total FBSC invalidate command size (header plus command data) */ - if (bCacheInval) + if (ui64FBSCEntryMask != 0) { - if (ui64FBSCEntryMask != 0) - { - psCmdHelperData->ui32FBSCInvalCmdSize = + psCmdHelperData->ui32FBSCInvalCmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(psCmdHelperData->ui64FBSCEntryMask) + sizeof(RGXFWIF_CCB_CMD_HEADER)); - psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask; - } + psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask; } /* total DM command size (header plus command data) */ @@ -1648,14 +1636,14 @@ void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, if (ui32ClientFenceCount != 0) { - psCmdHelperData->ui32FenceCmdSize = + ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32ClientFenceCount * sizeof(RGXFWIF_UFO) + sizeof(RGXFWIF_CCB_CMD_HEADER)); } if (ui32ClientUpdateCount != 0) { - psCmdHelperData->ui32UpdateCmdSize = + ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) + sizeof(RGXFWIF_CCB_CMD_HEADER)); } @@ -1663,25 +1651,40 @@ void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, if (ppPreAddr && (ppPreAddr->ui32Addr != 0)) { psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) - + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)); + + PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN); } if (ppPostAddr && (ppPostAddr->ui32Addr != 0)) { psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) - + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)); + + PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN); } if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0)) { psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO); } + + psCmdHelperData->ui32TotalSize = + ui32FenceCmdSize + + psCmdHelperData->ui32FBSCInvalCmdSize + + psCmdHelperData->ui32DMCmdSize + + ui32UpdateCmdSize + + psCmdHelperData->ui32PreTimeStampCmdSize + + psCmdHelperData->ui32PostTimeStampCmdSize + + psCmdHelperData->ui32RMWUFOCmdSize; + + psCmdHelperData->ui32DMCmdOffset = + ui32FenceCmdSize + + psCmdHelperData->ui32PreTimeStampCmdSize + + psCmdHelperData->ui32FBSCInvalCmdSize; } /* Work out how much space this command will require */ -void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, +void RGXCmdHelperInitCmdCCB_OtherData(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32ClientFenceCount, PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, IMG_UINT32 *paui32FenceValue, @@ -1702,8 +1705,6 @@ void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, IMG_BOOL bCCBStateOpen, RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) { - PVRSRV_RGXDEV_INFO *psDevInfo = NULL; - /* Job reference values */ psCmdHelperData->ui32ExtJobRef = 
ui32ExtJobRef; psCmdHelperData->ui32IntJobRef = ui32IntJobRef; @@ -1712,9 +1713,8 @@ void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, psCmdHelperData->psClientCCB = psClientCCB; #if defined(PDUMP) psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags; - psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext); #else - PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); #endif psCmdHelperData->pszCommandName = pszCommandName; if (bCCBStateOpen) @@ -1759,8 +1759,14 @@ void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Workload Data added */ - psCmdHelperData->psWorkEstKickData = psWorkEstKickData; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + /* Workload Data added */ + psCmdHelperData->psWorkEstKickData = psWorkEstKickData; + } +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(psWorkEstKickData); #endif } @@ -1800,7 +1806,8 @@ void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ppRMWUFOAddr, psCmdHelperData); - RGXCmdHelperInitCmdCCB_OtherData(psClientCCB, + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + psClientCCB, ui32ClientFenceCount, pauiFenceUFOAddress, paui32FenceValue, @@ -1822,21 +1829,79 @@ void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo, psCmdHelperData); } +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +/*************************************************************************/ /*! +@Function RGXIsValidWorkloadEstCCBCommand + +@Description Checks if command type can be used for workload estimation + +@Input eType Command type to check + + +@Return IMG_BOOL +******************************************************************************/ +FORCE_INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType) +{ + switch (eType) + { + case RGXFWIF_CCB_CMD_TYPE_GEOM: + case RGXFWIF_CCB_CMD_TYPE_3D: + case RGXFWIF_CCB_CMD_TYPE_CDM: +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + case RGXFWIF_CCB_CMD_TYPE_RAY: +#endif + case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: + return IMG_TRUE; + default: + PVR_ASSERT(IMG_FALSE); + return IMG_FALSE; + } +} +#endif + +static inline void RGXWriteCmdHeader(void *pvCCB, IMG_UINT32 eCmdType, IMG_UINT32 ui32TotalSize, + IMG_UINT32 ui32ExtJobRef, IMG_UINT32 ui32IntJobRef, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData) +{ + RGXFWIF_CCB_CMD_HEADER sCmdHeader; + + sCmdHeader.eCmdType = eCmdType; + sCmdHeader.ui32CmdSize = ui32TotalSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + sCmdHeader.ui32ExtJobRef = ui32ExtJobRef; + sCmdHeader.ui32IntJobRef = ui32IntJobRef; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (psWorkEstKickData != NULL && + RGXIsValidWorkloadEstCCBCommand(eCmdType)) + { + sCmdHeader.sWorkEstKickData = *psWorkEstKickData; + } + else + { + sCmdHeader.sWorkEstKickData.ui16ReturnDataIndex = 0; + sCmdHeader.sWorkEstKickData.ui64Deadline = 0; + sCmdHeader.sWorkEstKickData.ui32CyclesPrediction = 0; + } +#else + PVR_UNREFERENCED_PARAMETER(psWorkEstKickData); +#endif + + OSCachedMemCopy(pvCCB, &sCmdHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); + +} + /* Reserve space in the CCB and fill in the command and client sync data */ PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) { - const IMG_UINT32 ui32MaxUFOCmdSize = RGX_CCB_FWALLOC_ALIGN((RGXFWIF_CCB_CMD_MAX_UFOS * sizeof(RGXFWIF_UFO)) + - sizeof(RGXFWIF_CCB_CMD_HEADER)); 
IMG_UINT32 ui32AllocSize = 0; IMG_UINT32 i; void *pvStartPtr; PVRSRV_ERROR eError; -#if defined(PDUMP) PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(asCmdHelperData->psClientCCB->psServerCommonContext); -#endif + RGXFWIF_UFO asUFOs[RGXFWIF_CCB_CMD_MAX_UFOS]; /* Check the number of fences & updates are valid. @@ -1845,8 +1910,8 @@ PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, { RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i]; - if (psCmdHelperData->ui32FenceCmdSize > ui32MaxUFOCmdSize || - psCmdHelperData->ui32UpdateCmdSize > ui32MaxUFOCmdSize) + if (psCmdHelperData->ui32ClientFenceCount > RGXFWIF_CCB_CMD_MAX_UFOS || + psCmdHelperData->ui32ClientUpdateCount > RGXFWIF_CCB_CMD_MAX_UFOS) { return PVRSRV_ERROR_TOO_MANY_SYNCS; } @@ -1909,57 +1974,55 @@ PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, /* Create the fence command. */ - if (psCmdHelperData->ui32FenceCmdSize) + if (psCmdHelperData->ui32ClientFenceCount > 0) { - RGXFWIF_CCB_CMD_HEADER *psHeader; IMG_UINT k, uiNextValueIndex; + IMG_UINT32 ui32FenceCmdSize = + RGX_CCB_FWALLOC_ALIGN(psCmdHelperData->ui32ClientFenceCount * sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); - psHeader = pvCmdPtr; - psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE; - - psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); - psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; - psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; - psHeader->sWorkEstKickData.ui64Deadline = 0; - psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; -#endif - - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + RGXWriteCmdHeader(pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_FENCE, + ui32FenceCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + NULL); /* Fill in the client fences */ uiNextValueIndex = 0; for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++) { - RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + PVR_ASSERT(k < RGXFWIF_CCB_CMD_MAX_UFOS); - psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; + asUFOs[k].puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; - if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(psCmdHelperData->pauiFenceUFOAddress[k].ui32Addr)) { - psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + asUFOs[k].ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; } else { /* Only increment uiNextValueIndex for non sync checkpoints * (as paui32FenceValue only contains values for sync prims) */ - psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; + asUFOs[k].ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; } - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); #if defined(SYNC_COMMAND_DEBUG) PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x", - psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); + psCmdHelperData->psClientCCB->szName, asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value)); #endif PDUMPCOMMENT(psDevInfo->psDeviceNode, ".. 
%s client sync fence - 0x%x -> 0x%x", psCmdHelperData->psClientCCB->szName, - psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value); } + + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + &asUFOs, psCmdHelperData->ui32ClientFenceCount * sizeof(RGXFWIF_UFO)); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, ui32FenceCmdSize); } /* @@ -1967,27 +2030,18 @@ PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, */ if (psCmdHelperData->ui32FBSCInvalCmdSize) { - RGXFWIF_CCB_CMD_HEADER *psHeader; - IMG_UINT64 *pui64FBSCInvalCmdData; - - /* pui8CmdPtr */ - psHeader = pvCmdPtr; - psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE; + RGXWriteCmdHeader(pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE, + psCmdHelperData->ui32FBSCInvalCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + NULL); - psHeader->ui32CmdSize = psCmdHelperData->ui32FBSCInvalCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); - psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; - psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; - psHeader->sWorkEstKickData.ui64Deadline = 0; - psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; -#endif - pui64FBSCInvalCmdData = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); - *pui64FBSCInvalCmdData = psCmdHelperData->ui64FBSCEntryMask; - /* leap over the FBSC invalidate command */ + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + &psCmdHelperData->ui64FBSCEntryMask, + sizeof(psCmdHelperData->ui64FBSCEntryMask)); pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32FBSCInvalCmdSize); - } /* @@ -1997,9 +2051,18 @@ PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, */ if (psCmdHelperData->ui32PreTimeStampCmdSize != 0) { - RGXWriteTimestampCommand(&pvCmdPtr, - RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP, - psCmdHelperData->pPreTimestampAddr); + RGXWriteCmdHeader(pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP, + psCmdHelperData->ui32PreTimeStampCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + NULL); + + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + &psCmdHelperData->pPreTimestampAddr.ui32Addr, + sizeof(psCmdHelperData->pPreTimestampAddr.ui32Addr)); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32PreTimeStampCmdSize); + } /* @@ -2007,129 +2070,109 @@ PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, */ if (psCmdHelperData->ui32DMCmdSize) { - RGXFWIF_CCB_CMD_HEADER *psHeader; - - psHeader = pvCmdPtr; - psHeader->eCmdType = psCmdHelperData->eType; - - psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); - psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; - psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; - -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - if (psCmdHelperData->psWorkEstKickData != NULL && - RGXIsValidWorkloadEstCCBCommand(psCmdHelperData->eType)) - { - psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData; - } - else - { - psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; - psHeader->sWorkEstKickData.ui64Deadline = 0; - psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; - } -#endif - - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); - - /* The buffer is write-combine, so no special device memory treatment required. 
*/ - OSCachedMemCopy(pvCmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize); - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32CmdSize); + PVR_ASSERT(psCmdHelperData->ui32DMCmdSize == sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHelperData->ui32CmdSize); + + RGXWriteCmdHeader(pvCmdPtr, + psCmdHelperData->eType, + psCmdHelperData->ui32DMCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) ? NULL : psCmdHelperData->psWorkEstKickData); + + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + psCmdHelperData->pui8DMCmd, + psCmdHelperData->ui32CmdSize); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32DMCmdSize); } if (psCmdHelperData->ui32PostTimeStampCmdSize != 0) { - RGXWriteTimestampCommand(&pvCmdPtr, - RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP, - psCmdHelperData->pPostTimestampAddr); + RGXWriteCmdHeader(pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP, + psCmdHelperData->ui32PostTimeStampCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + NULL); + + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + &psCmdHelperData->pPostTimestampAddr.ui32Addr, + sizeof(psCmdHelperData->pPostTimestampAddr.ui32Addr)); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32PostTimeStampCmdSize); + } if (psCmdHelperData->ui32RMWUFOCmdSize != 0) { - RGXFWIF_CCB_CMD_HEADER * psHeader; - RGXFWIF_UFO * psUFO; - - psHeader = (RGXFWIF_CCB_CMD_HEADER *) pvCmdPtr; - psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE; - psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); - psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; - psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; - psHeader->sWorkEstKickData.ui64Deadline = 0; - psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; -#endif - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); - - psUFO = (RGXFWIF_UFO *) pvCmdPtr; - psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr; - - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + RGXWriteCmdHeader(pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE, + psCmdHelperData->ui32RMWUFOCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + NULL); + + asUFOs[0].puiAddrUFO = psCmdHelperData->pRMWUFOAddr; + asUFOs[0].ui32Value = 0; + + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + asUFOs, sizeof(RGXFWIF_UFO)); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32RMWUFOCmdSize); } /* Create the update command. 
*/ - if (psCmdHelperData->ui32UpdateCmdSize) + if (psCmdHelperData->ui32ClientUpdateCount > 0) { - RGXFWIF_CCB_CMD_HEADER *psHeader; IMG_UINT k, uiNextValueIndex; - - psHeader = pvCmdPtr; - psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE; - psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); - psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; - psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; - psHeader->sWorkEstKickData.ui64Deadline = 0; - psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; -#endif - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + IMG_UINT32 ui32UpdateCmdSize = + RGX_CCB_FWALLOC_ALIGN(psCmdHelperData->ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + RGXWriteCmdHeader(pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_UPDATE, + ui32UpdateCmdSize, + psCmdHelperData->ui32ExtJobRef, + psCmdHelperData->ui32IntJobRef, + NULL); /* Fill in the client updates */ uiNextValueIndex = 0; for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++) { - RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + PVR_ASSERT(k < RGXFWIF_CCB_CMD_MAX_UFOS); - psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k]; - if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + asUFOs[k].puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k]; + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(psCmdHelperData->pauiUpdateUFOAddress[k].ui32Addr)) { - psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + asUFOs[k].ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; } else { /* Only increment uiNextValueIndex for non sync checkpoints * (as paui32UpdateValue only contains values for sync prims) */ - psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++]; + asUFOs[k].ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++]; } - pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); #if defined(SYNC_COMMAND_DEBUG) PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x", - psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); + psCmdHelperData->psClientCCB->szName, asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value)); #endif PDUMPCOMMENT(psDevInfo->psDeviceNode, ".. 
%s client sync update - 0x%x -> 0x%x", psCmdHelperData->psClientCCB->szName, - psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value); } + + OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), + &asUFOs, psCmdHelperData->ui32ClientUpdateCount * sizeof(RGXFWIF_UFO)); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, ui32UpdateCmdSize); } /* Set the start pointer for the next iteration around the loop */ - pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr, - psCmdHelperData->ui32FenceCmdSize + - psCmdHelperData->ui32FBSCInvalCmdSize + - psCmdHelperData->ui32PreTimeStampCmdSize + - psCmdHelperData->ui32DMCmdSize + - psCmdHelperData->ui32PostTimeStampCmdSize + - psCmdHelperData->ui32RMWUFOCmdSize + - psCmdHelperData->ui32UpdateCmdSize ); + pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr, psCmdHelperData->ui32TotalSize); if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) { @@ -2158,7 +2201,7 @@ void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, { IMG_UINT32 ui32AllocSize = 0; IMG_UINT32 i; -#if defined(__linux__) +#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced(); IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced(); #endif @@ -2168,12 +2211,13 @@ void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, */ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); /* - For each command fill in the server sync info + For each command write PDump comments and emit FTrace events for fence + checks and updates if they exist. */ for (i=0;ipsClientCCB->psServerCommonContext); #endif @@ -2181,7 +2225,7 @@ void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, PVR_UNREFERENCED_PARAMETER(psCmdHelperData); #endif -#if defined(__linux__) +#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) if (bTraceChecks) { trace_rogue_fence_checks(psCmdHelperData->pszCommandName, @@ -2204,6 +2248,9 @@ void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, psCmdHelperData->pauiUpdateUFOAddress, psCmdHelperData->paui32UpdateValue); } +#else + PVR_UNREFERENCED_PARAMETER(pcszDMName); + PVR_UNREFERENCED_PARAMETER(ui32CtxAddr); #endif /* @@ -2234,14 +2281,7 @@ IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, */ for (i = 0; i < ui32CmdCount; i++) { - ui32AllocSize += - asCmdHelperData[i].ui32FenceCmdSize + - asCmdHelperData[i].ui32FBSCInvalCmdSize + - asCmdHelperData[i].ui32DMCmdSize + - asCmdHelperData[i].ui32UpdateCmdSize + - asCmdHelperData[i].ui32PreTimeStampCmdSize + - asCmdHelperData[i].ui32PostTimeStampCmdSize + - asCmdHelperData[i].ui32RMWUFOCmdSize; + ui32AllocSize += asCmdHelperData[i].ui32TotalSize; } return ui32AllocSize; @@ -2251,30 +2291,13 @@ IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, IMG_UINT32 ui32Cmdindex) { - IMG_UINT32 ui32Offset = 0; - IMG_UINT32 i; - - for (i = 0; i < ui32Cmdindex; i++) - { - ui32Offset += - asCmdHelperData[i].ui32FenceCmdSize + - asCmdHelperData[i].ui32FBSCInvalCmdSize + - asCmdHelperData[i].ui32DMCmdSize + - asCmdHelperData[i].ui32UpdateCmdSize + - asCmdHelperData[i].ui32PreTimeStampCmdSize + - asCmdHelperData[i].ui32PostTimeStampCmdSize + - asCmdHelperData[i].ui32RMWUFOCmdSize; - } - - return ui32Offset; + return RGXCmdHelperGetCommandSize(ui32Cmdindex, asCmdHelperData); } /* Returns the offset of the data master command from a write offset */ IMG_UINT32 
RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) { - return psCmdHelperData->ui32FenceCmdSize + - psCmdHelperData->ui32PreTimeStampCmdSize + - psCmdHelperData->ui32FBSCInvalCmdSize; + return psCmdHelperData->ui32DMCmdOffset; } static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) @@ -2294,11 +2317,9 @@ static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE"; case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR"; case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY"; - case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE"; case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP"; case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE"; case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP"; - case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE"; case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING"; default: @@ -2333,6 +2354,8 @@ PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *p * If we use the wrong value, we might incorrectly determine that the offsets are invalid. */ ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, + INVALIDATE); psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset; @@ -2359,6 +2382,8 @@ PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *p /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle). * Guest drivers do not initialize psRGXFWIfFwSysData, so they assume FW internal state is ON. */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, + INVALIDATE); if (((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON)) && (psDevInfo->ui32SLRHoldoffCounter == 0)) { @@ -2460,6 +2485,8 @@ void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, #if defined(PVRSRV_ENABLE_CCCB_GROW) OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); #endif + RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, + INVALIDATE); psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset; @@ -2481,6 +2508,15 @@ void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_DUMPDEBUG_LOG(" `--"); } + if (ui32Offset > ui32WrapMask) + { + PVR_DUMPDEBUG_LOG(" `--"); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif + return; + } + while (ui32Offset != ui32EndOffset) { RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset); @@ -2511,7 +2547,6 @@ void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, switch (psCmdHeader->eCmdType) { case RGXFWIF_CCB_CMD_TYPE_UPDATE: - case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: case RGXFWIF_CCB_CMD_TYPE_FENCE: case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: { @@ -2543,7 +2578,6 @@ void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, break; } case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: - case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: { for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) { @@ -2575,6 +2609,15 @@ void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, default: break; } + + /* Check the command size is valid, otherwise if corruption was present this loop might hang... 
*/ + if ((ui32Offset == ui32NextOffset) || + ((ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) > ui32WrapMask+1)) + { + PVR_DUMPDEBUG_LOG(" `--Invalid CCB offset!"); + break; + } + ui32Offset = ui32NextOffset; } @@ -2588,14 +2631,17 @@ void DumpFirstCCBCmd(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile) { - volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; - volatile void *pvPtr; - IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; - IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; + IMG_UINT32 ui32SampledRdOff; + IMG_UINT32 ui32SampledDepOff; - pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, + INVALIDATE); + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; if ((ui32SampledRdOff == ui32SampledDepOff) && (ui32SampledRdOff != ui32SampledWrOff)) @@ -2689,11 +2735,20 @@ void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) if (psStalledClientCCB) { - volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; - IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; - void *pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); - RGXFWIF_CCB_CMD_HEADER *psCommandHeader = pvPtr; - RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + IMG_UINT32 ui32SampledDepOffset; + void *pvPtr; + RGXFWIF_CCB_CMD_HEADER *psCommandHeader; + RGXFWIF_CCB_CMD_TYPE eCommandType; + + RGXFwSharedMemCacheOpValue(psStalledClientCCB->psClientCCBCtrl->ui32DepOffset, + INVALIDATE); + psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; + ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; + /* No need to invalidate CCCB as FW doesn't write to it, only read. 
*/ + pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); + psCommandHeader = pvPtr; + eCommandType = psCommandHeader->eCmdType; if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) { @@ -2703,12 +2758,14 @@ void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS]; #if defined(PVRSRV_STALLED_CCB_ACTION) + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, + INVALIDATE); if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0]) { OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp); psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; - OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, + OSStringSafeCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, psStalledClientCCB->szName, MAX_CLIENT_CCB_NAME); } @@ -2717,7 +2774,7 @@ void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp); psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; - OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, + OSStringSafeCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, psStalledClientCCB->szName, MAX_CLIENT_CCB_NAME); psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES; @@ -2725,6 +2782,8 @@ void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++; /* flush write buffers for psRGXFWIfFwOsData */ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp]); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, + FLUSH); #endif PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs", FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.h index 2669b8eb8fe5..e9edd9a37bc3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxccb.h @@ -49,7 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
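/* [Editor's illustrative sketch - not part of this patch] The RGXFwSharedMemCacheOp*
 * calls added to CheckForStalledCCB/DumpCCB/DumpFirstCCBCmd/DumpStalledContextInfo
 * above appear to follow one convention for firmware-shared memory: invalidate
 * before the host reads data the firmware may have written, and flush (after a
 * write barrier) once the host has written data the firmware will read. Roughly,
 * with the psCurrentClientCCB and psDevInfo pointers used in those hunks:
 *
 *   RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, INVALIDATE);
 *   ui32SampledRdOff = psCurrentClientCCB->psClientCCBCtrl->ui32ReadOffset;
 *
 *   OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested);
 *   RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, FLUSH);
 */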
#include "rgxdevice.h" #include "sync_server.h" #include "connection_server.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgxdefs_km.h" #include "pvr_notifier.h" @@ -143,10 +143,10 @@ typedef struct _RGX_CCB_CMD_HELPER_DATA_ { RGXFWIF_CCB_CMD_TYPE eType; IMG_UINT32 ui32CmdSize; IMG_UINT8 *pui8DMCmd; - IMG_UINT32 ui32FenceCmdSize; IMG_UINT32 ui32FBSCInvalCmdSize; IMG_UINT32 ui32DMCmdSize; - IMG_UINT32 ui32UpdateCmdSize; + IMG_UINT32 ui32TotalSize; + IMG_UINT32 ui32DMCmdOffset; /* data for FBSC invalidate command */ IMG_UINT64 ui64FBSCEntryMask; @@ -279,7 +279,8 @@ void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); -void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, +void RGXCmdHelperInitCmdCCB_OtherData(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32ClientFenceCount, PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, IMG_UINT32 *paui32FenceValue, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.c index 8f3fb7f60978..65ff0cf76d53 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.c @@ -46,7 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdump_km.h" #include "pvr_debug.h" #include "rgxutils.h" -#include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "rgxcompute.h" #include "rgx_bvnc_defs_km.h" #include "rgxmem.h" @@ -57,7 +57,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxccb.h" #include "rgxhwperf.h" #include "ospvr_gputrace.h" -#include "htbuffer.h" +#include "htbserver.h" +#include "rgxshader.h" #include "sync_server.h" #include "sync_internal.h" @@ -73,6 +74,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxtimerquery.h" + #if defined(SUPPORT_WORKLOAD_ESTIMATION) #include "rgxworkest.h" @@ -188,7 +190,10 @@ PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); + } #endif if (ui32FrameworkCommandSize) @@ -258,6 +263,7 @@ PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, ui32StaticComputeContextStateSize); DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); + RGXFwSharedMemCacheOpValue(psFWComputeContext->sStaticComputeContextState, FLUSH); DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); #if defined(SUPPORT_BUFFER_SYNC) @@ -315,10 +321,6 @@ PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComp { PVRSRV_ERROR eError = PVRSRV_OK; PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; - IMG_UINT32 ui32WorkEstCCBSubmitted; -#endif /* Check if the FW has finished with this resource ... 
*/ eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode, @@ -326,17 +328,9 @@ PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComp RGXFWIF_DM_CDM, PDUMP_FLAGS_NONE); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psComputeContext->psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); #if defined(SUPPORT_BUFFER_SYNC) /* remove after RGXFWRequestCommonContextCleanUp() because we might return @@ -349,30 +343,36 @@ PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComp #endif #if defined(SUPPORT_WORKLOAD_ESTIMATION) - eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, - (void **)&psFWComputeContext); - if (eError != PVRSRV_OK) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware compute context (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; - ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted; + eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, + (void **)&psFWComputeContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware compute context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + RGXFwSharedMemCacheOpValue(psFWComputeContext->ui32WorkEstCCBSubmitted, INVALIDATE); + ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted; - DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); + DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); - /* Check if all of the workload estimation CCB commands for this workload are read */ - if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived) - { - PVR_DPF((PVR_DBG_WARNING, - "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", - __func__, ui32WorkEstCCBSubmitted, - psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)); + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)); - return PVRSRV_ERROR_RETRY; + return PVRSRV_ERROR_RETRY; + } } #endif @@ -383,10 +383,18 @@ PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComp OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); + } #endif + SyncAddrListDeinit(&psComputeContext->sSyncAddrListFence); + SyncAddrListDeinit(&psComputeContext->sSyncAddrListUpdate); + FWCommonContextFree(psComputeContext->psServerCommonContext); + psComputeContext->psServerCommonContext = NULL; + if (psComputeContext->psFWFrameworkMemDesc) { DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); @@ -400,7 +408,6 @@ PVRSRV_ERROR 
PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComp return PVRSRV_OK; } - PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, IMG_UINT32 ui32ClientUpdateCount, SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, @@ -410,6 +417,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, PVRSRV_TIMELINE iUpdateTimeline, PVRSRV_FENCE *piUpdateFence, IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iExportFenceToSignal, IMG_UINT32 ui32CmdSize, IMG_PBYTE pui8DMCmd, IMG_UINT32 ui32PDumpFlags, @@ -419,7 +427,8 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, PMR **ppsSyncPMRs, IMG_UINT32 ui32NumWorkgroups, IMG_UINT32 ui32NumWorkitems, - IMG_UINT64 ui64DeadlineInus) + IMG_UINT64 ui64DeadlineInus, + IMG_PUINT32 pui32IntJobRef) { RGXFWIF_KCCB_CMD sCmpKCCBCmd; RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; @@ -453,6 +462,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, IMG_UINT64 uiCheckFenceUID = 0; IMG_UINT64 uiUpdateFenceUID = 0; PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT psExportFenceSyncCheckpoint = NULL; PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; IMG_UINT32 ui32FenceSyncCheckpointCount = 0; IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; @@ -471,9 +481,17 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, if (iUpdateTimeline >= 0 && !piUpdateFence) { + PVR_DPF((PVR_DBG_ERROR, "%s: " + "iUpdateTimeline=%d but piUpdateFence is NULL - PVRSRV_ERROR_INVALID_PARAMS", + __func__, iUpdateTimeline)); return PVRSRV_ERROR_INVALID_PARAMS; } + if (pui32IntJobRef) + { + *pui32IntJobRef = ui32IntJobRef; + } + /* Ensure we haven't been given a null ptr to * update values if we have been told we * have updates @@ -487,7 +505,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, } /* Ensure the string is null-terminated (Required for safety) */ - pszUpdateFenceName[31] = '\0'; + pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; OSLockAcquire(psComputeContext->hLock); @@ -593,6 +611,9 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, ui32IntClientUpdateCount++; } #else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_UNREFERENCED_PARAMETER(paui32SyncPMRFlags); + PVR_UNREFERENCED_PARAMETER(ppsSyncPMRs); + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); eError = PVRSRV_ERROR_INVALID_PARAMS; @@ -612,7 +633,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError))); goto fail_free_buffer_sync_data; } - CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, iCheckFence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); #if defined(CMP_CHECKPOINT_DEBUG) if (ui32FenceSyncCheckpointCount > 0) { @@ -620,7 +641,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>, FWAddr=0x%x", __func__, ii, (void*)psNextCheckpoint, 
SyncCheckpointGetFirmwareAddr(psNextCheckpoint))); } } #endif @@ -647,6 +668,48 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: iExportFenceToSignal=%d", __func__, iExportFenceToSignal)); + /* Resolve the iExportFenceToSignal (if required) */ + if (iExportFenceToSignal != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncCheckpointResolveExportFence(iExportFenceToSignal=%d), ui32FenceSyncCheckpointCount=%d", __func__, iExportFenceToSignal, ui32FenceSyncCheckpointCount)); + eError = SyncCheckpointResolveExportFence(iExportFenceToSignal, + psComputeContext->psDeviceNode->hSyncCheckpointContext, + &psExportFenceSyncCheckpoint, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%s) psExportFenceSyncCheckpoint=<%p>", __func__, PVRSRVGetErrorString(eError), psExportFenceSyncCheckpoint)); + goto fail_resolve_export_fence; + } + + /* Check that the export fence was not also included as part of the + * check fence (which is an error and would lead to a stalled kick). + */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Checking export fence is not part of check fence...", __func__)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32FenceSyncCheckpointCount=%d", + __func__, ui32FenceSyncCheckpointCount)); + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 iii; + + for (iii=0; iii, FWAddr=0x%x", __func__, iii, apsFenceSyncCheckpoints[iii], SyncCheckpointGetFirmwareAddr(apsFenceSyncCheckpoints[iii]))); + if (apsFenceSyncCheckpoints[iii] == psExportFenceSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ERROR psExportFenceSyncCheckpoint=<%p>", __func__, psExportFenceSyncCheckpoint)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, " %s - iCheckFence includes iExportFenceToSignal", PVRSRVGetErrorString(eError))); + goto fail_check_fence_includes_export_fence; + } + } + } + } + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); /* Append the sync prim update for the timeline (if required) */ if (psFenceTimelineUpdateSync) @@ -716,15 +779,13 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, { CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); #if defined(CMP_CHECKPOINT_DEBUG) - if (ui32IntClientUpdateCount > 0) + if (ui32FenceSyncCheckpointCount > 0) { IMG_UINT32 iii; - IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; - for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); - pui32Tmp++; + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>, FWAddr=0x%x", __func__, iii, apsFenceSyncCheckpoints[iii], SyncCheckpointGetFirmwareAddr(apsFenceSyncCheckpoints[iii]))); } } #endif @@ -780,6 +841,34 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, } #endif } + + if (psExportFenceSyncCheckpoint) + { + /* Append the update (from export fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update 
(&psComputeContext->sSyncAddrListUpdate=<%p>, psExportFenceSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psExportFenceSyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, + 1, + &psExportFenceSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); #if (ENABLE_CMP_UFO_DUMP == 1) @@ -834,17 +923,24 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, #endif #if defined(SUPPORT_WORKLOAD_ESTIMATION) - sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups; - sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems; - - /* Prepare workload estimation */ - WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice, - &psComputeContext->sWorkEstData, - &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, - RGXFWIF_CCB_CMD_TYPE_CDM, - &sWorkloadCharacteristics, - ui64DeadlineInus, - &sWorkloadKickDataCompute); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psComputeContext->psDeviceNode)) + { + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups; + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems; + + /* Prepare workload estimation */ + WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice, + &psComputeContext->sWorkEstData, + &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, + RGXFWIF_CCB_CMD_TYPE_CDM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataCompute); + } +#else + PVR_UNREFERENCED_PARAMETER(ui32NumWorkgroups); + PVR_UNREFERENCED_PARAMETER(ui32NumWorkitems); + PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); #endif RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice, @@ -882,7 +978,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); if (eError != PVRSRV_OK) { - goto fail_cmdaquire; + goto fail_cmdacquire; } @@ -903,26 +999,43 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, take the required server sync operations and commit all the resources */ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", 
FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following is used to determine the offset of the command header containing - the workload estimation data so that can be accessed when the KCCB is read */ - ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) + { + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); - ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext)); + ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext)); - /* This checks if the command would wrap around at the end of the CCB and - * therefore would start at an offset of 0 rather than the current command - * offset */ - if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck) - { - ui32CDMWorkloadDataRO = ui32CDMCmdOffset; - } - else - { - ui32CDMWorkloadDataRO = 0; + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck) + { + ui32CDMWorkloadDataRO = ui32CDMCmdOffset; + } + else + { + ui32CDMWorkloadDataRO = 0; + } } #endif @@ -935,9 +1048,12 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, /* Add the Workload data into the KCCB kick */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Store the offset to the CCCB command header so that it can be referenced - * when the KCCB command reaches the FW */ - sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) + { + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset; + } #endif ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr; @@ -969,9 +1085,9 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, /* * Submit the compute command to the firmware. 
*/ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + eError = RGXScheduleCommandWithoutPowerLock(psComputeContext->psDeviceNode->pvDevice, RGXFWIF_DM_CDM, &sCmpKCCBCmd, ui32PDumpFlags); @@ -980,7 +1096,9 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); + + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); if (eError != PVRSRV_OK) { @@ -1001,15 +1119,28 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ if (psUpdateSyncCheckpoint) { - CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, + (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); } if (psFenceTimelineUpdateSync) { - CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, + (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); } SyncCheckpointNoHWUpdateTimelines(NULL); + + if (psExportFenceSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, + (void*)psExportFenceSyncCheckpoint, SyncCheckpointGetId(psExportFenceSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psExportFenceSyncCheckpoint))); + SyncCheckpointSignalNoHW(psExportFenceSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncCheckpointNoHWSignalExportFence(iExportFenceToSignal=%d)", __func__, iExportFenceToSignal)); + SyncCheckpointNoHWSignalExportFence(iExportFenceToSignal); + } #endif /* defined(NO_HARDWARE) */ #if defined(SUPPORT_BUFFER_SYNC) @@ -1052,13 +1183,20 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, return PVRSRV_OK; fail_schedulecmd: -fail_cmdaquire: +fail_acquirepowerlock: +fail_cmdacquire: #if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED) fail_cmdinvalfbsc: #endif SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); fail_alloc_update_values_mem: + if (psExportFenceSyncCheckpoint) + { + SyncCheckpointRollbackExportFence(iExportFenceToSignal); + } +fail_check_fence_includes_export_fence: +fail_resolve_export_fence: if (iUpdateFence != PVRSRV_NO_FENCE) { SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); @@ -1099,6 +1237,38 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, return eError; } +PVRSRV_ERROR PVRSRVRGXSendCancelCmdKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, IMG_UINT32 ui32FirstIntJobRefToCancel, IMG_UINT32 ui32LastIntJobRefToCancel) +{ + RGXFWIF_KCCB_CMD sCancelWorkKCCBCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + + sCancelWorkKCCBCmd.eCmdType 
= RGXFWIF_KCCB_CMD_CANCEL_WORK; + sCancelWorkKCCBCmd.uCmdData.sCancelWorkData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); + sCancelWorkKCCBCmd.uCmdData.sCancelWorkData.ui32FirstIntJobRefToCancel = ui32FirstIntJobRefToCancel; + sCancelWorkKCCBCmd.uCmdData.sCancelWorkData.ui32LastIntJobRefToCancel = ui32LastIntJobRefToCancel; + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + RGXFWIF_DM_CDM, + &sCancelWorkKCCBCmd, + PDUMP_FLAGS_CONTINUOUS); + + /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */ + if ((eError != PVRSRV_ERROR_RETRY) && + (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + PVR_DPF((PVR_DBG_MESSAGE, "Sending compute cancel command for context <%p>. Work with IntJobRef below ui32FirstValidIntJobRef=0x%x might be discarded.", + psComputeContext->psServerCommonContext, ui32LastIntJobRefToCancel + 1)); + + return eError; +} + PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) { RGXFWIF_KCCB_CMD sFlushCmd; @@ -1117,7 +1287,7 @@ PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeCo OSLockAcquire(psComputeContext->hLock); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, RGXFWIF_DM_CDM, @@ -1131,7 +1301,7 @@ PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeCo break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError != PVRSRV_OK) { @@ -1160,14 +1330,17 @@ PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeCo "%s: Compute flush aborted (%s)", __func__, PVRSRVGetErrorString(eError))); + goto error_exit; } - else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + + if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) { PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); } } +error_exit: OSLockRelease(psComputeContext->hLock); return eError; } @@ -1176,8 +1349,11 @@ PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeCo PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) { PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) && - 2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT)) + IMG_UINT32 ui32ControlStreamFormat = + RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) ? 
+ RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT) : 0; + + if (ui32ControlStreamFormat >= 2U && ui32ControlStreamFormat < 5U) { RGXFWIF_KCCB_CMD sKCCBCmd; @@ -1189,7 +1365,7 @@ PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEX sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, RGXFWIF_DM_CDM, @@ -1200,7 +1376,7 @@ PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEX break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError != PVRSRV_OK) { @@ -1331,6 +1507,366 @@ PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, return PVRSRV_OK; } +/* + * PVRSRVRGXKickTimestampQueryKM + */ +PVRSRV_ERROR PVRSRVRGXKickTimestampQueryKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32ExtJobRef) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext); + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT32 ui32PDumpFlags = 0; + IMG_UINT32 ui32IntClientFenceCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; + IMG_UINT32 ui32IntClientUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 *paui32IntUpdateValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *paui32ClientUpdateValue = NULL; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + PVRSRV_ERROR eError; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void *pvUpdateFenceFinaliseData = NULL; + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure the string is null-terminated (Required for safety) */ + pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + + OSLockAcquire(psComputeContext->hLock); + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + if (ui32IntClientUpdateCount) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + + + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync 
checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError))); + goto err_populate_sync_addr_list; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 ii; + for (ii=0; ii, FWAddr=0x%x", __func__, ii, (void*)psNextCheckpoint, SyncCheckpointGetFirmwareAddr(psNextCheckpoint))); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode, + pszUpdateFenceName, + iUpdateTimeline, + psComputeContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%s)", __func__, PVRSRVGetErrorString(eError))); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + if (paui32IntUpdateValue) + { + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + } + + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, 
(void*)psFenceTimelineUpdateSync)); + /* Now append the timeline sync prim addr to the compute context update list */ + SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); + + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); + + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } + + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; + } + + RGXCmdHelperInitCmdCCB(psDevInfo, + psClientCCB, + 0, /* empty ui64FBSCEntryMask */ + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32CmdSize, + pui8DMCmd, + NULL, + NULL, + NULL, + RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP, + ui32ExtJobRef, + ui32IntJobRef, + PDUMP_FLAGS_NONE, + NULL, + "VkTimestamp", + IMG_FALSE, /* bCCBStateOpen */ + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + + PVR_LOG_GOTO_IF_ERROR(eError, "RGXCmdHelperAcquireCmdCCB", fail_cmdacquire); + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + + RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", + FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr); + + /* Construct the kernel compute CCB command. */ + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + /* + * Submit the RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP + * command to the firmware. 
+ */ + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommandWithoutPowerLock(psComputeContext->psDeviceNode->pvDevice, + RGXFWIF_DM_CDM, + &sCmpKCCBCmd, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to schedule kernel CCB command (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_schedulecmd; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + + + *piUpdateFence = iUpdateFence; + + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, pszUpdateFenceName); + } + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned + * by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psComputeContext->hLock); + + return PVRSRV_OK; + +fail_schedulecmd: +fail_acquirepowerlock: +fail_cmdacquire: + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + if (iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); +err_populate_sync_addr_list: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + OSLockRelease(psComputeContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXCDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem) +{ + 
PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXCDMReleaseSharedMemoryKM(PMR * psPMRMem) +{ + PVR_UNREFERENCED_PARAMETER(psPMRMem); + + return PVRSRV_OK; +} + /****************************************************************************** End of file (rgxcompute.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.h index 14df7ff4af21..b93c42efc044 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxcompute.h @@ -48,7 +48,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "device.h" #include "rgxfwutils.h" #include "rgx_fwif_resetframework.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" #include "sync_server.h" @@ -108,7 +108,6 @@ PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, ******************************************************************************/ PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); - /*! ******************************************************************************* @Function PVRSRVRGXKickCDMKM @@ -127,6 +126,7 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, PVRSRV_TIMELINE iUpdateTimeline, PVRSRV_FENCE *piUpdateFence, IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iExportFenceToSignal, IMG_UINT32 ui32CmdSize, IMG_PBYTE pui8DMCmd, IMG_UINT32 ui32PDumpFlags, @@ -136,7 +136,8 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, PMR **ppsSyncPMRs, IMG_UINT32 ui32NumWorkgroups, IMG_UINT32 ui32NumWorkitems, - IMG_UINT64 ui64DeadlineInus); + IMG_UINT64 ui64DeadlineInus, + IMG_PUINT32 pui32IntJobRef); /*! ******************************************************************************* @@ -151,6 +152,25 @@ PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, ******************************************************************************/ PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + + +/*! +******************************************************************************* + @Function PVRSRVRGXSendCancelCmd + + @Description + Server-side implementation of RGXSendCancelCmd + + @Input psComputeContext - Compute context to cancel work on + @Input ui32LastIntJobRefToCancel - Last IntJobRef to cancel + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSendCancelCmdKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32FirstIntJobRefToCancel, + IMG_UINT32 ui32LastIntJobRefToCancel); + + /*! 
******************************************************************************* @@ -178,6 +198,15 @@ PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 *ui32Error); +PVRSRV_ERROR PVRSRVRGXKickTimestampQueryKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32ExtJobRef); + /* Debug - Dump debug info of compute contexts on this device */ void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, @@ -187,4 +216,11 @@ void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, /* Debug/Watchdog - check if client compute contexts are stalled */ IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR PVRSRVRGXCDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem); + +PVRSRV_ERROR PVRSRVRGXCDMReleaseSharedMemoryKM(PMR * psUSCPMRMem); + #endif /* RGXCOMPUTE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.c new file mode 100644 index 000000000000..fca648b26f56 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.c @@ -0,0 +1,3236 @@ +/*************************************************************************/ /*! +@File +@Title Rgx debug information +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxdefs_km.h" +#include "rgxdevice.h" +#include "osfunc.h" +#include "allocmem.h" + +#include "rgxdebug_common.h" +#include "pvrversion.h" +#include "pvrsrv.h" +#include "rgx_fwif_sf.h" +#include "rgxfw_log_helper.h" +#include "fwtrace_string.h" +#include "rgxmmudefs_km.h" +#include "rgxinit.h" +#include "rgxfwutils.h" +#include "rgxfwriscv.h" +#include "rgxfwimageutils.h" +#include "fwload.h" +#include "rgx_options.h" +#include "devicemem_history_server.h" +#include "debug_common.h" +#include "info_page.h" +#include "osfunc.h" +#if defined(SUPPORT_SOC_TIMER) +#include "rgxtimecorr.h" +#endif + +#define MAX_FW_DESCRIPTION_LENGTH (600U) + +#define PVR_DUMP_FIRMWARE_INFO(x) \ + PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ + PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ + PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ + (x).ui32DDKBuild, \ + ((x).ui32BuildOptions & OPTIONS_DEBUG_EN) ? "debug":"release", \ + (x).ui32BuildOptions); + +#define PVR_DUMP_FIRMWARE_INFO_HDR(x) \ + PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ + (x).ui16PVRVersionMajor, \ + (x).ui16PVRVersionMinor, \ + (x).ui32PVRVersionBuild, \ + ((x).ui32Flags & OPTIONS_DEBUG_EN) ? "debug":"release", \ + (x).ui32Flags); + +typedef struct { + IMG_UINT16 ui16Mask; + const IMG_CHAR *pszStr; +} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ + +/* + * Array of all the Firmware Trace log IDs used to convert the trace data. 
+ */ +typedef struct _TRACEBUF_LOG_ { + RGXFW_LOG_SFids eSFId; + const IMG_CHAR *pszName; + const IMG_CHAR *pszFmt; + IMG_UINT32 ui32ArgNum; +} TRACEBUF_LOG; + +static const TRACEBUF_LOG aLogDefinitions[] = +{ +#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, + RGXFW_LOG_SFIDLIST +#undef X +}; + +static const IMG_FLAGS2DESC asCswOpts2Description[] = +{ + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, + {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, +}; + +static const IMG_FLAGS2DESC asMisc2Description[] = +{ + {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, + {RGXFWIF_INICFG_SPU_CLOCK_GATE, " SPU Clock Gating (requires Power Rascal/Dust);"}, + {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, + {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"}, + {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, + {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, + {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, + {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, + {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, + {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, + {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, + {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, + {RGXFWIF_INICFG_INJECT_ICS_FAULT, " Inject ICS Fault;"}, + {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, + {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, + {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, + {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, + {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, + {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"}, +}; + +static const IMG_FLAGS2DESC asFwOsCfg2Description[] = +{ + {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " GEOM;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, +#if defined(SUPPORT_RAY_TRACING) + {RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN, " RDM;"}, +#endif + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio GEOM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, +#if defined(SUPPORT_RAY_TRACING) + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM, " LowPrio RDM;"}, +#endif +#if defined(SUPPORT_ICS) + {RGXFWIF_INICFG_OS_ICS_TDM_EN, " TDM;"}, + {RGXFWIF_INICFG_OS_ICS_GEOM_EN, " GEOM;"}, + {RGXFWIF_INICFG_OS_ICS_3D_EN, " 3D;"}, + {RGXFWIF_INICFG_OS_ICS_CDM_EN, " CDM;"}, +#if defined(SUPPORT_RAY_TRACING) + {RGXFWIF_INICFG_OS_ICS_RDM_EN, " RDM;"}, +#endif + {RGXFWIF_INICFG_OS_FDTI_PROFILE_LONG, " Long FDTI profile;"}, + {RGXFWIF_INICFG_OS_FDTI_PROFILE_MEDIUM, " Medium FDTI profile;"}, + {RGXFWIF_INICFG_OS_FDTI_PROFILE_SHORT, " Short FDTI profile;"}, +#endif +}; + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +const IMG_CHAR * const gapszMipsPermissionPTFlags[4] = +{ + " ", + "XI ", + "RI ", + "RIXI" +}; + +const IMG_CHAR * const gapszMipsCoherencyPTFlags[8] = +{ + "C", + "C", + " ", + "C", + "C", + "C", + "C", 
+ " " +}; + +const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8] = +{ + " ", + " G", + " V ", + " VG", + "D ", + "D G", + "DV ", + "DVG" +}; + +#if !defined(NO_HARDWARE) +/* Translation of MIPS exception encoding */ +typedef struct _MIPS_EXCEPTION_ENCODING_ +{ + const IMG_CHAR *const pszStr; /* Error type */ + const IMG_BOOL bIsFatal; /* Error is fatal or non-fatal */ +} MIPS_EXCEPTION_ENCODING; + +static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] = +{ + {"Interrupt", IMG_FALSE}, + {"TLB modified exception", IMG_FALSE}, + {"TLB exception (load/instruction fetch)", IMG_FALSE}, + {"TLB exception (store)", IMG_FALSE}, + {"Address error exception (load/instruction fetch)", IMG_TRUE}, + {"Address error exception (store)", IMG_TRUE}, + {"Bus error exception (instruction fetch)", IMG_TRUE}, + {"Bus error exception (load/store)", IMG_TRUE}, + {"Syscall exception", IMG_FALSE}, + {"Breakpoint exception (FW assert)", IMG_FALSE}, + {"Reserved instruction exception", IMG_TRUE}, + {"Coprocessor Unusable exception", IMG_FALSE}, + {"Arithmetic Overflow exception", IMG_FALSE}, + {"Trap exception", IMG_FALSE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + {"Implementation-Specific Exception 1 (COP2)", IMG_FALSE}, + {"CorExtend Unusable", IMG_FALSE}, + {"Coprocessor 2 exceptions", IMG_FALSE}, + {"TLB Read-Inhibit", IMG_TRUE}, + {"TLB Execute-Inhibit", IMG_TRUE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + {"Reference to WatchHi/WatchLo address", IMG_FALSE}, + {"Machine check", IMG_FALSE}, + {NULL, IMG_FALSE}, + {"DSP Module State Disabled exception", IMG_FALSE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + {NULL, IMG_FALSE}, + /* Can only happen in MIPS debug mode */ + {"Parity error", IMG_FALSE}, + {NULL, IMG_FALSE} +}; + +static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode) +{ + if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) + { + PVR_DPF((PVR_DBG_WARNING, + "Only %lu exceptions available in MIPS, %u is not a valid exception code", + (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); + return NULL; + } + + return apsMIPSExcCodes[ui32ExcCode].pszStr; +} +#endif + +typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_ +{ + IMG_UINT32 ui32Mask; + const IMG_CHAR * pszExplanation; +} RGXMIPSFW_C0_DEBUG_TBL_ENTRY; + +#if !defined(NO_HARDWARE) +static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] = +{ + { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" }, + { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" }, + { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" }, + { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception inhibit controls exception occurred" }, + { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" }, + { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" }, + { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" }, + { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction fetch Bus Error exception pending" }, + { 
(IMG_UINT32)RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" } +}; +#endif +#endif +#endif /* !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION)*/ + +#define NARGS_MASK ~(0xF<<16) +static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; + IMG_BOOL bIntegrityOk = IMG_TRUE; + + /* + * For every log ID, check the format string and number of arguments is valid. + */ + while (psLogDef->eSFId != RGXFW_SF_LAST) + { + const TRACEBUF_LOG *psLogDef2; + const IMG_CHAR *pszString; + IMG_UINT32 ui32Count; + + /* + * Check the number of arguments matches the number of '%' in the string and + * check that no string uses %s which is not supported as it requires a + * pointer to memory that is not going to be valid. + */ + pszString = psLogDef->pszFmt; + ui32Count = 0; + + while (*pszString != '\0') + { + if (*pszString++ == '%') + { + ui32Count++; + if (*pszString == 's') + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", + psLogDef->pszName, *pszString); + } + else if (*pszString == '%') + { + /* Double % is a printable % sign and not a format string... */ + ui32Count--; + pszString++; + } + } + } + + if (ui32Count != psLogDef->ui32ArgNum) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", + psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); + } + + /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ + if (ui32Count > 20) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.", + psLogDef->pszName, ui32Count); + } + + /* Check the id number is unique (don't take into account the number of arguments) */ + ui32Count = 0; + psLogDef2 = &aLogDefinitions[0]; + + while (psLogDef2->eSFId != RGXFW_SF_LAST) + { + if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) + { + ui32Count++; + } + psLogDef2++; + } + + if (ui32Count != 1) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", + psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); + } + + /* Move to the next log ID... */ + psLogDef++; + } + + return bIntegrityOk; +} + +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; + + /* Check that the firmware trace is correctly defined... */ + if (!bIntegrityCheckPassed) + { + bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); + if (!bIntegrityCheckPassed) + { + return; + } + } + + /* Dump FW trace information... */ + if (psRGXFWIfTraceBufCtl != NULL) + { + IMG_UINT32 tid; + + PVR_DUMPDEBUG_LOG("Device ID: %u", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); + + /* Print the log type settings... 
*/ + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + /* Print the decoded log for each thread... */ + for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) + { + RGXDumpFirmwareTraceDecoded(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); + } + } +} + +/*! +******************************************************************************* + + @Function RGXPrepareExtraDebugInfo + + @Description + + Prepares debug info string by decoding ui16DebugInfo value passed + + @Input pszBuffer - pointer to debug info string buffer + + @Return void + +******************************************************************************/ +static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) +{ + const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = + { +#define X(a, b) {a, b}, + RGXFWT_DEBUG_INFO_MSKSTRLIST +#undef X + }; + + IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); + IMG_UINT32 i; + IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; + + /* Add prepend string */ + OSStringSafeCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); + + /* Add debug info strings */ + for (i = 0; i < ui32NumFields; i++) + { + if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) + { + if (bHasExtraDebugInfo) + { + OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ + } + OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); + bHasExtraDebugInfo = IMG_TRUE; + } + } + + /* Add append string */ + OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); +} + +#define PVR_MAX_DEBUG_PARTIAL_LINES (40U) +#define PVR_DUMPDEBUG_LOG_LINES(fmt, ...) 
\ + if (!bPrintAllLines) { \ + OSSNPrintf(&pszLineBuffer[ui32LastLineIdx * PVR_MAX_DEBUG_MESSAGE_LEN], PVR_MAX_DEBUG_MESSAGE_LEN, (fmt), ##__VA_ARGS__); \ + ui32LineCount++; \ + ui32LastLineIdx = ui32LineCount % PVR_MAX_DEBUG_PARTIAL_LINES; \ + } else { \ + PVR_UNREFERENCED_PARAMETER(pszLineBuffer); \ + PVR_UNREFERENCED_PARAMETER(ui32LineCount); \ + PVR_UNREFERENCED_PARAMETER(ui32LastLineIdx); \ + PVR_DUMPDEBUG_LOG((fmt), ##__VA_ARGS__); \ + } + +static void RGXDumpFirmwareTraceLines(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID, + bool bPrintAllLines) +{ + volatile IMG_UINT32 *pui32FWWrapCount; + volatile IMG_UINT32 *pui32FWTracePtr; + IMG_UINT32 *pui32TraceBuf; + IMG_UINT32 *pui32LocalTraceBuf = NULL; + IMG_UINT32 ui32HostWrapCount; + IMG_UINT32 ui32HostTracePtr; + IMG_UINT32 ui32Count = 0; + IMG_UINT32 ui32LineCount = 0; + IMG_UINT32 ui32LastLineIdx = 0; + IMG_CHAR *pszLineBuffer = NULL; + IMG_UINT32 ui32TraceBufSizeInDWords; + + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID], INVALIDATE); + RGXFwSharedMemCacheOpExec(psDevInfo->apui32TraceBuffer[ui32TID], + psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32), + PVRSRV_CACHE_OP_INVALIDATE); + + pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].ui32WrapCount); + pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].ui32TracePointer); + pui32TraceBuf = psDevInfo->apui32TraceBuffer[ui32TID]; + ui32HostWrapCount = *pui32FWWrapCount; + ui32HostTracePtr = *pui32FWTracePtr; + + if (pui32TraceBuf == NULL) + { + /* trace buffer not yet allocated */ + return; + } + + if (!bPrintAllLines) + { + pszLineBuffer = OSAllocMem(PVR_MAX_DEBUG_MESSAGE_LEN * PVR_MAX_DEBUG_PARTIAL_LINES); + PVR_LOG_RETURN_VOID_IF_FALSE(pszLineBuffer != NULL, "pszLineBuffer alloc failed"); + } + + ui32TraceBufSizeInDWords = psDevInfo->ui32TraceBufSizeInDWords; + + if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) + { + PVR_DUMPDEBUG_LOG_LINES("WARNING: Trace pointer (%d) greater than buffer size (%d).", + ui32HostTracePtr, ui32TraceBufSizeInDWords); + ui32HostTracePtr %= ui32TraceBufSizeInDWords; + } + + /* + * Allocate a local copy of the trace buffer which will contain a static non-changing + * snapshot view of the buffer. This removes the issue of a fast GPU wrapping and + * overwriting the tail data of the buffer. + */ + pui32LocalTraceBuf = OSAllocMem(ui32TraceBufSizeInDWords * sizeof(IMG_UINT32)); + if (pui32LocalTraceBuf != NULL) + { + memcpy(pui32LocalTraceBuf, pui32TraceBuf, ui32TraceBufSizeInDWords * sizeof(IMG_UINT32)); + ui32HostTracePtr = *pui32FWTracePtr; + pui32TraceBuf = pui32LocalTraceBuf; + } + + while (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_UINT32 ui32Data, ui32DataToId; + + /* Find the first valid log ID, skipping whitespace... */ + do + { + IMG_UINT32 ui32ValidatedHostTracePtr; + ui32ValidatedHostTracePtr = OSConfineArrayIndexNoSpeculation(ui32HostTracePtr, + ui32TraceBufSizeInDWords); + ui32Data = pui32TraceBuf[ui32ValidatedHostTracePtr]; + ui32DataToId = idToStringID(ui32Data, SFs); + + /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */ + if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) + { + PVR_DUMPDEBUG_LOG_LINES("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); + } + + /* Update the trace pointer... 
*/ + ui32HostTracePtr++; + if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) + { + ui32HostTracePtr = 0; + ui32HostWrapCount++; + } + ui32Count++; + } while ((RGXFW_SF_LAST == ui32DataToId) && + ui32Count < ui32TraceBufSizeInDWords); + + if (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; + IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; + IMG_UINT64 ui64Timestamp; + IMG_UINT16 ui16DebugInfo; + + /* If we hit the ASSERT message then this is the end of the log... */ + if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) + { + PVR_DUMPDEBUG_LOG_LINES("ASSERTION %.*s failed at %.*s:%u", + RGXFW_TRACE_BUFFER_ASSERT_SIZE, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].sAssertBuf.szInfo, + RGXFW_TRACE_BUFFER_ASSERT_SIZE, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].sAssertBuf.szPath, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].sAssertBuf.ui32LineNum); + break; + } + + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]); + + ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); + ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; + + /* + * Print the trace string and provide up to 20 arguments which + * printf function will be able to use. We have already checked + * that no string uses more than this. + */ + OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); + + /* Check and append any extra debug info available */ + if (ui16DebugInfo) + { + /* Prepare debug info string */ + RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); + + /* Append debug info string */ + OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); + } + + PVR_DUMPDEBUG_LOG_LINES(szBuffer, ui64Timestamp, ui32TID, groups[RGXFW_SF_GID(ui32Data)], + pui32TraceBuf[(ui32HostTracePtr + 2) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 3) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 4) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 5) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 6) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 7) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 8) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 9) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data); + if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) + { + ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords; + ui32HostWrapCount++; + } + ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); + + /* Has the FW trace buffer overtaken the host pointer during the last line printed??? */ + if ((pui32LocalTraceBuf == NULL) && + ((*pui32FWWrapCount > ui32HostWrapCount) || + ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr)))) + { + /* Move forward to the oldest entry again... */ + PVR_DUMPDEBUG_LOG_LINES(". . ."); + ui32HostWrapCount = *pui32FWWrapCount; + ui32HostTracePtr = *pui32FWTracePtr; + } + } + } + + /* Free the local copy of the trace buffer if it was allocated... */ + if (pui32LocalTraceBuf != NULL) + { + OSFreeMem(pui32LocalTraceBuf); + } + + if (!bPrintAllLines) + { + IMG_UINT32 ui32FirstLineIdx; + + if (ui32LineCount > PVR_MAX_DEBUG_PARTIAL_LINES) + { + ui32FirstLineIdx = ui32LastLineIdx; + ui32LineCount = PVR_MAX_DEBUG_PARTIAL_LINES; + } + else + { + ui32FirstLineIdx = 0; + } + + for (ui32Count = 0; ui32Count < ui32LineCount; ui32Count++) + { + PVR_DUMPDEBUG_LOG("%s", &pszLineBuffer[((ui32FirstLineIdx + ui32Count) % PVR_MAX_DEBUG_PARTIAL_LINES) * PVR_MAX_DEBUG_MESSAGE_LEN]); + } + + OSFreeMem(pszLineBuffer); + } +} + +void RGXDumpFirmwareTraceDecoded(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID) +{ + RGXDumpFirmwareTraceLines(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, + psRGXFWIfTraceBufCtl, ui32TID, true); +} + +void RGXDumpFirmwareTracePartial(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID) +{ + RGXDumpFirmwareTraceLines(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, + psRGXFWIfTraceBufCtl, ui32TID, false); +} + +void RGXDumpFirmwareTraceBinary(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID) +{ + IMG_UINT32 i; + IMG_BOOL bPrevLineWasZero = IMG_FALSE; + IMG_BOOL bLineIsAllZeros = IMG_FALSE; + IMG_UINT32 ui32CountLines = 0; + IMG_UINT32 *pui32TraceBuffer; + IMG_CHAR *pszLine; + + RGXFwSharedMemCacheOpExec(psDevInfo->apui32TraceBuffer[ui32TID], + psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32), + PVRSRV_CACHE_OP_INVALIDATE); + pui32TraceBuffer = psDevInfo->apui32TraceBuffer[ui32TID]; + +/* Max number of DWords to be printed per line, in debug dump binary output */ +#define PVR_DD_FW_TRACEBUF_LINESIZE 30U + /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. */ + pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); + return; + } + + for (i = 0; i < psDevInfo->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) + { + IMG_UINT32 k = 0; + IMG_UINT32 ui32Line = 0x0; + IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); + IMG_CHAR *pszBuf = pszLine; + + for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) + { + if ((i + k) >= psDevInfo->ui32TraceBufSizeInDWords) + { + /* Stop reading when the index goes beyond trace buffer size. 
This condition is + * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not + * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ + break; + } + + ui32Line |= pui32TraceBuffer[i + k]; + + /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); + pszBuf += 9; /* write over the '\0' */ + } + + bLineIsAllZeros = (ui32Line == 0x0); + + if (bLineIsAllZeros) + { + if (bPrevLineWasZero) + { + ui32CountLines++; + } + else + { + bPrevLineWasZero = IMG_TRUE; + ui32CountLines = 1; + PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset); + } + } + else + { + if (bPrevLineWasZero && ui32CountLines > 1) + { + PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); + } + bPrevLineWasZero = IMG_FALSE; + + PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); + } + } + + if (bPrevLineWasZero) + { + PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); + } + + OSFreeMem(pszLine); +} + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) +void RGXDocumentFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const IMG_UINT32 ui32FwVA, + const IMG_CPU_PHYADDR sCpuPA, + const IMG_DEV_PHYADDR sDevPA, + const IMG_UINT64 ui64PTE) +{ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + PVR_DUMPDEBUG_LOG("| 0x%08X | " + "0x%016" IMG_UINT64_FMTSPECX " | " + "0x%016" IMG_UINT64_FMTSPECX " | " + "%s%s%s |", + ui32FwVA, + (IMG_UINT64) sCpuPA.uiAddr, + sDevPA.uiAddr, + gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(ui64PTE)], + gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(ui64PTE)], + gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(ui64PTE)]); + } + else +#endif + { + const char *pszSLCBypass = +#if defined(RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) + BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) ? "B" : " "; +#else + " "; +#endif + + /* META and RISCV use a subset of the GPU's virtual address space */ + PVR_DUMPDEBUG_LOG("| 0x%08X | " + "0x%016" IMG_UINT64_FMTSPECX " | " + "0x%016" IMG_UINT64_FMTSPECX " | " + "%s%s%s%s%s%s |", + ui32FwVA, + (IMG_UINT64) sCpuPA.uiAddr, + sDevPA.uiAddr, + BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN) ? "P" : " ", + BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_PM_SRC_EN) ? "PM" : " ", + pszSLCBypass, + BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_CC_EN) ? "C" : " ", + BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_READ_ONLY_EN) ? "RO" : "RW", + BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_VALID_EN) ? "V" : " "); + } +} +#endif /* !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) */ + +#if !defined(NO_HARDWARE) +static PVRSRV_ERROR +RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) +{ + IMG_UINT32 ui32RegValue, ui32NumPolls = 0; + PVRSRV_ERROR eError; + + do + { + eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue); + if (eError != PVRSRV_OK) + { + return eError; + } + } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); + + return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; +} + +PVRSRV_ERROR +RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) +{ + PVRSRV_ERROR eError; + + /* Core Read Ready? 
*/ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Set the reg we are interested in reading */ + eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, + ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr"); + + /* Core Read Done? */ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Read the value */ + return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); +} +#endif /* !defined(NO_HARDWARE) */ + +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) +static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DEV_VIRTADDR *psFWAddr, + void *pvHostCodeAddr, + IMG_UINT32 ui32MaxLen, + const IMG_CHAR *pszDesc, + IMG_UINT32 ui32StartOffset) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32Value = 0; + IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; + IMG_UINT32 *pui32FWCode = (IMG_UINT32*) IMG_OFFSET_ADDR(pvHostCodeAddr,ui32StartOffset); + IMG_UINT32 i; + +#if defined(EMULATOR) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + return PVRSRV_OK; + } +#endif + + ui32MaxLen -= ui32StartOffset; + ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ + + for (i = 0; i < ui32MaxLen; i++) + { + eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + return eError; + } + +#if defined(EMULATOR) + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) +#endif + { + PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); + + if (pui32FWCode[i] != ui32Value) + { + PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", + __func__, pszDesc, + (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); + return PVRSRV_ERROR_FW_IMAGE_MISMATCH; + } + } + + ui32FWCodeDevVAAddr += 4; + } + + PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc); + return PVRSRV_OK; +} +#endif + +#if !defined(NO_HARDWARE) +PVRSRV_ERROR RGXValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(SUPPORT_TRUSTED_DEVICE) + PVRSRV_ERROR eError; + IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + IMG_UINT32 *pui32CodeMemoryPointer; +#endif + RGXFWIF_DEV_VIRTADDR sFWAddr; + IMG_UINT32 ui32StartOffset = 0; + +#if defined(EMULATOR) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator"); + return PVRSRV_OK; + } +#endif + + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* Load FW from system for code verification */ + pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); + if (pui32HostFWCode == NULL) + { + 
PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW code. " + "So skipping FW code verification", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Coremem is not present on all GPU cores, so size can be zero */ + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); + if (pui32HostFWCoremem == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW core code. " + "So skipping FW code verification", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto freeHostFWCode; + } + } + + /* Load FW image */ + eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", + __func__, PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto cleanup_initfw; + } + + pbRGXFirmware = (const IMG_BYTE *)OSFirmwareData(psRGXFW); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + eError = ProcessLDRCommandStream(&psDevInfo->sLayerParams, pbRGXFirmware, + (void*) pui32HostFWCode, NULL, + (void*) pui32HostFWCoremem, NULL, NULL); + } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = ProcessELFCommandStream(&psDevInfo->sLayerParams, pbRGXFirmware, + pui32HostFWCode, NULL, + NULL, NULL); + } +#endif + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eError = ProcessELFCommandStream(&psDevInfo->sLayerParams, pbRGXFirmware, + pui32HostFWCode, NULL, + pui32HostFWCoremem, NULL); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); + goto cleanup_initfw; + } + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error in acquiring MIPS FW code memory area (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto cleanup_initfw; + } + + RGXFwSharedMemCacheOpExec(pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, PVRSRV_CACHE_OP_INVALIDATE); + + if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes) == 0) + { + PVR_DUMPDEBUG_LOG("Match between Host and MIPS views of the FW code" ); + } + else + { + IMG_UINT32 ui32Count = 10; /* Show only the first 10 mismatches */ + IMG_UINT32 ui32Offset; + + PVR_DUMPDEBUG_LOG("Mismatch between Host and MIPS views of the FW code"); + for (ui32Offset = 0; (ui32Offset*4 < psDevInfo->ui32FWCodeSizeInBytes) || (ui32Count == 0); ui32Offset++) + { + if (pui32HostFWCode[ui32Offset] != pui32CodeMemoryPointer[ui32Offset]) + { + PVR_DUMPDEBUG_LOG("At %d bytes, code should be 0x%x but it is instead 0x%x", + ui32Offset*4, pui32HostFWCode[ui32Offset], pui32CodeMemoryPointer[ui32Offset]); + ui32Count--; + } + } + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + } + else +#endif + { + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + /* starting checking after BOOT LOADER config */ + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + + ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET; + } + else + { +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + /* Use bootloader code remap which is always configured before the FW is started */ + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) + { + sFWAddr.ui32Addr = 
RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE; + } + else +#endif + { + sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP; + } + } + + eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, + "FW code", ui32StartOffset); + if (eError != PVRSRV_OK) + { + goto cleanup_initfw; + } + + /* Coremem is not present on all GPU cores, so may not be alloc'd */ + if (pui32HostFWCoremem != NULL) // && psDevInfo->ui32FWCorememCodeSizeInBytes + { + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + } + else + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); + + /* Core must be halted while issuing abstract commands */ + eError = RGXRiscvHalt(psDevInfo); + PVR_GOTO_IF_ERROR(eError, cleanup_initfw); + } + + eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, + "FW coremem code", 0); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eError = RGXRiscvResume(psDevInfo); + PVR_GOTO_IF_ERROR(eError, cleanup_initfw); + } + } + } + +cleanup_initfw: + if (psRGXFW) + { + OSUnloadFirmware(psRGXFW); + } + + if (pui32HostFWCoremem) + { + OSFreeMem(pui32HostFWCoremem); + } +freeHostFWCode: + if (pui32HostFWCode) + { + OSFreeMem(pui32HostFWCode); + } + return eError; +#else + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +#endif +} +#endif /* !defined(NO_HARDWARE) */ + +#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) + IMG_PBYTE pbCodeMemoryPointer; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR sFWAddr; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + return eError; + } + + RGXFwSharedMemCacheOpExec(pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, PVRSRV_CACHE_OP_INVALIDATE); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + } + else + { + PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) + { + sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE; + } + else +#endif + { + sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP; + } + }; + + eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); + if (eError != PVRSRV_OK) + { + goto releaseFWCodeMapping; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + goto releaseFWCoreCodeMapping; + } + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + } + else + { + PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); + } + + eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, + psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem 
code", 0); + } + +releaseFWCoreCodeMapping: + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + } +releaseFWCodeMapping: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +#endif +} +#endif + +static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) +{ + switch (ePowerState) + { + case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; + case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; + case PVRSRV_DEV_POWER_STATE_ON: return "ON"; + default: return "UNKNOWN"; + } +} + +/* + Writes flags strings to an uninitialised buffer. +*/ +static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) +{ + const IMG_CHAR szCswLabel[] = "Ctx switch options:"; + size_t uLabelLen = sizeof(szCswLabel) - 1; + const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; + + OSStringSafeCopy(psDesc, szCswLabel, ui32DescSize); + + DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags); + DebugCommonFlagStrings(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags); +} + +static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) +{ + const IMG_CHAR szCswLabel[] = "Ctx switch:"; + size_t uLabelLen = sizeof(szCswLabel) - 1; + const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; + + OSStringSafeCopy(psDesc, szCswLabel, ui32DescSize); + + DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags); +} + + +typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ +{ + DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, + DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, + DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, + DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, +} DEVICEMEM_HISTORY_QUERY_INDEX; + + +/*! +******************************************************************************* + + @Function _PrintDevicememHistoryQueryResult + + @Description + + Print details of a single result from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psResult - The DevicememHistory result to be printed + @Input ui32Index - The index of the result + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, + IMG_UINT32 ui32Index, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 ui32Remainder; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + RGXConvertOSTimestampToSAndNS(psResult->ui64When, + &ui64Seconds, + &ui64Nanoseconds); + + if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC " s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? 
"Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC + ") PID: %u (%s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? "Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds, + psResult->sProcessInfo.uiPID, + psResult->sProcessInfo.szProcessName); + } + + if (!psResult->bRange) + { + PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); + } + else + { + PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", + pszIndent, + psResult->ui32StartPage, + psResult->ui32StartPage + psResult->ui32PageCount - 1, + psResult->sMapStartAddr.uiAddr, + psResult->sMapEndAddr.uiAddr, + psResult->bAll ? "(whole allocation) " : "", + psResult->bMap ? "mapped": "unmapped"); + } +} + +/*! +******************************************************************************* + + @Function _PrintDevicememHistoryQueryOut + + @Description + + Print details of all the results from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psQueryOut - Storage for the query results + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + + if (psQueryOut->ui32NumResults == 0) + { + PVR_DUMPDEBUG_LOG("%s No results", pszIndent); + } + else + { + for (i = 0; i < psQueryOut->ui32NumResults; i++) + { + _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, + psFaultProcessInfo, + &psQueryOut->sResults[i], + i, + pszIndent); + } + } +} + +/* table of HW page size values and the equivalent */ +static const unsigned int aui32HWPageSizeTable[][2] = +{ + { 0, PVRSRV_4K_PAGE_SIZE }, + { 1, PVRSRV_16K_PAGE_SIZE }, + { 2, PVRSRV_64K_PAGE_SIZE }, + { 3, PVRSRV_256K_PAGE_SIZE }, + { 4, PVRSRV_1M_PAGE_SIZE }, + { 5, PVRSRV_2M_PAGE_SIZE } +}; + +/*! +******************************************************************************* + + @Function _PageSizeHWToBytes + + @Description + + Convert a HW page size value to its size in bytes + + @Input ui32PageSizeHW - The HW page size value + + @Return IMG_UINT32 The page size in bytes + +******************************************************************************/ +static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) +{ + if (ui32PageSizeHW > 5) + { + /* This is invalid, so return a default value as we cannot ASSERT in this code! */ + return PVRSRV_4K_PAGE_SIZE; + } + + return aui32HWPageSizeTable[ui32PageSizeHW][1]; +} + +/*! +******************************************************************************* + + @Function _GetDevicememHistoryData + + @Description + + Get the DevicememHistory results for the given PID and faulting device virtual address. 
+ The function will query DevicememHistory for information about the faulting page, as well + as the page before and after. + + @Input psDeviceNode - The device which this allocation search should be made on + @Input uiPID - The process ID to search for allocations belonging to + @Input sFaultDevVAddr - The device address to search for allocations at/before/after + @Input asQueryOut - Storage for the query results + @Input ui32PageSizeBytes - Faulted page size in bytes + + @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault + +******************************************************************************/ +static IMG_BOOL _GetDevicememHistoryData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PID uiPID, + IMG_DEV_VIRTADDR sFaultDevVAddr, + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], + IMG_UINT32 ui32PageSizeBytes) +{ + DEVICEMEM_HISTORY_QUERY_IN sQueryIn; + IMG_BOOL bAnyHits = IMG_FALSE; + + /* if the page fault originated in the firmware then the allocation may + * appear to belong to any PID, because FW allocations are attributed + * to the client process creating the allocation, so instruct the + * devicemem_history query to search all available PIDs + */ + if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; + } + else + { + sQueryIn.uiPID = uiPID; + } + + sQueryIn.psDevNode = psDeviceNode; + /* Query the DevicememHistory for all allocations in the previous page... */ + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes; + if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING], + ui32PageSizeBytes, IMG_TRUE)) + { + bAnyHits = IMG_TRUE; + } + + /* Query the DevicememHistory for any record at the exact address... */ + sQueryIn.sDevVAddr = sFaultDevVAddr; + if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], + ui32PageSizeBytes, IMG_FALSE)) + { + bAnyHits = IMG_TRUE; + } + else + { + /* If not matched then try matching any record in the faulting page... */ + if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], + ui32PageSizeBytes, IMG_TRUE)) + { + bAnyHits = IMG_TRUE; + } + } + + /* Query the DevicememHistory for all allocations in the next page... */ + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; + if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT], + ui32PageSizeBytes, IMG_TRUE)) + { + bAnyHits = IMG_TRUE; + } + + return bAnyHits; +} + +/* stored data about one page fault */ +typedef struct _FAULT_INFO_ +{ + /* the process info of the memory context that page faulted */ + RGXMEM_PROCESS_INFO sProcessInfo; + IMG_DEV_VIRTADDR sFaultDevVAddr; + MMU_FAULT_DATA sMMUFaultData; + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; + /* the CR timer value at the time of the fault, recorded by the FW. + * used to differentiate different page faults + */ + IMG_UINT64 ui64CRTimer; + /* time when this FAULT_INFO entry was added. used for timing + * reference against the map/unmap information + */ + IMG_UINT64 ui64When; + IMG_UINT32 ui32FaultInfoFlags; +} FAULT_INFO; + +/* history list of page faults. 
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW + * HWR log + */ +typedef struct _FAULT_INFO_LOG_ +{ + IMG_UINT32 ui32Head; + /* the number of faults in this log need not correspond exactly to + * the HWINFO number of the FW, as the FW HWINFO log may contain + * non-page fault HWRs + */ + FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; +} FAULT_INFO_LOG; + +#define FAULT_INFO_PROC_INFO (0x1U) +#define FAULT_INFO_DEVMEM_HIST (0x2U) + +static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; + +static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo) +{ + IMG_UINT32 i, j; + + for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) + { + IMG_BOOL bFound; + + RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; + bFound = RGXPCPIDToProcessInfo(psDevInfo, + psProcInfo->uiPID, + psProcInfo); + if (!bFound) + { + OSStringSafeCopy(psProcInfo->szProcessName, + "(unknown)", + sizeof(psProcInfo->szProcessName)); + } + } + } +} + +/*! +******************************************************************************* + + @Function _PrintFaultInfo + + @Description + + Print all the details of a page fault from a FAULT_INFO structure + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psInfo - The page fault occurrence to print + + @Return void + +******************************************************************************/ +static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + PVRSRV_DEVICE_NODE *psDevNode, + void *pvDumpDebugFile, + FAULT_INFO *psInfo, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + RGXConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) + { + IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
+ 0 : psInfo->sProcessInfo.uiPID; + + PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC + ", CRTimer: 0x%016" IMG_UINT64_FMTSPECX + ", PID: %u (%s, unregistered: %u) OS time: " + "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + pszIndent, + psInfo->sFaultDevVAddr.uiAddr, + psInfo->ui64CRTimer, + uiPID, + psInfo->sProcessInfo.szProcessName, + psInfo->sProcessInfo.bUnregistered, + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); + } + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) + { + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + const IMG_CHAR *pszWhich = NULL; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + pszWhich = "Preceding page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + pszWhich = "Faulted page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + pszWhich = "Next page"; + break; + } + + PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); + _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, + &psInfo->sProcessInfo, + &psInfo->asQueryOut[i], + pszIndent); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); + DevicememHistoryDumpRecordStats(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile); + PVR_DUMPDEBUG_LOG("%s Records Searched -" + " PP:%"IMG_UINT64_FMTSPEC + " FP:%"IMG_UINT64_FMTSPEC + " NP:%"IMG_UINT64_FMTSPEC, + pszIndent, + psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING].ui64SearchCount, + psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED].ui64SearchCount, + psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT].ui64SearchCount); + } +} + +static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + IMG_DEV_VIRTADDR sFaultDevVAddr, + IMG_DEV_PHYADDR sPCDevPAddr, + IMG_UINT64 ui64CRTimer, + IMG_UINT32 ui32PageSizeBytes) +{ + IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; + RGXMEM_PROCESS_INFO sProcessInfo; + + psInfo->ui32FaultInfoFlags = 0; + psInfo->sFaultDevVAddr = sFaultDevVAddr; + psInfo->ui64CRTimer = ui64CRTimer; + psInfo->ui64When = OSClockns64(); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + /* Check if this is PM fault */ + if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) + { + bIsPMFault = IMG_TRUE; + bFound = IMG_TRUE; + sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; + OSStringSafeCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); + sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; + sProcessInfo.bUnregistered = IMG_FALSE; + } + else + { + /* look up the process details for the faulting page catalogue */ + bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); + } + + if (bFound) + { + IMG_BOOL bHits; + + psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; + psInfo->sProcessInfo = sProcessInfo; + + if (bIsPMFault) + { + bHits = IMG_TRUE; + } + else + { + /* get any DevicememHistory data for the faulting address */ + bHits = _GetDevicememHistoryData(psDevInfo->psDeviceNode, + sProcessInfo.uiPID, + sFaultDevVAddr, + psInfo->asQueryOut, + ui32PageSizeBytes); + + if (bHits) + { + psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; + + /* if the page fault was caused by the firmware then get information about + * which client application created the related allocations. 
+ * + * Fill in the process info data for each query result. + */ + + if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + _FillAppForFWFaults(psDevInfo, psInfo); + } + } + } + } + } +} + +void RGXDumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const IMG_CHAR* pszIndent) +{ + MMU_LEVEL eTopLevel; + const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" }; + const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" }; + + eTopLevel = psFaultData->eTopLevel; + + if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN) + { + PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent); + return; + } + else if (psFaultData->eType == MMU_FAULT_TYPE_PM) + { + PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address); + } + else + { + MMU_LEVEL eCurrLevel; + PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST); + + for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--) + { + MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel]; + if (psMMULevelData->ui64Address) + { + if (psMMULevelData->uiBytesPerEntry == 4) + { + PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s", + pszIndent, + szPageLevel[eCurrLevel], + psMMULevelData->ui32Index, + (IMG_UINT) psMMULevelData->ui64Address, + psMMULevelData->psDebugStr); + } + else + { + PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s", + pszIndent, + szPageLevel[eCurrLevel], + psMMULevelData->ui32Index, + psMMULevelData->ui64Address, + psMMULevelData->psDebugStr); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)", + pszIndent, + szPageError[eCurrLevel], + psMMULevelData->ui32Index, + psMMULevelData->ui32NumOfEntries); + break; + } + } + } + +} + +void RGXDumpFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGX_HWRINFO *psHWRInfo, + IMG_UINT32 ui32ReadIndex, + IMG_DEV_VIRTADDR *psFaultDevVAddr, + IMG_DEV_PHYADDR *psPCDevPAddr, + bool bPMFault, + IMG_UINT32 ui32PageSize) +{ + FAULT_INFO *psInfo; + + OSLockAcquire(psDevInfo->hDebugFaultInfoLock); + + /* Find the matching Fault Info for this HWRInfo */ + psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; + + /* if they do not match, we need to update the psInfo */ + if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || + (psInfo->sFaultDevVAddr.uiAddr != psFaultDevVAddr->uiAddr)) + { + MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; + + psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; + + if (bPMFault) + { + /* PM fault and we dump PC details only */ + psFaultData->eTopLevel = MMU_LEVEL_0; + psFaultData->eType = MMU_FAULT_TYPE_PM; + psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = psPCDevPAddr->uiAddr; + } + else + { + RGXCheckFaultAddress(psDevInfo, psFaultDevVAddr, psPCDevPAddr, psFaultData); + } + + _RecordFaultInfo(psDevInfo, psInfo, + *psFaultDevVAddr, *psPCDevPAddr, psHWRInfo->ui64CRTimer, + _PageSizeHWToBytes(ui32PageSize)); + + } + + RGXDumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + _PrintFaultInfo(pfnDumpDebugPrintf, psDevInfo->psDeviceNode, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); + } + + OSLockRelease(psDevInfo->hDebugFaultInfoLock); +} + +void RGXConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, + IMG_UINT64 *pui64Seconds, + IMG_UINT64 
*pui64Nanoseconds) +{ + IMG_UINT32 ui32Remainder; + + *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); + *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); +} + +#if !defined(NO_HARDWARE) +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32RegRead; + IMG_UINT32 eError = PVRSRV_OK; + IMG_UINT32 volatile *pui32SyncFlag; + + /* Acquire the NMI operations lock */ + OSLockAcquire(psDevInfo->hNMILock); + + /* Make sure the synchronisation flag is set to 0 */ + pui32SyncFlag = &psDevInfo->psRGXFWIfSysInit->sMIPSState.ui32Sync; + *pui32SyncFlag = 0; + + /* Readback performed as a part of memory barrier */ + OSWriteMemoryBarrier(pui32SyncFlag); + RGXFwSharedMemCacheOpPtr(pui32SyncFlag, + FLUSH); + + + /* Enable NMI issuing in the MIPS wrapper */ + OSWriteHWReg64(pvRegsBaseKM, + RGX_CR_MIPS_WRAPPER_NMI_ENABLE, + RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN); + (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); + + /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */ + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)) + { + + eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; + goto fail; + } + ui32RegRead = 0; + + /* Issue NMI */ + OSWriteHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_WRAPPER_NMI_EVENT, + RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN); + (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_EVENT); + + + /* Wait for NMI Taken to be asserted */ + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0) + { + eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; + goto fail; + } + ui32RegRead = 0; + + /* Allow the firmware to proceed */ + *pui32SyncFlag = 1; + + /* Readback performed as a part of memory barrier */ + OSWriteMemoryBarrier(pui32SyncFlag); + RGXFwSharedMemCacheOpPtr(pui32SyncFlag, + FLUSH); + + + /* Wait for the FW to have finished the NMI routine */ + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + ui32RegRead = OSReadHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_EXCEPTION_STATUS); + if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) + { + eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; + goto fail; + } + ui32RegRead = 0; + + /* Copy state */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->sMIPSState, + INVALIDATE); + OSDeviceMemCopy(psMIPSState, &psDevInfo->psRGXFWIfSysInit->sMIPSState, sizeof(*psMIPSState)); + + --(psMIPSState->ui32ErrorEPC); + --(psMIPSState->ui32EPC); + + /* Disable NMI issuing in the MIPS wrapper */ + OSWriteHWReg32(pvRegsBaseKM, + RGX_CR_MIPS_WRAPPER_NMI_ENABLE, + 0); + (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); + +fail: + /* 
Release the NMI operations lock */ + OSLockRelease(psDevInfo->hNMILock); + return eError; +} + +/* Print decoded information from cause register */ +static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32Cause, + IMG_UINT32 ui32ErrorState) +{ +#define INDENT " " + const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause); + const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode); + + if (ui32ErrorState != 0 && + pszException != NULL) + { + PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException); + } + + if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING) + { + PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending"); + } + + if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV)) + { + PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector"); + } + + if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING) + { + PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending"); + } + + /* Unusable Coproc exception */ + if (ui32ExcCode == 11) + { + PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause)); + } + +#undef INDENT +} + +static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode) +{ + if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) + { + PVR_DPF((PVR_DBG_WARNING, + "Only %lu exceptions available in MIPS, %u is not a valid exception code", + (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); + return IMG_FALSE; + } + + return apsMIPSExcCodes[ui32ExcCode].bIsFatal; +} + +static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32Debug, + IMG_UINT32 ui32DEPC) +{ + const IMG_CHAR *pszDException = NULL; + IMG_UINT32 i; +#define INDENT " " + + if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM)) + { + return; + } + + PVR_DUMPDEBUG_LOG("DEBUG :"); + + pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)); + + if (pszDException != NULL) + { + PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException); + } + + for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i) + { + const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i]; + + if (ui32Debug & psDebugEntry->ui32Mask) + { + PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation); + } + } +#undef INDENT + PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC); +} + +static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, + IMG_UINT64 *pui64PA0Start, + IMG_UINT64 *pui64PA0End, + IMG_UINT64 *pui64PA1Start, + IMG_UINT64 *pui64PA1End) +{ + IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; + IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask); + + if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0) + { + /* Dummy values to fail the range checks later */ + *pui64PA0Start = -1ULL; + *pui64PA0End = -1ULL; + } + else if (bUseRemapOutput) + { + *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; + *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; + } + else + { + *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); + *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; + } + + if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0) + { + /* Dummy values to fail the range checks later */ + *pui64PA1Start = -1ULL; + *pui64PA1End = -1ULL; + } + else if (bUseRemapOutput) + { + *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; + *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; + } + else + { + *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); + *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; + } +} + +static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGX_MIPS_TLB_ENTRY *psTLB, + const RGX_MIPS_REMAP_ENTRY *psRemap) +{ + IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ; + IMG_UINT64 ui64PA0EndI, ui64PA1EndI, ui64PA0EndJ, ui64PA1EndJ; + IMG_UINT32 i, j; + +#define RANGES_OVERLAP(start0,end0,start1,end1) ((start0) < (end1) && (start1) < (end0)) + + for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++) + { + _GetMipsTLBPARanges(&psTLB[i], + psRemap ? &psRemap[i] : NULL, + psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, + &ui64PA0StartI, &ui64PA0EndI, + &ui64PA1StartI, &ui64PA1EndI); + + for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++) + { + _GetMipsTLBPARanges(&psTLB[j], + psRemap ? &psRemap[j] : NULL, + psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, + &ui64PA0StartJ, &ui64PA0EndJ, + &ui64PA1StartJ, &ui64PA1EndJ); + + if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) || + RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) || + RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) || + RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ) ) + { + PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i , j); + } + } + } +} + +static inline IMG_UINT32 _GetMIPSRemapRegionSize(IMG_UINT32 ui32RegionSizeEncoding) +{ + return 1U << ((ui32RegionSizeEncoding + 1U) << 1U); +} + +static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGX_MIPS_TLB_ENTRY *psTLBEntry, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, + const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, + IMG_UINT32 ui32Index) +{ + IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; + IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); + IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); + IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0; + IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0; + + if (bDumpRemapEntries) + { + /* RemapAddrIn is always 4k aligned and on 32 bit */ + ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12; + ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12; + + /* RemapAddrOut is always 4k aligned and on 32 or 36 bit */ + ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; + ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; + + /* If TLB and remap entries match, then merge them else, print them separately */ + if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn && + (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn) + { + ui64PA0 = ui64Remap0AddrOut; + ui64PA1 = ui64Remap1AddrOut; + bDumpRemapEntries = IMG_FALSE; + } + } + + PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, " + "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s", + ui32Index, + psTLBEntry->ui32TLBHi, + RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask), + ui64PA0, + gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)], + gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)], + gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)], + ui64PA1, + gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)], + gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)], + gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]); + + if (bDumpRemapEntries) + { + PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, + ui32Index, + ui32Remap0AddrIn, + _GetMIPSRemapRegionSize(psRemapEntry0->ui32RemapRegionSize), + ui64Remap0AddrOut); + + PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, + ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES, + ui32Remap1AddrIn, + _GetMIPSRemapRegionSize(psRemapEntry1->ui32RemapRegionSize), + ui64Remap1AddrOut); + } +} + +static RGX_MIPS_REMAP_ENTRY RGXDecodeMIPSRemap(IMG_UINT64 ui64RemapReg) +{ + RGX_MIPS_REMAP_ENTRY sRemapInfo; + + sRemapInfo.ui32RemapAddrIn = + (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK) + >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT; + + sRemapInfo.ui32RemapAddrOut = + (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK) + >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT; + + sRemapInfo.ui32RemapRegionSize = + (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK) + >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT; + + return sRemapInfo; +} + +static void RGXDumpMIPSState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + RGX_MIPS_STATE sMIPSState = {0}; + PVRSRV_ERROR eError; + + eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState); + PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----"); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("MIPS extra debug not available"); + } + else + { + DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC); + DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister); + DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister); + _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, + 
sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState); + DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister); + DDLOGVAL32("EPC", sMIPSState.ui32EPC); + DDLOGVAL32("SP", sMIPSState.ui32SP); + DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr); + _RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, + sMIPSState.ui32Debug, sMIPSState.ui32DEPC); + + { + IMG_UINT32 ui32Idx; + RGX_MIPS_REMAP_ENTRY *psMipsRemaps = NULL; + + IMG_BOOL bCheckBRN63553WA = + RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) && + (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN)); + + IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; + + if (bUseRemapRanges) + { + psMipsRemaps = OSAllocMem(sizeof(RGX_MIPS_REMAP_ENTRY) * RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES); + PVR_LOG_RETURN_VOID_IF_FALSE(psMipsRemaps != NULL, "psMipsRemaps alloc failed."); + } + + PVR_DUMPDEBUG_LOG("TLB :"); + + for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++) + { + if (bUseRemapRanges) + { + psMipsRemaps[ui32Idx] = + RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx]); + + psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] = + RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]); + } + + _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, + pvDumpDebugFile, + &sMIPSState.asTLB[ui32Idx], + (bUseRemapRanges) ? &psMipsRemaps[ui32Idx] : NULL, + (bUseRemapRanges) ? &psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, + ui32Idx); + + if (bCheckBRN63553WA) + { + const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx]; + + #define BRN63553_TLB_IS_NUL(X) (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0)) + + if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1)) + { + PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0."); + } + } + } + + /* This implicitly also checks for overlaps between memory and regbank addresses */ + _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf, + pvDumpDebugFile, + sMIPSState.asTLB, + bUseRemapRanges ? 
psMipsRemaps : NULL); + + if (bUseRemapRanges) + { + /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */ + if (sMIPSState.ui32UnmappedAddress) + { + PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X", + sMIPSState.ui32UnmappedAddress); + } + } + + if (psMipsRemaps != NULL) + { + OSFreeMem(psMipsRemaps); + } + } + + /* Check FW code corruption in case of known errors */ + if (_IsFWCodeException(RGXMIPSFW_C0_CAUSE_EXCCODE(sMIPSState.ui32CauseRegister))) + { + eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } + } + } + PVR_DUMPDEBUG_LOG("--------------------------------"); +} +#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ + +static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +/* List of extra META Slave Port debug registers */ +#define RGX_META_SP_EXTRA_DEBUG \ + X(RGX_CR_META_SP_MSLVCTRL0) \ + X(RGX_CR_META_SP_MSLVCTRL1) \ + X(RGX_CR_META_SP_MSLVDATAX) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS) \ + X(RGX_CR_META_SP_MSLVIRQENABLE) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL) + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) +/* Order in these two initialisers and the one above must match */ +#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_EQ1_AND_MRUA_ACCESSES \ + X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA) + +#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_MRUA_ACCESSES \ + X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA) +#endif + + IMG_UINT32 ui32Idx; + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32RegAddr; + + const IMG_UINT32* pui32DebugRegAddr; + const IMG_UINT32 aui32DebugRegAddr[] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + const IMG_UINT32 aui32DebugRegAddrUAHSV1[] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_EQ1_AND_MRUA_ACCESSES +#undef X + }; + + const IMG_UINT32 aui32DebugRegAddrUAHSGT1[] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_MRUA_ACCESSES +#undef X + }; +#endif + + const IMG_CHAR* apszDebugRegName[] = { +#define X(A) #A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + + PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + /* array of register offset values depends on feature. But don't augment names in apszDebugRegName */ + PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSGT1) == sizeof(aui32DebugRegAddr)); + PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSV1) == sizeof(aui32DebugRegAddr)); + pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? + ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? 
(aui32DebugRegAddrUAHSGT1) : (aui32DebugRegAddrUAHSV1)) : aui32DebugRegAddr; +#else + pui32DebugRegAddr = aui32DebugRegAddr; +#endif + + /* dump set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) + { + const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; + + ui32RegAddr = pui32DebugRegAddr[ui32Idx]; + ui32RegVal = OSReadUncheckedHWReg32(RGX_GET_RISCV_REGS_BASE(psDevInfo), ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); + } +} +#endif +#endif /* !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) */ + +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if defined(NO_HARDWARE) + PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); + PVR_DUMPDEBUG_LOG("(Not supported for NO_HARDWARE builds)"); + + return PVRSRV_OK; +#else /* !defined(NO_HARDWARE) */ +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) + IMG_UINT32 ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; +#endif + IMG_BOOL bFirmwarePerf; + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + { +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): N/A in REE "); +#else + PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x%p", + psDevInfo->pvSecureRegsBaseKM); +#endif + PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Physical): 0x%08lX", + (unsigned long)psDevInfo->sRegsPhysBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET); + } +#endif + + /* Check if firmware perf was set at Init time */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->eFirmwarePerf, + INVALIDATE); + bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); + + RGXDumpCoreRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) + if (ui32Meta) + { +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + IMG_UINT32 ui32MSlvCtrl1Reg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? + ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? 
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA : + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA) : + RGX_CR_META_SP_MSLVCTRL1; + + /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ + OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvCtrl1Reg, 0x0); +#else + /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); +#endif + + DDLOG32(META_SP_MSLVIRQSTATUS); + } +#endif /* !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) */ + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + RGXDumpMulticoreRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + } + + RGXDumpClkRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + DDLOG32(EVENT_STATUS); + DDLOG64(TIMER); + + RGXDumpMMURegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + RGXDumpDMRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + DDLOG32(MTS_INTCTX); + DDLOG32(MTS_BGCTX); + DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); + DDLOG32(MTS_SCHEDULE); + DDLOG32(MTS_GPU_INT_STATUS); + + RGXDumpSLCRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + RGXDumpMiscRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVR_DUMPDEBUG_LOG("FW Core Registers not available to REE"); +#else + if (ui32Meta) + { + IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; + IMG_UINT32 ui32RegVal; + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + IMG_UINT32 ui32MSlvIrqStatusReg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? + ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? 
+ RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA : + RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA) : + RGX_CR_META_SP_MSLVIRQSTATUS; + + PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, "META_SP_MSLVIRQSTATUS", OSReadUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvIrqStatusReg)); +#endif + + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("T0 TXENABLE", ui32RegVal); + if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) + { + bIsT0Enabled = IMG_TRUE; + } + + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("T0 TXSTATUS", ui32RegVal); + + /* check for FW fault */ + if (((ui32RegVal >> 20) & 0x3) == 0x2) + { + bIsFWFaulted = IMG_TRUE; + } + + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("T0 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 SP", ui32RegVal); + + if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) + { + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("T1 TXENABLE", ui32RegVal); + + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("T1 TXSTATUS", ui32RegVal); + + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("T1 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 SP", ui32RegVal); + } + + if (bFirmwarePerf) + { + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("META_CR_PERF_COUNT0", ui32RegVal); + + eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); + DDLOGVAL32("META_CR_PERF_COUNT1", ui32RegVal); + } + + if (bIsT0Enabled & bIsFWFaulted) + { + eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } + } + else if (bIsFWFaulted) + { + PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); + } + } 
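+	/* The TXSTATUS check above appears to treat bits [21:20] of the T0 status
+	 * word as a thread state field and reads the value 0x2 as a faulted
+	 * thread, roughly:
+	 *
+	 *   bIsFWFaulted = (((ui32RegVal >> 20) & 0x3) == 0x2);
+	 *
+	 * so RGXValidateFWImage() only runs when thread 0 is both enabled and
+	 * reporting a fault; when META is disabled the corruption check is
+	 * skipped, as the log message above notes.
+	 */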
+ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + DDLOG32(MIPS_ADDR_REMAP1_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP1_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP2_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP2_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP3_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP3_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP4_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP4_CONFIG2); + DDLOG32(MIPS_ADDR_REMAP5_CONFIG1); + DDLOG64(MIPS_ADDR_REMAP5_CONFIG2); + DDLOG64(MIPS_WRAPPER_CONFIG); + DDLOG32(MIPS_EXCEPTION_STATUS); + + RGXDumpMIPSState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + } +#endif + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + PVR_RETURN_IF_ERROR(eError); + } +#endif /* defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) */ + +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) + { + DDLOGVAL32("TFBC_VERSION", RGX_GET_FEATURE_VALUE(psDevInfo, TFBC_VERSION)); + } + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) + { + DDLOGVAL32("TFBC_COMPRESSION_CONTROL", psDevInfo->psRGXFWIfSysInit->ui32TFBCCompressionControl); + } +#endif + + return eError; + +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(SUPPORT_SECURITY_VALIDATION) +_METASPError: + PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); + _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + return eError; +#endif +#endif /* defined(NO_HARDWARE) */ +} + + +/*! +******************************************************************************* + + @Function RGXDebugRequestProcess + + @Description + + This function will print out the debug for the specified level of verbosity + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui32VerbLevel - Verbosity level + + @Return void + +******************************************************************************/ +static +void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VerbLevel) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_DEV_POWER_STATE ePowerState; +#if defined(SUPPORT_SOC_TIMER) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; +#endif + IMG_BOOL bRGXPoweredON; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_BOOL bPwrLockAlreadyHeld; + IMG_BOOL bPwrLockAcquired = IMG_TRUE; + IMG_UINT8 ui8FwOsCount; + + bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); + if (!bPwrLockAlreadyHeld) + { + /* Only acquire the power-lock if not already held by the calling context */ + eError = PVRSRVPowerTryLockWaitForTimeout(psDeviceNode); + if (eError == PVRSRV_ERROR_TIMEOUT) + { + bPwrLockAcquired = IMG_FALSE; +#if defined(DEBUG) + PVR_DPF((PVR_DBG_WARNING, "Power lock owner: PID = %u at timestamp %" IMG_UINT64_FMTSPEC " (%s:%u)", + psDeviceNode->uiPwrLockOwnerPID, psDeviceNode->sPowerLockOwner.ui64Timestamp, + psDeviceNode->sPowerLockOwner.pszFile, psDeviceNode->sPowerLockOwner.ui32LineNum)); +#endif + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock 
(%s)", + __func__, + PVRSRVGetErrorString(eError))); +#if !defined(DEBUG_OVERRIDE_PWRLOCK) + /* Bail out if another thread has the power lock and may be + * powering down the device. */ + return; +#endif + } + else if (eError != PVRSRV_OK) + { + return; + } + } + /* This should satisfy all accesses below */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, + INVALIDATE); + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error retrieving RGX power state. No debug info dumped.", + __func__)); + goto Exit; + } + + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) + { + PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + 1, RGX_NUM_DRIVERS_SUPPORTED); + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + + bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); + + PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); + PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo); + DevicememHistoryDumpRecordStats(psDevInfo->psDeviceNode, pfnDumpDebugPrintf, pvDumpDebugFile); + PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C, + PVR_ARCH_NAME); + PVR_DUMPDEBUG_LOG("RGX Device State: %s", PVRSRVGetDebugDevStateString(psDeviceNode->eDevState)); + PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); + + PVR_DUMPDEBUG_LOG("RGX Health Status: %s, Reason: %s",PVRSRVGetDebugHealthStatusString(OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)), + PVRSRVGetDebugHealthReasonString(OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))); + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + { + PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + if (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED) + { + PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + RGX_NUM_DRIVERS_SUPPORTED, ui8FwOsCount); + } + } + else + { + PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); + } + } + else + { + PVR_DUMP_FIRMWARE_INFO_HDR(psDevInfo->sFWInfoHeader); + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode) && BITMASK_HAS(psDevInfo->sFWInfoHeader.ui32Flags, OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN)) + { + ui8FwOsCount = (psDevInfo->sFWInfoHeader.ui32Flags & OPTIONS_NUM_DRIVERS_SUPPORTED_MASK) >> OPTIONS_NUM_DRIVERS_SUPPORTED_SHIFT; + ui8FwOsCount++; + if (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED) + { + PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + RGX_NUM_DRIVERS_SUPPORTED, ui8FwOsCount); + } + } + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION)) + { +#if defined(SUPPORT_TRP) + PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW enabled"); +#else + PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW disabled"); +#endif + } + else + { + PVR_DUMPDEBUG_LOG("TRP: HW support - No"); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WORKGROUP_PROTECTION)) + { +#if defined(SUPPORT_WGP) + PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW enabled"); +#else + PVR_DUMPDEBUG_LOG("WGP: HW 
support - Yes; SW disabled"); +#endif + } + else + { + PVR_DUMPDEBUG_LOG("WGP: HW support - No"); + } + +#if defined(SUPPORT_SOC_TIMER) + if (psDevConfig && psDevConfig->pfnSoCTimerRead) + { + RGX_DATA *psRGXData = (RGX_DATA*)psDevConfig->hDevData; + RGX_TIMING_INFORMATION *psRGXTimingInfo = psRGXData->psRGXTimingInfo; + IMG_UINT32 ui32Remainder; + IMG_UINT64 ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData); + IMG_UINT64 ui64CurrentSoCTimeInNS = + OSDivide64r64(ui64CurrentSoCTime * SECONDS_TO_MICROSECONDS, RGXFWIF_CONVERT_TO_KHZ(psRGXTimingInfo->ui32SOCClockSpeed), &ui32Remainder); + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + RGXConvertOSTimestampToSAndNS(ui64CurrentSoCTimeInNS, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("SoC timer counter: 0x%" IMG_UINT64_FMTSPECx, ui64CurrentSoCTime); + PVR_DUMPDEBUG_LOG("SoC timer: %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC " seconds", + ui64Seconds, ui64Nanoseconds); + } +#endif + + RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); + + /* Dump out the kernel CCB. */ + { + const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + const RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; + RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); + + if (psKCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", + psKCCBCtlLocal->ui32WriteOffset, + psKCCBCtl->ui32ReadOffset); + } + } + + /* Dump out the firmware CCB. */ + { + const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl; + const RGXFWIF_CCB_CTL *psFCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; + RGXFwSharedMemCacheOpPtr(psDevInfo->psFirmwareCCBCtl, INVALIDATE); + + if (psFCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X", + psFCCBCtl->ui32WriteOffset, + psFCCBCtlLocal->ui32ReadOffset); + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Dump out the Workload estimation CCB. 
*/ + const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; + const RGXFWIF_CCB_CTL *psWorkEstCCBCtlLocal = psDevInfo->psWorkEstFirmwareCCBCtlLocal; + + if (psWorkEstCCBCtl != NULL) + { + RGXFwSharedMemCacheOpPtr(psWorkEstCCBCtl, INVALIDATE); + PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", + psWorkEstCCBCtl->ui32WriteOffset, + psWorkEstCCBCtlLocal->ui32ReadOffset); + } + } +#endif + + RGXFwSharedMemCacheOpPtr(psFwOsData, + INVALIDATE); + + if (psFwOsData != NULL) + { + /* Dump the KCCB commands executed */ + PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d", + psFwOsData->ui32KCCBCmdsExecuted); + +#if defined(PVRSRV_STALLED_CCB_ACTION) + /* Dump the number of times we have performed a forced UFO update, + * and (if non-zero) the timestamp of the most recent occurrence. + */ + PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d", + psFwOsData->ui32ForcedUpdatesRequested); + if (psFwOsData->ui32ForcedUpdatesRequested > 0) + { + IMG_UINT8 ui8Idx; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + if (psFwOsData->ui64LastForcedUpdateTime > 0ULL) + { + RGXConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")", + ui64Seconds, ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)"); + } + /* Dump SLR log */ + if (psFwOsData->sSLRLogFirst.aszCCBName[0]) + { + RGXConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "} Fence found on context 0x%x [%u/%d] '%.*s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLogFirst.ui32FWCtxAddr, + psDeviceNode->sDevId.ui32InternalID, + psDeviceNode->sDevId.i32KernelDeviceID, + PVR_SLR_LOG_STRLEN, psFwOsData->sSLRLogFirst.aszCCBName, + psFwOsData->sSLRLogFirst.ui32NumUFOs); + } + for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES;ui8Idx++) + { + if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0]) + { + RGXConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "] Fence found on context 0x%x '%.*s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr, + PVR_SLR_LOG_STRLEN, psFwOsData->sSLRLog[ui8Idx].aszCCBName, + psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs); + } + } + } +#else + PVR_DUMPDEBUG_LOG("RGX SLR: Disabled"); +#endif + + /* Dump the error counts */ + PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d", + psDevInfo->sErrorCounts.ui32WGPErrorCount, + psDevInfo->sErrorCounts.ui32TRPErrorCount); + + /* Dump the IRQ info for threads or OS IDs */ +#if defined(RGX_FW_IRQ_OS_COUNTERS) + /* only Host has access to registers containing IRQ counters */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) +#endif + { + IMG_UINT32 ui32idx; + + for_each_irq_cnt(ui32idx) + { + IMG_UINT32 ui32IrqCnt; + + get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); + if (ui32IrqCnt) + { + PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt); +#if defined(RGX_FW_IRQ_OS_COUNTERS) + if (ui32idx == RGXFW_HOST_DRIVER_ID) +#endif + { + PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]); + } + } + } + } + } + + /* Dump the FW Sys config flags on the Host */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) +
{ + const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwSysData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); + goto Exit; + } + + RGXFwSharedMemCacheOpValue(psFwSysData->ui32ConfigFlags, + INVALIDATE); + + _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); + PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); + } + + /* Dump the FW OS config flags */ + { + IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwOsData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); + goto Exit; + } + + _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); + PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); + } + + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXDumpRGXRegisters failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + } + else + { + PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) ? "Guest Mode of operation" : "RGX power is down"); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_INT tid; + /* Dump FW trace information */ + if (psRGXFWIfTraceBufCtl != NULL) + { + RGX_FWT_LOGTYPE eFWTLogType = psDevInfo->eDebugDumpFWTLogType; + + if (eFWTLogType == RGX_FWT_LOGTYPE_NONE) + { + PVR_DUMPDEBUG_LOG("Firmware trace printing disabled."); + } + else + { + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); + + for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) + { + IMG_UINT32 *pui32TraceBuffer; + + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + pui32TraceBuffer = psDevInfo->apui32TraceBuffer[tid]; + + /* Skip if trace buffer is not allocated */ + if (pui32TraceBuffer == NULL) + { + PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); + continue; + } + + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer, INVALIDATE); + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); + PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); + PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psDevInfo->ui32TraceBufSizeInDWords); + + if (eFWTLogType == RGX_FWT_LOGTYPE_BINARY) + { + RGXDumpFirmwareTraceBinary(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); + } + else if (eFWTLogType == RGX_FWT_LOGTYPE_DECODED) + { + RGXDumpFirmwareTraceDecoded(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); + } + else if (eFWTLogType == RGX_FWT_LOGTYPE_PARTIAL) + { + RGXDumpFirmwareTracePartial(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); + } + 
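+					/* Which dump helper ran above is selected by
+					 * psDevInfo->eDebugDumpFWTLogType (binary, decoded or
+					 * partial output); RGX_FWT_LOGTYPE_NONE is handled earlier
+					 * by skipping the per-thread dump altogether. The
+					 * FWT[traceptr] and FWT[tracebufsize] values logged above
+					 * both appear to be DWord counts for the thread's trace
+					 * buffer.
+					 */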
+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); + } + } + } + + { + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); + } + else + { + PVR_DUMPDEBUG_LOG("------[ FWCtxs Next CMD ]------"); + } + + RGXDumpAllContextInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + +Exit: + if (!bPwrLockAlreadyHeld && bPwrLockAcquired) + { + PVRSRVPowerUnlock(psDeviceNode); + } +} + +/*! + ****************************************************************************** + + @Function RGXDebugRequestNotify + + @Description Dump the debug data for RGX + + ******************************************************************************/ +static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle; + + /* Only action the request if we've fully init'ed */ + if (psDevInfo->bDevInit2Done) + { + RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); + } +} + +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify, + psDevInfo->psDeviceNode, + RGXDebugRequestNotify, + DEBUG_REQUEST_RGX, + psDevInfo); +} + +PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if (psDevInfo->hDbgReqNotify) + { + return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify); + } + + /* No notifier registered */ + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxdebug_common.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.h new file mode 100644 index 000000000000..f7707b87fa9f --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxdebug_common.h @@ -0,0 +1,433 @@ +/*************************************************************************/ /*! +@File +@Title RGX debug header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXDEBUG_COMMON_H) +#define RGXDEBUG_COMMON_H + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "rgxdevice.h" +#include "rgxfwmemctx.h" + +#define DD_NORMAL_INDENT " " + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +extern const IMG_CHAR * const gapszMipsPermissionPTFlags[4]; +extern const IMG_CHAR * const gapszMipsCoherencyPTFlags[8]; +extern const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8]; +#endif + +/** + * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in + * LISR for each RGX FW thread. + * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. + */ + +#if defined(RGX_FW_IRQ_OS_COUNTERS) +#define for_each_irq_cnt(ui32idx) FOREACH_SUPPORTED_DRIVER(ui32idx) + +#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ + do { \ + extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS]; \ + ui32Dest = PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psRgxDevInfo) ? 0 : OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \ + } while (false) + +#define MSG_IRQ_CNT_TYPE "OS" + +#else + +#define for_each_irq_cnt(ui32idx) \ + for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++) + +#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ + do { \ + RGXFwSharedMemCacheOpValue(psRgxDevInfo->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx], \ + INVALIDATE); \ + ui32Dest = (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx]; \ + } while (false) +#define MSG_IRQ_CNT_TYPE "Thread" +#endif /* RGX_FW_IRQ_OS_COUNTERS */ + +static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo) +{ +#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG) + IMG_UINT32 ui32idx; + + for_each_irq_cnt(ui32idx) + { + IMG_UINT32 ui32IrqCnt; + + get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo); + + PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE + " %u FW IRQ count = %u", ui32idx, ui32IrqCnt)); + +#if defined(RGX_FW_IRQ_OS_COUNTERS) + if (ui32idx == RGXFW_HOST_DRIVER_ID) +#endif + { + PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u", + (psRgxDevInfo)->aui32SampleIRQCount[ui32idx])); + } + } +#else + PVR_UNREFERENCED_PARAMETER(psRgxDevInfo); +#endif /* PVRSRV_NEED_PVR_DPF */ +} + +/*! 
+******************************************************************************* + + @Function RGXDumpFirmwareTrace + + @Description Dumps the decoded version of the firmware trace buffer. + + Dump useful debugging info + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return void + +******************************************************************************/ +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpFirmwareTraceBinary(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID); + +void RGXDumpFirmwareTracePartial(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID); + +void RGXDumpFirmwareTraceDecoded(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, + IMG_UINT32 ui32TID); + + +/* Helper macros to emit data */ +#define REG32_FMTSPEC "%-30s: 0x%08X" +#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX +#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOGUNCHECKED64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadUncheckedHWReg64(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); + +PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpCoreRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpMulticoreRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpClkRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpMMURegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpDMRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpSLCRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +void RGXDumpMiscRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! 
+******************************************************************************* + + @Function RGXDumpRGXRegisters + + @Description + + Dumps an extensive list of RGX registers required for debugging + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +#if !defined(NO_HARDWARE) +/*! +******************************************************************************* + + @Function RGXReadMetaCoreReg + + @Description Read a META core register's value + + @Input psDevInfo RGX device info + @Input ui32RegAddr Register address to read from + @Output pui32RegVal Pointer to the resulting register value + + @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 *pui32RegVal); + +/*! +******************************************************************************* + + @Function RGXValidateFWImage + + @Description Validate the currently running firmware + against the firmware image + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) +/*! +******************************************************************************* + + @Function ValidateFWOnLoad + + @Description Compare the Firmware image as seen from the CPU point of view + against the same memory area as seen from the firmware point + of view after first power up. + + @Input psDevInfo - Device Info + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! +******************************************************************************* + + @Function RGXDumpRGXDebugSummary + + @Description + + Dump a summary in human readable form with the RGX state + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input bRGXPoweredON - IMG_TRUE if RGX device is on + + @Return void + +******************************************************************************/ +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON); + +/*! 
+*******************************************************************************
+
+ @Function RGXDebugInit
+
+ @Description
+
+ Setup debug requests, calls into PVRSRVRegisterDeviceDbgRequestNotify
+
+ @Input psDevInfo RGX device info
+ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugDeinit
+
+ @Description
+
+ Remove debug requests, calls into PVRSRVUnregisterDeviceDbgRequestNotify
+
+ @Input psDevInfo RGX device info
+ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDocumentFwMapping
+
+ @Description Dump the CPU physical address, device physical address and
+ raw page table entry associated with the given firmware
+ virtual address.
+
+ @Input psDevInfo Pointer to device info
+ @Input pfnDumpDebugPrintf The debug printf function
+ @Input pvDumpDebugFile Optional file identifier to be passed to
+ the 'printf' function if required
+ @Input ui32FwVA The Fw VA being described
+ @Input sCpuPA CPU physical address backing the Fw VA
+ @Input sDevPA Device physical address backing the Fw VA
+ @Input ui64PTE Raw page table entry value for the Fw VA
+
+ @Return void
+
+******************************************************************************/
+void RGXDocumentFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ const IMG_UINT32 ui32FwVA,
+ const IMG_CPU_PHYADDR sCpuPA,
+ const IMG_DEV_PHYADDR sDevPA,
+ const IMG_UINT64 ui64PTE);
+
+/*!
+*******************************************************************************
+
+ @Function RGXConvertOSTimestampToSAndNS
+
+ @Description Convert the OS time to seconds and nanoseconds
+
+ @Input ui64OSTimer OS time to convert
+ @Output pui64Seconds Pointer to the resulting seconds
+ @Output pui64Nanoseconds Pointer to the resulting nanoseconds
+
+ @Return void
+
+******************************************************************************/
+void RGXConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+ IMG_UINT64 *pui64Seconds,
+ IMG_UINT64 *pui64Nanoseconds);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpAllContextInfo
+
+ @Description Dump debug info of all contexts on a device
+
+ @Input psDevInfo Pointer to device info
+ @Input pfnDumpDebugPrintf The debug printf function
+ @Input pvDumpDebugFile Optional file identifier to be passed to
+ the 'printf' function if required
+ @Input ui32VerbLevel Verbosity level
+
+ @Return void
+
+******************************************************************************/
+void RGXDumpAllContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_UINT32 ui32VerbLevel);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpFaultAddressHostView
+
+ @Description
+
+ Dump FW HWR fault status in human readable form.
+
+ @Input psFaultData - MMU fault data describing the fault to dump
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input pszIndent - String prefixed to each output line
+ @Return void
+
+******************************************************************************/
+void RGXDumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ const IMG_CHAR* pszIndent);
+
+void RGXDumpFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ const RGX_HWRINFO *psHWRInfo,
+ IMG_UINT32 ui32ReadIndex,
+ IMG_DEV_VIRTADDR *psFaultDevVAddr,
+ IMG_DEV_PHYADDR *psPCDevPAddr,
+ bool bPMFault,
+ IMG_UINT32 ui32PageSize);
+
+#endif /* RGXDEBUG_COMMON_H */
diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.c
new file mode 100644
index 000000000000..62a33fb9995d
--- /dev/null
+++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.c
@@ -0,0 +1,810 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware common context utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware common context utility routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/ + +#include "rgxfwcmnctx.h" +#include "rgxfwutils.h" +#include "devicemem_pdump.h" +#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) +#include "rogue_trace_events.h" +#endif + +#if defined(__linux__) && defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) +#include "linux/cred.h" +#include "linux/pid.h" +#endif +/* + * Maximum length of time a DM can run for before the DM will be marked + * as out-of-time. CDM has an increased value due to longer running kernels. + * + * These deadlines are increased on FPGA, EMU and VP due to the slower + * execution time of these platforms. PDUMPS are also included since they + * are often run on EMU, FPGA or in CSim. + */ +#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP) +#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000) +#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (10800000) +#else +#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (40000) +#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (600000) +#endif +#define RGXFWIF_MAX_RDM_WORKLOAD_DEADLINE_MS (36000000) + +struct _RGX_SERVER_COMMON_CONTEXT_ { + PVRSRV_RGXDEV_INFO *psDevInfo; + DEVMEM_MEMDESC *psFWCommonContextMemDesc; + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; + SERVER_MMU_CONTEXT *psServerMMUContext; + DEVMEM_MEMDESC *psFWMemContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_CLIENT_CCB *psClientCCB; + DEVMEM_MEMDESC *psClientCCBMemDesc; + DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; + IMG_BOOL bCommonContextMemProvided; + IMG_UINT32 ui32ContextID; + DLLIST_NODE sListNode; + RGX_CONTEXT_RESET_REASON eLastResetReason; + IMG_UINT32 ui32LastResetJobRef; + IMG_INT32 i32Priority; + RGX_CCB_REQUESTOR_TYPE eRequestor; +}; + +#if defined(__linux__) && defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) +static IMG_UINT32 _GetUID(IMG_PID pid) +{ + struct task_struct *psTask; + struct pid *psPid; + + psPid = find_get_pid((pid_t)pid); + if (!psPid) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup PID %u.", + __func__, pid)); + return 0; + } + + psTask = get_pid_task(psPid, PIDTYPE_PID); + if (!psTask) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pid task for PID %u.", + __func__, pid)); + } + put_pid(psPid); + + return psTask ? from_kuid(&init_user_ns, psTask->cred->uid) : 0; +} +#endif + +/*************************************************************************/ /*! 
+@Function _CheckPriority +@Description Check if priority is allowed for requestor type +@Input psDevInfo pointer to DevInfo struct +@Input i32Priority Requested priority +@Input eRequestor Requestor type specifying data master +@Return PVRSRV_ERROR PVRSRV_OK on success +*/ /**************************************************************************/ +static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_INT32 i32Priority, + RGX_CCB_REQUESTOR_TYPE eRequestor) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Only contexts from a single PID allowed with real time priority (highest priority) */ + if (i32Priority == RGX_CTX_PRIORITY_REALTIME) + { + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); + + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME && + psThisContext->eRequestor == eRequestor && + RGXGetPIDFromServerMMUContext(psThisContext->psServerMMUContext) != OSGetCurrentClientProcessIDKM()) + { + PVR_LOG(("Only one process can have contexts with real time priority")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + } + + OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); + } + + return eError; +} + +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags, + IMG_INT32 i32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + RGXFWIF_FWCOMMONCONTEXT sFWCommonContext = {{0}}; + IMG_UINT32 ui32FWCommonContextOffset; + IMG_UINT8 *pui8Ptr; + PVRSRV_ERROR eError; + + /* + * Allocate all the resources that are required + */ + psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); + if (psServerCommonContext == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + + psServerCommonContext->psDevInfo = psDevInfo; + psServerCommonContext->psServerMMUContext = psServerMMUContext; + + if (psAllocatedMemDesc) + { + PDUMPCOMMENT(psDeviceNode, + "Using existing MemDesc for Rogue firmware %s context (offset = %d)", + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32AllocatedOffset); + ui32FWCommonContextOffset = ui32AllocatedOffset; + psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; + psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; + } + else + { + /* Allocate device memory for the firmware context */ + PDUMPCOMMENT(psDeviceNode, + "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); + eError = DevmemFwAllocate(psDevInfo, + sizeof(sFWCommonContext), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwContext", + &psServerCommonContext->psFWCommonContextMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware %s context (%s)", + __func__, + 
aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_contextalloc; + } + ui32FWCommonContextOffset = 0; + psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; + } + + /* Record this context so we can refer to it if the FW needs to tell us it was reset. */ + psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; + psServerCommonContext->ui32LastResetJobRef = 0; + psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; + + /* + * Temporarily map the firmware context to the kernel and initialise it + */ + eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, + (void **)&pui8Ptr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware %s context to CPU (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_cpuvirtacquire; + } + + /* Allocate the client CCB */ + eError = RGXCreateCCB(psDevInfo, + ui32CCBAllocSizeLog2, + ui32CCBMaxAllocSizeLog2, + ui32ContextFlags, + psConnection, + eRGXCCBRequestor, + psServerCommonContext, + &psServerCommonContext->psClientCCB, + &psServerCommonContext->psClientCCBMemDesc, + &psServerCommonContext->psClientCCBCtrlMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create CCB for %s context (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_allocateccb; + } + + sFWCommonContext.eDM = eDM; + BITMASK_SET(sFWCommonContext.ui32CompatFlags, RGXFWIF_CONTEXT_COMPAT_FLAGS_HAS_DEFER_COUNT); + + /* Set the firmware CCB device addresses in the firmware common context */ + eError = RGXSetFirmwareAddress(&sFWCommonContext.psCCB, + psServerCommonContext->psClientCCBMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); + + eError = RGXSetFirmwareAddress(&sFWCommonContext.psCCBCtl, + psServerCommonContext->psClientCCBCtrlMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); + +#if defined(RGX_FEATURE_META_DMA_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + RGXSetMetaDMAAddress(&sFWCommonContext.sCCBMetaDMAAddr, + psServerCommonContext->psClientCCBMemDesc, + &sFWCommonContext.psCCB, + 0); + } +#endif + + /* Set the memory context device address */ + psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; + eError = RGXSetFirmwareAddress(&sFWCommonContext.psFWMemContext, + psFWMemContextMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); + + /* Set the framework register updates address */ + psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc; + if (psInfo->psFWFrameworkMemDesc != NULL) + { + eError = RGXSetFirmwareAddress(&sFWCommonContext.psRFCmd, + psInfo->psFWFrameworkMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr); + } + else + { + /* This should never be touched in this contexts without a framework + * memdesc, but ensure it is zero so we see crashes if it is. 
+ */ + sFWCommonContext.psRFCmd.ui32Addr = 0; + } + + eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor); + PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); + + psServerCommonContext->i32Priority = i32Priority; + psServerCommonContext->eRequestor = eRGXCCBRequestor; + + sFWCommonContext.i32Priority = i32Priority; + sFWCommonContext.ui32PrioritySeqNum = 0; + + if (eDM == RGXFWIF_DM_CDM) + { + sFWCommonContext.ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS); + } + else if (eDM == RGXFWIF_DM_RAY) + { + sFWCommonContext.ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, RGXFWIF_MAX_RDM_WORKLOAD_DEADLINE_MS); + } + else + { + sFWCommonContext.ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, RGXFWIF_MAX_WORKLOAD_DEADLINE_MS); + } + + sFWCommonContext.ui64RobustnessAddress = ui64RobustnessAddress; + + /* Store a references to Server Common Context and PID for notifications back from the FW. */ + sFWCommonContext.ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; + sFWCommonContext.ui32PID = OSGetCurrentClientProcessIDKM(); +#if defined(__linux__) && defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + sFWCommonContext.ui32UID = _GetUID(sFWCommonContext.ui32PID); +#endif + OSStringSafeCopy(sFWCommonContext.szProcName, psConnection->pszProcName, RGXFW_PROCESS_NAME_LEN); + + /* Set the firmware GPU context state buffer */ + psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; + if (psContextStateMemDesc) + { + eError = RGXSetFirmwareAddress(&sFWCommonContext.psContextState, + psContextStateMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr); + } + + OSCachedMemCopy(IMG_OFFSET_ADDR(pui8Ptr, ui32FWCommonContextOffset), &sFWCommonContext, sizeof(sFWCommonContext)); + RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pui8Ptr, ui32FWCommonContextOffset), + sizeof(sFWCommonContext), + PVRSRV_CACHE_OP_FLUSH); + + /* + * Dump the created context + */ + PDUMPCOMMENT(psDeviceNode, + "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); + DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, + ui32FWCommonContextOffset, + sizeof(sFWCommonContext), + PDUMP_FLAGS_CONTINUOUS); + + /* We've finished the setup so release the CPU mapping */ + DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); + + /* Map this allocation into the FW */ + eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, + psServerCommonContext->psFWCommonContextMemDesc, + ui32FWCommonContextOffset, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr); + +#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) + { + IMG_UINT32 ui32FWAddr; + switch (eDM) { + case RGXFWIF_DM_GEOM: + ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) + psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); + break; + case RGXFWIF_DM_3D: + ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) + psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); + break; + default: + ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; + break; + } + + trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + psDeviceNode->sDevId.ui32InternalID, + ui32FWAddr); + } +#endif + + /*Add the node to 
the list when finalised */
+ OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
+ dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);
+
+ *ppsServerCommonContext = psServerCommonContext;
+ return PVRSRV_OK;
+
+fail_fwcommonctxfwaddr:
+ if (psContextStateMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psContextStateMemDesc);
+ }
+fail_ctxstatefwaddr:
+fail_checkpriority:
+ if (psInfo->psFWFrameworkMemDesc != NULL)
+ {
+ RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc);
+ }
+fail_fwframeworkfwaddr:
+ RGXUnsetFirmwareAddress(psFWMemContextMemDesc);
+fail_fwmemctxfwaddr:
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+fail_cccbctrlfwaddr:
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+fail_cccbfwaddr:
+ RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB);
+fail_allocateccb:
+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+fail_cpuvirtacquire:
+ if (!psServerCommonContext->bCommonContextMemProvided)
+ {
+ DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+ psServerCommonContext->psFWCommonContextMemDesc = NULL;
+ }
+fail_contextalloc:
+ OSFreeMem(psServerCommonContext);
+fail_alloc:
+ return eError;
+}
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+
+ OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+ /* Remove the context from the list of all contexts. */
+ dllist_remove_node(&psServerCommonContext->sListNode);
+ OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+
+ /*
+ Unmap the context itself and then all its resources
+ */
+
+ /* Unmap the FW common context */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+ /* Unmap context state buffer (if there was one) */
+ if (psServerCommonContext->psContextStateMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+ }
+ /* Unmap the framework buffer */
+ if (psServerCommonContext->psFWFrameworkMemDesc != NULL)
+ {
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+ }
+ /* Unmap client CCB and CCB control */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+ /* Unmap the memory context */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+ /* Destroy the client CCB */
+ RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+
+ /* Free the FW common context (if there was one) */
+ if (!psServerCommonContext->bCommonContextMemProvided)
+ {
+ DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo,
+ psServerCommonContext->psFWCommonContextMemDesc);
+ psServerCommonContext->psFWCommonContextMemDesc = NULL;
+ }
+ /* Free the host's representation of the common context */
+ OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->psClientCCB;
+}
+
+SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->psServerMMUContext;
+}
+
+RGX_CONTEXT_RESET_REASON
FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef) +{ + RGX_CONTEXT_RESET_REASON eLastResetReason; + + PVR_ASSERT(psServerCommonContext != NULL); + PVR_ASSERT(pui32LastResetJobRef != NULL); + + /* Take the most recent reason & job ref and reset for next time... */ + eLastResetReason = psServerCommonContext->eLastResetReason; + *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; + psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; + psServerCommonContext->ui32LastResetJobRef = 0; + + if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) + { + PVR_DPF((PVR_DBG_WARNING, + "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); + } + + return eLastResetReason; +} + +PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psDevInfo; +} + +PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) +{ + DLLIST_NODE *psNode, *psNext; + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->psServerMMUContext == psServerMMUContext) + { + psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; + return PVRSRV_OK; + } + } + return PVRSRV_ERROR_INVALID_PARAMS; +} + +PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + DLLIST_NODE *psNode) +{ + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + return FWCommonContextGetFWAddress(psThisContext); +} + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)", + __func__, ui32ContextFlags)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + else + { + RGXSetCCBFlags(psServerCommonContext->psClientCCB, + ui32ContextFlags); + } + + return eError; +} + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_INT32 i32Priority, + RGXFWIF_DM eDM) +{ + IMG_UINT32 ui32CmdSize; + IMG_UINT8 *pui8CmdPtr; + RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; + RGXFWIF_CCB_CMD_HEADER *psCmdHeader; + RGXFWIF_CMD_PRIORITY *psCmd; + PVRSRV_ERROR eError; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor); + PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); + + /* + Get space for command + */ + ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); + + eError = RGXAcquireCCB(psClientCCB, + ui32CmdSize, + (void **) &pui8CmdPtr, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); + } + goto fail_ccbacquire; + } + + /* + Write the command header and command + */ + psCmdHeader = 
IMG_OFFSET_ADDR(pui8CmdPtr, 0); + psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; + psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); + pui8CmdPtr += sizeof(*psCmdHeader); + + psCmd = IMG_OFFSET_ADDR(pui8CmdPtr, 0); + psCmd->i32Priority = i32Priority; + pui8CmdPtr += sizeof(*psCmd); + + /* + We should reserve space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + Submit the command + */ + RGXReleaseCCB(psClientCCB, + ui32CmdSize, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); + return eError; + } + + /* Construct the priority command. */ + sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + eDM, + &sPriorityCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to submit set priority command with error (%u)", + __func__, + eError)); + goto fail_cmdacquire; + } + + psContext->i32Priority = i32Priority; + + return PVRSRV_OK; + +fail_ccbacquire: +fail_checkpriority: +fail_cmdacquire: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) +{ + if (psCurrentServerCommonContext == NULL) + { + /* the context has already been freed so there is nothing to do here */ + return PVRSRV_OK; + } + + return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, + psCurrentServerCommonContext->psClientCCB, + eKickTypeDM); +} + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + if (psCurrentServerCommonContext == NULL) + { + /* the context has already been freed so there is nothing to do here */ + return; + } + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + /* If high verbosity requested, dump whole CCB */ + DumpCCB(psCurrentServerCommonContext->psDevInfo, + psCurrentServerCommonContext->sFWCommonContextFWAddr, + psCurrentServerCommonContext->psClientCCB, + pfnDumpDebugPrintf, + pvDumpDebugFile); + } + else + { + /* Otherwise, only dump first command in the CCB */ + DumpFirstCCBCmd(psCurrentServerCommonContext->sFWCommonContextFWAddr, + psCurrentServerCommonContext->psClientCCB, + pfnDumpDebugPrintf, + pvDumpDebugFile); + } +} + +void FWCommonContextListSetLastResetReason(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32ErrorPid, + const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification) +{ + DLLIST_NODE *psNode, *psNext; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; + 
IMG_UINT32 ui32ErrorPid = 0; + + OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); + + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + /* If the notification applies to all contexts update reset info + * for all contexts, otherwise only do so for the appropriate ID. + */ + if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) + { + /* Notification applies to all contexts */ + psThisContext->eLastResetReason = psCmdContextResetNotification->eResetReason; + psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; + } + else + { + /* Notification applies to one context only */ + if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID) + { + psServerCommonContext = psThisContext; + psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; + psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; + ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext); + break; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); + + if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)", + __func__, + (IMG_UINT32)(psCmdContextResetNotification->eResetReason), + psCmdContextResetNotification->ui32ResetJobRef)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", + __func__, + psServerCommonContext, + psCmdContextResetNotification->ui32ServerCommonContextID, + (IMG_UINT32)(psCmdContextResetNotification->eResetReason), + psCmdContextResetNotification->ui32ResetJobRef)); + } + + if (pui32ErrorPid) + { + *pui32ErrorPid = ui32ErrorPid; + } +} + +/****************************************************************************** + End of file (rgxfwcmnctx.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.h new file mode 100644 index 000000000000..eba2fac705da --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwcmnctx.h @@ -0,0 +1,150 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware common context utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware common context utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXFWCMNCTX_H +#define RGXFWCMNCTX_H + +#include "connection_server.h" +#include "device.h" +#include "rgxccb.h" +#include "rgx_common.h" +#include "devicemem_typedefs.h" +#include "rgxdevice.h" +#include "rgxmem.h" + +/*************************************************************************/ /*! +@Function FWCommonContextAllocate + +@Description Allocate a FW common context. This allocates the HW memory + for the context, the CCB and wires it all together. + +@Input psConnection Connection this context is being created on +@Input psDeviceNode Device node to create the FW context on + (must be RGX device node) +@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant which + represents the requestor of this FWCC +@Input eDM Data Master type +@Input psServerMMUContext Server MMU memory context. +@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use + as the FW context or NULL if this function + should allocate it +@Input ui32AllocatedOffset Offset into pre-allocate MemDesc to use + as the FW context. 
If psAllocatedMemDesc + is NULL then this parameter is ignored +@Input psFWMemContextMemDesc MemDesc of the FW memory context this + common context resides on +@Input psContextStateMemDesc FW context state (context switch) MemDesc +@Input ui32CCBAllocSizeLog2 Size of the CCB for this context +@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context +@Input ui32ContextFlags Flags which specify properties of the context +@Input i32Priority Priority of the context +@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run +@Input ui64RobustnessAddress Address for FW to signal a context reset +@Input psInfo Structure that contains extra info + required for the creation of the context + (elements might change from core to core) +@Return PVRSRV_OK if the context was successfully created +*/ /**************************************************************************/ +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags, + IMG_INT32 i32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); + + +void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef); + +PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); + +PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + DLLIST_NODE *psNode); + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags); + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_INT32 i32Priority, + RGXFWIF_DM eDM); + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +void FWCommonContextListSetLastResetReason(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32ErrorPid, + const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification); + +#endif /* RGXFWCMNCTX_H */ +/****************************************************************************** + End of file (rgxfwcmnctx.h) 
+******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.c index ff16ce478362..2271adf3ea9c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.c @@ -53,6 +53,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "osfunc.h" #include "vmm_pvz_server.h" #include "vz_vm.h" +#if defined(PDUMP) +#include "devicemem_pdump.h" +#endif PVRSRV_ERROR PVRSRVRGXFWDebugQueryFWLogKM( @@ -63,13 +66,13 @@ PVRSRVRGXFWDebugQueryFWLogKM( PVRSRV_RGXDEV_INFO *psDevInfo; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); if (!psDeviceNode || !pui32RGXFWLogType) { return PVRSRV_ERROR_INVALID_PARAMS; } + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_IMPLEMENTED); psDevInfo = psDeviceNode->pvDevice; if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBufCtl) @@ -77,6 +80,7 @@ PVRSRVRGXFWDebugQueryFWLogKM( return PVRSRV_ERROR_INVALID_PARAMS; } + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; return PVRSRV_OK; } @@ -91,14 +95,22 @@ PVRSRVRGXFWDebugSetFWLogKM( RGXFWIF_KCCB_CMD sLogTypeUpdateCmd; PVRSRV_DEV_POWER_STATE ePowerState; PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVRSRV_RGXDEV_INFO* psDevInfo; IMG_UINT32 ui32OldRGXFWLogTpe; IMG_UINT32 ui32kCCBCommandSlot; IMG_BOOL bWaitForFwUpdate = IMG_FALSE; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + if (!psDeviceNode) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + psDevInfo = psDeviceNode->pvDevice; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); ui32OldRGXFWLogTpe = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; /* check log type is valid */ @@ -114,6 +126,7 @@ PVRSRVRGXFWDebugSetFWLogKM( */ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32RGXFWLogType; OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); /* Allocate firmware trace buffer resource(s) if not already done */ if (RGXTraceBufferIsInitRequired(psDevInfo)) @@ -142,6 +155,7 @@ PVRSRVRGXFWDebugSetFWLogKM( __func__)); psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32OldRGXFWLogTpe; OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); @@ -202,31 +216,34 @@ PVRSRV_ERROR PVRSRVRGXFWDebugMapGuestHeapKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSid, + IMG_UINT32 ui32DriverID, IMG_UINT64 ui64GuestHeapBase) { +#if defined(ENABLE_PVRDEBUG_PRIVILEGED_CMDS) PVRSRV_ERROR eError; - IMG_UINT32 ui32DeviceID = psDeviceNode->sDevId.i32OsDeviceID; + IMG_UINT32 ui32DeviceID = psDeviceNode->sDevId.ui32InternalID; PVR_UNREFERENCED_PARAMETER(psConnection); - if (PVRSRV_VZ_MODE_IS(HOST)) + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode)) { if (ui64GuestHeapBase == IMG_UINT64_MAX) { - /* unmap heap and set OSID to offline */ - eError = 
PvzServerUnmapDevPhysHeap(ui32OSid, ui32DeviceID); - eError = PvzServerOnVmOffline(ui32OSid, ui32DeviceID); + /* unmap heap and set DriverID to offline */ + eError = PvzServerUnmapDevPhysHeap(ui32DriverID, ui32DeviceID); + PVR_LOG_RETURN_IF_ERROR(eError, "PvzServerUnmapDevPhysHeap()"); + eError = PvzServerOnVmOffline(ui32DriverID, ui32DeviceID); } else { - /* set OSID online if necessary and map firmware heap */ - if (!IsVmOnline(ui32OSid, ui32DeviceID)) + /* set DriverID online if necessary and map firmware heap */ + if (!IsVmOnline(ui32DriverID, ui32DeviceID)) { - eError = PvzServerOnVmOnline(ui32OSid, ui32DeviceID); + eError = PvzServerOnVmOnline(ui32DriverID, ui32DeviceID); + PVR_LOG_RETURN_IF_ERROR(eError, "PvzServerOnVmOnline()"); } - eError = PvzServerMapDevPhysHeap(ui32OSid, ui32DeviceID, RGX_FIRMWARE_RAW_HEAP_SIZE, ui64GuestHeapBase); + eError = PvzServerMapDevPhysHeap(ui32DriverID, ui32DeviceID, RGX_FIRMWARE_RAW_HEAP_SIZE, ui64GuestHeapBase); } } else @@ -235,35 +252,290 @@ PVRSRVRGXFWDebugMapGuestHeapKM( PVR_DPF((PVR_DBG_ERROR, " %s: Driver must run in Host mode to support Guest Mapping operations\n", __func__)); } + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32DriverID); + PVR_UNREFERENCED_PARAMETER(ui64GuestHeapBase); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32TSIntervalMs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sVzTSIntervalCmd = { 0 }; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + if (psDevInfo->psRGXFWIfRuntimeCfg == NULL) + { + return PVRSRV_ERROR_NOT_INITIALISED; + } + + sVzTSIntervalCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL; + psDevInfo->psRGXFWIfRuntimeCfg->ui32TSIntervalMs = ui32TSIntervalMs; + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32TSIntervalMs); + +#if defined(PDUMP) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "Updating the timeslice interval inside RGXFWIfRuntimeCfg"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + offsetof(RGXFWIF_RUNTIME_CFG, ui32TSIntervalMs), + ui32TSIntervalMs, + PDUMP_FLAGS_CONTINUOUS); +#endif + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sVzTSIntervalCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + return eError; +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetDriverTimeSliceKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DriverID, + IMG_UINT32 ui32TSPercentage) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sVzTimeSliceCmd = { 0 }; + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg; + IMG_INT32 ui32TSPercentageMax = 0; + IMG_UINT32 ui32DriverIDLoop; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + PVR_RETURN_IF_FALSE(psDevInfo != NULL, PVRSRV_ERROR_NOT_INITIALISED); + + psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + 
PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, PVRSRV_ERROR_NOT_INITIALISED); + + /* + * Each time slice is a number between 0 -> 100. + * Use '0' to disable time slice based CSW for the driver. + */ + /* Check if the sum exceeds PVRSRV_VZ_TIME_SLICE_MAX */ + if (ui32TSPercentage) + { + PVR_RETURN_IF_FALSE(ui32TSPercentage <= PVRSRV_VZ_TIME_SLICE_MAX, PVRSRV_ERROR_INVALID_PARAMS); + + FOREACH_SUPPORTED_DRIVER(ui32DriverIDLoop) + { + if (ui32DriverID != ui32DriverIDLoop) + { + ui32TSPercentageMax += psRuntimeCfg->aui32TSPercentage[ui32DriverIDLoop]; + } + else + { + ui32TSPercentageMax += ui32TSPercentage; + } + + PVR_RETURN_IF_FALSE(ui32TSPercentageMax <= PVRSRV_VZ_TIME_SLICE_MAX, PVRSRV_ERROR_INVALID_PARAMS); + } + } + + sVzTimeSliceCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE; + psDevInfo->psRGXFWIfRuntimeCfg->aui32TSPercentage[ui32DriverID] = ui32TSPercentage; + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32TSPercentage[ui32DriverID]); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->aui32TSPercentage[ui32DriverID], FLUSH); + +#if defined(PDUMP) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "Updating the timeslice of DriverID %u inside RGXFWIfRuntimeCfg", ui32DriverID); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + offsetof(RGXFWIF_RUNTIME_CFG, aui32TSPercentage) + (ui32DriverID * sizeof(ui32TSPercentage)), + ui32TSPercentage, + PDUMP_FLAGS_CONTINUOUS); +#endif + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sVzTimeSliceCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + return eError; +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetDriverPriorityKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DriverID, + IMG_INT32 i32DriverPriority) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sVzPriorityCmd = { 0 }; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + if (psDevInfo->psRGXFWIfRuntimeCfg == NULL) + { + return PVRSRV_ERROR_NOT_INITIALISED; + } + + if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if ((i32DriverPriority & ~RGXFW_VZ_PRIORITY_MASK) != 0) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + sVzPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE; + psDevInfo->psRGXFWIfRuntimeCfg->ai32DriverPriority[ui32DriverID] = i32DriverPriority; + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ai32DriverPriority[ui32DriverID]); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ai32DriverPriority[ui32DriverID], FLUSH); + +#if defined(PDUMP) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "Updating the priority of DriverID %u inside RGXFWIfRuntimeCfg", ui32DriverID); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + offsetof(RGXFWIF_RUNTIME_CFG, ai32DriverPriority) + (ui32DriverID * sizeof(i32DriverPriority)), + i32DriverPriority, + PDUMP_FLAGS_CONTINUOUS); +#endif + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sVzPriorityCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + return eError; } PVRSRV_ERROR -PVRSRVRGXFWDebugSetOSidPriorityKM( 
+PVRSRVRGXFWDebugSetDriverIsolationGroupKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSid, - IMG_UINT32 ui32OSidPriority) + IMG_UINT32 ui32DriverID, + IMG_UINT32 ui32DriverIsolationGroup) { + PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sVzIsolationGroupCmd = { 0 }; + PVR_UNREFERENCED_PARAMETER(psConnection); - return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + if (psDevInfo->psRGXFWIfRuntimeCfg == NULL) + { + return PVRSRV_ERROR_NOT_INITIALISED; + } + + if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + sVzIsolationGroupCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE; + psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = ui32DriverIsolationGroup; + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID], FLUSH); + +#if defined(PDUMP) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "Updating the isolation group of DriverID%u inside RGXFWIfRuntimeCfg", ui32DriverID); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + offsetof(RGXFWIF_RUNTIME_CFG, aui32DriverIsolationGroup) + (ui32DriverID * sizeof(ui32DriverIsolationGroup)), + ui32DriverIsolationGroup, + PDUMP_FLAGS_CONTINUOUS); +#endif + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sVzIsolationGroupCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + return eError; } PVRSRV_ERROR PVRSRVRGXFWDebugSetOSNewOnlineStateKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSid, + IMG_UINT32 ui32DriverID, IMG_UINT32 ui32OSNewState) { +#if defined(ENABLE_PVRDEBUG_PRIVILEGED_CMDS) PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; RGXFWIF_OS_STATE_CHANGE eState; PVR_UNREFERENCED_PARAMETER(psConnection); + if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + eState = (ui32OSNewState) ? 
(RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE); - return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState); + return RGXFWSetFwOsState(psDevInfo, ui32DriverID, eState); +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32DriverID); + PVR_UNREFERENCED_PARAMETER(ui32OSNewState); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif } PVRSRV_ERROR @@ -328,8 +600,67 @@ PVRSRVRGXFWDebugInjectFaultKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode) { +#if defined(ENABLE_PVRDEBUG_PRIVILEGED_CMDS) PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; PVR_UNREFERENCED_PARAMETER(psConnection); return RGXFWInjectFault(psDevInfo); +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugPowerOffKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + +#if defined(ENABLE_PVRDEBUG_PRIVILEGED_CMDS) +#if defined(SUPPORT_AUTOVZ) + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; +#endif + + return PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF, + PVRSRV_POWER_FLAGS_NONE); +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugPowerOnKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(ENABLE_PVRDEBUG_PRIVILEGED_CMDS) + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWSetVzConnectionCooldownPeriod(psDevInfo, ui32VzConnectionCooldownPeriodInSec); } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.h index 0ff130365deb..59c48ed5f4f8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwdbg.h @@ -80,24 +80,44 @@ PVRSRVRGXFWDebugSetHCSDeadlineKM( IMG_UINT32 ui32HCSDeadlineMS); PVRSRV_ERROR -PVRSRVRGXFWDebugSetOSidPriorityKM( +PVRSRVRGXFWDebugSetDriverTimeSliceKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSid, - IMG_UINT32 ui32OSidPriority); + IMG_UINT32 ui32DriverID, + IMG_UINT32 ui32TSPercentage); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32TSIntervalMs); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetDriverPriorityKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DriverID, + IMG_INT32 i32DriverPriority); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetDriverIsolationGroupKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DriverID, + IMG_UINT32 ui32DriverIsolationGroup); PVRSRV_ERROR PVRSRVRGXFWDebugSetOSNewOnlineStateKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSid, + 
IMG_UINT32 ui32DriverID, IMG_UINT32 ui32OSNewState); PVRSRV_ERROR PVRSRVRGXFWDebugMapGuestHeapKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSid, + IMG_UINT32 ui32DriverID, IMG_UINT64 ui64GuestHeapBase); PVRSRV_ERROR @@ -121,4 +141,20 @@ PVRSRV_ERROR PVRSRVRGXFWDebugInjectFaultKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR +PVRSRVRGXFWDebugPowerOffKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR +PVRSRVRGXFWDebugPowerOnKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec); #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.c index 112014972f0c..afc69f7228f8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.c @@ -48,8 +48,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * Any new code should be built on top of the existing abstraction layer, * which should be extended when necessary. */ #include "rgxfwimageutils.h" -#include "pvrsrv.h" +#include "pvrversion.h" +#if defined(CONFIG_ARM64) && defined(__linux__) && defined(SUPPORT_CPUCACHED_FWMEMCTX) +#include "rgxfwutils.h" +#endif /************************************************************************ * FW layout information @@ -85,6 +88,7 @@ static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_I @Description Given a 32 bit FW address attempt to find the corresponding pointer to FW allocation + @Input hPrivate : Implementation specific data @Input ui32OffsetIn : 32 bit FW address @Input pvHostFWCodeAddr : Pointer to FW code @Input pvHostFWDataAddr : Pointer to FW data @@ -95,7 +99,8 @@ static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_I @Return PVRSRV_ERROR ******************************************************************************/ -static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn, +static PVRSRV_ERROR FindMMUSegment(const void *hPrivate, + IMG_UINT32 ui32OffsetIn, void *pvHostFWCodeAddr, void *pvHostFWDataAddr, void *pvHostFWCorememCodeAddr, @@ -116,7 +121,14 @@ static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn, break; case FW_DATA: - *uiHostAddrOut = pvHostFWDataAddr; + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + *uiHostAddrOut = RGXCalculateHostFWDataAddress(hPrivate, pvHostFWDataAddr); + } + else + { + *uiHostAddrOut = pvHostFWDataAddr; + } break; case FW_COREMEM_CODE: @@ -173,7 +185,6 @@ static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn, @Return void ******************************************************************************/ -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) static void RGXFWConfigureSegID(const void *hPrivate, IMG_UINT64 ui64SegOutAddr, IMG_UINT32 ui32SegBase, @@ -214,7 +225,6 @@ static void RGXFWConfigureSegID(const void *hPrivate, *ppui32BootConf = pui32BootConf; } -#endif /*! 
******************************************************************************* @@ -231,7 +241,6 @@ static void RGXFWConfigureSegID(const void *hPrivate, @Return void ******************************************************************************/ -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) static void RGXFWConfigureSegMMU(const void *hPrivate, IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase, IMG_DEV_VIRTADDR *psFWDataDevVAddrBase, @@ -245,11 +254,13 @@ static void RGXFWConfigureSegMMU(const void *hPrivate, /* Configure Segment MMU */ RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********"); +#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) { ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV); } else +#endif { ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, RGXFW_SEGMMU_META_BIFDM_ID); } @@ -281,7 +292,6 @@ static void RGXFWConfigureSegMMU(const void *hPrivate, } } } -#endif /*! ******************************************************************************* @@ -297,7 +307,6 @@ static void RGXFWConfigureSegMMU(const void *hPrivate, @Return void ******************************************************************************/ -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) static void RGXFWConfigureMetaCaches(const void *hPrivate, IMG_UINT32 ui32NumThreads, IMG_UINT32 **ppui32BootConf) @@ -414,7 +423,6 @@ static void RGXFWConfigureMetaCaches(const void *hPrivate, *ppui32BootConf = pui32BootConf; } -#endif /*! ******************************************************************************* @@ -481,7 +489,8 @@ PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT; } - eError = FindMMUSegment(ui32Offset, + eError = FindMMUSegment(hPrivate, + ui32Offset, pvHostFWCodeAddr, pvHostFWDataAddr, pvHostFWCorememCodeAddr, @@ -503,6 +512,9 @@ PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, pvWriteAddr, psL2Block->aui32BlockData, ui32DataSize); +#if defined(CONFIG_ARM64) && defined(__linux__) && defined(SUPPORT_CPUCACHED_FWMEMCTX) + RGXFwSharedMemCacheOpExec(pvWriteAddr, ui32DataSize, PVRSRV_CACHE_OP_FLUSH); +#endif } break; @@ -533,7 +545,8 @@ PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, /* Global range is aliased to local range */ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT; - eError = FindMMUSegment(ui32Offset, + eError = FindMMUSegment(hPrivate, + ui32Offset, pvHostFWCodeAddr, pvHostFWDataAddr, pvHostFWCorememCodeAddr, @@ -552,6 +565,9 @@ PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, if (pvWriteAddr) { RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount); +#if defined(CONFIG_ARM64) && defined(__linux__) && defined(SUPPORT_CPUCACHED_FWMEMCTX) + RGXFwSharedMemCacheOpExec(pvWriteAddr, ui32ByteCount, PVRSRV_CACHE_OP_FLUSH); +#endif } break; @@ -680,7 +696,8 @@ PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, /* Only consider loadable entries in the ELF segment table */ if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue; - eError = FindMMUSegment(psProgramHeader->ui32Pvaddr, + eError = FindMMUSegment(hPrivate, + psProgramHeader->ui32Pvaddr, pvHostFWCodeAddr, pvHostFWDataAddr, pvHostFWCorememCodeAddr, @@ -708,6 +725,10 @@ PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz, 0, psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz); + +#if defined(CONFIG_ARM64) && defined(__linux__) && defined(SUPPORT_CPUCACHED_FWMEMCTX) + 
RGXFwSharedMemCacheOpExec(pvWriteAddr, psProgramHeader->ui32Pmemsz, PVRSRV_CACHE_OP_FLUSH); +#endif } } @@ -742,17 +763,133 @@ IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID e return psEntry->ui32BaseAddr; } +static inline +PVRSRV_ERROR RGXValidateFWHeaderVersion1(const void *hPrivate, + const RGX_FW_INFO_HEADER *psInfoHeader) +{ + /* Applicable to any FW_INFO_VERSION */ + if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) + { + RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), + psInfoHeader->ui32LayoutEntrySize); + } + + /* Applicable to any FW_INFO_VERSION */ + if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) + { + RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", + __func__, + MAX_NUM_ENTRIES, + psInfoHeader->ui32LayoutEntryNum); + } + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + /* Applicable to any FW_INFO_VERSION */ + if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate)) + { + RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) RGXGetOSPageSize(hPrivate), + psInfoHeader->ui32FwPageSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +#endif + + if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) + { + /* Not an error because RGX_FW_INFO_HEADER is now versioned. It can grow + * incrementally and it must be backwards compatible. + */ + RGXCommentLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) FW_INFO_VERSION, + psInfoHeader->ui32InfoVersion); + goto exit_version1_validation; + } + + if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) + { + RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), + psInfoHeader->ui32HeaderLen); + } + +exit_version1_validation: + return PVRSRV_OK; +} + +static inline +PVRSRV_ERROR RGXValidateFWHeaderVersion2(const void *hPrivate, + const RGX_FW_INFO_HEADER *psInfoHeader) +{ + if (psInfoHeader->ui16PVRVersionMajor != PVRVERSION_MAJ || + psInfoHeader->ui16PVRVersionMinor != PVRVERSION_MIN) + { + RGXErrorLog(hPrivate, "%s: KM and FW version mismatch (expected: %u.%u, found: %u.%u)", + __func__, + PVRVERSION_MAJ, + PVRVERSION_MIN, + psInfoHeader->ui16PVRVersionMajor, + psInfoHeader->ui16PVRVersionMinor); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + +static inline +PVRSRV_ERROR RGXValidateFWHeaderVersion(const void *hPrivate, + const RGX_FW_INFO_HEADER *psInfoHeader) +{ + PVRSRV_ERROR eError; + + switch (psInfoHeader->ui32InfoVersion) + { + default: + __fallthrough; + case 2: + eError = RGXValidateFWHeaderVersion2(hPrivate, psInfoHeader); + if (eError != PVRSRV_OK) + { + return eError; + } + + __fallthrough; + case 1: + eError = RGXValidateFWHeaderVersion1(hPrivate, psInfoHeader); + if (eError != PVRSRV_OK) + { + return eError; + } + + break; + case 0: + RGXErrorLog(hPrivate, "%s: invalid FW_INFO_VERSION", __func__); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, const IMG_BYTE *pbRGXFirmware, const IMG_UINT32 ui32RGXFirmwareSize, IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, - IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize) + 
IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize, + RGX_FW_INFO_HEADER *psFWInfoHeader) { RGX_FW_INFO_HEADER *psInfoHeader; const IMG_BYTE *pbRGXFirmwareInfo; const IMG_BYTE *pbRGXFirmwareLayout; + PVRSRV_ERROR eError; IMG_UINT32 i; if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE) @@ -762,7 +899,6 @@ PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, return PVRSRV_ERROR_INVALID_PARAMS; } - /* * Acquire pointer to the FW info header within the FW image. * The format of the header in the FW image might not be the one expected @@ -774,54 +910,12 @@ PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE; psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo; - /* If any of the following checks fails, the FW will likely not work properly */ - - if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) - { - RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", - __func__, - (IMG_UINT32) FW_INFO_VERSION, - psInfoHeader->ui32InfoVersion); - } - - if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) - { - RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", - __func__, - (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), - psInfoHeader->ui32HeaderLen); - } - - if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) + eError = RGXValidateFWHeaderVersion(hPrivate, psInfoHeader); + if (eError != PVRSRV_OK) { - RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", - __func__, - (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), - psInfoHeader->ui32LayoutEntrySize); - } - - if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) - { - RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", - __func__, - MAX_NUM_ENTRIES, - psInfoHeader->ui32LayoutEntryNum); + return eError; } -#if defined(RGX_FEATURE_MIPS_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) - { - if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate)) - { - RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)", - __func__, - (IMG_UINT32) RGXGetOSPageSize(hPrivate), - psInfoHeader->ui32FwPageSize); - return PVRSRV_ERROR_INVALID_PARAMS; - } - } -#endif - ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum; @@ -883,6 +977,7 @@ PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, } } + *psFWInfoHeader = *psInfoHeader; return PVRSRV_OK; } @@ -897,23 +992,16 @@ PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, { PVRSRV_ERROR eError = PVRSRV_OK; IMG_BOOL bMIPS = IMG_FALSE; -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); -#endif IMG_BOOL bMETA; #if defined(RGX_FEATURE_MIPS_BIT_MASK) bMIPS = (IMG_BOOL)RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); #endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) bMETA = (IMG_BOOL)(!bMIPS && !bRISCV); -#else - bMETA = !bMIPS; -#endif if (bMETA) { -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) IMG_UINT32 *pui32BootConf = NULL; /* Skip bootloader configuration if a pointer to the FW code * allocation is not available @@ -996,7 +1084,6 @@ PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, *pui32BootConf++ = 0; } } -#endif /* defined(RGX_FEATURE_META_MAX_VALUE_IDX) */ } #if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) else if (bMIPS) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.h 
b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.h index e5f9a2afea77..b4cedb8d3271 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwimageutils.h @@ -134,7 +134,8 @@ PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, - IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize); + IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize, + RGX_FW_INFO_HEADER *psFWInfoHeader); /*! ******************************************************************************* diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwmemctx.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwmemctx.h new file mode 100644 index 000000000000..49f37b1c27aa --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwmemctx.h @@ -0,0 +1,156 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware Memctx routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for operations on FWKM Shared memory context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXFWMEMCTX_H +#define RGXFWMEMCTX_H + +#include "device.h" +#include "rgx_memallocflags.h" +#include "pvrsrv.h" +#include "cache_ops.h" +#include "cache_km.h" +#include "pvr_debug.h" + +#if defined(CONFIG_ARM64) && defined(__linux__) && defined(SUPPORT_CPUCACHED_FWMEMCTX) +/* + * RGXFwSharedMemCPUCacheMode() + * We upgrade allocations on ARM64 Linux when CPU Cache snooping is enabled. + * This is because of the Linux direct mapping causing interference due to PIPT + * cache. All allocations are normally UCWC but snooping can return a bad value from the + * direct mapping as it is cached. Upgrade our allocations to cached to prevent bad cached + * values but in turn we require flushing. + */ +static INLINE void RGXFwSharedMemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T *puiFlags) +{ + if ((*puiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)) == 0) + { + /* We don't need to upgrade if we don't map into the CPU */ + return; + } + + /* Clear the existing CPU cache flags */ + *puiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK); + + if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) + { + *puiFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT; + } + else + { + *puiFlags |= PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; + } +} + +#define RGXFwSharedMemCheckSnoopMode(psDeviceConfig) PVR_ASSERT(PVRSRVSystemSnoopingOfCPUCache(psDeviceConfig)) + +/* + * FWSharedMemCacheOpExec() + * This is the CPU data-cache maintenance interface for FW shared allocations. + * We have to be very careful that the VAs supplied to this function are + * sensible as to not cause a kernel oops. Given that this should only be + * used for allocations used for the FW this should be guaranteed. + */ +static INLINE PVRSRV_ERROR RGXFwSharedMemCacheOpExec(const volatile void *pvVirtStart, + IMG_UINT64 uiSize, + PVRSRV_CACHE_OP uiCacheOp) +{ + IMG_UINT64 uiEndAddr = (IMG_UINT64) pvVirtStart + uiSize; + IMG_CPU_PHYADDR uiUnusedPhysAddr = {.uiAddr = 0}; + + if (!pvVirtStart || uiSize == 0) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return CacheOpExec(NULL, + (void*) pvVirtStart, + (void*) uiEndAddr, + uiUnusedPhysAddr, + uiUnusedPhysAddr, + uiCacheOp); +} + +#define RGXFwSharedMemCacheOpValue(value, cacheop) (RGXFwSharedMemCacheOpExec(&value, sizeof(value), PVRSRV_CACHE_OP_##cacheop)) +#define RGXFwSharedMemCacheOpPtr(ptr, cacheop) (RGXFwSharedMemCacheOpExec(ptr, sizeof(*ptr), PVRSRV_CACHE_OP_##cacheop)) +#define RGXFwSharedMemCacheOpExecPfn RGXFwSharedMemCacheOpExec + +static INLINE void RGXFwSharedMemFlushCCB(void *pvCCBVirtAddr, + IMG_UINT64 uiStart, + IMG_UINT64 uiFinish, + IMG_UINT64 uiLimit) +{ + if (uiFinish >= uiStart) + { + /* Flush the CCB data */ + RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pvCCBVirtAddr, uiStart), + uiFinish - uiStart, + PVRSRV_CACHE_OP_FLUSH); + } + else + { + /* CCCB wrapped around - flush the pre and post wrap boundary separately */ + RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pvCCBVirtAddr, uiStart), + uiLimit - uiStart, + PVRSRV_CACHE_OP_FLUSH); + + RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pvCCBVirtAddr, 0), + uiFinish, + PVRSRV_CACHE_OP_FLUSH); + } +} +#else +#define RGXFwSharedMemCPUCacheMode(...) +#define RGXFwSharedMemCheckSnoopMode(...) +/* NULL value required for function callbacks */ +#define RGXFwSharedMemCacheOpExec(...) ((void)NULL) +#define RGXFwSharedMemCacheOpValue(...) 
((void)NULL) +#define RGXFwSharedMemCacheOpPtr(...) ((void)NULL) +#define RGXFwSharedMemCacheOpExecPfn NULL +#define RGXFwSharedMemFlushCCB(...) +#endif + + +#endif /* RGXFWMEMCTX_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.c new file mode 100644 index 000000000000..555691010517 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.c @@ -0,0 +1,1074 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware RISC-V utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware RISC-V utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxfwutils.h" +#include "rgxfwriscv.h" + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) +#define RGX_GET_DMI_REG(psDevInfo, value) \ + ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) ? 
\ + RGX_CR_FWCORE_DMI_##value##__HOST_SECURITY_GEQ4 : RGX_CR_FWCORE_DMI_##value) +#else +#define RGX_GET_DMI_REG(psDevInfo, value) RGX_CR_FWCORE_DMI_##value +#endif + +/* + * RGXRiscvHalt + */ +PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + __maybe_unused IMG_UINT32 ui32_DMI_DMCONTROL_Reg = RGX_GET_DMI_REG(psDevInfo, DMCONTROL); + __maybe_unused IMG_UINT32 ui32_DMI_DMSTATUS_Reg = RGX_GET_DMI_REG(psDevInfo, DMSTATUS); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW"); + + /* Send halt request (no need to select one or more harts on this RISC-V core) */ + PDUMPREG32(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until hart is halted */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_DMSTATUS_Reg, + RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, + RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + /* Clear halt request */ + PDUMPREG32(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, + PDUMP_FLAGS_CONTINUOUS); +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Send halt request (no need to select one or more harts on this RISC-V core) */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); + + /* Wait until hart is halted */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_DMSTATUS_Reg/sizeof(IMG_UINT32), + RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, + RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMSTATUS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } + + /* Clear halt request */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); +#endif + + return PVRSRV_OK; +} + +/* + * RGXRiscvIsHalted + */ +IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + /* Assume the core is always halted in nohw */ + return IMG_TRUE; +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + IMG_UINT32 ui32_DMI_DMSTATUS_Reg = RGX_GET_DMI_REG(psDevInfo, DMSTATUS); + + return (OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMSTATUS_Reg) & + RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U; +#endif +} + +/* + * RGXRiscvResume + */ +PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + __maybe_unused IMG_UINT32 ui32_DMI_DMCONTROL_Reg = RGX_GET_DMI_REG(psDevInfo, DMCONTROL); + __maybe_unused IMG_UINT32 ui32_DMI_DMSTATUS_Reg = RGX_GET_DMI_REG(psDevInfo, DMSTATUS); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW"); + + /* Send resume request (no need to select one or more harts on this RISC-V core) */ + PDUMPREG32(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until hart is resumed */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + 
RGX_PDUMPREG_NAME, + ui32_DMI_DMSTATUS_Reg, + RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, + RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + /* Clear resume request */ + PDUMPREG32(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, + PDUMP_FLAGS_CONTINUOUS); +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Send resume request (no need to select one or more harts on this RISC-V core) */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); + + /* Wait until hart is resumed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_DMSTATUS_Reg/sizeof(IMG_UINT32), + RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, + RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMSTATUS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } + + /* Clear resume request */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, + RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* +@Function RGXRiscvCheckAbstractCmdError + +@Description Check for RISC-V abstract command errors and clear them + +@Input psDevInfo Pointer to GPU device info + +@Return RGXRISCVFW_ABSTRACT_CMD_ERR +******************************************************************************/ +static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr; + + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + +#if defined(NO_HARDWARE) && defined(PDUMP) + eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR; + + /* Check error status */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_ABSTRACTCS_Reg, + RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT, + ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + void __iomem *pvRegsBaseKM = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Check error status */ + eCmdErr = (OSReadUncheckedHWReg32(pvRegsBaseKM, ui32_DMI_ABSTRACTCS_Reg) + & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK) + >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT; + + if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR) + { + PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr)); + + /* Clear the error (note CMDERR field is write-1-to-clear) */ + OSWriteUncheckedHWReg32(pvRegsBaseKM, ui32_DMI_ABSTRACTCS_Reg, + ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK); + } +#endif + + return eCmdErr; +} + +/* + * RGXRiscReadReg + */ +PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 *pui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32RegAddr); + PVR_UNREFERENCED_PARAMETER(pui32Value); + + /* Reading HW registers is not 
supported in nohw/pdump */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Send abstract register read command */ + OSWriteUncheckedHWReg32(pui32RegsBase, + ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } + + if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) + { + /* Read register value */ + *pui32Value = OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg); + } + else + { + *pui32Value = 0U; + } + + return PVRSRV_OK; +#endif +} + +/* + * RGXRiscvPollReg + */ +PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Poll RISC-V register 0x%x (expected 0x%08x)", + ui32RegAddr, ui32Value); + + /* Send abstract register read command */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_ABSTRACTCS_Reg, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvCheckAbstractCmdError(psDevInfo); + + /* Check read value */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_DATA0_Reg, + ui32Value, + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32RegAddr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + /* Polling HW registers is currently not required driverlive */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +/* + * RGXRiscvWriteReg + */ +PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Write RISC-V register 0x%x (value 0x%08x)", + ui32RegAddr, ui32Value); + + /* Prepare data to be written to register */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, 
ui32_DMI_DATA0_Reg, + ui32Value, PDUMP_FLAGS_CONTINUOUS); + + /* Send abstract register write command */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_ABSTRACTCS_Reg, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Prepare data to be written to register */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg, ui32Value); + + /* Send abstract register write command */ + OSWriteUncheckedHWReg32(pui32RegsBase, + ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* +@Function RGXRiscvCheckSysBusError + +@Description Check for RISC-V system bus errors and clear them + +@Input psDevInfo Pointer to GPU device info + +@Return RGXRISCVFW_SYSBUS_ERR +******************************************************************************/ +static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXRISCVFW_SYSBUS_ERR eSBError; + + __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); + +#if defined(NO_HARDWARE) && defined(PDUMP) + eSBError = RISCV_SYSBUS_NO_ERROR; + + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_SBCS_Reg, + RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT, + ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + void __iomem *pvRegsBaseKM = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + eSBError = (OSReadUncheckedHWReg32(pvRegsBaseKM, ui32_DMI_SBCS_Reg) + & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK) + >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT; + + if (eSBError != RISCV_SYSBUS_NO_ERROR) + { + PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError)); + + /* Clear the error (note SBERROR field is write-1-to-clear) */ + OSWriteUncheckedHWReg32(pvRegsBaseKM, ui32_DMI_SBCS_Reg, + ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK); + } +#endif + + return eSBError; +} + +#if !defined(EMULATOR) +/*! 
+*******************************************************************************
+@Function       RGXRiscvReadAbstractMem
+
+@Description    Read a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value)
+{
+	__maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS);
+	__maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND);
+	__maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0);
+	__maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1);
+	__maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS);
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+	PVR_UNREFERENCED_PARAMETER(ui32Addr);
+	PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+	/* Reading memory is not supported in nohw/pdump */
+	return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+	IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo);
+
+	/* Prepare read address */
+	OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA1_Reg, ui32Addr);
+
+	/* Send abstract memory read command */
+	OSWriteUncheckedHWReg32(pui32RegsBase,
+	                        ui32_DMI_COMMAND_Reg,
+	                        (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+	                        RGXRISCVFW_DMI_COMMAND_READ |
+	                        RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT);
+
+	/* Wait until abstract command is completed */
+	if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+	                         pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32),
+	                         0U,
+	                         RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+	                         POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+		         __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg)));
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR)
+	{
+		/* Read memory value */
+		*pui32Value = OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg);
+	}
+	else
+	{
+		*pui32Value = 0U;
+	}
+
+	return PVRSRV_OK;
+#endif
+}
+#endif /* !defined(EMULATOR) */
+
+/*!
+******************************************************************************* +@Function RGXRiscvPollAbstractMem + +@Description Poll for a value at the given address in RISC-V memory space + using RISC-V abstract memory commands + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS, + "Poll RISC-V address 0x%x (expected 0x%08x)", + ui32Addr, ui32Value); + + /* Prepare read address */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA1_Reg, + ui32Addr, PDUMP_FLAGS_CONTINUOUS); + + /* Send abstract memory read command */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_ABSTRACTCS_Reg, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvCheckAbstractCmdError(psDevInfo); + + /* Check read value */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_DATA0_Reg, + ui32Value, + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + /* Polling memory is currently not required driverlive */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +#if !defined(EMULATOR) +/*! 
+******************************************************************************* +@Function RGXRiscvReadSysBusMem + +@Description Read a value at the given address in RISC-V memory space + using the RISC-V system bus + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); + __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); + __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(pui32Value); + + /* Reading memory is not supported in nohw/pdump */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Configure system bus to read 32 bit every time a new address is provided */ + OSWriteUncheckedHWReg32(pui32RegsBase, + ui32_DMI_SBCS_Reg, + (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | + RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN); + + /* Perform read */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_SBADDRESS0_Reg, ui32Addr); + + /* Wait until system bus is idle */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_SBCS_Reg/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_SBCS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } + + if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR) + { + /* Read value from debug system bus */ + *pui32Value = OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg); + } + else + { + *pui32Value = 0U; + } + + return PVRSRV_OK; +#endif +} +#endif /* !defined(EMULATOR) */ + +/*! 
+******************************************************************************* +@Function RGXRiscvPollSysBusMem + +@Description Poll for a value at the given address in RISC-V memory space + using the RISC-V system bus + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); + __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); + __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); + __maybe_unused IMG_UINT32 ui32_DMI_SBDATA0_Reg = RGX_GET_DMI_REG(psDevInfo, SBDATA0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Poll RISC-V address 0x%x (expected 0x%08x)", + ui32Addr, ui32Value); + + /* Configure system bus to read 32 bit every time a new address is provided */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBCS_Reg, + (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | + RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN, + PDUMP_FLAGS_CONTINUOUS); + + /* Perform read */ + PDUMPREG32(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, ui32_DMI_SBADDRESS0_Reg, + ui32Addr, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until system bus is idle */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_SBCS_Reg, + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvCheckSysBusError(psDevInfo); + + /* Check read value */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_SBDATA0_Reg, + ui32Value, + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + /* Polling memory is currently not required driverlive */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +#if !defined(EMULATOR) +/* + * RGXRiscvReadMem + */ +PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 *pui32Value) +{ + if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + { + return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value); + } + + return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value); +} +#endif /* !defined(EMULATOR) */ + +/* + * RGXRiscvPollMem + */ +PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value) +{ + if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + { + return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value); + } + + return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value); +} + +#if !defined(EMULATOR) +/*! 
+******************************************************************************* +@Function RGXRiscvWriteAbstractMem + +@Description Write a value at the given address in RISC-V memory space + using RISC-V abstract memory commands + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); + __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); + __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Write RISC-V address 0x%x (value 0x%08x)", + ui32Addr, ui32Value); + + /* Prepare write address */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA1_Reg, + ui32Addr, PDUMP_FLAGS_CONTINUOUS); + + /* Prepare write data */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA0_Reg, + ui32Value, PDUMP_FLAGS_CONTINUOUS); + + /* Send abstract register write command */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_ABSTRACTCS_Reg, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Prepare write address */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA1_Reg, ui32Addr); + + /* Prepare write data */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg, ui32Value); + + /* Send abstract memory write command */ + OSWriteUncheckedHWReg32(pui32RegsBase, + ui32_DMI_COMMAND_Reg, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } +#endif + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* +@Function RGXRiscvWriteSysBusMem + +@Description Write a value at the given address in RISC-V memory space + using the RISC-V system bus + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ + __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); + __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); + __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); + __maybe_unused IMG_UINT32 ui32_DMI_SBDATA0_Reg = RGX_GET_DMI_REG(psDevInfo, SBDATA0); + __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); + __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); + __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); + +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Write RISC-V address 0x%x (value 0x%08x)", + ui32Addr, ui32Value); + + /* Configure system bus to read 32 bit every time a new address is provided */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBCS_Reg, + RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT, + PDUMP_FLAGS_CONTINUOUS); + + /* Prepare write address */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBADDRESS0_Reg, + ui32Addr, PDUMP_FLAGS_CONTINUOUS); + + /* Prepare write data and initiate write */ + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBDATA0_Reg, + ui32Value, PDUMP_FLAGS_CONTINUOUS); + + /* Wait until system bus is idle */ + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32_DMI_SBCS_Reg, + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + + /* Configure system bus for 32 bit accesses */ + OSWriteUncheckedHWReg32(pui32RegsBase, + ui32_DMI_SBCS_Reg, + RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT); + + /* Prepare write address */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_SBADDRESS0_Reg, ui32Addr); + + /* Prepare write data and initiate write */ + OSWriteUncheckedHWReg32(pui32RegsBase, ui32_DMI_SBDATA0_Reg, ui32Value); + + /* Wait until system bus is idle */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + ui32_DMI_SBCS_Reg/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", + __func__, OSReadUncheckedHWReg32(pui32RegsBase, ui32_DMI_SBCS_Reg))); + return PVRSRV_ERROR_TIMEOUT; + } +#endif + + return PVRSRV_OK; +} + +/* + * RGXRiscvWriteMem + */ +PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value) +{ + if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + { + return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value); + } + + return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value); +} +#endif /* !defined(EMULATOR) */ + +/* + * 
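
The read/write/poll helpers above all pick an access path purely from the target address: core-coupled memory (the RGXRISCVFW_COREMEM_BASE..END window) goes through abstract memory commands, anything else through the Debug Module system bus. A minimal standalone sketch of that routing follows; the EXAMPLE_COREMEM_* bounds are placeholders, not the driver's real coremem window.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_COREMEM_BASE 0x80000000u /* placeholder, not RGXRISCVFW_COREMEM_BASE */
#define EXAMPLE_COREMEM_END  0x8000FFFFu /* placeholder, not RGXRISCVFW_COREMEM_END */

static const char *ExampleRouteForAddress(uint32_t ui32Addr)
{
	/* Same predicate used by RGXRiscvReadMem/WriteMem/PollMem above. */
	return (ui32Addr >= EXAMPLE_COREMEM_BASE && ui32Addr <= EXAMPLE_COREMEM_END)
	       ? "abstract memory command" : "system bus access";
}

int main(void)
{
	printf("0x%08x -> %s\n", 0x80000010u, ExampleRouteForAddress(0x80000010u));
	printf("0x%08x -> %s\n", 0x00001000u, ExampleRouteForAddress(0x00001000u));
	return 0;
}

The driver-side functions additionally wait on the relevant DMI busy flag (ABSTRACTCS for abstract commands, SBCS for system bus accesses) before returning, as shown in the implementations above.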
RGXRiscvDmiOp + */ +PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 *pui64DMI) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(pui64DMI); + + /* Accessing DM registers is not supported in nohw/pdump */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#else +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) +#define DMI_BASE ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) ? RGX_CR_FWCORE_DMI_RESERVED00__HOST_SECURITY_GEQ4 : RGX_CR_FWCORE_DMI_RESERVED00) +#else +#define DMI_BASE RGX_CR_FWCORE_DMI_RESERVED00 +#endif +#define DMI_STRIDE (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00) +#define DMI_REG(r) ((DMI_BASE) + (DMI_STRIDE) * (r)) + +#define DMI_OP_SHIFT 0U +#define DMI_OP_MASK 0x3ULL +#define DMI_DATA_SHIFT 2U +#define DMI_DATA_MASK 0x3FFFFFFFCULL +#define DMI_ADDRESS_SHIFT 34U +#define DMI_ADDRESS_MASK 0xFC00000000ULL + +#define DMI_OP_NOP 0U +#define DMI_OP_READ 1U +#define DMI_OP_WRITE 2U +#define DMI_OP_RESERVED 3U + +#define DMI_OP_STATUS_SUCCESS 0U +#define DMI_OP_STATUS_RESERVED 1U +#define DMI_OP_STATUS_FAILED 2U +#define DMI_OP_STATUS_BUSY 3U + + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_ERROR eError; + IMG_UINT64 ui64Op, ui64Address, ui64Data; + + ui64Op = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT; + ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT; + ui64Data = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT; + + eError = PVRSRVPowerLock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + ui64Op = DMI_OP_STATUS_FAILED; + goto dmiop_update; + } + + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)", + __func__, PVRSRVGetErrorString(eError))); + ui64Op = DMI_OP_STATUS_FAILED; + goto dmiop_release_lock; + } + + if (ePowerState == PVRSRV_DEV_POWER_STATE_ON) + { + void __iomem *pvRegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); + switch (ui64Op) + { + case DMI_OP_NOP: + ui64Op = DMI_OP_STATUS_SUCCESS; + break; + case DMI_OP_WRITE: + OSWriteUncheckedHWReg32(pvRegsBase, + DMI_REG(ui64Address), + (IMG_UINT32)ui64Data); + ui64Op = DMI_OP_STATUS_SUCCESS; + break; + case DMI_OP_READ: + ui64Data = (IMG_UINT64)OSReadUncheckedHWReg32(pvRegsBase, + DMI_REG(ui64Address)); + ui64Op = DMI_OP_STATUS_SUCCESS; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op)); + ui64Op = DMI_OP_STATUS_FAILED; + break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not " + "possible while the GPU is powered off", __func__)); + + ui64Op = DMI_OP_STATUS_FAILED; + } + +dmiop_release_lock: + PVRSRVPowerUnlock(psDeviceNode); + +dmiop_update: + *pui64DMI = (ui64Op << DMI_OP_SHIFT) | + (ui64Address << DMI_ADDRESS_SHIFT) | + (ui64Data << DMI_DATA_SHIFT); + + return eError; +#endif +} + +/****************************************************************************** + End of file (rgxfwriscv.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.h new file mode 100644 index 000000000000..7821c167fa2a --- /dev/null +++ 
b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwriscv.h @@ -0,0 +1,217 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware RISC-V utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX firmware RISC-V utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXFWRISCV_H +#define RGXFWRISCV_H + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) +#define RGXRISCVFW_GET_REMAP_SECURE(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4 + ((r) * 8U)) +#define RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE (RGXRISCVFW_GET_REMAP_SECURE(RGXRISCVFW_BOOTLDR_CODE_REGION)) +#define RGXRISCVFW_BOOTLDR_DATA_REMAP_SECURE (RGXRISCVFW_GET_REMAP_SECURE(RGXRISCVFW_BOOTLDR_DATA_REGION)) +#define RGX_GET_RISCV_REGS_BASE(psDevInfo) \ + ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) ? \ + (psDevInfo)->pvSecureRegsBaseKM : (psDevInfo)->pvRegsBaseKM) +#else +#define RGX_GET_RISCV_REGS_BASE(psDevInfo) ((psDevInfo)->pvRegsBaseKM) +#endif + +/*! 
+******************************************************************************* +@Function RGXRiscvHalt + +@Description Halt the RISC-V FW core (required for certain operations + done through Debug Module) + +@Input psDevInfo Pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXRiscvIsHalted + +@Description Check if the RISC-V FW is halted + +@Input psDevInfo Pointer to device info + +@Return IMG_BOOL +******************************************************************************/ +IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXRiscvResume + +@Description Resume the RISC-V FW core + +@Input psDevInfo Pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXRiscvReadReg + +@Description Read a value from the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvPollReg + +@Description Poll for a value from the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvWriteReg + +@Description Write a value to the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvPollMem + +@Description Poll for a value at the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value); + +#if !defined(EMULATOR) +/*! 
+******************************************************************************* +@Function RGXRiscvReadMem + +@Description Read a value at the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvWriteMem + +@Description Write a value to the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value); +#endif /* !defined(EMULATOR) */ + +/*! +******************************************************************************* +@Function RGXRiscvDmiOp + +@Description Acquire the powerlock and perform an operation on the RISC-V + Debug Module Interface, but only if the GPU is powered on. + +@Input psDevInfo Pointer to device info +@InOut pui64DMI Encoding of a request for the RISC-V Debug + Module with same format as the 'dmi' register + from the RISC-V debug specification (v0.13+). + On return, this is updated with the result of + the request, encoded the same way. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 *pui64DMI); + +#endif /* RGXFWRISCV_H */ +/****************************************************************************** + End of file (rgxfwriscv.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwutils.h similarity index 74% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwutils.h index e053fece9be4..6752bc95fa18 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxfwutils.h @@ -56,8 +56,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
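
Before the diff moves on to rgxfwutils.h, a standalone illustration of the 64-bit word that RGXRiscvDmiOp() consumes and returns: op in bits [1:0], data in bits [33:2], DM register address in bits [39:34], mirroring the shifts and masks defined in rgxfwriscv.c. This is a sketch for reading the interface, not driver code.

#include <stdint.h>
#include <stdio.h>

#define DMI_OP_SHIFT       0u
#define DMI_OP_MASK        0x3ULL
#define DMI_DATA_SHIFT     2u
#define DMI_DATA_MASK      0x3FFFFFFFCULL
#define DMI_ADDRESS_SHIFT  34u
#define DMI_ADDRESS_MASK   0xFC00000000ULL

#define DMI_OP_READ        1ULL

static uint64_t PackDmiRequest(uint64_t ui64Op, uint64_t ui64Address, uint64_t ui64Data)
{
	return ((ui64Op      << DMI_OP_SHIFT)      & DMI_OP_MASK)   |
	       ((ui64Data    << DMI_DATA_SHIFT)    & DMI_DATA_MASK) |
	       ((ui64Address << DMI_ADDRESS_SHIFT) & DMI_ADDRESS_MASK);
}

int main(void)
{
	/* Read request for DM register 0x04 (data0 in the RISC-V debug spec). */
	uint64_t ui64DMI = PackDmiRequest(DMI_OP_READ, 0x04u, 0u);

	printf("packed request = 0x%010llx\n", (unsigned long long)ui64DMI);
	printf("op field       = %llu\n",
	       (unsigned long long)((ui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT));
	printf("address field  = 0x%02llx\n",
	       (unsigned long long)((ui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT));
	printf("data field     = 0x%08llx\n",
	       (unsigned long long)((ui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT));

	/* On return from RGXRiscvDmiOp() the op field holds the completion status
	 * (0 = success) and, for reads, the data field holds the value read back. */
	return 0;
}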
#include "rgxta3d.h" #include "devicemem_utils.h" #include "rgxmem.h" +#include "rgxfwmemctx.h" +#include "rgxinit_apphints.h" -#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */ +#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawDriverID%d" /*!< RGX Raw Firmware Heap identifier */ static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_MEMALLOCFLAGS_T *puiFlags, @@ -68,10 +70,6 @@ static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, switch (ePhysHeap) { -#if defined(SUPPORT_SECURITY_VALIDATION) - /* call with GPU_SECURE from RGXSetupFwSysData */ - case PVRSRV_PHYS_HEAP_GPU_SECURE: -#endif case PVRSRV_PHYS_HEAP_FW_CODE: case PVRSRV_PHYS_HEAP_FW_PRIV_DATA: case PVRSRV_PHYS_HEAP_FW_MAIN: @@ -93,10 +91,10 @@ static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, case PVRSRV_PHYS_HEAP_FW_PREMAP6: case PVRSRV_PHYS_HEAP_FW_PREMAP7: { - IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0; + IMG_UINT32 ui32DriverID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0; - PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID"); - *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID]; + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED, "ui32DriverID"); + *ppsFwHeap = psDevInfo->psPremappedFwRawHeap[ui32DriverID]; break; } default: @@ -144,31 +142,42 @@ static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo, if (psFwHeap == psDevInfo->psFirmwareConfigHeap) { /* - * All structures allocated from the Firmware Config subheap must start at the same pre-determined + * All structures allocated from the Firmware Config sub-heap must start at the same pre-determined * offsets, regardless of the system's page size (e.g. 4k,16k,64k). The alignment requirement is * satisfied for virtual addresses during the mapping stage. Physical allocations do not take * alignment into consideration. - * VZ drivers usually preallocate and premap the entire Firmware heap range. Any allocations from - * this heap are physical alloc only, having their device VAs derived from their PAs. This makes + * VZ drivers usually preallocate and pre-map the entire Firmware heap range. Any allocations from + * this heap are physical allocations only, having their device VAs derived from their PAs. This makes * it impossible to fulfil alignment requirements. * To work around this limitation, allocation sizes are rounded to the nearest multiple of 64kb, * regardless of the actual size of object. */ uiAlign = RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY; - uiSize = ((uiSize + RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY - 1) / - RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) * - RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY; + uiSize = PVR_ALIGN(uiSize, RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY); } else { - /* Aligning fw based allocations for MIPS based rogue cores at cache line boundary(16 bytes) instead - of SLC(64 bytes) to have more compact memory with less waste and hopefully save some tlb misses. */ - #define MIPS_CACHE_LINE_SIZE_IN_BYTES 16 - uiAlign = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? 
MIPS_CACHE_LINE_SIZE_IN_BYTES - : GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + /* Aligning FW based allocations for MIPS based rogue cores at cache line boundary(16 bytes) instead + * of SLC(64 bytes) to have more compact memory with less waste and hopefully save some TLB misses. + * MIPS CPU cores alignment. + */ + uiAlign = RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE; + } + else +#endif + { + /* Non-MIPS CPU cores alignment */ + uiAlign = (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))); + } } + RGXFwSharedMemCPUCacheMode(psDevInfo->psDeviceNode, + &uiFlags); + eError = DevmemAllocateAndMap(psFwHeap, uiSize, uiAlign, @@ -191,6 +200,7 @@ static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDevi IMG_DEV_VIRTADDR sTmpDevVAddr; PVRSRV_ERROR eError; DEVMEM_HEAP *psFwHeap; + IMG_UINT32 ui32HeapLog2PageSize; PVR_DPF_ENTERED; @@ -208,12 +218,26 @@ static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDevi PVR_DPF_RETURN_RC(eError); } + RGXFwSharedMemCPUCacheMode(psDevInfo->psDeviceNode, + &uiFlags); + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + /* MIPS cores */ + ui32HeapLog2PageSize = ExactLog2(uiAlign); + } + else +#endif + { + /* Meta and RiscV cores */ + ui32HeapLog2PageSize = DevmemGetHeapLog2PageSize(psFwHeap); + } + eError = DevmemAllocateExportable(psDeviceNode, uiSize, uiAlign, - RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? - ExactLog2(uiAlign) : - DevmemGetHeapLog2PageSize(psFwHeap), + ui32HeapLog2PageSize, uiFlags, pszText, ppsMemDescPtr); @@ -228,7 +252,7 @@ static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDevi is set */ eError = DevmemMapToDevice(*ppsMemDescPtr, - psDevInfo->psFirmwareMainHeap, + psFwHeap, &sTmpDevVAddr); if (eError != PVRSRV_OK) { @@ -268,6 +292,9 @@ static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_DPF_RETURN_RC(eError); } + RGXFwSharedMemCPUCacheMode(psDevInfo->psDeviceNode, + &uiFlags); + eError = DevmemAllocateSparse(psDevInfo->psDeviceNode, uiSize, ui32NumPhysChunks, @@ -275,7 +302,7 @@ static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, pui32MappingTable, ui32Align, DevmemGetHeapLog2PageSize(psFwHeap), - uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING, + uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING, pszText, ppsMemDescPtr); if (eError != PVRSRV_OK) @@ -304,6 +331,8 @@ static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo, { PVR_DPF_ENTERED1(psMemDesc); + PVR_UNREFERENCED_PARAMETER(psDevInfo); + DevmemReleaseDevVirtAddr(psMemDesc); DevmemFree(psMemDesc); @@ -349,8 +378,19 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC | \ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) +#define RGX_FWCODEDATA_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) + 
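
Returning to the DevmemFwAllocate() change above: the explicit divide-and-multiply round-up was replaced by PVR_ALIGN(). A small standalone check of the equivalence; the EXAMPLE_* names are stand-ins, and the mask form of PVR_ALIGN used here is an assumption (it only works because the 64KB granularity is a power of two).

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_GRANULARITY 0x10000u /* 64KB, stand-in for RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY */
#define EXAMPLE_ALIGN(x, a) (((x) + ((a) - 1u)) & ~((a) - 1u))

int main(void)
{
	uint32_t aui32Sizes[] = { 0x100u, 0x10000u, 0x10001u };
	size_t i;

	for (i = 0; i < sizeof(aui32Sizes) / sizeof(aui32Sizes[0]); i++)
	{
		/* Old form: ((size + G - 1) / G) * G; new form: PVR_ALIGN(size, G).
		 * Both round up to the next multiple of the granularity. */
		uint32_t ui32Old = ((aui32Sizes[i] + EXAMPLE_GRANULARITY - 1u) / EXAMPLE_GRANULARITY) * EXAMPLE_GRANULARITY;
		uint32_t ui32New = EXAMPLE_ALIGN(aui32Sizes[i], EXAMPLE_GRANULARITY);

		printf("size 0x%06x -> old 0x%06x, new 0x%06x\n",
		       aui32Sizes[i], ui32Old, ui32New);
	}
	return 0;
}

For power-of-two granularities the two forms produce identical results, so the change only simplifies the expression.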
#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ @@ -360,6 +400,7 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC | \ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) #define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ @@ -371,6 +412,7 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC | \ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG)) #define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ @@ -381,6 +423,7 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC | \ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) /* Firmware memory that is not accessible by the CPU. */ @@ -388,7 +431,8 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC) /* Firmware shared memory that is supposed to be read-only to the CPU. * In reality it isn't due to ZERO_ON_ALLOC which enforces CPU_WRITEABLE @@ -401,7 +445,8 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC) /* data content being kept from previous boot cycles from physical memory must not be cleared during allocation */ #define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL)) @@ -413,32 +458,82 @@ static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) #define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer, otherwise RGXUnsetFirmwareAddress() must be call when finished. */ -IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +/*************************************************************************/ /*! +@Function RGXTraceBufferIsInitRequired + +@Description Returns true if the firmware trace buffer is not allocated and + might be required by the firmware soon. Trace buffer allocated + on-demand to reduce RAM footprint on systems not needing + firmware trace. 
+ +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed or not +*/ /**************************************************************************/ +FORCE_INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + RGXFwSharedMemCacheOpValue(psTraceBufCtl->ui32LogType, INVALIDATE); + + /* The firmware expects a trace buffer only when: + * - Logtype is "trace" AND + * - at least one LogGroup is configured + * - the Driver Mode is not Guest + */ + if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + && !PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); #if defined(SUPPORT_TBI_INTERFACE) -IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +/*************************************************************************/ /*! +@Function RGXTBIBufferIsInitRequired + +@Description Returns true if the firmware tbi buffer is not allocated and + might be required by the firmware soon. TBI buffer allocated + on-demand to reduce RAM footprint on systems not needing + tbi. + +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed or not +*/ /**************************************************************************/ +FORCE_INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + RGXFwSharedMemCacheOpValue(psTraceBufCtl->ui32LogType, INVALIDATE); + + /* The firmware expects a tbi buffer only when: + * - Logtype is "tbi" + */ + if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) + && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); #endif PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32FwOsCfgFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 *pui32TPUTrilinearFracMask, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2); - + IMG_UINT32 ui32FwOsCfgFlags); void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); @@ -459,7 +554,7 @@ void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); @Return PVRSRV_ERROR */ /**************************************************************************/ PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo, - PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, IMG_UINT32 ui32Size, const IMG_CHAR *pszName, DEVMEM_MEMDESC **ppsMemDesc, @@ -514,74 +609,6 @@ void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, */ /**************************************************************************/ void 
RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc); -/*************************************************************************/ /*! -@Function FWCommonContextAllocate - -@Description Allocate a FW common context. This allocates the HW memory - for the context, the CCB and wires it all together. - -@Input psConnection Connection this context is being created on -@Input psDeviceNode Device node to create the FW context on - (must be RGX device node) -@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant which - which represents the requestor of this FWCC -@Input eDM Data Master type -@Input psServerMMUContext Server MMU memory context. -@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use - as the FW context or NULL if this function - should allocate it -@Input ui32AllocatedOffset Offset into pre-allocate MemDesc to use - as the FW context. If psAllocatedMemDesc - is NULL then this parameter is ignored -@Input psFWMemContextMemDesc MemDesc of the FW memory context this - common context resides on -@Input psContextStateMemDesc FW context state (context switch) MemDesc -@Input ui32CCBAllocSizeLog2 Size of the CCB for this context -@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context -@Input ui32ContextFlags Flags which specify properties of the context -@Input i32Priority Priority of the context -@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run -@Input ui64RobustnessAddress Address for FW to signal a context reset -@Input psInfo Structure that contains extra info - required for the creation of the context - (elements might change from core to core) -@Return PVRSRV_OK if the context was successfully created -*/ /**************************************************************************/ -PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, - RGXFWIF_DM eDM, - SERVER_MMU_CONTEXT *psServerMMUContext, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - DEVMEM_MEMDESC *psContextStateMemDesc, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); - -void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 *pui32LastResetJobRef); - -PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, - SERVER_MMU_CONTEXT *psServerMMUContext, - PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); - -PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 ui32ContextFlags); /*! 
******************************************************************************* @Function RGXScheduleProcessQueuesKM @@ -595,26 +622,6 @@ PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonCo ******************************************************************************/ void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); -#if defined(SUPPORT_VALIDATION) -/*! -******************************************************************************* -@Function RGXScheduleRgxRegCommand - -@Input psDevInfo Device Info struct -@Input ui64RegVal Value to write into FW register -@Input ui64Size Register size -@Input ui32Offset Register Offset -@Input bWriteOp Register Write or Read toggle - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 ui64RegVal, - IMG_UINT64 ui64Size, - IMG_UINT32 ui32Offset, - IMG_BOOL bWriteOp); - -#endif /*! ******************************************************************************* @@ -682,7 +689,7 @@ PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) /*************************************************************************/ /*! -@Function RGXScheduleCommand +@Function _RGXScheduleCommandAndGetKCCBSlot @Description Sends a command to a particular DM and kicks the firmware but first schedules any commands which have to happen before @@ -692,6 +699,7 @@ PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, @Input eDM To which DM the cmd is sent. @Input psKCCBCmd The cmd to send. @Input ui32PDumpFlags PDump flags +@Input bCallerHasPwrLock Caller already has power lock @Output pui32CmdKCCBSlot When non-NULL: - Pointer on return contains the kCCB slot number in which the command was enqueued. @@ -699,14 +707,22 @@ PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_KCCB_RTN_SLOT_RST @Return PVRSRV_ERROR */ /**************************************************************************/ -PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_DM eKCCBType, - RGXFWIF_KCCB_CMD *psKCCBCmd, - IMG_UINT32 ui32PDumpFlags, - IMG_UINT32 *pui32CmdKCCBSlot); +PVRSRV_ERROR _RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot, + IMG_BOOL bCallerHasPwrLock); + +#define RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot) \ + _RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot, IMG_FALSE) + #define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \ RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL) +#define RGXScheduleCommandWithoutPowerLock(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \ + _RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL, IMG_TRUE) + /*************************************************************************/ /*! 
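
The macro pair above funnels both call sites into _RGXScheduleCommandAndGetKCCBSlot() with an extra flag saying whether the caller already owns the power lock. A toy standalone sketch of that wrapper pattern; none of these Example* names exist in the driver.

#include <stdbool.h>
#include <stdio.h>

static void ExampleScheduleInt(const char *pszCmd, bool bCallerHasPwrLock)
{
	/* Only take the lock when the caller does not already hold it,
	 * mirroring the bCallerHasPwrLock parameter added above. */
	if (!bCallerHasPwrLock)
	{
		printf("  acquire power lock\n");
	}
	printf("  schedule \"%s\"\n", pszCmd);
	if (!bCallerHasPwrLock)
	{
		printf("  release power lock\n");
	}
}

#define ExampleSchedule(cmd)                 ExampleScheduleInt(cmd, false)
#define ExampleScheduleWithoutPowerLock(cmd) ExampleScheduleInt(cmd, true)

int main(void)
{
	ExampleSchedule("health check");                 /* typical caller */
	ExampleScheduleWithoutPowerLock("health check"); /* caller already inside a power transition */
	return 0;
}

This is presumably why RGXFWHealthCheckCmdWithoutPowerLock() is introduced further down: it lets the health check be issued from code that already holds the power lock without attempting a second acquire.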
@Function RGXWaitForKCCBSlotUpdate @@ -724,7 +740,6 @@ PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32SlotNum, IMG_UINT32 ui32PDumpFlags); -PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo); /*************************************************************************/ /*! @Function PVRSRVRGXFrameworkCopyCommand @@ -739,9 +754,9 @@ PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo); @Return PVRSRV_ERROR */ /**************************************************************************/ PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEM_MEMDESC *psFWFrameworkMemDesc, - IMG_PBYTE pbyGPUFRegisterList, - IMG_UINT32 ui32FrameworkRegisterSize); + DEVMEM_MEMDESC *psFWFrameworkMemDesc, + IMG_PBYTE pbyGPUFRegisterList, + IMG_UINT32 ui32FrameworkRegisterSize); /*************************************************************************/ /*! @@ -800,7 +815,7 @@ PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, @Description Schedules a FW common context cleanup. The firmware doesn't block waiting for the resource to become idle but rather - notifies the host that the resources is busy. + notifies the host that the resource is busy. @Input psDeviceNode pointer to device node @Input psServerCommonContext context to be cleaned up @@ -821,7 +836,7 @@ PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Schedules a FW HWRTData memory cleanup. The firmware doesn't block waiting for the resource to become idle but rather - notifies the host that the resources is busy. + notifies the host that the resource is busy. @Input psDeviceNode pointer to device node @Input psHWRTData firmware address of the HWRTData for clean-up @@ -837,7 +852,7 @@ PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Schedules a FW FreeList cleanup. The firmware doesn't block waiting for the resource to become idle but rather notifies the - host that the resources is busy. + host that the resource is busy. @Input psDeviceNode pointer to device node @Input psFWFreeList firmware address of the FreeList for clean-up @@ -853,7 +868,7 @@ PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, @Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block waiting for the resource to become idle but rather notifies the - host that the resources is busy. + host that the resource is busy. @Input psDevInfo pointer to device node @Input psFWZSBuffer firmware address of the ZS Buffer for clean-up @@ -863,12 +878,6 @@ PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, PRGXFWIF_ZSBUFFER psFWZSBuffer); -PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, - CONNECTION_DATA *psConnection, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_INT32 i32Priority, - RGXFWIF_DM eDM); - /*! ******************************************************************************* @Function RGXFWSetHCSDeadline @@ -887,33 +896,22 @@ PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, /*! ******************************************************************************* -@Function RGXFWChangeOSidPriority +@Function RGXFWHealthCheckCmdInt -@Description Requests the Firmware to change the priority of an operating - system. Higher priority number equals higher priority on the - scheduling system. +@Description Ping the firmware to check if it is responsive. 
-@Input psDevInfo pointer to device info -@Input ui32OSid The OSid whose priority is to be altered -@Input ui32Priority The new priority number for the specified OSid +@Input psDevInfo pointer to device info +@Input bCallerHasPwrLock Caller already has power lock @Return PVRSRV_ERROR ******************************************************************************/ -PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, - IMG_UINT32 ui32Priority); +PVRSRV_ERROR RGXFWHealthCheckCmdInt(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bCallerHasPwrLock); -/*! -******************************************************************************* -@Function RGXFWHealthCheckCmd - -@Description Ping the firmware to check if it is responsive. +#define RGXFWHealthCheckCmd(psDevInfo) \ + RGXFWHealthCheckCmdInt(psDevInfo, IMG_FALSE) -@Input psDevInfo pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); +#define RGXFWHealthCheckCmdWithoutPowerLock(psDevInfo) \ + RGXFWHealthCheckCmdInt(psDevInfo, IMG_TRUE) /*! ******************************************************************************* @@ -922,18 +920,18 @@ PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); @Description Requests the Firmware to change the guest OS Online states. This should be initiated by the VMM when a guest VM comes online or goes offline. If offline, the FW offloads any current - resource from that OSID. The request is repeated until the FW - has had time to free all the resources or has waited for + resource from that DriverID. The request is repeated until the + FW has had time to free all the resources or has waited for workloads to finish. @Input psDevInfo pointer to device info -@Input ui32OSid The Guest OSid whose state is being altered +@Input ui32DriverID The driver whose state is being altered @Input eOSOnlineState The new state (Online or Offline) @Return PVRSRV_ERROR ******************************************************************************/ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, + IMG_UINT32 ui32DriverID, RGXFWIF_OS_STATE_CHANGE eOSOnlineState); #if defined(SUPPORT_AUTOVZ) @@ -949,6 +947,16 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, @Input psDevInfo pointer to device info ******************************************************************************/ void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXDisconnectAllGuests + +@Description Send requests to FW to disconnect all guest connections. + +@Input psDeviceNode pointer to device node +******************************************************************************/ +PVRSRV_ERROR RGXDisconnectAllGuests(PVRSRV_DEVICE_NODE *psDeviceNode); #endif /*! @@ -1032,9 +1040,6 @@ void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bI PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, IMG_BOOL bCheckAfterTimePassed); - -PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); - #if defined(SUPPORT_AUTOVZ) /*! 
******************************************************************************* @@ -1047,11 +1052,6 @@ PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurren void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode); #endif /* SUPPORT_AUTOVZ */ -void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - IMG_UINT32 ui32VerbLevel); - /*! ******************************************************************************* @Function AttachKickResourcesCleanupCtls @@ -1139,7 +1139,7 @@ PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, Otherwise, a PVRSRV error code ******************************************************************************/ PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID, + IMG_UINT32 ui32DriverID, IMG_DEV_PHYADDR sDevPAddr, IMG_UINT64 ui64DevPSize); @@ -1150,128 +1150,7 @@ PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Unregister and unmap from device, a raw firmware physheap ******************************************************************************/ void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID); - -/*! -******************************************************************************* -@Function RGXRiscvHalt - -@Description Halt the RISC-V FW core (required for certain operations - done through Debug Module) - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXRiscvIsHalted - -@Description Check if the RISC-V FW is halted - -@Input psDevInfo Pointer to device info - -@Return IMG_BOOL -******************************************************************************/ -IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXRiscvResume - -@Description Resume the RISC-V FW core - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXRiscvReadReg - -@Description Read a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 *pui32Value); - -/*! -******************************************************************************* -@Function RGXRiscvPollReg - -@Description Poll for a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value); - -/*! 
-******************************************************************************* -@Function RGXRiscvWriteReg - -@Description Write a value to the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value); - -/*! -******************************************************************************* -@Function RGXRiscvPollMem - -@Description Poll for a value at the given address in RISC-V memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 ui32Value); - -/*! -******************************************************************************* -@Function RGXRiscvDmiOp - -@Description Acquire the powerlock and perform an operation on the RISC-V - Debug Module Interface, but only if the GPU is powered on. - -@Input psDevInfo Pointer to device info -@InOut pui64DMI Encoding of a request for the RISC-V Debug - Module with same format as the 'dmi' register - from the RISC-V debug specification (v0.13+). - On return, this is updated with the result of - the request, encoded the same way. - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 *pui64DMI); + IMG_UINT32 ui32DriverID); /*! ******************************************************************************* @@ -1318,7 +1197,7 @@ PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, @Input ui32FwVA The Fw VA that needs decoding @Output psCpuPA Pointer to the resulting CPU PA @Output psDevPA Pointer to the resulting Dev PA -@Output pui64RawPTE Pointer to the raw Page Table Entry value +@Output pui64RawPTE Pointer to the raw Page Table Entry value @Return PVRSRV_ERROR ******************************************************************************/ @@ -1328,32 +1207,30 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR *psDevPA, IMG_UINT64 *pui64RawPTE); -#if defined(SUPPORT_WORKLOAD_ESTIMATION) /*! ******************************************************************************* -@Function RGXIsValidWorkloadEstCCBCommand +@Function RGXFWInjectFault -@Description Checks if command type can be used for workload estimation +@Description Injecting firmware fault to validate recovery through Host -@Input eType Command type to check +@Input psDevInfo Pointer to device info -@Return IMG_BOOL +@Return PVRSRV_ERROR ******************************************************************************/ -INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType); - -#endif +PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo); /*! 
******************************************************************************* -@Function RGXFWInjectFault - -@Description Injecting firmware fault to validate recovery through Host +@Function RGXFWSetVzConnectionCooldownPeriod -@Input psDevInfo Pointer to device info +@Description Set Vz connection cooldown period -@Return PVRSRV_ERROR +@Input psDevInfo pointer to device info +@Input ui32VzConnectionCooldownPeriodInSec Cooldown period in secs ******************************************************************************/ -PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR +RGXFWSetVzConnectionCooldownPeriod(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec); #if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) #error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." @@ -1369,31 +1246,47 @@ PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo); #define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) #define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) +#define KM_ALIVE_TOKEN_CACHEOP(Target, CacheOp) +#define KM_CONNECTION_CACHEOP(Target, CacheOp) + #else #if defined(SUPPORT_AUTOVZ) #define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) #define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) -#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteDeviceMem32WithWMB((volatile IMG_UINT32 *) &psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) do { \ + OSWriteDeviceMem32WithWMB((volatile IMG_UINT32 *) &psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val); \ + KM_ALIVE_TOKEN_CACHEOP(Os, FLUSH); \ + } while (0) + +#define KM_ALIVE_TOKEN_CACHEOP(Target, CacheOp) RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfConnectionCtl->ui32Alive##Target##Token, \ + CacheOp); #endif /* defined(SUPPORT_AUTOVZ) */ -#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1))) -/* native, static-vz and AutoVz using shared memory */ +#if !defined(NO_HARDWARE) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +/* static, dynamic and AutoVz DDKs using shared memory */ #define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) #define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) -#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteDeviceMem32WithWMB((void*)&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val) +#define KM_SET_OS_CONNECTION(val, psDevInfo) do { \ + OSWriteDeviceMem32WithWMB((void*)&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val); \ + KM_CONNECTION_CACHEOP(Os, FLUSH); \ + } while (0) + +#define KM_CONNECTION_CACHEOP(Target, CacheOp) RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfConnectionCtl->eConnection##Target##State, \ + CacheOp); #else -/* dynamic-vz & nohw */ +/* nohw & native */ #define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) #define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) #define KM_SET_OS_CONNECTION(val, psDevInfo) -#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */ +#define KM_CONNECTION_CACHEOP(Target, CacheOp) +#endif /* 
defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED == 1) */ #endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ -#if defined(SUPPORT_AUTOVZ) -#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS +#if defined(RGX_PREMAP_FW_HEAPS) +#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_HOST_DRIVER_ID #else -#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START +#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_GUEST_DRIVER_ID_START #endif #define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf.h similarity index 87% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf.h index 16a959c7594a..06d3c3ae0487 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf.h @@ -44,13 +44,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef RGXHWPERF_H_ #define RGXHWPERF_H_ -#include "rgx_fwif_hwperf.h" #include "rgxhwperf_common.h" /****************************************************************************** - * RGX HW Performance Profiling API(s) Rogue specific + * RGX HW Performance Profiling API(s) *****************************************************************************/ +#if defined(RGX_FEATURE_HWPERF_ROGUE) PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, @@ -64,6 +64,7 @@ PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( IMG_UINT16 ui16CustomBlockID, IMG_UINT16 ui16NumCustomCounters, IMG_UINT32 * pui32CustomCounterIDs); +#endif PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( CONNECTION_DATA * psConnection, @@ -72,6 +73,7 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( IMG_UINT32 ui32ArrayLen, RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs); +#if defined(RGX_FEATURE_HWPERF_ROGUE) PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_UINT32 ui32BlockID, @@ -81,6 +83,7 @@ PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNo RGXFWIF_HWPERF_CTL *psHWPerfCtl, IMG_UINT32 ui32BlockID, RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters); +#endif PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode, RGXFWIF_HWPERF_CTL *psHWPerfCtl, @@ -93,4 +96,14 @@ PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 *pui32BlockCount, IMG_UINT32 *pui32EnabledBlockIDs); +typedef struct HWPERF_STREAM_DESC_TAG HWPERF_STREAM_DESC; + +PVRSRV_ERROR PVRSRVRGXOpenHWPerfClientStreamKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + HWPERF_STREAM_DESC **ppsSD); +PVRSRV_ERROR PVRSRVRGXCloseHWPerfClientStreamKM(HWPERF_STREAM_DESC *psSD); +PVRSRV_ERROR PVRSRVRGXWriteHWPerfClientEventKM(HWPERF_STREAM_DESC *psSD, + IMG_UINT32 uiSize, + IMG_BYTE *puiData); + #endif /* RGXHWPERF_H_ */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.c index 2124f6d61c7a..b440560f2141 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.c @@ 
-57,6 +57,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv_tlcommon.h" #include "tlclient.h" #include "tlstream.h" +#include "tlserver.h" #include "rgxhwperf.h" #include "rgxapi_km.h" @@ -69,6 +70,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "process_stats.h" #include "rgx_hwperf_table.h" #include "rgxinit.h" +#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) +#include "ospvr_gputrace.h" +#endif #include "info_page_defs.h" @@ -91,6 +95,9 @@ static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); +static IMG_UINT64 RGXHWPerfFwSetEventFilterNoLock(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT64 uiFilter); static inline IMG_UINT32 RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, @@ -98,13 +105,15 @@ RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) { IMG_UINT32 sizeSum = 0; + RGXFwSharedMemCacheOpValue(psCurPkt->ui32Size, INVALIDATE); /* Traverse the array to find how many packets will fit in the available space. */ - while ( sizeSum < ui32BytesExp && + while ( sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) <= ui32BytesExp && sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) { sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); + RGXFwSharedMemCacheOpValue(psCurPkt->ui32Size, INVALIDATE); } return sizeSum; @@ -112,15 +121,151 @@ RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, static inline void RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, - IMG_BOOL bIsReaderConnected) + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_BOOL bIsReaderConnected) { + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + if (!bIsReaderConnected) { - PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full " - "and no reader is currently connected, suspending event collection. " - "Connect a reader or restart driver to avoid event loss.", __func__)); - psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; + PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but L2 host buffer " + "for stream %u is full and no reader is currently connected, suspending " + "event collection. Connect a reader or restart driver to avoid event loss.", + __func__, eL2StreamId)); + psDeviceInfo->bSuspendHWPerfL2DataCopy[eL2StreamId] = IMG_TRUE; + } +} + +static IMG_UINT32 RGXHWPerfCopyData(PVRSRV_RGXDEV_INFO *psDeviceInfo, + IMG_BYTE *pbSrcBuffer, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT32 uiBytesToCopy) +{ + IMG_BYTE *pbDestBuffer; + IMG_UINT32 uiBytesCopied = 0; + IMG_UINT32 uiFreeSpace; + IMG_UINT32 uiBytesToCopyMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbSrcBuffer)); + IMG_BOOL bIsReaderConnected; + PVRSRV_ERROR eError; + IMG_HANDLE hHWPerfDestStream; + + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + + hHWPerfDestStream = psDeviceInfo->hHWPerfStream[eL2StreamId]; + + PVR_DPF_ENTERED; + + /* Try submitting all data in one TL packet. 
*/ + eError = TLStreamReserve2(hHWPerfDestStream, &pbDestBuffer, uiBytesToCopy, uiBytesToCopyMin, + &uiFreeSpace, &bIsReaderConnected); + if (eError == PVRSRV_OK) + { + RGXFwSharedMemCacheOpExec(pbSrcBuffer, uiBytesToCopy, PVRSRV_CACHE_OP_INVALIDATE); + OSDeviceMemCopy(pbDestBuffer, pbSrcBuffer, (size_t) uiBytesToCopy); + + eError = TLStreamCommit(hHWPerfDestStream, uiBytesToCopy); + PVR_LOG_GOTO_IF_ERROR_VA(eError, ErrReturn, "TLStreamCommit() failed with error %d, " + "unable to copy packet from L1 to L2 buffer", eError); + + /* Data were successfully written */ + uiBytesCopied = (size_t) uiBytesToCopy; + } + else if (eError == PVRSRV_ERROR_STREAM_FULL) + { + /* There was not enough space for all data, copy as much as possible */ + IMG_UINT32 uiSizeSum = RGXHWPerfGetPackets(uiBytesToCopy, uiFreeSpace, + RGX_HWPERF_GET_PACKET(pbSrcBuffer)); + + PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, " + "remaining free space: %d", uiBytesToCopy, uiFreeSpace)); + + if (uiSizeSum != 0) + { + eError = TLStreamReserve(hHWPerfDestStream, &pbDestBuffer, uiSizeSum); + + if (eError == PVRSRV_OK) + { + RGXFwSharedMemCacheOpExec(pbSrcBuffer, uiSizeSum, PVRSRV_CACHE_OP_INVALIDATE); + OSDeviceMemCopy(pbDestBuffer, pbSrcBuffer, (size_t) uiSizeSum); + + eError = TLStreamCommit(hHWPerfDestStream, uiSizeSum); + PVR_LOG_GOTO_IF_ERROR_VA(eError, ErrReturn, "TLStreamCommit() failed with error " + "%d, unable to copy packet from L1 to L2 buffer", eError); + + /* uiSizeSum bytes of hwperf packets have been successfully written */ + uiBytesCopied = uiSizeSum; + } + else if (eError == PVRSRV_ERROR_STREAM_FULL) + { + PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data " + "in case of packet loss, remaining free space: %d", uiFreeSpace)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, eL2StreamId, bIsReaderConnected); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of " + "packet loss, remaining free space: %d", uiFreeSpace)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, eL2StreamId, bIsReaderConnected); + } + } + + /* Some other error occurred. Full error handled by caller, we returning the copied bytes count + * to caller */ + if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_STREAM_FULL) + { + PVR_DPF((PVR_DBG_ERROR, "HWPerf enabled: Unexpected Error (%d) while copying FW buffer " + "to destination buffer.", eError)); + } + +ErrReturn: + /* Return the remaining packets left to be transported. 
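 * In practice this is the number of bytes actually committed to the L2
 * stream: RGXHWPerfDataStore() advances the firmware L1 read index by
 * exactly this amount, so anything that was not copied remains in the L1
 * buffer for the next MISR pass.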
*/ + PVR_DPF_RETURN_VAL(uiBytesCopied); +} + +/* + RGXHWPerfGetMaxTransfer + */ +static IMG_UINT32 RGXHWPerfGetMaxTransfer(PVRSRV_RGXDEV_INFO *psDeviceInfo, + IMG_UINT32 ui32BytesExp, + IMG_UINT32 uiL2StreamCopyMask) +{ + IMG_UINT32 uiMaxXfer = ui32BytesExp; + IMG_UINT32 eL2StreamId; + + for (eL2StreamId = 0; eL2StreamId < RGX_HWPERF_L2_STREAM_LAST; eL2StreamId++) + { + if (BIT_ISSET(uiL2StreamCopyMask, eL2StreamId)) + { + IMG_UINT32 uiMaxXferSize = TLStreamGetMaxTransfer( + ui32BytesExp, + psDeviceInfo->hHWPerfStream[eL2StreamId]); + + if (uiMaxXferSize < uiMaxXfer) + { + /* New minimum size found, save it for later */ +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, + "%s(dev %u, len %u, mask %u) New/Old = [0x%x/0x%x]", + __func__, eL2StreamId, ui32BytesExp, + uiL2StreamCopyMask, uiMaxXferSize, uiMaxXfer)); +#endif + + uiMaxXfer = uiMaxXferSize; + } +#if defined(DEBUG) + else + { + PVR_DPF((PVR_DBG_VERBOSE, + "%s(dev %u, len %u, mask %u) New/Old = [0x%x/0x%x]", + __func__, eL2StreamId, ui32BytesExp, + uiL2StreamCopyMask, uiMaxXferSize, uiMaxXfer)); + } +#endif + } } + + return uiMaxXfer; } /* @@ -128,37 +273,85 @@ RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, */ static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, IMG_BYTE *pbFwBuffer, - IMG_UINT32 ui32BytesExp) + IMG_UINT32 uiL1DataSize, + IMG_UINT32 uiL2StreamCopyMask) { - IMG_HANDLE hHWPerfStream = psDeviceInfo->hHWPerfStream; - IMG_BYTE * pbL2Buffer; - IMG_UINT32 ui32L2BufFree; - IMG_UINT32 ui32BytesCopied = 0; - IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer)); - PVRSRV_ERROR eError; - IMG_BOOL bIsReaderConnected; + IMG_UINT32 eL2StreamId, ui32L2AvailableSpace, uiHWPerfBytesCopied = 0; /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */ #ifdef HWPERF_MISR_FUNC_DEBUG static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX; #endif + /* Invalidate initial packet header, type/size cast via RGX_HWPEF_GET_PACKET */ + RGXFwSharedMemCacheOpPtr(RGX_HWPERF_GET_PACKET(pbFwBuffer), INVALIDATE); + PVR_DPF_ENTERED; #ifdef HWPERF_MISR_FUNC_DEBUG - PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d", - pbFwBuffer, ui32BytesExp)); + PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d mask:0x%2x", + pbFwBuffer, uiL1DataSize, uiL2StreamCopyMask)); #endif + /* Determine the maximum space available in all consumer (L2) streams. + * This limits the amount of data that will be copied if we have multiple + * L2 consumers registered. In this case we only transfer the amount of + * data that can fit into all available L2 consumer streams and so we will + * exert back-pressure onto the L1 buffer whenever one of the L2 consumer + * streams gets filled. + * This effectively lock-steps the L2 consumer streams together. + */ + ui32L2AvailableSpace = RGXHWPerfGetMaxTransfer(psDeviceInfo, uiL1DataSize, uiL2StreamCopyMask); + + for (eL2StreamId = 0; eL2StreamId < RGX_HWPERF_L2_STREAM_LAST; eL2StreamId++) + { + if (BIT_ISSET(uiL2StreamCopyMask, eL2StreamId)) + { + IMG_UINT32 uiBytesCopied, uiPacketDataSize = ui32L2AvailableSpace; + IMG_UINT32 uiL2MaxPacketSize = psDeviceInfo->ui32L2BufMaxPacketSize[eL2StreamId]; + + /* Adjust the copied data size if the maximum packet size is smaller + * than the available space or if the L1 data we want to read can't + * fit into the available space. 
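 * As an illustrative sketch only (local names simplified, and the cache
 * maintenance performed by the real RGXHWPerfGetPackets() omitted), the
 * whole-packet trim walks the packet headers and accumulates sizes while
 * they still fit within both limits:
 *
 *     sum = 0;
 *     pkt = RGX_HWPERF_GET_PACKET(pbFwBuffer);
 *     while (sum + RGX_HWPERF_GET_SIZE(pkt) <= ui32L2AvailableSpace &&
 *            sum + RGX_HWPERF_GET_SIZE(pkt) <  uiL2MaxPacketSize)
 *     {
 *         sum += RGX_HWPERF_GET_SIZE(pkt);
 *         pkt  = RGX_HWPERF_GET_NEXT_PACKET(pkt);
 *     }
 *
 * 'sum' is then the largest byte count made of whole packets that
 * respects both the available space and the maximum TL packet size.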
*/ + if (ui32L2AvailableSpace > uiL2MaxPacketSize || ui32L2AvailableSpace < uiL1DataSize) + { + uiPacketDataSize = RGXHWPerfGetPackets(ui32L2AvailableSpace, uiL2MaxPacketSize, + RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + if (uiPacketDataSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer " + "(%u) as packet is too big (%u) and can't fit into available " + "space (%u)", eL2StreamId, uiL1DataSize, + MIN(ui32L2AvailableSpace, uiL2MaxPacketSize))); + + continue; + } + } + + uiBytesCopied = RGXHWPerfCopyData(psDeviceInfo, pbFwBuffer, eL2StreamId, + uiPacketDataSize); + + uiHWPerfBytesCopied = MAX(uiBytesCopied, uiHWPerfBytesCopied); + } + } + #ifdef HWPERF_MISR_FUNC_DEBUG + if (uiHWPerfBytesCopied > 0) { /* Check the incoming buffer of data has not lost any packets */ IMG_BYTE *pbFwBufferIter = pbFwBuffer; - IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp; + IMG_BYTE *pbFwBufferEnd = pbFwBuffer + uiHWPerfBytesCopied; + do { RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter); - IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal; + IMG_UINT32 ui32CurOrdinal; + /* Invalidate HDR pointed to by asCurPos as we use both ordinal for detecting + * lost packets and size for iteration. + */ + RGXFwSharedMemCacheOpPtr(asCurPos, INVALIDATE); + ui32CurOrdinal = asCurPos->ui32Ordinal; if (gui32Ordinal != IMG_UINT32_MAX) { if ((gui32Ordinal+1) != ui32CurOrdinal) @@ -188,92 +381,8 @@ static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, } #endif - if (ui32BytesExp > psDeviceInfo->ui32L2BufMaxPacketSize) - { - IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, - psDeviceInfo->ui32L2BufMaxPacketSize, - RGX_HWPERF_GET_PACKET(pbFwBuffer)); - - if (0 != sizeSum) - { - ui32BytesExp = sizeSum; - } - else - { - PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as " - "packet is too big and hence it breaches TL " - "packet size limit (TLBufferSize / 2.5)")); - goto e0; - } - } - - /* Try submitting all data in one TL packet. 
*/ - eError = TLStreamReserve2(hHWPerfStream, - &pbL2Buffer, - (size_t)ui32BytesExp, ui32BytesExpMin, - &ui32L2BufFree, &bIsReaderConnected); - if ( eError == PVRSRV_OK ) - { - OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp ); - eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp); - if ( eError != PVRSRV_OK ) - { - PVR_DPF((PVR_DBG_ERROR, - "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", - eError, __func__)); - goto e0; - } - /* Data were successfully written */ - ui32BytesCopied = ui32BytesExp; - } - else if (eError == PVRSRV_ERROR_STREAM_FULL) - { - /* There was not enough space for all data, copy as much as possible */ - IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer)); - - PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree)); - - if ( 0 != sizeSum ) - { - eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum); - - if ( eError == PVRSRV_OK ) - { - OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum ); - eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum); - if ( eError != PVRSRV_OK ) - { - PVR_DPF((PVR_DBG_ERROR, - "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", - eError, __func__)); - goto e0; - } - /* sizeSum bytes of hwperf packets have been successfully written */ - ui32BytesCopied = sizeSum; - } - else if ( PVRSRV_ERROR_STREAM_FULL == eError ) - { - PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); - RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); - } - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); - RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); - } - } - if ( PVRSRV_OK != eError && /* Some other error occurred */ - PVRSRV_ERROR_STREAM_FULL != eError ) /* Full error handled by caller, we returning the copied bytes count to caller */ - { - PVR_DPF((PVR_DBG_ERROR, - "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.", - eError)); - } - -e0: /* Return the remaining packets left to be transported. */ - PVR_DPF_RETURN_VAL(ui32BytesCopied); + PVR_DPF_RETURN_VAL(uiHWPerfBytesCopied); } @@ -288,6 +397,15 @@ static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx( /* RGXHWPerfDataStore + + This function copies HWPerf data from L1 buffer to all L2 streams. + The number of copied data is always the maximum read number of packets. + In case where one of the stream is not able to accept the same amount of + data as other streams it will suffer from gaps in the data. + + To avoid these gaps, we will exert back-pressure on the L1 buffer by + pre-calculating the maximum amount of data that can be copied to all + L2 streams (currently a maximum of 2). 
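 *
 * As an illustrative sketch only (simplified from RGXHWPerfGetMaxTransfer()
 * above), that pre-calculation is a minimum taken over the enabled streams:
 *
 *     copyable = uiL1DataSize;
 *     for (id = 0; id < RGX_HWPERF_L2_STREAM_LAST; id++)
 *     {
 *         if (BIT_ISSET(uiL2StreamCopyMask, id))
 *         {
 *             xfer = TLStreamGetMaxTransfer(uiL1DataSize,
 *                                           psDevInfo->hHWPerfStream[id]);
 *             copyable = MIN(copyable, xfer);
 *         }
 *     }
 *
 * Every per-stream copy is then bounded by 'copyable', so the most
 * constrained consumer dictates how far the L1 read index may advance.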
*/ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) { @@ -295,34 +413,70 @@ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf; IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount; IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0; + IMG_UINT32 uiStreamCopyMask = 0; #ifdef HWPERF_MISR_FUNC_DEBUG IMG_UINT32 ui32BytesExpSum = 0; #endif + /* It's unlikely that we're ever going to have more than 32 consumers + * for the Firmware L1 buffer but check just to be safe. */ + static_assert(RGX_HWPERF_L2_STREAM_LAST <= sizeof(uiStreamCopyMask) * 8, + "RGX_HWPERF_L2_STREAM_LAST cannot be greater than 32.") + PVR_DPF_ENTERED; /* Caller should check this member is valid before calling */ - PVR_ASSERT(psDevInfo->hHWPerfStream); - - if (psDevInfo->bSuspendHWPerfL2DataCopy) { - PVR_DPF((PVR_DBG_MESSAGE, - "%s : Copying data to host buffer for FW events is " - "suspended. Start HWPerf consumer or restart driver if " - "HWPerf FW events are needed", __func__)); + RGX_HWPERF_L2_STREAM_ID eL2StreamId; +#if defined(PVRSRV_NEED_PVR_ASSERT) + IMG_UINT32 uiNotNullCount = 0; +#endif + for (eL2StreamId = 0; eL2StreamId < RGX_HWPERF_L2_STREAM_LAST; eL2StreamId++) + { +#if defined(PVRSRV_NEED_PVR_ASSERT) + if (psDevInfo->hHWPerfStream[eL2StreamId] != NULL) + { + uiNotNullCount++; + } +#endif + if (!psDevInfo->bSuspendHWPerfL2DataCopy[eL2StreamId]) + { + BIT_SET(uiStreamCopyMask, eL2StreamId); + } + } - PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); +#if defined(PVRSRV_NEED_PVR_ASSERT) + /* At least one stream must exist. */ + PVR_ASSERT(uiNotNullCount > 0); +#endif + + /* Only proceed if any of the streams are not suspended. + * Build bit field representing each L2 stream's suspend status. This + * will be passed down the stack for copy function to determine if the + * data should or should not be copied. */ + if (uiStreamCopyMask == 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s : Copying data to all L2 host buffers for FW events is " + "suspended. Start at least one of the HWPerf consumers or restart the driver " + "if HWPerf FW events are needed", __func__)); + + PVR_DPF_RETURN_VAL(0); + } } + /* Invalidate partial region of struct */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl, + INVALIDATE); + /* Get a copy of the current * read (first packet to read) * write (empty location for the next write to be inserted) * WrapCount (size in bytes of the buffer at or past end) * indexes of the FW buffer */ - ui32SrcRIdx = psFwSysData->ui32HWPerfRIdx; - ui32SrcWIdx = psFwSysData->ui32HWPerfWIdx; + ui32SrcRIdx = psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx; + ui32SrcWIdx = psFwSysData->sHWPerfCtrl.ui32HWPerfWIdx; OSMemoryBarrier(NULL); - ui32SrcWrapCount = psFwSysData->ui32HWPerfWrapCount; + ui32SrcWrapCount = psFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount; #if defined(HWPERF_MISR_FUNC_DEBUG) || defined(EMULATOR) { @@ -353,15 +507,18 @@ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) #endif ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, psHwPerfInfo + ui32SrcRIdx, - ui32BytesExp); + ui32BytesExp, + uiStreamCopyMask); /* Advance the read index and the free bytes counter by the number * of bytes transported. Items will be left in buffer if not all data * could be transported. Exit to allow buffer to drain. 
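 * The explicit flush of sHWPerfCtrl.ui32HWPerfRIdx added below is what
 * publishes the new read index to the firmware on platforms where the FW
 * shared memory is not hardware-coherent; without it the firmware could
 * keep seeing a stale read offset and regard the L1 buffer as fuller than
 * it really is.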
*/ - OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfRIdx, + OSWriteDeviceMem32WithWMB(&psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, RGXHWPerfAdvanceRIdx(psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, ui32BytesCopied)); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, + FLUSH); ui32BytesCopiedSum += ui32BytesCopied; } @@ -376,10 +533,10 @@ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) #ifdef HWPERF_MISR_FUNC_DEBUG ui32BytesExpSum += ui32BytesExp; #endif - /* Attempt to transfer the packets to the TL stream buffer */ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, psHwPerfInfo + ui32SrcRIdx, - ui32BytesExp); + ui32BytesExp, + uiStreamCopyMask); /* Advance read index as before and Update the local copy of the * read index as it might be used in the last if branch*/ @@ -390,10 +547,12 @@ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) /* Update Wrap Count */ if ( ui32SrcRIdx == 0) { - OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfWrapCount, + OSWriteDeviceMem32WithWMB(&psFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount, psDevInfo->ui32RGXFWIfHWPerfBufSize); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount, + FLUSH); } - OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfRIdx, ui32SrcRIdx); + OSWriteDeviceMem32WithWMB(&psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, ui32SrcRIdx); ui32BytesCopiedSum += ui32BytesCopied; @@ -410,19 +569,23 @@ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) #endif ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, psHwPerfInfo, - ui32BytesExp); + ui32BytesExp, + uiStreamCopyMask); /* Advance the FW buffer read position. */ - psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, ui32BytesCopied); ui32BytesCopiedSum += ui32BytesCopied; } + /* This flush covers both writes above */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, + FLUSH); } #ifdef HWPERF_MISR_FUNC_DEBUG if (ui32BytesCopiedSum != ui32BytesExpSum) { - PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); } #endif @@ -435,48 +598,55 @@ static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); } - +/* Function called from MISR to copy data from L1 buffer to L2 streams. 
*/ PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo) { PVRSRV_ERROR eError = PVRSRV_OK; PVRSRV_RGXDEV_INFO* psRgxDevInfo; IMG_UINT32 ui32BytesCopied; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVR_ASSERT(psDevInfo); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevInfo, PVRSRV_OK); PVR_DPF_ENTERED; - PVR_ASSERT(psDevInfo); psRgxDevInfo = psDevInfo->pvDevice; /* Store FW event data if the destination buffer exists.*/ - if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->uiHWPerfStreamCount > 0) { - OSLockAcquire(psRgxDevInfo->hHWPerfLock); ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo); if ( ui32BytesCopied ) - { /* Signal consumers that packets may be available to read when - * running from a HW kick, not when called by client APP thread - * via the transport layer CB as this can lead to stream - * corruption.*/ - eError = TLStreamSync(psRgxDevInfo->hHWPerfStream); - PVR_ASSERT(eError == PVRSRV_OK); + { + /* It's possible that the HWPerf stream doesn't exist yet. It's + * possible that only FTrace L2 stream has been created so far. */ + if (psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] != NULL) + { + /* Signal consumers that packets may be available to read when + * running from a HW kick, not when called by client APP thread + * via the transport layer CB as this can lead to stream + * corruption. */ + eError = TLStreamSync(psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF]); + PVR_LOG_IF_ERROR(eError, "TLStreamSync"); + } } else { PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied")); RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo); } - OSLockRelease(psRgxDevInfo->hHWPerfLock); } + OSLockRelease(psRgxDevInfo->hHWPerfLock); - PVR_DPF_RETURN_OK; + PVR_DPF_RETURN_RC(eError); } /* Currently supported by default */ -#if defined(SUPPORT_TL_PRODUCER_CALLBACK) +#if !defined(NO_HARDWARE) && defined(SUPPORT_TL_PRODUCER_CALLBACK) static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) { @@ -511,7 +681,7 @@ static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, return PVRSRV_OK; } - if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + if (psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] != NULL) { (void) RGXHWPerfDataStore(psRgxDevInfo); } @@ -556,23 +726,27 @@ static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) { PVRSRV_ERROR eError; - - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + IMG_UINT32 i; PVR_DPF_ENTERED; /* expecting a valid device info */ PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_OK); + /* Create a lock for HWPerf server module used for serializing, L1 to L2 * copy calls (e.g. in case of TL producer callback) and L1, L2 resource * allocation */ eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock); PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); - /* avoid uninitialised data */ - psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL; - psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + /* Initialise only non-zero fields since psRgxDevInfo is zeroed + * on allocation. 
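 * Starting every L2 stream in the suspended state is deliberate: data is
 * only copied to a stream once a consumer shows up. For the TL-backed
 * HWPerf stream the flag is cleared by _HWPerfFWOnReaderOpenCB() when a
 * reader attaches and set again by _HWPerfFWOnReaderCloseCB() when it
 * detaches; RGXHWPerfDeinitL2Stream() likewise re-suspends a stream as it
 * is torn down.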
*/ + for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) + { + psRgxDevInfo->bSuspendHWPerfL2DataCopy[i] = IMG_TRUE; + } PVR_DPF_RETURN_OK; } @@ -590,37 +764,24 @@ PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) @Return IMG_BOOL Whether initialization (allocation) is required */ /**************************************************************************/ -static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId) { PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock)); #if !defined(NO_HARDWARE) + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver * built for actual hardware (TC, EMU, etc.) */ - if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL) - { - /* The allocation API (RGXHWPerfInitOnDemandResources) allocates - * device memory for both L1 and L2 without any checks. Hence, - * either both should be allocated or both be NULL. - * - * In-case this changes in future (for e.g. a situation where one - * of the 2 buffers is already allocated and other is required), - * add required checks before allocation calls to avoid memory leaks. - */ - PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL); - return IMG_TRUE; - } - PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL); + return psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL || + psRgxDevInfo->hHWPerfStream[eL2StreamId] == NULL; #else /* On a NO-HW driver L2 is not allocated. So, no point in checking its * allocation */ - if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL) - { - return IMG_TRUE; - } + return psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL; #endif - return IMG_FALSE; } #if !defined(NO_HARDWARE) static void _HWPerfFWOnReaderOpenCB(void *pvArg) @@ -631,15 +792,15 @@ static void _HWPerfFWOnReaderOpenCB(void *pvArg) RGXFWIF_KCCB_CMD sKccbCmd; IMG_UINT32 ui32kCCBCommandSlot; - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDevNode); /* Clear any previously suspended state for bSuspendHWPerfL2DataCopy as we * now have a reader attached so the data will be delivered upstream. */ - if (psRgxDevInfo->bSuspendHWPerfL2DataCopy) + if (psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_HWPERF]) { - PVR_DPF((PVR_DBG_WARNING, "%s: Resuming HWPerf FW event collection.", + PVR_DPF((PVR_DBG_MESSAGE, "%s: Resuming HWPerf FW event collection.", __func__)); - psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE; + psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_HWPERF] = IMG_FALSE; } sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; @@ -661,35 +822,45 @@ static void _HWPerfFWOnReaderOpenCB(void *pvArg) eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); } + +static void _HWPerfFWOnReaderCloseCB(void *pvArg) +{ + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; + + psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_HWPERF] = IMG_TRUE; +} #endif + /*************************************************************************/ /*! -@Function RGXHWPerfInitOnDemandResources +@Function RGXHWPerfInitOnDemandL1Buffer @Description This function allocates the HWperf firmware buffer (L1 buffer) - and host driver TL buffer (L2 buffer) if HWPerf is enabled at - driver load time. 
Otherwise, these buffers are allocated - on-demand as and when required. Caller - must possess hHWPerfLock lock before calling this - function so the state tested is not inconsistent if called - outside of driver initialisation. + if HWPerf is enabled at driver load time. Otherwise, this + buffer is allocated on-demand as and when required. Caller must + possess hHWPerfLock lock before calling this function so the + state tested is not inconsistent if called outside of + initialisation. @Input psRgxDevInfo RGX Device Info, on which init is done @Return PVRSRV_ERROR */ /**************************************************************************/ -PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) +PVRSRV_ERROR RGXHWPerfInitOnDemandL1Buffer(PVRSRV_RGXDEV_INFO *psRgxDevInfo) { - IMG_HANDLE hStream = NULL; /* Init required for noHW */ - PVRSRV_ERROR eError; - IMG_UINT32 ui32L2BufferSize = 0; PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; - IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold - names up to "hwperf_9999", which is enough */ + PVRSRV_ERROR eError; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_ERROR_NOT_IMPLEMENTED); PVR_DPF_ENTERED; + /* This function might be called more than once due to initialisation of + * multiple consumers. Make sure that L1 is only ever initialised once. */ + if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) + { + PVR_DPF_RETURN_OK; + } + /* Create the L1 HWPerf buffer on demand, read-only for the CPU * (except for the zero/poison operations) */ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) @@ -704,6 +875,7 @@ PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) #else /* Helps show corruption issues in driver-live */ | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC #endif + | PVRSRV_MEMALLOCFLAG_RI_FWKMD_ALLOC | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); /* Allocate HWPerf FW L1 buffer */ @@ -718,7 +890,7 @@ PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate kernel fw hwperf buffer (%u)", __func__, eError)); - goto e0; + goto ErrReturn; } /* Expecting the RuntimeCfg structure is mapped into CPU virtual memory. 
@@ -730,7 +902,7 @@ PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, 0, RFW_FWADDR_NOREF_FLAG); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e0); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrDeInitL1Buffer); #if defined(RGX_FEATURE_HWPERF_VOLCANIC) RGXSetMetaDMAAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfDMABuf, @@ -741,6 +913,7 @@ PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) /* flush write buffers for psRgxDevInfo->psRGXFWIfRuntimeCfg */ OSWriteMemoryBarrier(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr); + RGXFwSharedMemCacheOpValue(psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr, FLUSH); eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf); @@ -749,106 +922,151 @@ PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire kernel hwperf buffer (%u)", __func__, eError)); - goto e0; + goto ErrDeInitL1Buffer; } - /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence, - * L2 buffer is not allocated */ + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d", + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize)); + + PVR_DPF_RETURN_OK; + +ErrDeInitL1Buffer: + /* L1 buffer initialisation failures */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); +ErrReturn: + PVR_DPF_RETURN_RC(eError); +} + +/*************************************************************************/ /*! +@Function RGXHWPerfInitOnDemandL2Stream + +@Description This function allocates the HWperf firmware buffer (L1 buffer) + and host driver TL buffer (L2 buffer) if HWPerf is enabled at + driver load time. Otherwise, these buffers are allocated + on-demand as and when required. Caller must possess hHWPerfLock + lock before calling this function so the state tested is not + inconsistent if called outside of driver initialisation. + +@Input psRgxDevInfo RGX Device Info, on which init is done + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInitOnDemandL2Stream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId) +{ + PVRSRV_ERROR eError = PVRSRV_OK; #if !defined(NO_HARDWARE) - /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer - * accessed by the FW. The MISR may try to write one packet the size of the L1 - * buffer in some scenarios. When logging is enabled in the MISR, it can be seen - * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers - * are the more chance of this happening. - * Size chosen to allow MISR to write an L1 sized packet and for the client - * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. 
- */ - ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize + - (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1); + IMG_HANDLE hStream; + TL_STREAM_INFO sTLStreamInfo; +#endif + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ - if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", - PVRSRV_TL_HWPERF_RGX_FW_STREAM, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + + PVR_DPF_ENTERED; + +#if !defined(NO_HARDWARE) + if (eL2StreamId == RGX_HWPERF_L2_STREAM_HWPERF) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to form HWPerf stream name for device %d", - __func__, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); - return PVRSRV_ERROR_INVALID_PARAMS; - } + /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence, + * L2 buffer is not allocated */ + IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 4]; + /* + 4 is used to allow names up to "hwperf_fw_999", which is enough */ + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } - eError = TLStreamCreate(&hStream, - pszHWPerfStreamName, - ui32L2BufferSize, - TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, - _HWPerfFWOnReaderOpenCB, psRgxDevInfo, + eError = TLStreamCreate(&hStream, + pszHWPerfStreamName, + psRgxDevInfo->ui32RGXL2HWPerfBufSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, + _HWPerfFWOnReaderOpenCB, psRgxDevInfo, + _HWPerfFWOnReaderCloseCB, psRgxDevInfo, #if !defined(SUPPORT_TL_PRODUCER_CALLBACK) - NULL, NULL + NULL, NULL #else - /* Not enabled by default */ - RGXHWPerfTLCB, psRgxDevInfo + /* Not enabled by default */ + RGXHWPerfTLCB, psRgxDevInfo #endif - ); - PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1); + ); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", ErrClearStream); - eError = TLStreamSetNotifStream(hStream, - PVRSRVGetPVRSRVData()->hTLCtrlStream); - /* we can still discover host stream so leave it as is and just log error */ - PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); - - /* send the event here because host stream is implicitly opened for write - * in TLStreamCreate and TLStreamOpen is never called (so the event is - * never emitted) */ - TLStreamMarkStreamOpen(hStream); + eError = TLStreamSetNotifStream(hStream, + PVRSRVGetPVRSRVData()->hTLCtrlStream); + /* we can still discover host stream so leave it as is and just log error */ + PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); - { - TL_STREAM_INFO sTLStreamInfo; + /* send the event here because host stream is implicitly opened for write + * in TLStreamCreate and TLStreamOpen is never called (so the event is + * never emitted) */ + TLStreamMarkStreamOpen(hStream); - TLStreamInfo(hStream, &sTLStreamInfo); - psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize; + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d", + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, + psRgxDevInfo->ui32RGXL2HWPerfBufSize)); - psRgxDevInfo->bSuspendHWPerfL2DataCopy 
= IMG_FALSE; + psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] = hStream; + psRgxDevInfo->uiHWPerfStreamCount++; + PVR_ASSERT(psRgxDevInfo->uiHWPerfStreamCount <= RGX_HWPERF_L2_STREAM_LAST); + } +#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) + else if (eL2StreamId == RGX_HWPERF_L2_STREAM_FTRACE) + { + eError = PVRGpuTraceInitStream(psRgxDevInfo); + PVR_LOG_IF_ERROR(eError, "PVRGpuTraceInitStream"); } - - PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d", - psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize)); - -#else /* defined(NO_HARDWARE) */ - PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize); - PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB); - PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName); - ui32L2BufferSize = 0; #endif - psRgxDevInfo->hHWPerfStream = hStream; + TLStreamInfo(psRgxDevInfo->hHWPerfStream[eL2StreamId], &sTLStreamInfo); + psRgxDevInfo->ui32L2BufMaxPacketSize[eL2StreamId] = sTLStreamInfo.maxTLpacketSize; +#else + psRgxDevInfo->hHWPerfStream[eL2StreamId] = NULL; +#endif /* !defined(NO_HARDWARE) */ + PVR_DPF_RETURN_OK; #if !defined(NO_HARDWARE) -e1: /* L2 buffer initialisation failures */ - psRgxDevInfo->hHWPerfStream = NULL; +ErrClearStream: /* L2 buffer initialisation failures */ + psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] = NULL; #endif -e0: /* L1 buffer initialisation failures */ + /* L1 buffer initialisation failures */ RGXHWPerfL1BufferDeinit(psRgxDevInfo); PVR_DPF_RETURN_RC(eError); } -void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +void RGXHWPerfDeinitL2Stream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId) { - IMG_HANDLE hStream = psRgxDevInfo->hHWPerfStream; - - PVRSRV_VZ_RETN_IF_MODE(GUEST); + IMG_HANDLE hStream; PVR_DPF_ENTERED; PVR_ASSERT(psRgxDevInfo); - psRgxDevInfo->hHWPerfStream = NULL; + + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVINFO, psRgxDevInfo); + + hStream = psRgxDevInfo->hHWPerfStream[eL2StreamId]; /* Clean up the L2 buffer stream object if allocated */ if (hStream) { + psRgxDevInfo->hHWPerfStream[eL2StreamId] = NULL; + psRgxDevInfo->bSuspendHWPerfL2DataCopy[eL2StreamId] = IMG_TRUE; + psRgxDevInfo->uiHWPerfStreamCount--; + PVR_ASSERT(psRgxDevInfo->uiHWPerfStreamCount < RGX_HWPERF_L2_STREAM_LAST); + /* send the event here because host stream is implicitly opened for * write in TLStreamCreate and TLStreamClose is never called (so the * event is never emitted) */ @@ -856,6 +1074,11 @@ void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) TLStreamClose(hStream); } + PVR_DPF_RETURN; +} + +void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ /* Cleanup L1 buffer resources */ RGXHWPerfL1BufferDeinit(psRgxDevInfo); @@ -865,8 +1088,6 @@ void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) OSLockDestroy(psRgxDevInfo->hHWPerfLock); psRgxDevInfo->hHWPerfLock = NULL; } - - PVR_DPF_RETURN; } @@ -875,6 +1096,7 @@ void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) *****************************************************************************/ static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, IMG_BOOL bToggle, IMG_UINT64 ui64Mask) { @@ -882,8 +1104,18 @@ static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; RGXFWIF_KCCB_CMD sKccbCmd; IMG_UINT32 ui32kCCBCommandSlot; + IMG_UINT64 ui64MaskValue = ui64Mask; + IMG_UINT64 ui64OldMaskValue; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, 
PVRSRV_ERROR_NOT_SUPPORTED); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + + /* Modify mask to include the default bit settings if it is non-zero */ + if (!bToggle && (ui64Mask != 0ULL)) + { + ui64MaskValue = ui64Mask | RGX_HWPERF_EVENT_MASK_DEFAULT; + } /* If this method is being used whether to enable or disable * then the hwperf buffers (host and FW) are likely to be needed @@ -898,50 +1130,50 @@ static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode /* No other initialisation can be done at this point until the FW is * initialised so unlock, log and return Ok so the caller knows * the filter was set. */ - psDevice->ui64HWPerfFilter = ui64Mask; + (void) RGXHWPerfFwSetEventFilterNoLock(psDevice, eL2StreamId, ui64MaskValue); OSLockRelease(psDevice->hHWPerfLock); goto done_; } - if (RGXHWPerfIsInitRequired(psDevice)) + if (RGXHWPerfIsInitRequired(psDevice, eL2StreamId)) { - eError = RGXHWPerfInitOnDemandResources(psDevice); + eError = RGXHWPerfInitOnDemandL1Buffer(psDevice); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " "resources failed", __func__)); goto unlock_and_return; } - } -#if defined(RGX_FEATURE_HWPERF_VOLCANIC) && defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) - if (RGXPowmonBufferIsInitRequired(psDeviceNode->pvDevice)) - { - /* Allocate power monitoring log buffer if enabled */ - eError = RGXPowmonBufferInitOnDemandResources(psDeviceNode->pvDevice); + /* if this fails it also cleans up L1 buffer */ + eError = RGXHWPerfInitOnDemandL2Stream(psDevice, eL2StreamId); if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand power monitoring " + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " "resources failed", __func__)); goto unlock_and_return; } } -#endif /* Unlock here as no further HWPerf resources are used below that would be * affected if freed by another thread */ OSLockRelease(psDevice->hHWPerfLock); /* Return if the filter is the same */ - if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask) + if (!bToggle && psDevice->ui64HWPerfFilter[eL2StreamId] == ui64MaskValue) { goto done_; } + ui64OldMaskValue = psDevice->ui64HWPerfFilter[eL2StreamId]; + ui64MaskValue = RGXHWPerfFwSetEventFilter(psDevice, eL2StreamId, bToggle + ? psDevice->ui64HWPerfFilter[eL2StreamId] ^ ui64MaskValue + : ui64MaskValue); + /* Prepare command parameters ... */ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET; - sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64MaskValue; /* Ask the FW to carry out the HWPerf configuration command */ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, @@ -953,36 +1185,24 @@ static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in " "firmware (error = %d)", __func__, eError)); - goto return_; + goto restore_mask_; } - psDevice->ui64HWPerfFilter = bToggle ? 
- psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask; - /* Wait for FW to complete */ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", restore_mask_); done_: -#if defined(DEBUG) - if (bToggle) - { - PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED", - ui64Mask)); - } - else - { - PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", - ui64Mask)); - } -#endif - return PVRSRV_OK; unlock_and_return: OSLockRelease(psDevice->hHWPerfLock); -return_: + return eError; + +restore_mask_: + (void) RGXHWPerfFwSetEventFilter(psDevice, eL2StreamId, ui64OldMaskValue); + return eError; } @@ -998,7 +1218,6 @@ static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNo IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter; #endif - OSLockAcquire(psDevice->hLockHWPerfHostStream); if (psDevice->hHWPerfHostStream == NULL) { eError = RGXHWPerfHostInitOnDemandResources(psDevice); @@ -1007,11 +1226,11 @@ static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNo PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfHost resources failed", __func__)); - OSLockRelease(psDevice->hLockHWPerfHostStream); return eError; } } + OSLockAcquire(psDevice->hLockHWPerfHostStream); psDevice->ui32HWPerfHostFilter = bToggle ? psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask; @@ -1043,18 +1262,16 @@ static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNo OSLockRelease(psDevice->hLockHWPerfHostStream); -#if defined(DEBUG) if (bToggle) { - PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED", + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfHost events (%x) have been TOGGLED", ui32Mask)); } else { - PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)", + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfHost mask has been SET to (%x)", ui32Mask)); } -#endif return PVRSRV_OK; } @@ -1065,27 +1282,25 @@ static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle, { PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); - PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START && - ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info" - " page index", PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= INFO_PAGE_HWPERF_BLOCK_START && + ui32InfoPageIdx < INFO_PAGE_HWPERF_BLOCK_END, + "invalid info page index", PVRSRV_ERROR_INVALID_PARAMS); OSLockAcquire(psData->hInfoPageLock); psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ? 
psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask; OSLockRelease(psData->hInfoPageLock); -#if defined(DEBUG) if (bToggle) { - PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED", + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfClient (%u) events (%x) have been TOGGLED", ui32InfoPageIdx, ui32Mask)); } else { - PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)", + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfClient (%u) mask has been SET to (%x)", ui32InfoPageIdx, ui32Mask)); } -#endif return PVRSRV_OK; } @@ -1097,6 +1312,8 @@ PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnecti PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS); psDevInfo = psDeviceNode->pvDevice; @@ -1114,16 +1331,14 @@ PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64Value) { PVRSRV_ERROR eError; - PVRSRV_RGXDEV_INFO *psDeviceInfo; PVR_UNREFERENCED_PARAMETER(psPrivate); PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL); PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL); - psDeviceInfo = psDeviceNode->pvDevice; - - eError = RGXHWPerfCtrlFwBuffer(psDeviceNode, IMG_FALSE, ui64Value); + eError = RGXHWPerfCtrlFwBuffer(psDeviceNode, RGX_HWPERF_L2_STREAM_HWPERF, + IMG_FALSE, ui64Value); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1140,13 +1355,16 @@ PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, const void *psPrivate, IMG_UINT64 *pui64Value) { + PVRSRV_RGXDEV_INFO *psDevInfo; + PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL); PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL); PVR_UNREFERENCED_PARAMETER(psPrivate); - *pui64Value = - ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui64HWPerfFilter; + psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + + *pui64Value = psDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF]; return PVRSRV_OK; } @@ -1406,10 +1624,10 @@ PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32 { PVRSRV_ERROR eError; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_OK); + eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream); PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error); @@ -1459,6 +1677,8 @@ static void _HWPerfHostOnConnectCB(void *pvArg) PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); } + RGXSRV_HWPERF_DEVICE_INFO_FEATURES(psDevice); + if (RGXHWPerfHostIsEventEnabled(psDevice, RGX_HWPERF_HOST_CLIENT_INFO)) { // GCC throws -Werror=frame-larger-than error if the frame size is > 1024 bytes, @@ -1502,7 +1722,7 @@ static void _HWPerfHostOnConnectCB(void *pvArg) psHostClientInfo->uDetail.sProcName.ui32Count++; psProcName->uiClientPID = psData->pid; psProcName->ui32Length = ui32NameLen; - (void)OSStringLCopy(psProcName->acName, psData->pszProcName, ui32NameLen); + (void)OSCachedMemCopy(psProcName->acName, psData->pszProcName, ui32NameLen); psProcName = (RGX_HWPERF_HOST_CLIENT_PROC_NAME*)IMG_OFFSET_ADDR(psProcName, ui32ProcNamePktSize); ui32TotalPayloadSize += ui32ProcNamePktSize; @@ -1554,10 +1774,10 @@ static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo) { PVRSRV_ERROR eError; - /* 5 makes space up to "hwperf_host_9999" streams */ - 
IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; + /* 4 makes space up to "hwperf_host_999" streams */ + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 4]; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_ERROR_NOT_IMPLEMENTED); if (psRgxDevInfo->hHWPerfHostStream != NULL) { @@ -1568,12 +1788,12 @@ PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf host stream name for device %d", __func__, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); return PVRSRV_ERROR_INVALID_PARAMS; } @@ -1581,7 +1801,7 @@ PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize, TL_OPMODE_DROP_NEWER, _HWPerfHostOnConnectCB, psRgxDevInfo, - NULL, NULL); + NULL, NULL, NULL, NULL); PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream, @@ -1645,10 +1865,10 @@ PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) { - PVRSRV_VZ_RETN_IF_MODE(GUEST); - PVR_ASSERT (psRgxDevInfo); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVINFO, psRgxDevInfo); + if (psRgxDevInfo->pui8DeferredEvents) { OSFreeMem(psRgxDevInfo->pui8DeferredEvents); @@ -1684,9 +1904,52 @@ void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) } } +static IMG_UINT64 RGXHWPerfFwSetEventFilterNoLock(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT64 uiFilter) +{ + IMG_UINT64 uiTmpFilter = 0; + IMG_UINT32 i; + + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + + /* Set filter for the given L2 stream. */ + psRgxDevInfo->ui64HWPerfFilter[eL2StreamId] = uiFilter; + + /* Compute compound filter from all existing L2 streams' filters. 
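 * Worked example (values are illustrative only): if the HWPerf TL stream
 * has requested events 0x000000000000F000 and the FTrace stream has
 * requested 0x0000000000000007, the union 0x000000000000F007 is stored in
 * ui64HWPerfFwFilter and later handed to the firmware, so the FW emits an
 * event whenever at least one of the consumers wants it.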
*/ + for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) + { + uiTmpFilter |= psRgxDevInfo->ui64HWPerfFilter[i]; + } + + psRgxDevInfo->ui64HWPerfFwFilter = uiTmpFilter; + +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfFW mask has been SET to 0x%" IMG_UINT64_FMTSPECx + " (stream %u value SET to 0x%" IMG_UINT64_FMTSPECx ")", + psRgxDevInfo->ui64HWPerfFwFilter, eL2StreamId, + psRgxDevInfo->ui64HWPerfFilter[eL2StreamId])); +#endif + + return uiTmpFilter; +} + +IMG_UINT64 RGXHWPerfFwSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT64 uiFilter) +{ + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + uiFilter = RGXHWPerfFwSetEventFilterNoLock(psRgxDevInfo, eL2StreamId, uiFilter); + + OSLockRelease(psRgxDevInfo->hHWPerfLock); + + return uiFilter; +} + inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter) { - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVINFO, psRgxDevInfo); psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter; } @@ -1945,7 +2208,8 @@ static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo, OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++; - *pui64Timestamp = RGXTimeCorrGetClockus64(psRgxDevInfo->psDeviceNode); + /* For HWPerf we always use monotonic clock. */ + (void) OSClockMonotonicus64(pui64Timestamp); if (!bSleepAllowed) { @@ -1992,8 +2256,7 @@ static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo, OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); } -static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo, - IMG_UINT8 *pui8Dest, +static inline void _SetupHostPacketHeader(IMG_UINT8 *pui8Dest, RGX_HWPERF_HOST_EVENT_TYPE eEvType, IMG_UINT32 ui32Size, IMG_UINT32 ui32Ordinal, @@ -2064,7 +2327,7 @@ void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, eEvType, ui32PktSize, ui32Ordinal, ui64Timestamp); + _SetupHostPacketHeader(pui8Dest, eEvType, ui32PktSize, ui32Ordinal, ui64Timestamp); OSDeviceMemCopy((IMG_UINT8*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)), pbPayload, ui32PayloadSize); _CommitHWPerfStream(psRgxDevInfo, ui32PktSize); @@ -2101,7 +2364,7 @@ void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostEnqPacketData(pui8Dest, eEnqType, @@ -2237,7 +2500,7 @@ void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, pui8Dest = GET_DE_EVENT_DATA(pui8Dest); } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData); @@ -2273,8 +2536,10 @@ static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize( if (*ppsName != NULL && *ui32NameSize > 0) { /* if string longer than maximum cut it (leave space for '\0') */ - if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + if ((*ui32NameSize +1U) >= PVRSRV_SYNC_NAME_LENGTH) *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH; + else + *ui32NameSize += 1U; } else { @@ -2329,19 +2594,27 @@ static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, switch (eAllocType) { case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: - 
psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc; + psData->uAllocDetail.sSyncAlloc.ui32FWAddr = puAllocDetail->sSyncAlloc.ui32FWAddr; acName = psData->uAllocDetail.sSyncAlloc.acName; break; case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: - psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc; + psData->uAllocDetail.sFenceAlloc.uiPID = puAllocDetail->sFenceAlloc.uiPID; + psData->uAllocDetail.sFenceAlloc.hFence = puAllocDetail->sFenceAlloc.hFence; + psData->uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = puAllocDetail->sFenceAlloc.ui32CheckPt_FWAddr; acName = psData->uAllocDetail.sFenceAlloc.acName; break; case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: - psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc; + psData->uAllocDetail.sSWFenceAlloc.uiPID = puAllocDetail->sSWFenceAlloc.uiPID; + psData->uAllocDetail.sSWFenceAlloc.hSWFence = puAllocDetail->sSWFenceAlloc.hSWFence; + psData->uAllocDetail.sSWFenceAlloc.hSWTimeline = puAllocDetail->sSWFenceAlloc.hSWTimeline; + psData->uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = puAllocDetail->sSWFenceAlloc.ui64SyncPtIndex; acName = psData->uAllocDetail.sSWFenceAlloc.acName; break; case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: - psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc; + psData->uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = puAllocDetail->sSyncCheckPointAlloc.ui32CheckPt_FWAddr; + psData->uAllocDetail.sSyncCheckPointAlloc.hTimeline = puAllocDetail->sSyncCheckPointAlloc.hTimeline; + psData->uAllocDetail.sSyncCheckPointAlloc.uiPID = puAllocDetail->sSyncCheckPointAlloc.uiPID; + psData->uAllocDetail.sSyncCheckPointAlloc.hFence = puAllocDetail->sSyncCheckPointAlloc.hFence; acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName; break; default: @@ -2356,7 +2629,7 @@ static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, { if (ui32NameSize) { - OSStringLCopy(acName, psName, ui32NameSize); + OSStringSafeCopy(acName, psName, ui32NameSize); } else { @@ -2390,7 +2663,7 @@ void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostAllocPacketData(pui8Dest, @@ -2408,7 +2681,6 @@ void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest, RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, IMG_UINT64 ui64UID, - IMG_UINT32 ui32PID, IMG_UINT32 ui32FWAddr) { RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *) @@ -2446,6 +2718,8 @@ void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Ordinal; IMG_UINT64 ui64Timestamp; + PVR_UNREFERENCED_PARAMETER(ui32PID); + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); @@ -2455,12 +2729,11 @@ void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostFreePacketData(pui8Dest, eFreeType, ui64UID, - ui32PID, ui32FWAddr); _CommitHWPerfStream(psRgxDevInfo, ui32Size); @@ -2545,7 +2818,7 @@ static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest, { if (ui32NameSize) { - OSStringLCopy(acName, psName, ui32NameSize); + 
OSStringSafeCopy(acName, psName, ui32NameSize); } else { @@ -2580,7 +2853,7 @@ void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostModifyPacketData(pui8Dest, eModifyType, @@ -2600,13 +2873,19 @@ static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, { RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *) IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); - RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb; - IMG_UINT32 ui32CurrIdx = - RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); - RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx]; + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = psRgxDevInfo->psRGXFWIfGpuUtilFW; + IMG_UINT32 ui32CurrIdx; + RGXFWIF_TIME_CORR *psTimeCorr; + + RGXFwSharedMemCacheOpValue(psGpuUtilFW->ui32TimeCorrSeqCount, INVALIDATE); + ui32CurrIdx = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFW->ui32TimeCorrSeqCount); + + RGXFwSharedMemCacheOpValue(psGpuUtilFW->sTimeCorr[ui32CurrIdx], INVALIDATE); + psTimeCorr = &psGpuUtilFW->sTimeCorr[ui32CurrIdx]; psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp; - psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp; + psData->ui64OSSecondaryTimeStamp = psTimeCorr->ui64OSTimeStamp; + psData->ui64OSMonoTimestamp = psTimeCorr->ui64OSMonoTimeStamp; psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed; } @@ -2620,7 +2899,7 @@ void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) /* if the buffer for time correlation data is not yet available (possibly * device not initialised yet) skip this event */ - if (psRgxDevInfo->psRGXFWIfGpuUtilFWCb == NULL) + if (psRgxDevInfo->psRGXFWIfGpuUtilFW == NULL) { return; } @@ -2634,7 +2913,7 @@ void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest); @@ -2644,40 +2923,10 @@ void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); } -static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) -{ - switch (eDeviceHealthStatus) - { - case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; - case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; - case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING; - case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; - case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; - default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; - } -} - -static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) -{ - switch (eDeviceHealthReason) - { - case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; - case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; - case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return 
RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; - case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; - case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; - case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; - case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; - case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; - case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; - default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; - } -} - -static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType, - PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, - PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason, - IMG_UINT8 *pui8Dest) +static inline void _SetupHostDeviceInfoPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_DEV_INFO_EV eEvType, + RGX_HWPERF_HOST_DEV_INFO_DETAIL *puPacketData, + IMG_UINT8 *pui8Dest) { RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); psData->eEvType = eEvType; @@ -2685,8 +2934,33 @@ static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType switch (eEvType) { case RGX_HWPERF_DEV_INFO_EV_HEALTH: - psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus); - psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason); + if (puPacketData != NULL) + { + psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = + puPacketData->sDeviceStatus.eDeviceHealthStatus; + psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = + puPacketData->sDeviceStatus.eDeviceHealthReason; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: puPacketData is invalid.")); + } + break; + case RGX_HWPERF_DEV_INFO_EV_FEATURES: + { + PVRSRV_ERROR eError; + eError = RGXServerFeatureFlagsToHWPerfFlags(psRgxDevInfo, + &psData->uDevInfoDetail.sBVNC); + PVR_LOG_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags"); + psData->uDevInfoDetail.sBVNC.ui32BvncKmFeatureFlags |= +#if defined(RGX_FEATURE_HWPERF_ROGUE) + RGX_HWPERF_FEATURE_ROGUE_FLAG; +#elif defined(RGX_FEATURE_HWPERF_VOLCANIC) + RGX_HWPERF_FEATURE_VOLCANIC_FLAG; +#else + 0x0; +#endif + } break; default: // unknown type - this should never happen @@ -2705,6 +2979,9 @@ static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_ case RGX_HWPERF_DEV_INFO_EV_HEALTH: ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus); break; + case RGX_HWPERF_DEV_INFO_EV_FEATURES: + ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sBVNC); + break; default: // unknown type - this should never happen PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); @@ -2715,9 +2992,8 @@ static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_ } void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, - RGX_HWPERF_DEV_INFO_EV eEvType, - PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, - PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) + RGX_HWPERF_DEV_INFO_EV eEvType, + RGX_HWPERF_HOST_DEV_INFO_DETAIL *puData) { IMG_UINT8 *pui8Dest; IMG_UINT32 ui32Ordinal; @@ -2734,8 +3010,8 @@ void 
RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) { - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); - _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest); + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostDeviceInfoPacketData(psRgxDevInfo, eEvType, puData, pui8Dest); _CommitHWPerfStream(psRgxDevInfo, ui32Size); } @@ -2746,7 +3022,7 @@ void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, } static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, - IMG_UINT32 ui32TotalMemoryUsage, + IMG_UINT64 ui64TotalMemoryUsage, IMG_UINT32 ui32LivePids, PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage, IMG_UINT8 *pui8Dest) @@ -2757,16 +3033,16 @@ static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, switch (eEvType) { - case RGX_HWPERF_INFO_EV_MEM_USAGE: - psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage; + case RGX_HWPERF_INFO_EV_MEM64_USAGE: + psData->uInfoDetail.sMemUsageStats.ui64TotalMemoryUsage = ui64TotalMemoryUsage; if (psPerProcessMemUsage) { for (i = 0; i < ui32LivePids; ++i) { psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid; - psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage; - psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui64KernelMemUsage = psPerProcessMemUsage[i].ui64KernelMemUsage; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui64GraphicsMemUsage = psPerProcessMemUsage[i].ui64GraphicsMemUsage; } } break; @@ -2779,7 +3055,7 @@ static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, } static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType, - IMG_UINT32 *pui32TotalMemoryUsage, + IMG_UINT64 *pui64TotalMemoryUsage, IMG_UINT32 *pui32LivePids, PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage) { @@ -2787,12 +3063,12 @@ static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType switch (eEvType) { - case RGX_HWPERF_INFO_EV_MEM_USAGE: + case RGX_HWPERF_INFO_EV_MEM64_USAGE: #if !defined(__QNXNTO__) - if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) + if (PVRSRVGetProcessMemUsage(pui64TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) { - ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size) - + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage))); + ui32Size += offsetof(RGX_HWPERF_HOST_INFO_DETAIL, sMemUsageStats.sPerProcessUsage) + + ((*pui32LivePids) * sizeof(struct _RGX_HWPERF_HOST_INFO_PER_PROC_USAGE_)); } #else PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); @@ -2814,7 +3090,7 @@ void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size; IMG_UINT32 ui32Ordinal; IMG_UINT64 ui64Timestamp; - IMG_UINT32 ui32TotalMemoryUsage = 0; + IMG_UINT64 ui64TotalMemoryUsage = 0; PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL; IMG_UINT32 ui32LivePids = 0; @@ -2825,12 +3101,12 @@ void 
RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); - ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); + ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui64TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) { - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); - _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostInfoPacketData(eEvType, ui64TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); _CommitHWPerfStream(psRgxDevInfo, ui32Size); } @@ -2920,7 +3196,7 @@ void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data); @@ -2972,7 +3248,7 @@ void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, ui32Size, ui32Ordinal, ui64Timestamp); _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex); @@ -3006,7 +3282,7 @@ void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, goto cleanup; } - _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLIENT_INFO, + _SetupHostPacketHeader(pui8Dest, RGX_HWPERF_HOST_CLIENT_INFO, ui32Size, ui32Ordinal, ui64Timestamp); psPkt = (RGX_HWPERF_HOST_CLIENT_INFO_DATA*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); @@ -3014,7 +3290,7 @@ void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, psPkt->uDetail.sProcName.ui32Count = 1U; psPkt->uDetail.sProcName.asProcNames[0].uiClientPID = uiPID; psPkt->uDetail.sProcName.asProcNames[0].ui32Length = ui32NameLen; - (void)OSStringLCopy(psPkt->uDetail.sProcName.asProcNames[0].acName, psName, ui32NameLen); + (void)OSCachedMemCopy(psPkt->uDetail.sProcName.asProcNames[0].acName, psName, ui32NameLen); _CommitHWPerfStream(psRgxDevInfo, ui32Size); @@ -3061,8 +3337,6 @@ PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) RGX_HWPERF_CONNECTION* psHWPerfConnection; IMG_BOOL bFWActive = IMG_FALSE; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* avoid uninitialised data */ PVR_ASSERT(*ppsHWPerfConnection == NULL); PVR_ASSERT(psPVRSRVData); @@ -3081,12 +3355,18 @@ PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) while (psDeviceNode) { + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) { PVR_DPF((PVR_DBG_WARNING, "%s: HWPerf: Device not currently active. 
ID:%u", __func__, - psDeviceNode->sDevId.i32OsDeviceID)); + psDeviceNode->sDevId.i32KernelDeviceID)); psDeviceNode = psDeviceNode->psNext; continue; } @@ -3110,13 +3390,13 @@ PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) return PVRSRV_ERROR_OUT_OF_MEMORY; } if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName), - "hwperf_device_%d", psDeviceNode->sDevId.i32OsDeviceID) < 0) + "hwperf_device_%d", psDeviceNode->sDevId.i32KernelDeviceID) < 0) { OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf device name for device %d", __func__, - psDeviceNode->sDevId.i32OsDeviceID)); + psDeviceNode->sDevId.i32KernelDeviceID)); return PVRSRV_ERROR_INVALID_PARAMS; } @@ -3153,8 +3433,6 @@ PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING | PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* Validate input argument values supplied by the caller */ if (!psHWPerfConnection) { @@ -3167,14 +3445,27 @@ PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; psRgxDevInfo = psDevData->psRgxDevInfo; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_ERROR_NOT_IMPLEMENTED); + /* In the case where the AppHint has not been set we need to * initialise the HWPerf resources here. Allocated on-demand * to reduce RAM foot print on systems not needing HWPerf. */ OSLockAcquire(psRgxDevInfo->hHWPerfLock); - if (RGXHWPerfIsInitRequired(psRgxDevInfo)) + if (RGXHWPerfIsInitRequired(psRgxDevInfo, RGX_HWPERF_L2_STREAM_HWPERF)) { - eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo); + eError = RGXHWPerfInitOnDemandL1Buffer(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfFW resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hHWPerfLock); + return eError; + } + + /* if this fails it also cleans up L1 buffer */ + eError = RGXHWPerfInitOnDemandL2Stream(psRgxDevInfo, RGX_HWPERF_L2_STREAM_HWPERF); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -3204,12 +3495,12 @@ PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d", PVRSRV_TL_HWPERF_RGX_FW_STREAM, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d", __func__, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); return PVRSRV_ERROR_INVALID_PARAMS; } /* Open the RGX TL stream for reading in this session */ @@ -3222,12 +3513,12 @@ PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf host stream name for device %d", __func__, - psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + 
psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); return PVRSRV_ERROR_INVALID_PARAMS; } @@ -3272,8 +3563,6 @@ PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) { PVRSRV_ERROR eError; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - eError = RGXHWPerfLazyConnect(ppsHWPerfConnection); PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0); @@ -3307,14 +3596,14 @@ PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVR_ASSERT(psDeviceNode); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); PVR_DPF_ENTERED; PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs"); PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen"); - PVR_ASSERT(psDeviceNode); psDevice = psDeviceNode->pvDevice; /* Fill in the command structure with the parameters needed @@ -3339,13 +3628,14 @@ PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); - -#if defined(DEBUG) if (bEnable) - PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen)); + { + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen)); + } else - PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen)); -#endif + { + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen)); + } PVR_DPF_RETURN_OK; } @@ -3362,14 +3652,15 @@ PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( { PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + PVR_ASSERT(psDeviceNode); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_IMPLEMENTED); PVR_DPF_ENTERED; - PVR_ASSERT(psDeviceNode); if (eStreamId == RGX_HWPERF_STREAM_ID0_FW) { - return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask); + return RGXHWPerfCtrlFwBuffer(psDeviceNode, RGX_HWPERF_L2_STREAM_HWPERF, + bToggle, ui64Mask); } else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST) { @@ -3391,6 +3682,51 @@ PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( PVR_DPF_RETURN_OK; } +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfFW( + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT64 ui64Mask, + HWPERF_FILTER_OPERATION eMaskOp) +{ + IMG_UINT64 uiTmpFilter; + + PVR_DPF_ENTERED; + PVR_ASSERT(psDeviceNode); + PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); + + uiTmpFilter = + ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui64HWPerfFilter[eL2StreamId]; + + switch (eMaskOp) + { + case HWPERF_FILTER_OPERATION_SET: + uiTmpFilter = ui64Mask; + break; + case HWPERF_FILTER_OPERATION_BIT_CLR: + uiTmpFilter &= ~ui64Mask; + break; + case HWPERF_FILTER_OPERATION_BIT_OR: + uiTmpFilter |= ui64Mask; + break; + } + + PVR_DPF_RETURN_RC(RGXHWPerfCtrlFwBuffer(psDeviceNode, eL2StreamId, IMG_FALSE, + uiTmpFilter)); +} + +/* + PVRSRVRGXGetHWPerfTimeStampKM + */ +PVRSRV_ERROR PVRSRVRGXGetHWPerfTimeStampKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT64 *pui64TimeStamp) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + *pui64TimeStamp = RGXTimeCorrGetClockus64(psDeviceNode); + return PVRSRV_OK; +} + PVRSRV_ERROR RGXHWPerfControl( RGX_HWPERF_CONNECTION *psHWPerfConnection, RGX_HWPERF_STREAM_ID eStreamId, @@ -3401,8 +3737,6 @@ PVRSRV_ERROR 
RGXHWPerfControl( RGX_KM_HWPERF_DEVDATA* psDevData; RGX_HWPERF_DEVICE* psHWPerfDev; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* Validate input argument values supplied by the caller */ if (!psHWPerfConnection) { @@ -3415,6 +3749,8 @@ PVRSRV_ERROR RGXHWPerfControl( { psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + /* Call the internal server API */ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); @@ -3444,8 +3780,6 @@ IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( RGX_KM_HWPERF_DEVDATA* psDevData; RGX_HWPERF_DEVICE* psHWPerfDev; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs) { return PVRSRV_ERROR_INVALID_PARAMS; @@ -3462,6 +3796,8 @@ IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( { psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + /* Call the internal server API */ eError = PVRSRVRGXControlHWPerfBlocksKM(NULL, psDevData->psRgxDevNode, @@ -3516,8 +3852,6 @@ PVRSRV_ERROR RGXHWPerfAcquireEvents( PVRSRVTL_PPACKETHDR psHDRptr; PVRSRVTL_PACKETTYPE ui16TlType; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* Reset the output arguments in case we discover an error */ *ppBuf = NULL; *pui32BufLen = 0; @@ -3528,6 +3862,8 @@ PVRSRV_ERROR RGXHWPerfAcquireEvents( return PVRSRV_ERROR_INVALID_PARAMS; } + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + if (psDevData->pTlBuf[eStreamId] == NULL) { /* Acquire some data to read from the HWPerf TL stream */ @@ -3615,14 +3951,14 @@ PVRSRV_ERROR RGXHWPerfReleaseEvents( PVRSRV_ERROR eError = PVRSRV_OK; RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* Valid input argument values supplied by the caller */ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) { return PVRSRV_ERROR_INVALID_PARAMS; } + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + if (psDevData->bRelease[eStreamId]) { /* Inform the TL that we are done with reading the data. */ @@ -3646,8 +3982,6 @@ PVRSRV_ERROR RGXHWPerfGetFilter( PVRSRV_RGXDEV_INFO* psRgxDevInfo = hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* Valid input argument values supplied by the caller */ if (!psRgxDevInfo) { @@ -3656,13 +3990,15 @@ PVRSRV_ERROR RGXHWPerfGetFilter( return PVRSRV_ERROR_INVALID_PARAMS; } + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psRgxDevInfo, PVRSRV_ERROR_NOT_IMPLEMENTED); + /* No need to take hHWPerfLock here since we are only reading data * from always existing integers to return to debugfs which is an * atomic operation. 
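The PVRSRVRGXCtrlHWPerfFW entry point added above updates the firmware event filter, which this patch turns into a per-stream value (ui64HWPerfFilter indexed by the L2 stream ID, as the RGXHWPerfGetFilter switch just below shows). A minimal standalone sketch of the three HWPERF_FILTER_OPERATION modes applied to such a per-stream 64-bit mask; the names and mask values here are illustrative only, not DDK code:

/* Illustrative sketch: models the per-stream 64-bit filter and the
 * SET / BIT_CLR / BIT_OR operations used by PVRSRVRGXCtrlHWPerfFW. */
#include <stdint.h>
#include <stdio.h>

enum stream_id { STREAM_HWPERF = 0, STREAM_LAST };
enum filter_op { FILTER_OP_SET, FILTER_OP_BIT_CLR, FILTER_OP_BIT_OR };

static uint64_t filters[STREAM_LAST];

static void ctrl_filter(enum stream_id s, uint64_t mask, enum filter_op op)
{
	uint64_t f = filters[s];

	switch (op)
	{
		case FILTER_OP_SET:     f  = mask;  break; /* replace the whole filter */
		case FILTER_OP_BIT_CLR: f &= ~mask; break; /* disable selected events  */
		case FILTER_OP_BIT_OR:  f |= mask;  break; /* enable selected events   */
	}

	filters[s] = f; /* the real driver pushes the result to the FW buffer */
}

int main(void)
{
	ctrl_filter(STREAM_HWPERF, 0x0000000000000300ULL, FILTER_OP_SET);
	ctrl_filter(STREAM_HWPERF, 0x0000000000000400ULL, FILTER_OP_BIT_OR);
	ctrl_filter(STREAM_HWPERF, 0x0000000000000100ULL, FILTER_OP_BIT_CLR);
	printf("filter: 0x%016llx\n", (unsigned long long)filters[STREAM_HWPERF]);
	return 0; /* prints filter: 0x0000000000000600 */
}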
*/ switch (eStreamId) { case RGX_HWPERF_STREAM_ID0_FW: - *ui64Filter = psRgxDevInfo->ui64HWPerfFilter; + *ui64Filter = psRgxDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF]; break; case RGX_HWPERF_STREAM_ID1_HOST: *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter; @@ -3688,8 +4024,6 @@ PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection return PVRSRV_OK; } - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList; while (psHWPerfNextDev) { @@ -3721,12 +4055,13 @@ PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection) return PVRSRV_ERROR_INVALID_PARAMS; } - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - psHWPerfDev = psHWPerfConnection->psHWPerfDevList; while (psHWPerfDev) { psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++) { /* If the TL buffer exists they have not called ReleaseData @@ -3774,8 +4109,6 @@ PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) { PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - eError = RGXHWPerfClose(*ppsHWPerfConnection); PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose"); @@ -3811,6 +4144,175 @@ IMG_UINT64 RGXHWPerfConvertCRTimeStamp( return ui64EventOSTimestamp; } +#define CLIENT_STREAM_REFCOUNT_MIN 0 +#define CLIENT_STREAM_REFCOUNT_MAX IMG_INT32_MAX + +struct HWPERF_STREAM_DESC_TAG +{ + TL_STREAM_DESC *psSD; + PMR *psPMR; + POS_LOCK hLock; + ATOMIC_T iRefCount; + IMG_UINT32 uiOrdinal; +}; + +PVRSRV_ERROR PVRSRVRGXOpenHWPerfClientStreamKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + HWPERF_STREAM_DESC **ppsSD) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + HWPERF_STREAM_DESC *psHwpSD; + TL_STREAM_DESC *psTlSD; + PMR *psPMR; + PVRSRV_ERROR eError; + IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + + OSSNPrintf(acStreamName, sizeof(acStreamName), + PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, + psDeviceNode->sDevId.i32KernelDeviceID, + psConnection->pid); + + OSLockAcquire(psPVRSRVData->hClientStreamTableLock); + + psHwpSD = (void *) HASH_Retrieve_Extended(psPVRSRVData->psClientStreamTable, + (void *) acStreamName); + if (psHwpSD != NULL) + { + IMG_INT32 iOldRefCount = OSAtomicAddUnless(&psHwpSD->iRefCount, 1, + CLIENT_STREAM_REFCOUNT_MAX); + if (iOldRefCount == CLIENT_STREAM_REFCOUNT_MAX) + { + PVR_LOG_GOTO_WITH_ERROR("HWPerf stream reference count overflow", + eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, + ErrUnlockAndReturnError); + } + + *ppsSD = psHwpSD; + + OSLockRelease(psPVRSRVData->hClientStreamTableLock); + + return PVRSRV_OK; + } + + psHwpSD = OSAllocMem(sizeof(*psHwpSD)); + PVR_LOG_GOTO_IF_NOMEM(psHwpSD, eError, ErrUnlockAndReturnError); + + eError = OSLockCreate(&psHwpSD->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "TLServerOpenStreamKM", ErrFreeSD); + + eError = TLServerOpenStreamKM(acStreamName, PVRSRV_STREAM_FLAG_OPEN_WO, + &psTlSD, &psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "TLServerOpenStreamKM", ErrFreeLock); + + psHwpSD->psSD = psTlSD; + psHwpSD->psPMR = psPMR; + psHwpSD->uiOrdinal = IMG_UINT32_MAX; + OSAtomicWrite(&psHwpSD->iRefCount, CLIENT_STREAM_REFCOUNT_MIN + 1); + + if (!HASH_Insert_Extended(psPVRSRVData->psClientStreamTable, (void *) acStreamName, + (uintptr_t) psHwpSD)) + { + PVR_LOG_GOTO_WITH_ERROR("HASH_Insert", eError, 
PVRSRV_ERROR_OUT_OF_MEMORY, + ErrCloseStream); + } + + OSLockRelease(psPVRSRVData->hClientStreamTableLock); + + *ppsSD = psHwpSD; + + return PVRSRV_OK; + +ErrCloseStream: + TLServerCloseStreamKM(psTlSD); +ErrFreeLock: + OSLockDestroy(psHwpSD->hLock); +ErrFreeSD: + OSFreeMem(psHwpSD); +ErrUnlockAndReturnError: + OSLockRelease(psPVRSRVData->hClientStreamTableLock); + + return eError; +} + +/* This function is called from the handle framework's handle destruction + * path which means there should not be any lookup outstanding, i.e. the handle + * should not be used at the given moment. */ +PVRSRV_ERROR PVRSRVRGXCloseHWPerfClientStreamKM(HWPERF_STREAM_DESC *psSD) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_INT iOldRefCount; + PVRSRV_ERROR eError; + void *psElement; + + PVR_ASSERT(psSD != NULL); + + iOldRefCount = OSAtomicSubtractUnless(&psSD->iRefCount, 1, CLIENT_STREAM_REFCOUNT_MIN); + if (iOldRefCount == CLIENT_STREAM_REFCOUNT_MIN) + { + /* If the old value is 0 return error. This means that something went + * wrong and the reference count was already 0 and resources were + * freed. */ + return PVRSRV_ERROR_REFCOUNT_OVERFLOW; + } + else if (iOldRefCount != (CLIENT_STREAM_REFCOUNT_MIN + 1)) + { + /* If the old value is not 1 then return ok, the stream is still used + * by someone. If it's 1, then progress with the resources destruction + * since the current value is 0. */ + return PVRSRV_OK; + } + + PVR_ASSERT(iOldRefCount > 0); + + OSLockAcquire(psPVRSRVData->hClientStreamTableLock); + + psElement = (void *) HASH_Remove_Extended(psPVRSRVData->psClientStreamTable, + psSD->psSD->psNode->psStream->szName); + PVR_ASSERT(psElement != NULL); + + OSLockRelease(psPVRSRVData->hClientStreamTableLock); + + eError = TLServerCloseStreamKM(psSD->psSD); + PVR_LOG_GOTO_IF_ERROR(eError, "TLServerCloseStreamKM", ErrReturnError); + + OSLockDestroy(psSD->hLock); + + OSFreeMem(psSD); + + return PVRSRV_OK; + +ErrReturnError: + return eError; +} + +/* This function is called from the handle framework's lookup path which means + * there should not be a valid pointer to the looked up resource, i.e. no + * destructor is allowed to run on the resource. 
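The client stream open path above and the close path that follows manage the stream lifetime with a saturating reference count: OSAtomicAddUnless / OSAtomicSubtractUnless return the previous value and never cross CLIENT_STREAM_REFCOUNT_MIN / MAX, so the caller can detect both overflow and the final release. A standalone C11 analogue of that pattern, assuming the add-unless-the-counter-equals-the-limit, return-the-old-value semantics implied by the checks above (the helper name below is illustrative):

#include <stdatomic.h>
#include <stdint.h>

/* Add 'delta' unless the counter currently equals 'limit'.
 * Returns the previous value, so a caller can detect saturation
 * (previous == limit) or, when subtracting towards a minimum,
 * the last-reference drop (previous == limit + 1). */
static int32_t atomic_add_unless(_Atomic int32_t *counter, int32_t delta, int32_t limit)
{
	int32_t old = atomic_load(counter);

	while (old != limit &&
	       !atomic_compare_exchange_weak(counter, &old, old + delta))
	{
		/* 'old' has been reloaded by the failed CAS; retry. */
	}

	return old;
}

/* Usage mirroring the open/close paths above:
 *   open:  if (atomic_add_unless(&ref, 1, REF_MAX) == REF_MAX)  -> refuse, overflow
 *   close: old = atomic_add_unless(&ref, -1, REF_MIN);
 *          if (old == REF_MIN)      -> error, already released
 *          if (old == REF_MIN + 1)  -> last reference, free resources
 */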
*/ +PVRSRV_ERROR PVRSRVRGXWriteHWPerfClientEventKM(HWPERF_STREAM_DESC *psSD, + IMG_UINT32 uiSize, + IMG_BYTE *puiData) +{ + RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) ((void *)puiData); + PVRSRV_ERROR eError; + + PVR_ASSERT(psSD != NULL); + + PVR_LOG_RETURN_IF_INVALID_PARAM(uiSize > sizeof(*psHeader), "uiSize"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psHeader->ui32Size == uiSize, "uiSize"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psHeader->ui32Sig == HWPERF_PACKET_V2B_SIG, "ui32Sig"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psHeader->ui32Ordinal == 0, "ui32Ordinal"); + + OSLockAcquire(psSD->hLock); + + psHeader->ui32Ordinal = ++psSD->uiOrdinal; + + eError = TLServerWriteDataKM(psSD->psSD, uiSize, puiData); + + OSLockRelease(psSD->hLock); + + return eError; +} + /****************************************************************************** End of file (rgxhwperf_common.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.h index 586559f96a35..6f1b932f3322 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxhwperf_common.h @@ -53,12 +53,25 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "rgx_hwperf.h" #include "rgx_fwif_hwperf.h" +#if defined(PVR_ARCH_VOLCANIC) +#include "rgx_hwperf_table.h" +#endif +#include "cache_ops.h" +#include "rgxfwmemctx.h" /* HWPerf host buffer size constraints in KBs */ #define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB #define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U) #define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U) +/* Operations on HWPerf filter. 
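For the write path above, PVRSRVRGXWriteHWPerfClientEventKM only accepts a buffer whose leading RGX_HWPERF_V2_PACKET_HDR passes four checks: the overall size exceeds the header, ui32Size matches the byte count passed in, ui32Sig equals HWPERF_PACKET_V2B_SIG, and ui32Ordinal is zero (the server stamps the ordinal itself under the stream lock). A hedged sketch of a client-side caller that would satisfy those checks; the payload layout is hypothetical, and any further header fields the real packet format requires (event type, timestamp) are left zeroed here:

/* Illustrative sketch only: builds a buffer that passes the server-side
 * checks in PVRSRVRGXWriteHWPerfClientEventKM. Payload layout is made up. */
typedef struct
{
	RGX_HWPERF_V2_PACKET_HDR sHeader;         /* validated by the server   */
	IMG_UINT32               aui32Payload[4]; /* hypothetical event body   */
} CLIENT_EVENT_BUFFER;

static PVRSRV_ERROR PostClientEvent(HWPERF_STREAM_DESC *psSD)
{
	CLIENT_EVENT_BUFFER sEvent = {0};

	sEvent.sHeader.ui32Size    = sizeof(sEvent);        /* must equal uiSize   */
	sEvent.sHeader.ui32Sig     = HWPERF_PACKET_V2B_SIG; /* required signature  */
	sEvent.sHeader.ui32Ordinal = 0;                     /* server assigns it   */

	/* ... fill aui32Payload and any remaining header fields here ... */

	return PVRSRVRGXWriteHWPerfClientEventKM(psSD,
	                                         sizeof(sEvent),
	                                         (IMG_BYTE *)&sEvent);
}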
*/ +typedef enum HWPERF_FILTER_OPERATION_TAG +{ + HWPERF_FILTER_OPERATION_SET, + HWPERF_FILTER_OPERATION_BIT_CLR, + HWPERF_FILTER_OPERATION_BIT_OR, +} HWPERF_FILTER_OPERATION; + /****************************************************************************** * RGX HW Performance decode Bvnc Features for HWPerf *****************************************************************************/ @@ -76,8 +89,13 @@ PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnecti PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo); PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); -PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); + +PVRSRV_ERROR RGXHWPerfInitOnDemandL1Buffer(PVRSRV_RGXDEV_INFO* psRgxDevInfo); +PVRSRV_ERROR RGXHWPerfInitOnDemandL2Stream(PVRSRV_RGXDEV_INFO* psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId); +void RGXHWPerfDeinitL2Stream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId); void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); void RGXHWPerfClientInitAppHintCallbacks(void); @@ -94,6 +112,7 @@ static INLINE PVRSRV_ERROR RGXAcquireHWPerfCtlCPUAddr(PVRSRV_DEVICE_NODE *psDevN eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void**)ppsHWPerfCtl); + RGXFwSharedMemCacheOpPtr(ppsHWPerfCtl, INVALIDATE); return eError; } @@ -132,6 +151,17 @@ PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( IMG_BOOL bToggle, IMG_UINT64 ui64Mask); +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfFW( + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT64 ui64Mask, + HWPERF_FILTER_OPERATION eMaskOp); + +PVRSRV_ERROR PVRSRVRGXGetHWPerfTimeStampKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT64 *pui64TimeStamp); + PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, @@ -143,10 +173,47 @@ PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( * RGX HW Performance Host Stream API *****************************************************************************/ +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS +RGXHWPerfConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) +{ + switch (eDeviceHealthStatus) + { + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + } +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON +RGXHWPerfConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + switch (eDeviceHealthReason) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return 
RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; + case PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_PCI_ERROR; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; + } +} + PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB); PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); +IMG_UINT64 RGXHWPerfFwSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_L2_STREAM_ID eL2StreamId, + IMG_UINT64 uiFilter); + void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter); @@ -198,8 +265,7 @@ void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo); void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_DEV_INFO_EV eEvType, - PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, - PVRSRV_DEVICE_HEALTH_REASON eDeviceHeathReason); + RGX_HWPERF_HOST_DEV_INFO_DETAIL *psData); void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_INFO_EV eEvType); @@ -221,6 +287,25 @@ void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent); +#if defined(PVR_ARCH_VOLCANIC) +IMG_INTERNAL /*static inline*/ IMG_UINT32 +RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel); + +/*! +******************************************************************************* + @Function RGXHWPerfMaxDefinedBlks + + @Description Return the number of valid block-IDs for the given device node + + @Input (PVRSRV_RGXDEV_INFO *) pvDevice device-node to query + + @Returns (IMG_UINT32) Number of block-IDs (RGX_CNTBLK_ID) + valid for this device. +******************************************************************************/ +IMG_INTERNAL IMG_UINT32 +RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + #define _RGX_HWPERF_HOST_FILTER(CTX, EV) \ (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \ & RGX_HWPERF_EVENT_MASK_VALUE(EV)) @@ -457,20 +542,36 @@ IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPER /** * This macro checks if HWPerfHost and the event are enabled and if they are - * it posts a device info event to the HWPerfHost stream. + * it posts a device info health event to the HWPerfHost stream. 
* * @param I Device info pointer - * @param T Event type - * @param H Health status enum - * @param R Health reason enum + * @param H Health status enum + * @param R Health reason enum */ -#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \ - do { \ - if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ - { \ - RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \ - } \ - } while (0) +#define RGXSRV_HWPERF_DEVICE_INFO_HEALTH(I, H, R) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ + { \ + RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevDetail; \ + uDevDetail.sDeviceStatus.eDeviceHealthStatus = RGXHWPerfConvDeviceHealthStatus(H); \ + uDevDetail.sDeviceStatus.eDeviceHealthReason = RGXHWPerfConvDeviceHealthReason(R); \ + RGXHWPerfHostPostDeviceInfo((I), RGX_HWPERF_DEV_INFO_EV_HEALTH, &uDevDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts a device info features event to the HWPerfHost stream. + * + * @param I Device info pointer + */ +#define RGXSRV_HWPERF_DEVICE_INFO_FEATURES(I) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ + { \ + RGXHWPerfHostPostDeviceInfo((I), RGX_HWPERF_DEV_INFO_EV_FEATURES, NULL); \ + } \ + } while (0) /** * This macro checks if HWPerfHost and the event are enabled and if they are @@ -542,7 +643,8 @@ do { \ #define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) #define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) #define RGXSRV_HWPERF_CLK_SYNC(I) -#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) +#define RGXSRV_HWPERF_DEVICE_INFO_HEALTH(I, H, R) +#define RGXSRV_HWPERF_DEVICE_INFO_FEATURES(I) #define RGXSRV_HWPERF_HOST_INFO(I, T) #define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) #define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit.h similarity index 85% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit.h index a4cfbe758bfb..807f6a0b6fce 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit.h @@ -51,6 +51,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "rgx_bridge.h" #include "fwload.h" +#include "rgxinit_apphints.h" #if defined(__linux__) #define OS_FW_VERIFY_FUNCTION OSVerifyFirmware @@ -73,11 +74,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
******************************************************************************/ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32DeviceFlags, - IMG_UINT32 ui32HWPerfHostFilter, - RGX_ACTIVEPM_CONF eActivePMConf, - IMG_UINT32 ui32AvailableSPUMask, - IMG_UINT32 ui32AvailableRACMask); + RGX_INIT_APPHINTS *psApphints); PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_DEVMEM_SIZE_T ui32FWCodeLen, @@ -102,28 +99,9 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, ******************************************************************************/ PVRSRV_ERROR RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32RenderKillingCtl, - IMG_UINT32 ui32CDMTDMKillingCtl, - IMG_UINT32 *pui32TPUTrilinearFracMask, - IMG_UINT32 *pui32USRMNumRegions, - IMG_UINT64 *pui64UVBRMNumRegions, - IMG_UINT32 ui32HWPerfCountersDataSize, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, - IMG_BOOL bSPUClockGating, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32AvailableSPUMask, - IMG_UINT32 ui32AvailableRACMask, IMG_UINT32 ui32FwOsCfgFlags); @@ -140,8 +118,6 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, @Input ppsRGXFW - fw pointer - @Output ppbFWData - pointer to FW data (NULL if an error occurred) - @Return PVRSRV_ERROR - PVRSRV_OK on success PVRSRV_ERROR_NOT_READY if filesystem is not ready PVRSRV_ERROR_NOT_FOUND if no suitable FW image found @@ -150,8 +126,8 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, ******************************************************************************/ PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, - OS_FW_IMAGE **ppsRGXFW, - const IMG_BYTE **ppbFWData); + OS_FW_IMAGE **ppsRGXFW); + #if defined(PDUMP) /*! ******************************************************************************* @@ -179,7 +155,7 @@ PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); Registers the device with the system - @Input: psDeviceNode - device node + @Input: psDeviceNode - device node @Return PVRSRV_ERROR @@ -262,6 +238,7 @@ PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); #endif /* !defined(NO_HARDWARE) */ +#if defined(RGX_FEATURE_AXI_ACE_BIT_MASK) /*! ************************************************************************************ @Function RGXSystemGetFabricCoherency @@ -281,6 +258,7 @@ PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDeviceConfig, IMG_UINT32 ui32RegsSize, PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType, PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode); +#endif /*! ******************************************************************************* @@ -306,4 +284,30 @@ PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode ******************************************************************************/ void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); +/*! 
+ ******************************************************************************* + + @Function RGXHeapDerivePageSize + + @Description Ensure the desire page size is suitable for the RGX hardware + + @Input uiLog2PageSize target page log2 size + + @Return IMG_UINT32 valid page log2 size + ******************************************************************************/ +IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize); + +/*! + ******************************************************************************* + + @Function RGXGetNon4KHeapPageShift + + @Description Retrieves the log2 page size of the General non 4k heap + + @Output pui32Log2Non4KPgShift page shift + ******************************************************************************/ +PVRSRV_ERROR RGXGetNon4KHeapPageShift(const void *hPrivate, + IMG_UINT32 *pui32Log2Non4KPgShift); + + #endif /* RGXINIT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit_apphints.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit_apphints.h new file mode 100644 index 000000000000..f3bea6280a2e --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxinit_apphints.h @@ -0,0 +1,120 @@ +/*************************************************************************/ /*! +@File +@Title Services device initialisation settings +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device initialisation settings +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXINIT_APPHINTS_H) +#define RGXINIT_APPHINTS_H + +#include "img_defs.h" + +#include "rgx_fwif_km.h" +#include "rgxdefs_km.h" +#include "rgxdevice.h" + +/* + * Container for all the apphints used by this module + */ +typedef struct _RGX_INIT_APPHINTS_ +{ + IMG_BOOL bEnableSignatureChecks; + IMG_UINT32 ui32SignatureChecksBufSize; + + IMG_BOOL bAssertOnOutOfMem; + IMG_BOOL bAssertOnHWRTrigger; +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + IMG_UINT32 ui32TFBCVersion; + IMG_UINT32 ui32TFBCCompressionControlGroup; + IMG_UINT32 ui32TFBCCompressionControlScheme; + IMG_BOOL bTFBCCompressionControlYUVFormat; + IMG_BOOL bTFBCCompressionControlLossyMinChannel; +#endif + IMG_BOOL bCheckMlist; + IMG_BOOL bDisableClockGating; + IMG_BOOL bDisableDMOverlap; + IMG_BOOL bDisablePDP; + IMG_BOOL bEnableDMKillRand; + IMG_BOOL bEnableRandomCsw; + IMG_BOOL bEnableSoftResetCsw; + IMG_BOOL bHWPerfDisableCounterFilter; + IMG_UINT32 ui32DeviceFlags; + IMG_UINT32 ui32FilterFlags; + IMG_UINT32 ui32EnableFWContextSwitch; + IMG_UINT32 ui32FWContextSwitchProfile; + IMG_UINT32 ui32HWPerfFWBufSize; + IMG_UINT32 ui32HWPerfHostBufSize; + IMG_UINT32 ui32HWPerfFilter0; + IMG_UINT32 ui32HWPerfFilter1; + IMG_UINT32 ui32HWPerfHostFilter; + IMG_UINT32 ui32SecondaryOSClockSource; + IMG_UINT32 ui32HWRDebugDumpLimit; + IMG_UINT32 ui32LogType; + IMG_UINT32 ui32KCCBSizeLog2; +#if defined(PVR_ARCH_VOLCANIC) + IMG_UINT32 ui32ISPSchedulingLatencyMode; +#endif + FW_PERF_CONF eFirmwarePerf; + RGX_ACTIVEPM_CONF eRGXActivePMConf; + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + IMG_UINT32 ui32AvailablePowUnitsMask; + IMG_UINT32 ui32AvailableRACMask; + IMG_BOOL bSPUClockGating; +#endif + IMG_BOOL bEnableTrustedDeviceAceConfig; + IMG_UINT32 ui32FWContextSwitchCrossDM; +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + IMG_UINT32 ui32PhysMemTestPasses; +#endif + RGX_FWT_LOGTYPE eDebugDumpFWTLogType; +#if defined(SUPPORT_ICS) + IMG_UINT32 ui32EnableIdleCycleStealing; + IMG_UINT32 ui32FDTI; + IMG_UINT32 ui32ICSThreshold; + IMG_BOOL bTestModeOn; +#endif +} RGX_INIT_APPHINTS; + +#endif /* RGXINIT_APPHINTS_H */ + +/****************************************************************************** + End of file (rgxinit_apphints.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxkicksync.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxkicksync.c index 633c2c620c12..b7ab800400c8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxkicksync.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxkicksync.c @@ -46,7 +46,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "rgxmem.h" +#include "rgxutils.h" #include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "allocmem.h" #include "sync.h" #include "rgxhwperf.h" @@ -66,6 +68,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
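The RGX_INIT_APPHINTS structure defined above replaces the long scalar parameter lists of RGXInitFirmware and RGXInitDevPart2 earlier in this patch. A hedged sketch of what a caller could look like after the refactor; only the structure fields and the two prototypes come from the patch, while the wrapper function and the values it assigns are placeholders, not recommended settings:

/* Illustrative sketch: fill the consolidated apphints structure once and
 * hand it to both refactored init entry points. */
static PVRSRV_ERROR InitWithApphints(PVRSRV_DEVICE_NODE *psDeviceNode,
                                     IMG_UINT32 ui32ConfigFlags,
                                     IMG_UINT32 ui32ConfigFlagsExt,
                                     IMG_UINT32 ui32FwOsCfgFlags)
{
	RGX_INIT_APPHINTS sApphints = {0};
	PVRSRV_ERROR eError;

	/* Values that used to be individual RGXInitFirmware() arguments. */
	sApphints.bEnableSignatureChecks     = IMG_TRUE;
	sApphints.ui32SignatureChecksBufSize = 0;    /* placeholder             */
	sApphints.ui32HWPerfFWBufSize        = 2048; /* KB, placeholder         */
	sApphints.ui32HWRDebugDumpLimit      = 0;    /* placeholder             */
	sApphints.ui32LogType                = 0;    /* placeholder             */

	eError = RGXInitFirmware(psDeviceNode, &sApphints,
	                         ui32ConfigFlags, ui32ConfigFlagsExt,
	                         ui32FwOsCfgFlags);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Part-2 initialisation consumes the same structure. */
	return RGXInitDevPart2(psDeviceNode, &sApphints);
}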
#define CHKPT_DBG(X) #endif + struct _RGX_SERVER_KICKSYNC_CONTEXT_ { PVRSRV_DEVICE_NODE * psDeviceNode; @@ -165,17 +168,9 @@ PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psK RGXFWIF_DM_GP, PDUMP_FLAGS_NONE); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psKickSyncContext->psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); /* ... it has so we can free its resources */ @@ -184,6 +179,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psK OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); FWCommonContextFree(psKickSyncContext->psServerCommonContext); + psKickSyncContext->psServerCommonContext = NULL; SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence); SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate); @@ -192,7 +188,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psK OSFreeMem(psKickSyncContext); - return PVRSRV_OK; + return eError; } PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, @@ -334,7 +330,7 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; } /* Ensure the string is null-terminated (Required for safety) */ - szUpdateFenceName[31] = '\0'; + szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; /* This will never be true if called from the bridge since piUpdateFence will always be valid */ if (iUpdateTimeline >= 0 && !piUpdateFence) @@ -619,9 +615,10 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); if (eError != PVRSRV_OK) { - goto fail_cmdaquire; + goto fail_cmdacquire; } + /* * We should reserve space in the kernel CCB here and fill in the command * directly. 
@@ -633,6 +630,20 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext * All the required resources are ready at this point, we can't fail so * take the required server sync operations and commit all the resources */ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "KickSync", @@ -666,9 +677,9 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext NO_DEADLINE, NO_CYCEST); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice, + eError2 = RGXScheduleCommandWithoutPowerLock(psKickSyncContext->psDeviceNode->pvDevice, RGXFWIF_DM_GP, & sKickSyncKCCBCmd, PDUMP_FLAGS_NONE); @@ -677,7 +688,9 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); + + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode, ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, @@ -698,7 +711,7 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext */ if (eError != PVRSRV_OK ) { - goto fail_cmdaquire; + goto fail_cmdacquire; } #if defined(NO_HARDWARE) @@ -749,7 +762,8 @@ PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext OSLockRelease(psKickSyncContext->hLock); return PVRSRV_OK; -fail_cmdaquire: +fail_acquirepowerlock: +fail_cmdacquire: SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence); SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate); if (iUpdateFence != PVRSRV_NO_FENCE) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer.h similarity index 87% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer.h index 431a7b6896a6..3141db5c4488 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer.h @@ -64,12 +64,14 @@ extern "C" { #include "img_types.h" #include "img_elf.h" #include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ -#include "pvrsrv_firmware_boot.h" +#include "rgx_firmware_boot.h" #include "rgx_bvnc_defs_km.h" #include "rgx_fw_info.h" -#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ +#include "rgx_common.h" #include "rgx_meta.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) #include "rgx_mips.h" +#endif #include "rgx_riscv.h" #include "rgxdefs_km.h" @@ -79,6 +81,14 @@ extern "C" { * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) */ +/* + * Specific fields for RGX_CR_IDLE must not be polled in pdumps + * (technical reasons) + */ +#define CR_IDLE_UNSELECTED_MASK ((~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK) | \ + (~RGX_CR_SLC_IDLE_OWDB_CLRMSK) | \ + (RGX_CR_SLC_IDLE_FBCDC_ARB_EN)) + /*! 
******************************************************************************* @@ -158,6 +168,7 @@ void RGXErrorLog(const void *hPrivate, const IMG_CHAR *pszString, ...); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! ******************************************************************************* @@ -172,8 +183,9 @@ void RGXErrorLog(const void *hPrivate, ******************************************************************************/ IMG_UINT32 RGXGetOSPageSize(const void *hPrivate); +#endif -/* This is used to get the value of a specific feature from hprivate. +/* This is used to check if a specific feature is enabled. * Should be used instead of calling RGXDeviceHasFeature. */ #define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \ RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK) @@ -188,6 +200,19 @@ IMG_UINT32 RGXGetOSPageSize(const void *hPrivate); #define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \ RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) + +/* This is used to check if a specific ERN is enabled from hPrivate. + * Should be used instead of calling RGXDeviceHasErnBrn. */ +#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \ + RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK) + +/* This is used to check if a specific BRN is enabled from hPrivate. + * Should be used instead of calling RGXDeviceHasErnBrn. */ +#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \ + RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK) + +#define CLK_CTRL_FORCE_ON(X, Module) \ + X = (((X) & RGX_CR_##Module##_CLRMSK) | RGX_CR_##Module##_ON) /*! ******************************************************************************* @@ -218,6 +243,21 @@ IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) ******************************************************************************/ IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature); +/*! +******************************************************************************* + + @Function RGXDeviceHasErnBrn + + @Description Checks if a device has a particular errata + + @Input hPrivate : Implementation specific data + @Input ui64ErnsBrns : Flags to check + + @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); + /*! ******************************************************************************* @@ -320,6 +360,59 @@ PVRSRV_ERROR RGXPollReg64(const void *hPrivate, IMG_UINT64 ui64RegValue, IMG_UINT64 ui64RegMask); +/*! +******************************************************************************* + + @Function RGXWriteMetaRegThroughSP + + @Description Write a register value using the META slave port + + @Input hPrivate : Implementation specific data + @Input ui32RegAddr : Register offset inside the register bank + @Input ui32RegValue : Value written to the register + + @Return PVRSRV_OK if the poll succeeds, + PVRSRV error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue); + +/*! 
+******************************************************************************* + + @Function RGXReadMetaRegThroughSP + + @Description Read a register value using the META slave port + + @Input hPrivate : Implementation specific data + @Input ui32RegAddr : Register offset inside the register bank + @Input *ui32RegValue : Value read from the register + + @Return PVRSRV_OK if the poll succeeds, + PVRSRV error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32* ui32RegValue); + +/*! +******************************************************************************* + + @Function RGXSetPoweredState + + @Description Declare if the device is powered or not + + @Input hPrivate : Implementation specific data + @Input bPowered : true is powered, false otherwise + + @Return void + +******************************************************************************/ +void RGXSetPoweredState(const void *hPrivate, IMG_BOOL bPowered); + /*! ******************************************************************************* @@ -377,21 +470,25 @@ void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr); ******************************************************************************/ #if defined(PDUMP) +#if !defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) void RGXWriteKernelMMUPC64(const void *hPrivate, IMG_UINT32 ui32PCReg, IMG_UINT32 ui32PCRegAlignShift, IMG_UINT32 ui32PCRegShift, IMG_UINT64 ui64PCVal); +#endif void RGXWriteKernelMMUPC32(const void *hPrivate, IMG_UINT32 ui32PCReg, IMG_UINT32 ui32PCRegAlignShift, IMG_UINT32 ui32PCRegShift, IMG_UINT32 ui32PCVal); -#else /* defined(PDUMP) */ +#else /* defined(PDUMP) */ +#if !defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) #define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \ RGXWriteReg64(priv, pcreg, pcval) +#endif #define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ RGXWriteReg32(priv, pcreg, pcval) @@ -399,7 +496,7 @@ void RGXWriteKernelMMUPC32(const void *hPrivate, #endif /* defined(PDUMP) */ - +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! ******************************************************************************* @@ -648,6 +745,7 @@ void RGXTrampolineRemapConfig(const void *hPrivate, RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ } while (0) #endif +#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ /*! ******************************************************************************* @@ -669,7 +767,7 @@ IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); @Function RGXFabricCoherencyTest - @Description Performs a coherency test + @Description Performs fabric coherency test @Input hPrivate : Implementation specific data @@ -679,29 +777,6 @@ IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); ******************************************************************************/ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate); -/* This is used to check if a specific ERN/BRN is enabled from hprivate. - * Should be used instead of calling RGXDeviceHasErnBrn. */ -#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \ - RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK) - -#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \ - RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK) - -/*! 
-******************************************************************************* - - @Function RGXDeviceHasErnBrn - - @Description Checks if a device has a particular errata - - @Input hPrivate : Implementation specific data - @Input ui64ErnsBrns : Flags to check - - @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise - -******************************************************************************/ -IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); - /*! ******************************************************************************* @@ -730,6 +805,7 @@ IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); ******************************************************************************/ IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! ******************************************************************************* @@ -759,6 +835,7 @@ IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate); ******************************************************************************/ IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate); +#endif /*! ******************************************************************************* @@ -775,6 +852,21 @@ IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate); ******************************************************************************/ void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); +/*! +******************************************************************************* + + @Function RGXCalculateHostFWDataAddress + + @Description Calculates the base host address of the RISCV firmware data + + @Input hPrivate : Implementation specific data + @Input pvHostFWDataAddr : Initial host address of the firmware data + + @Return: Base host address of the RISCV firmware data + +******************************************************************************/ +void *RGXCalculateHostFWDataAddress(const void *hPrivate, void *pvHostFWDataAddr); + /*! ******************************************************************************* @@ -805,6 +897,22 @@ void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAd ******************************************************************************/ IMG_BOOL RGXDeviceAckIrq(const void *hPrivate); +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) +/*! 
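Since the MIPS-only helpers above now compile away entirely when RGX_FEATURE_MIPS_BIT_MASK is undefined, callers are expected to combine the compile-time guard with the run-time feature check. A minimal sketch of that calling pattern (the wrapper function itself is illustrative, not part of this patch):

    /* Illustrative only: query the physical bus width only on builds and devices
     * that actually have a MIPS firmware processor. */
    static IMG_UINT32 ExampleGetPhysBusWidth(const void *hPrivate)
    {
        IMG_UINT32 ui32PhysBusWidth = 0;

    #if defined(RGX_FEATURE_MIPS_BIT_MASK)
        if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
        {
            ui32PhysBusWidth = RGXGetDevicePhysBusWidth(hPrivate);
        }
    #else
        PVR_UNREFERENCED_PARAMETER(hPrivate);
    #endif

        return ui32PhysBusWidth;
    }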
+******************************************************************************* + +@Function RGXMMUInitRangeValue + +@Description Returns the appropriate config value for each MMU range + +@Input ui32MMURange : Hardware MMU range to be initialised + +@Return 64-bit register value + +******************************************************************************/ +IMG_UINT64 RGXMMUInitRangeValue(IMG_UINT32 ui32MMURange); +#endif + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl.h similarity index 98% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl.h index 4d7c0f0c7798..b46efee8c71c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl.h @@ -56,12 +56,14 @@ typedef struct _RGX_LAYER_PARAMS_ #endif IMG_DEV_PHYADDR sPCAddr; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) IMG_DEV_PHYADDR sGPURegAddr; IMG_DEV_PHYADDR sBootRemapAddr; IMG_DEV_PHYADDR sCodeRemapAddr; IMG_DEV_PHYADDR sDataRemapAddr; IMG_DEV_PHYADDR sTrampolineRemapAddr; IMG_BOOL bDevicePA0IsValid; +#endif } RGX_LAYER_PARAMS; #endif /* RGXLAYER_IMPL_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl_common.c new file mode 100644 index 000000000000..7eab6fde4dcd --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxlayer_impl_common.c @@ -0,0 +1,914 @@ +/*************************************************************************/ /*! +@File +@Title Common DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Common DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "rgxfwimageutils.h"
+
+#if defined(PDUMP)
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+  #include <linux/stdarg.h>
+ #else
+  #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+#endif
+
+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX)
+/* Registers at offsets below RGX_HOST_SECURE_REGBANK_OFFSET are accessed through
+ * the normal register mapping; offsets at or above it go through the secure
+ * register mapping. */
+#define RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr) \
+ (((ui32RegAddr) < RGX_HOST_SECURE_REGBANK_OFFSET) ? \
+ ((psDevInfo)->pvRegsBaseKM) : ((psDevInfo)->pvSecureRegsBaseKM))
+#else
+#define RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr) ((psDevInfo)->pvRegsBaseKM)
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize)
+{
+    PVR_UNREFERENCED_PARAMETER(hPrivate);
+    OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize)
+{
+    PVR_UNREFERENCED_PARAMETER(hPrivate);
+    OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...)
+{
+#if defined(PDUMP)
+    RGX_LAYER_PARAMS *psParams;
+    PVRSRV_RGXDEV_INFO *psDevInfo;
+    va_list argList;
+    va_start(argList, pszString);
+
+    PVR_ASSERT(hPrivate != NULL);
+    psParams = (RGX_LAYER_PARAMS*)hPrivate;
+    psDevInfo = psParams->psDevInfo;
+
+    PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+    va_end(argList);
+#else
+    PVR_UNREFERENCED_PARAMETER(hPrivate);
+    PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...)
+{ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list argList; + + PVR_UNREFERENCED_PARAMETER(hPrivate); + + va_start(argList, pszString); + vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); + va_end(argList); + + PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); +} + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +IMG_UINT32 RGXGetOSPageSize(const void *hPrivate) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + return OSGetPageSize(); +} +#endif + +IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + IMG_INT32 i32Ret = -1; + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + psDeviceNode = psDevInfo->psDeviceNode; + + if ((psDeviceNode->pfnGetDeviceFeatureValue)) + { + i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); + } + + return i32Ret; +} + +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; +} + +IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32CorememSize = 0; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); + } + + return ui32CorememSize; +} + +void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteUncheckedHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue); + } + + PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); +} + +void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteUncheckedHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); + } + + PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); +} + +IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT32 ui32RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui32RegValue = IMG_UINT32_MAX; + } + else +#endif + { + ui32RegValue = 
OSReadUncheckedHWReg32(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, psParams->ui32PdumpFlags); + + return ui32RegValue; +} + +IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT64 ui64RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui64RegValue = IMG_UINT64_MAX; + } + else +#endif + { + ui64RegValue = OSReadUncheckedHWReg64(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); + + return ui64RegValue; +} + +IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 uiRegValueNew, + IMG_UINT64 uiRegKeepMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + + /* only use the new values for bits we update according to the keep mask */ + uiRegValueNew &= ~uiRegKeepMask; + +#if defined(PDUMP) + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store register offset to temp PDump variable */ + PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags); + + /* Keep the bits set in the mask */ + PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", + uiRegKeepMask, ui32PDumpFlags); + + /* OR the new values */ + PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", + uiRegValueNew, ui32PDumpFlags); + + /* Do the actual register write */ + PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); + + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + + { + IMG_UINT64 uiRegValue = OSReadUncheckedHWReg64(pvRegsBase, ui32RegAddr); + uiRegValue &= uiRegKeepMask; + OSWriteUncheckedHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg32(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32RegValue, + ui32RegMask, + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32RegValue, + ui32RegMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 
ui64RegValue, + IMG_UINT64 ui64RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + /* Split lower and upper words */ + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); + IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); + IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), + ui32UpperValue, + ui32UpperMask, + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32LowerValue, + ui32LowerMask, + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32RegAddr + 4, + ui32UpperValue, + ui32UpperMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + + PDUMPREGPOL(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32LowerValue, + ui32LowerMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +void RGXSetPoweredState(const void *hPrivate, IMG_BOOL bPowered) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + psDevInfo->bRGXPowered = bPowered; +} + +void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) +{ + __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + OSWaitus(ui32TimeUs); + PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS); +} + +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; +} + +#if defined(PDUMP) +#if !defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) +void RGXWriteKernelMMUPC64(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT64 ui64PCVal) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write the cat-base address */ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal); + + /* Pdump catbase address */ + MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, + RGX_PDUMPREG_NAME, + ui32PCReg, + 8, + ui32PCRegAlignShift, + ui32PCRegShift, + PDUMP_FLAGS_CONTINUOUS); +} +#endif + +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32PCReg); + + /* Write 
the cat-base address */ + OSWriteUncheckedHWReg32(pvRegsBase, ui32PCReg, ui32PCVal); + + /* Pdump catbase address */ + MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, + RGX_PDUMPREG_NAME, + ui32PCReg, + 4, + ui32PCRegAlignShift, + ui32PCRegShift, + PDUMP_FLAGS_CONTINUOUS); +} +#endif /* defined(PDUMP) */ + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr; +} + +#if defined(PDUMP) +void RGXMIPSWrapperConfig(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64GPURegsAddr, + IMG_UINT32 ui32GPURegsAlign, + IMG_UINT32 ui32BootMode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, + ui32RegAddr, + (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode); + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store register offset to temp PDump variable */ + PDumpRegLabelToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + /* Align register transactions identifier */ + PDumpWriteVarSHRValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", + ui32GPURegsAlign, ui32PDumpFlags); + + /* Enable micromips instruction encoding */ + PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", + ui32BootMode, ui32PDumpFlags); + + /* Do the actual register write */ + PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, + ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); +} +#endif + +void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr; +} + +void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr; +} + +void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr; +} + +void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr; +} + +#if defined(PDUMP) +static inline +void RGXWriteRemapConfig2Reg(void __iomem *pvRegs, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64PhyAddr, + IMG_UINT64 ui64PhyMask, + IMG_UINT64 ui64Settings) +{ + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_ASSERT(psPMR != NULL); + psDevNode = PMR_DeviceNode(psPMR); + + OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings); + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store memory offset to temp PDump variable */ + PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, + uiLogicalOffset, ui32PDumpFlags); + + /* Keep only the relevant bits of the output physical address */ + PDumpWriteVarANDValueOp(psDevNode, ":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags); + + /* Extra settings for this remapped region */ + PDumpWriteVarORValueOp(psDevNode, ":SYSMEM:$1", ui64Settings, ui32PDumpFlags); + + /* Do the actual register write */ + PDumpInternalVarToReg64(psDevNode, 
RGX_PDUMPREG_NAME, ui32RegAddr, + ":SYSMEM:$1", ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); +} + +void RGXBootRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE); + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write remap config1 register */ + RGXWriteReg64(hPrivate, + ui32Config1RegAddr, + ui64Config1RegValue); + + /* Write remap config2 register */ + RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, + psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, + psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset, + ui32Config2RegAddr, + ui64Config2PhyAddr, + ui64Config2PhyMask, + ui64Config2Settings); +} + +void RGXCodeRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE); + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write remap config1 register */ + RGXWriteReg64(hPrivate, + ui32Config1RegAddr, + ui64Config1RegValue); + + /* Write remap config2 register */ + RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, + psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, + psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset, + ui32Config2RegAddr, + ui64Config2PhyAddr, + ui64Config2PhyMask, + ui64Config2Settings); +} + +void RGXDataRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write remap config1 register */ + RGXWriteReg64(hPrivate, + ui32Config1RegAddr, + ui64Config1RegValue); + + /* Write remap config2 register */ + RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, + psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, + psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset, + ui32Config2RegAddr, + ui64Config2PhyAddr, + ui64Config2PhyMask, + ui64Config2Settings); +} + +void RGXTrampolineRemapConfig(const void *hPrivate, + IMG_UINT32 ui32Config1RegAddr, + IMG_UINT64 ui64Config1RegValue, + IMG_UINT32 ui32Config2RegAddr, + IMG_UINT64 ui64Config2PhyAddr, + IMG_UINT64 ui64Config2PhyMask, + IMG_UINT64 ui64Config2Settings) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* write the register for real, without PDump */ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, + ui32Config1RegAddr, + ui64Config1RegValue); + + PDUMP_BLKSTART(ui32PDumpFlags); + + /* Store the memory address in a PDump variable */ + PDumpPhysHandleToInternalVar64(psDevInfo->psDeviceNode, ":SYSMEM:$1", + psDevInfo->psTrampoline->hPdumpPages, + ui32PDumpFlags); + + /* Keep only the 
relevant bits of the input physical address */ + PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", + ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK, + ui32PDumpFlags); + + /* Enable bit */ + PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", + RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, + ui32PDumpFlags); + + /* Do the PDump register write */ + PDumpInternalVarToReg64(psDevInfo->psDeviceNode, + RGX_PDUMPREG_NAME, + ui32Config1RegAddr, + ":SYSMEM:$1", + ui32PDumpFlags); + + PDUMP_BLKEND(ui32PDumpFlags); + + /* this can be written directly */ + RGXWriteReg64(hPrivate, + ui32Config2RegAddr, + (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings); +} +#endif /* defined(PDUMP) */ +#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ + +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; +} + +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); +} + +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); +} + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH); +} + +IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return psDevInfo->sLayerParams.bDevicePA0IsValid; +} +#endif + +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; +} + +void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; +} + +void *RGXCalculateHostFWDataAddress(const void *hPrivate, void *pvHostFWDataAddr) +{ +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT8 *ui8HostFWDataAddr = (IMG_UINT8*)pvHostFWDataAddr; + IMG_UINT32 ui32Offset = 0U; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = 
psParams->psDevInfo; + + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) + { + ui32Offset = + PVR_ALIGN(RGXGetFWImageSectionAllocSize(hPrivate, RISCV_UNCACHED_CODE), + RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN) + + PVR_ALIGN(RGXGetFWImageSectionAllocSize(hPrivate, RISCV_CACHED_CODE), + RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN); + } + + ui8HostFWDataAddr -= ui32Offset; + return (void*)ui8HostFWDataAddr; +#else + PVR_UNREFERENCED_PARAMETER(hPrivate); + + return pvHostFWDataAddr; +#endif +} + +IMG_BOOL RGXDeviceAckIrq(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->pfnRGXAckIrq != NULL) ? + psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.c index 7885c34c2961..4df36ef5d1f0 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.c @@ -59,6 +59,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_bvnc_defs_km.h" #include "info_page.h" +# include "rgxmmudefs_km.h" + #if defined(PDUMP) #include "sync.h" #endif @@ -75,80 +77,6 @@ struct SERVER_MMU_CONTEXT_TAG PVRSRV_RGXDEV_INFO *psDevInfo; }; /* SERVER_MMU_CONTEXT is typedef-ed in rgxmem.h */ -PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDeviceNode, - MMU_CONTEXT *psMMUContext, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate) -{ - PVRSRV_ERROR eError; - DLLIST_NODE *psNode, *psNext; - RGXFWIF_KCCB_CMD sFlushInvalCmd; - SERVER_MMU_CONTEXT *psServerMMUContext = NULL; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - IMG_UINT32 ui32kCCBCommandSlot; - - OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); - - dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) - { - SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); - if (psIter->psMMUContext == psMMUContext) - { - psServerMMUContext = psIter; - } - } - - OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); - - if (! 
psServerMMUContext) - { - return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; - } - - /* Schedule the SLC flush command */ -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Submit SLC flush and invalidate"); -#endif - sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = bInvalidate; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = uiSize; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = sDevVAddr.uiAddr; - eError = RGXGetFWCommonContextAddrFromServerMMUCtx(psDevInfo, - psServerMMUContext, - &sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext); - if (eError != PVRSRV_OK) - { - return eError; - } - - eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, - &sFlushInvalCmd, - PDUMP_FLAGS_CONTINUOUS, - &ui32kCCBCommandSlot); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "RGXSLCFlush: Failed to schedule SLC flush command with error (%u)", - eError)); - } - else - { - /* Wait for the SLC flush to complete */ - eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "RGXSLCFlush: SLC flush and invalidate aborted with error (%u)", - eError)); - } - } - - return eError; -} - PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT *psMMUContext, IMG_UINT64 ui64FBSCEntryMask) @@ -165,6 +93,7 @@ PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, if (psIter->psMMUContext == psMMUContext) { psServerMMUContext = psIter; + break; } } @@ -239,27 +168,16 @@ void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, } #if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) - { - MMU_AppendCacheFlags(psMMUContext, ui32NewCacheFlags); - } - else + /* For VIVT devices only accumulate the flags on the Firmware MMU context + * since the Firmware/HW invalidates caches on every kick automatically. */ + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT) || + psMMUContext == psDevInfo->psKernelMMUCtx) #endif { MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32NewCacheFlags); } } -static inline void _GetAndResetCacheOpsPending(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 *pui32FWCacheFlags) -{ - /* - * Atomically exchange flags and 0 to ensure we never accidentally read - * state inconsistently or overwrite valid cache flags with 0. - */ - *pui32FWCacheFlags = MMU_ExchangeCacheFlags(psDevInfo->psKernelMMUCtx, 0); -} - static PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_DM eDM, @@ -279,7 +197,13 @@ PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim, &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr); - /* Indicate the firmware should signal command completion to the host */ + /* + * Indicate if the firmware should signal command completion to the host. + * The sync update will always happen. This flag requests that the FW pass + * back the result of the KCCB command and interrupt the host. The KCCB + * response is not used but the interrupt signal to the host is as a way + * to know the sync update may have happened. 
+ */ if (bInterrupt) { ui32CacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT; @@ -304,6 +228,8 @@ PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, "DM=%d with error (%u)", __func__, eDM, eError)); psDeviceNode->ui32NextMMUInvalidateUpdate--; + + MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32CacheFlags); } return eError; @@ -323,7 +249,11 @@ PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, goto RGXMMUCacheInvalidateKick_exit; } - _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags); + /* + * Atomically clear flags to ensure we never accidentally read state + * inconsistently or overwrite valid cache flags with 0. + */ + ui32FWCacheFlags = MMU_GetAndResetCacheFlags(psDevInfo->psKernelMMUCtx); if (ui32FWCacheFlags == 0) { /* Nothing to do if no cache ops pending */ @@ -345,16 +275,19 @@ PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, goto _PowerUnlockAndReturnErr; } - eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, ui32FWCacheFlags, - IMG_TRUE, pui32MMUInvalidateUpdate); - if (eError != PVRSRV_OK) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - /* failed to submit cache operations, return failure */ - PVR_DPF((PVR_DBG_WARNING, "%s: failed to submit cache command (%s)", - __func__, PVRSRVGetErrorString(eError))); - MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32FWCacheFlags); - goto _PowerUnlockAndReturnErr; + eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, ui32FWCacheFlags, + IMG_TRUE, pui32MMUInvalidateUpdate); + if (!PVRSRVIsRetryError(eError)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); } + END_LOOP_UNTIL_TIMEOUT_US(); + + PVR_LOG_IF_ERROR(eError, "_PrepareAndSubmitCacheCommand"); _PowerUnlockAndReturnErr: PVRSRVPowerUnlock(psDeviceNode); @@ -367,22 +300,141 @@ PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_DM eDM, IMG_UINT32 *pui32MMUInvalidateUpdate) { + PVRSRV_ERROR eError; PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; IMG_UINT32 ui32FWCacheFlags; /* Caller should ensure that power lock is held before calling this function */ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); - _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags); + /* + * Atomically clear flags to ensure we never accidentally read state + * inconsistently or overwrite valid cache flags with 0. + */ + ui32FWCacheFlags = MMU_GetAndResetCacheFlags(psDevInfo->psKernelMMUCtx); if (ui32FWCacheFlags == 0) { /* Nothing to do if no cache ops pending */ return PVRSRV_OK; } - return _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32FWCacheFlags, - IMG_FALSE, pui32MMUInvalidateUpdate); + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32FWCacheFlags, + IMG_FALSE, pui32MMUInvalidateUpdate); + if (!PVRSRVIsRetryError(eError)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } + END_LOOP_UNTIL_TIMEOUT_US(); + + PVR_LOG_RETURN_IF_ERROR(eError, "_PrepareAndSubmitCacheCommand"); + + return PVRSRV_OK; +} + +#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) +/* + * RGXMapBRN71422TargetPhysicalAddress + * + * Only used on early Volcanic cores. + * Set-up a special MMU tree mapping with a single page that eventually points + * to RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR. This is supplied by the + * customer (in rgxdefs_km.h) and hence this WA is off by default. 
+ * + * PC entries are 32b, with the last 4 bits being 0 except for the LSB bit that should be 1 (Valid). Addr is 4KB aligned. + * PD entries are 64b, with addr in bits 39:5 and everything else 0 except for LSB bit that is Valid. Addr is byte aligned? + * PT entries are 64b, with phy addr in bits 39:12 and everything else 0 except for LSB bit that is Valid. Addr is 4KB aligned. + * So, we can construct the page tables in a single page like this: + * 0x00 : PCE (PCE index 0) + * 0x04 : 0x0 + * 0x08 : PDEa (PDE index 1) + * 0x0C : PDEb + * 0x10 : PTEa (PTE index 2) + * 0x14 : PTEb + * + * With the PCE and the PDE pointing to this same page. + * The VA address that we are mapping is therefore: + * VA = PCE_idx*PCE_size + PDE_idx*PDE_size + PTE_idx*PTE_size = + * = 0 * 1GB + 1 * 2MB + 2 * 4KB = + * = 0 + 0x20_0000 + 0x2000 = + * = 0x00_0020_2000 + */ +void RGXMapBRN71422TargetPhysicalAddress(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEV_PHYADDR sPhysAddrL1Px, + void *pxL1PxCpuVAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + IMG_UINT32 *pui32Px = pxL1PxCpuVAddr; + IMG_UINT64 *pui64Px = pxL1PxCpuVAddr; + IMG_UINT64 ui64Entry; + + + /* Only setup the BRN71422 workaround if this is the FW memory + * context and BRN present. + */ + if ((psConnection != NULL) || !RGX_IS_BRN_SUPPORTED(psDevInfo, 71422)) + { + return; + } + + /* Setup dummy mapping to fast constant time physical address. */ + /* PCE points to PC */ + ui64Entry = sPhysAddrL1Px.uiAddr; + ui64Entry = ui64Entry >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; + ui64Entry = ui64Entry << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT; + ui64Entry = ui64Entry & ~RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK; + ui64Entry = ui64Entry | RGX_MMUCTRL_PC_DATA_VALID_EN; + pui32Px[0] = (IMG_UINT32) ui64Entry; + + /* PDE points to PC */ + ui64Entry = sPhysAddrL1Px.uiAddr; + ui64Entry = ui64Entry & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK; + ui64Entry = ui64Entry | RGX_MMUCTRL_PD_DATA_VALID_EN; + pui64Px[1] = ui64Entry; + + /* PTE points to PAddr */ + ui64Entry = RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR; + ui64Entry = ui64Entry & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK; + ui64Entry = ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN; + pui64Px[2] = ui64Entry; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapping the BRN71422 workaround to target physical address 0x%" IMG_UINT64_FMTSPECx ".", + __func__, RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)); + } +#endif + +/* Common header, only needed for architectures with MIPS FW CPU */ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +void RGXMMUTweakProtFlags(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + MMU_PROTFLAGS_T *puiMMUProtFlags) +{ + /* If we are allocating on the MMU of the firmware processor, the + * cached/uncached attributes must depend on the FIRMWARE_CACHED + * allocation flag. 
+ */ + if (psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) + { + if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) + { + *puiMMUProtFlags |= MMU_PROTFLAGS_CACHED; + } + else + { + *puiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED; + + } + *puiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT; + } +} +#endif + /* page fault debug is the only current use case for needing to find process info * after that process device memory context has been destroyed @@ -423,7 +475,7 @@ static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERV { PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context")); } - OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); + OSStringSafeCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); } @@ -488,7 +540,6 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_MEMALLOCFLAGS_T uiFWMemContextMemAllocFlags; RGXFWIF_FWMEMCONTEXT *psFWMemContext; DEVMEM_MEMDESC *psFWMemContextMemDesc; SERVER_MMU_CONTEXT *psServerMMUContext; @@ -500,14 +551,6 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, * of the MMU context for use when programming the BIF. */ psDevInfo->psKernelMMUCtx = psMMUContext; - -#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) - /* Setup the BRN71422 mapping in the FW memory context. */ - if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71422)) - { - RGXMapBRN71422TargetPhysicalAddress(psMMUContext); - } -#endif } else { @@ -522,23 +565,6 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, psServerMMUContext->ui64FBSCEntryMask = 0; psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = 0; - /* - * This FW MemContext is only mapped into kernel for initialisation purposes. - * Otherwise this allocation is only used by the FW. - * Therefore the GPU cache doesn't need coherency, and write-combine - * will suffice on the CPU side (WC buffer will be flushed at any kick) - */ - uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); - /* Allocate device memory for the firmware memory context for the new application. @@ -546,7 +572,7 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGX firmware memory context"); eError = DevmemFwAllocate(psDevInfo, sizeof(*psFWMemContext), - uiFWMemContextMemAllocFlags | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + RGX_FWCOMCTX_ALLOCFLAGS, "FwMemoryContext", &psFWMemContextMemDesc); @@ -596,7 +622,7 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, psFWMemContext->uiBPHandlerAddr = 0; psFWMemContext->uiBreakpointCtl = 0; -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) { IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; IMG_BOOL bOSidAxiProt; @@ -662,6 +688,7 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Release kernel address acquired above. 
*/ + RGXFwSharedMemCacheOpPtr(psFWMemContext, FLUSH); DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); /* @@ -671,7 +698,7 @@ PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM(); psServerMMUContext->psMMUContext = psMMUContext; psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc; - OSStringLCopy(psServerMMUContext->szProcessName, + OSStringSafeCopy(psServerMMUContext->szProcessName, OSGetCurrentClientProcessNameKM(), sizeof(psServerMMUContext->szProcessName)); @@ -748,14 +775,17 @@ void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, } /* Lastly check for fault in the kernel allocated memory */ - if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_LOG(("Failed to get PC address for kernel memory context")); - } + if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for kernel memory context")); + } - if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) - { - MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); + } } out_unlock: @@ -796,12 +826,12 @@ IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR s if (psServerMMUContext != NULL) { psInfo->uiPID = psServerMMUContext->uiPID; - OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); + OSStringSafeCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); psInfo->bUnregistered = IMG_FALSE; bRet = IMG_TRUE; } /* else check if the input PC addr corresponds to the firmware */ - else + else if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { IMG_DEV_PHYADDR sKernelPCDevPAddr; PVRSRV_ERROR eError; @@ -817,7 +847,7 @@ IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR s if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr) { psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; - OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); + OSStringSafeCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); psInfo->bUnregistered = IMG_FALSE; bRet = IMG_TRUE; } @@ -847,7 +877,7 @@ IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR s if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr) { psInfo->uiPID = psRecord->uiPID; - OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + OSStringSafeCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); psInfo->bUnregistered = IMG_TRUE; bRet = IMG_TRUE; break; @@ -883,7 +913,7 @@ IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, if (psServerMMUContext != NULL) { psInfo->uiPID = psServerMMUContext->uiPID; - OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); + OSStringSafeCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); psInfo->bUnregistered = IMG_FALSE; bRet = IMG_TRUE; } @@ -891,7 +921,7 @@ IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) { psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; - OSStringLCopy(psInfo->szProcessName, "Firmware", 
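rgxmem.h above now exports RGXMapBRN71422TargetPhysicalAddress only when RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR is defined, so the MMU-side caller is expected to be guarded the same way. A hedged sketch of such a call site (the surrounding caller and its local variables are assumed, not shown in this patch):

    /* Assumed call site, illustrative only: set up the BRN71422 mapping when the
     * workaround is compiled in; the arguments come from the caller's context. */
    #if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)
        RGXMapBRN71422TargetPhysicalAddress(psConnection, psDevNode,
                                            sPhysAddrL1Px, pxL1PxCpuVAddr);
    #endif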
sizeof(psInfo->szProcessName)); + OSStringSafeCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); psInfo->bUnregistered = IMG_FALSE; bRet = IMG_TRUE; } @@ -916,7 +946,7 @@ IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, if (psRecord->uiPID == uiPID) { psInfo->uiPID = psRecord->uiPID; - OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + OSStringSafeCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); psInfo->bUnregistered = IMG_TRUE; bRet = IMG_TRUE; break; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.h index cbcbed77f921..2e959fc79eda 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmem.h @@ -74,12 +74,6 @@ void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode); void RGXMMUSyncPrimFree(void); -PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDevNode, - MMU_CONTEXT *psMMUContext, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiLength, - IMG_BOOL bInvalidate); - PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT *psMMUContext, IMG_UINT64 ui64FBSCEntryMask); @@ -124,6 +118,22 @@ PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_DM eDM, IMG_UINT32 *pui32MMUInvalidateUpdate); +/* Needed for Volcanic architectures with BRN71422 */ +#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) +void RGXMapBRN71422TargetPhysicalAddress(struct _CONNECTION_DATA_ *psConnection, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEV_PHYADDR sPhysAddrL1Px, + void *pxL1PxCpuVAddr); +#endif + +/* Needed for Rogue architecture where a MIPS FW CPU is used */ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +void RGXMMUTweakProtFlags(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + MMU_PROTFLAGS_T *uiMMUProtFlags); +#endif + void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData); PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDevNode, MMU_CONTEXT *psMMUContext, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmmuinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmmuinit.c similarity index 81% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmmuinit.c rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmmuinit.c index 8d13954f5e34..29049ad8c827 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmmuinit.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmmuinit.c @@ -52,21 +52,19 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pvr_debug.h" #include "pvrsrv_error.h" #include "rgx_memallocflags.h" -#include "rgx_heaps.h" +#include "rgx_heaps_server.h" #include "pdump_km.h" - +#include "allocmem.h" /* useful macros */ /* units represented in a bitfield */ #define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) - /* * Bits of PT, PD and PC not involving addresses */ - - +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /* protection bits for MMU_VERSION <= 3 */ #define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ ~RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK | \ @@ -75,6 +73,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. RGX_MMUCTRL_PT_DATA_CC_EN | \ RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ RGX_MMUCTRL_PT_DATA_VALID_EN) +#else +#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ + RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ + RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \ + RGX_MMUCTRL_PT_DATA_CC_EN | \ + RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ + RGX_MMUCTRL_PT_DATA_VALID_EN) +#endif #define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ @@ -84,6 +91,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. RGX_MMUCTRL_PC_DATA_VALID_EN) +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /* * protection bits for MMU_VERSION >= 4 * MMU4 has no PENDING or PAGE_SIZE fields in PxE @@ -93,8 +101,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_MMU4CTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_VALID_EN) #define RGX_MMU4CTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_VALID_EN) - - +#endif static MMU_PxE_CONFIG sRGXMMUPCEConfig; @@ -182,13 +189,14 @@ static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /* protection bits derivation functions for MMUv4 */ static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT32 *pui32Log2PageSize); - +#endif static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, const MMU_PxE_CONFIG **ppsMMUPDEConfig, @@ -203,10 +211,50 @@ static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32 static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) +/* This is a basis for a pattern of parity bit values for consecutive VAs. + For each PT with 512 entries we'd get either this pattern or its reverse. 
*/ +static const IMG_UINT8 ui8ParityPTPattern[512] = { + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, + 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, + 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, + 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, + 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, + 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, + 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, + 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, + 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, + 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, + 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1}; +#endif + PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) { + +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) || defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + IMG_BOOL bHaveParity = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MH_PARITY); +#endif + +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) IMG_BOOL bHaveMMU4 = (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4); +#define RGX_GET_MMUCTRL_PROTMASK(entry) \ + (bHaveMMU4 ? RGX_MMU4CTRL_##entry##_PROTMASK : RGX_MMUCTRL_##entry##_PROTMASK) +#else +#define RGX_GET_MMUCTRL_PROTMASK(entry) RGX_MMUCTRL_##entry##_PROTMASK +#endif /* Setup of Px Entries: * @@ -255,13 +303,16 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPCEConfig */ + sRGXMMUPCEConfig.ePxLevel = MMU_LEVEL_3; + sRGXMMUPCEConfig.pszPxLevelStr = "PC"; sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ - sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ + sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */ - sRGXMMUPCEConfig.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PCE_PROTMASK : RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits */ + sRGXMMUPCEConfig.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PCE); /* Mask to get the status bits */ + sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ @@ -284,11 +335,16 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * * Configuration for heaps with 4kB Data-Page size * + * Bit 39 30 29 21 20 12 11 0 + * | \ / \ / \ / \ + * |....PCE...|...PDE...|...PTE...|....VAddr...| */ /* * Setup sRGXMMUPDEConfig_4KBDP */ + sRGXMMUPDEConfig_4KBDP.ePxLevel = MMU_LEVEL_2; + sRGXMMUPDEConfig_4KBDP.pszPxLevelStr = "PD"; sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); @@ -298,7 +354,7 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; - sRGXMMUPDEConfig_4KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PDE); sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; @@ -307,18 +363,28 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_4KBDP */ + sRGXMMUPTEConfig_4KBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_4KBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ - sRGXMMUPTEConfig_4KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PTE); sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + if (bHaveParity) + { + sRGXMMUPTEConfig_4KBDP.uiParityBitMask = RGX_MMUCTRL_PT_DATA_PT_PARITY_EN; + sRGXMMUPTEConfig_4KBDP.uiParityBitShift = RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT; + } +#endif + /* * Setup sRGXMMUDevVAddrConfig_4KBDP */ @@ -355,11 +421,17 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * * Configuration for heaps with 16kB Data-Page size * + * Bit 39 30 29 21 20 14 13 0 + * | \ / \ / \ / \ + * |....PCE...|...PDE...|..PTE..|.....VAddr....| + * */ /* * Setup sRGXMMUPDEConfig_16KBDP */ + sRGXMMUPDEConfig_16KBDP.ePxLevel = MMU_LEVEL_2; + sRGXMMUPDEConfig_16KBDP.pszPxLevelStr = "PD"; sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); @@ -369,7 +441,7 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; - sRGXMMUPDEConfig_16KBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PDE); sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; @@ -378,18 +450,28 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_16KBDP */ + sRGXMMUPTEConfig_16KBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_16KBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; - sRGXMMUPTEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PTE); sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + if (bHaveParity) + { + sRGXMMUPTEConfig_16KBDP.uiParityBitMask = RGX_MMUCTRL_PT_DATA_PT_PARITY_EN; + sRGXMMUPTEConfig_16KBDP.uiParityBitShift = RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT; + } +#endif + /* * Setup sRGXMMUDevVAddrConfig_16KBDP */ @@ -428,11 +510,17 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * * Configuration for heaps with 64kB Data-Page size * + * Bit 39 30 29 21 20 16 15 0 + * | \ / \ / \ / \ + * |....PCE...|...PDE...|.PTE.|.....VAddr......| + * */ /* * Setup sRGXMMUPDEConfig_64KBDP */ + sRGXMMUPDEConfig_64KBDP.ePxLevel = MMU_LEVEL_2; + sRGXMMUPDEConfig_64KBDP.pszPxLevelStr = "PD"; sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); @@ -442,7 +530,7 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; - sRGXMMUPDEConfig_64KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PDE); sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; @@ -451,18 +539,28 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_64KBDP */ + sRGXMMUPTEConfig_64KBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_64KBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; - sRGXMMUPTEConfig_64KBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PTE); sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + if (bHaveParity) + { + sRGXMMUPTEConfig_64KBDP.uiParityBitMask = RGX_MMUCTRL_PT_DATA_PT_PARITY_EN; + sRGXMMUPTEConfig_64KBDP.uiParityBitShift = RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT; + } +#endif + /* * Setup sRGXMMUDevVAddrConfig_64KBDP */ @@ -502,11 +600,17 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * * Configuration for heaps with 256kB Data-Page size * + * Bit 39 30 29 21 20 18 17 0 + * | \ / \| |/ \ + * |....PCE...|...PDE...|PTE|.......VAddr......| + * */ /* * Setup sRGXMMUPDEConfig_256KBDP */ + sRGXMMUPDEConfig_256KBDP.ePxLevel = MMU_LEVEL_2; + sRGXMMUPDEConfig_256KBDP.pszPxLevelStr = "PD"; sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); @@ -516,7 +620,7 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; - sRGXMMUPDEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PDE); sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; @@ -525,18 +629,28 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP */ + sRGXMMUPTEConfig_256KBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_256KBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; - sRGXMMUPTEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PTE); sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + if (bHaveParity) + { + sRGXMMUPTEConfig_256KBDP.uiParityBitMask = RGX_MMUCTRL_PT_DATA_PT_PARITY_EN; + sRGXMMUPTEConfig_256KBDP.uiParityBitShift = RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT; + } +#endif + /* * Setup sRGXMMUDevVAddrConfig_256KBDP */ @@ -571,9 +685,21 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) gsPageSizeConfig256KB.uiRefCount = 0; gsPageSizeConfig256KB.uiMaxRefCount = 0; + /* + * + * Configuration for heaps with 1MB Data-Page size + * + * Bit 39 30 29 21 20 19 0 + * | \ / \ | / \ + * |....PCE...|...PDE...|.|........VAddr.......| + * PTE + */ + /* * Setup sRGXMMUPDEConfig_1MBDP */ + sRGXMMUPDEConfig_1MBDP.ePxLevel = MMU_LEVEL_2; + sRGXMMUPDEConfig_1MBDP.pszPxLevelStr = "PD"; sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); @@ -587,7 +713,7 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; - sRGXMMUPDEConfig_1MBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PDE); sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; @@ -596,18 +722,28 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_1MBDP */ + sRGXMMUPTEConfig_1MBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_1MBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; - sRGXMMUPTEConfig_1MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PTE); sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + if (bHaveParity) + { + sRGXMMUPTEConfig_1MBDP.uiParityBitMask = RGX_MMUCTRL_PT_DATA_PT_PARITY_EN; + sRGXMMUPTEConfig_1MBDP.uiParityBitShift = RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT; + } +#endif + /* * Setup sRGXMMUDevVAddrConfig_1MBDP */ @@ -642,9 +778,21 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) gsPageSizeConfig1MB.uiRefCount = 0; gsPageSizeConfig1MB.uiMaxRefCount = 0; + /* + * + * Configuration for heaps with 2MB Data-Page size + * + * Bit 39 30 29 21 20 0 + * | \ / \ / \ + * |....PCE...|...PDE...|.........VAddr.......| + * + */ + /* * Setup sRGXMMUPDEConfig_2MBDP */ + sRGXMMUPDEConfig_2MBDP.ePxLevel = MMU_LEVEL_2; + sRGXMMUPDEConfig_2MBDP.pszPxLevelStr = "PD"; sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); @@ -658,22 +806,32 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; - sRGXMMUPDEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PDE); sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; +#if defined(RGX_FEATURE_MH_PARITY_BIT_MASK) + if (bHaveParity) + { + sRGXMMUPTEConfig_2MBDP.uiParityBitMask = RGX_MMUCTRL_PT_DATA_PT_PARITY_EN; + sRGXMMUPTEConfig_2MBDP.uiParityBitShift = RGX_MMUCTRL_PT_DATA_PT_PARITY_SHIFT; + } +#endif + /* * Setup sRGXMMUPTEConfig_2MBDP */ + sRGXMMUPTEConfig_2MBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_2MBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; - sRGXMMUPTEConfig_2MBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_GET_MMUCTRL_PROTMASK(PTE); sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; @@ -717,11 +875,44 @@ PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * Setup sRGXMMUDeviceAttributes */ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; - sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3; sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + { + IMG_UINT32 i; + + PVR_ASSERT(sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT <= 512); + PVR_ASSERT(sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT <= 512); + PVR_ASSERT(sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT <= 512); + PVR_ASSERT(sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT <= 512); + PVR_ASSERT(sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT <= 512); + PVR_ASSERT(sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT <= 512); + + PVR_ASSERT(sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry == 8); + PVR_ASSERT(sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry == 8); + PVR_ASSERT(sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry == 8); + PVR_ASSERT(sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry == 8); + PVR_ASSERT(sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry == 8); + PVR_ASSERT(sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry == 8); + + sRGXMMUDeviceAttributes.pui64PrecomputedAllocParity[0] = + OSAllocZMem(sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT * sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry); + sRGXMMUDeviceAttributes.pui64PrecomputedAllocParity[1] = + OSAllocZMem(sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT * sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry); + + /* Generate two precomputed pages in host memory for zero-initialised PTs with parity bits set. */ + for (i=0; ipsMMUDevAttrs = &sRGXMMUDeviceAttributes; @@ -805,61 +1000,6 @@ PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) return eError; } -/*************************************************************************/ /*! -@Function RGXMMUInit_GetConfigRangeValue -@Description Helper Function - For a given virtual address range and page size, return the - value to load into an MMU_PAGE_SIZE_RANGE config register. 
-@Return 64-bit register value -*/ /**************************************************************************/ -IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize) -{ - /* end address of range is inclusive */ - IMG_UINT64 ui64EndAddress = ui64BaseAddress + ui64RangeSize - (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT); - IMG_UINT64 ui64RegValue = 0; - - switch (ui32DataPageSize) - { - case 16*1024: - ui64RegValue = 1; - break; - case 64*1024: - ui64RegValue = 2; - break; - case 256*1024: - ui64RegValue = 3; - break; - case 1024*1024: - ui64RegValue = 4; - break; - case 2*1024*1024: - ui64RegValue = 5; - break; - case 4*1024: - /* fall through */ - default: - /* anything we don't support, use 4K */ - break; - } - - /* check that the range is defined by valid 40 bit virtual addresses */ - PVR_ASSERT((ui64BaseAddress & ~((1ULL << 40) - 1)) == 0); - PVR_ASSERT((ui64EndAddress & ~((1ULL << 40) - 1)) == 0); - - /* the range config register addresses are in 2MB chunks so check 21 lsb are zero */ - PVR_ASSERT((ui64BaseAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT) - 1)) == 0); - PVR_ASSERT((ui64EndAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT) - 1)) == 0); - - ui64BaseAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT; - ui64EndAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT; - - ui64RegValue = (ui64RegValue << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT) | - (ui64EndAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) | - (ui64BaseAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT); - return ui64RegValue; -} - - /*************************************************************************/ /*! @Function RGXDerivePCEProt4 @Description calculate the PCE protection flags based on a 4 byte entry @@ -937,8 +1077,8 @@ static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2Dat break; default: PVR_DPF((PVR_DBG_ERROR, - "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]", - __FILE__, __LINE__, __func__, uiLog2DataPageSize)); + "%s: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]", + __func__, uiLog2DataPageSize)); } } return ret_value; @@ -994,6 +1134,14 @@ static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2Dat ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN; } +#if defined(RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) + /* cache setup */ + if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN; + } +#endif + if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) { ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN; @@ -1004,6 +1152,7 @@ static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2Dat ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN; } +#if defined(RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC) /** * Always enable caching on the fabric level cache irrespective of type of * cache coherent interconnect and memory cache attributes. @@ -1012,6 +1161,7 @@ static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2Dat * cache coherent interconnect. 
*/ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC; +#endif return ui64MMUFlags; } @@ -1176,9 +1326,7 @@ static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32 return PVRSRV_OK; } - - - +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /*************************************************************************/ /*! @Function RGXMMU4DerivePDEProt8 @Description derive the PDE protection flags based on an 8 byte entry @@ -1270,3 +1418,4 @@ static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ * return eError; } +#endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmmuinit.h similarity index 99% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmmuinit.h index 0591628d6ad3..b70ddad0cb64 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxmmuinit.h @@ -56,5 +56,4 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); - #endif /* #ifndef SRVKM_RGXMMUINIT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump.h index 54443a59e0c5..365269af59ac 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump.h @@ -66,21 +66,6 @@ PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32PDumpFlags); -#if defined(SUPPORT_VALIDATION) -/*! -******************************************************************************* - - @Function PVRSRVPDumpComputeCRCSignatureCheckKM - - @Description - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR PVRSRVPDumpComputeCRCSignatureCheckKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT32 ui32PDumpFlags); -#endif /*! ******************************************************************************* diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.c new file mode 100644 index 000000000000..fb1303c5f565 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.c @@ -0,0 +1,121 @@ +/*************************************************************************/ /*! +@File +@Title RGX PDump Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX common PDump functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxpdump_common.h" +#include "devicemem_utils.h" +#include "devicemem_server.h" + +PVRSRV_ERROR PDumpGetValidRegion(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc, + IMG_UINT32 uiSize, + DLLIST_NODE *psRegionList) +{ + IMG_DEV_VIRTADDR sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr; + sDevAddrStart.uiAddr += psMemDesc->uiOffset; + + return DevmemIntPDumpGetValidRegions( + GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psDeviceNode, + psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + sDevAddrStart, + uiSize, + psRegionList + ); +} + +void PDumpSaveToFileVirtual(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc, + DLLIST_NODE *psRegionList, + const IMG_CHAR *pszFileName, + IMG_UINT32 uiPDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = DevmemIntPDumpSaveFromRegionListToFileVirtual( + GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psDeviceNode, + psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + psRegionList, + pszFileName, + 0, + uiPDumpFlags + ); + PVR_LOG_IF_ERROR(eError, "DevmemIntPDumpSaveFromRegionListToFileVirtual"); + + /* If PDump was rejected for this device, suppress silently */ + if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) + { + PVR_ASSERT(eError == PVRSRV_OK); + } +} + +void PDumpSaveToFileVirtualNoValidate(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 uiSize, + const IMG_CHAR *pszFileName, + IMG_UINT32 uiFileOffset, + IMG_UINT32 uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr; + + sDevAddrStart.uiAddr += psMemDesc->uiOffset + uiOffset; + + eError = DevmemIntPDumpSaveToFileVirtualNoValidate( + psDeviceNode, + psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + sDevAddrStart, + uiSize, + pszFileName, + uiFileOffset, + uiPDumpFlags + ); + PVR_LOG_IF_ERROR(eError, "DevmemIntPDumpSaveToFileVirtualNoValidate"); + + /* If PDump was rejected for this device, suppress silently */ + if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) + { + PVR_ASSERT(eError == PVRSRV_OK); + } +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.h new file mode 100644 index 000000000000..8df9389a13ed --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdump_common.h @@ -0,0 +1,106 @@ +/*************************************************************************/ /*! +@File +@Title RGX PDump Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description RGX common PDump functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXPDUMP_COMMON_H +#define RGXPDUMP_COMMON_H + +#if defined(PDUMP) + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "dllist.h" +#include "devicemem_typedefs.h" +#include "device.h" + +/*************************************************************************/ /*! +@Function PDumpGetValidRegion +@Description Checks valid regions within a given DEVMEM_MEMDESC and returns + a list containing region descriptors. +@Input psDeviceNode Pointer to the device node. +@Input psMemDesc Pointer to the memory descriptor. +@Input uiSize Size of the allocation. +@Output psRegionList List of valid regions. +@Return PVRSRV_ERROR PVRSRV_OK on success or error code on failure. +*/ /**************************************************************************/ +PVRSRV_ERROR PDumpGetValidRegion(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc, + IMG_UINT32 uiSize, + DLLIST_NODE *psRegionList); + +/*************************************************************************/ /*! +@Function PDumpSaveToFileVirtual +@Description Emits SAB PDump command to file. +@Input psDeviceNode Pointer to the device node. +@Input psMemDesc Pointer to the memory descriptor. +@Input psRegionList List of valid regions. +@Input pszFileName Output file name. +@Input uiPDumpFlags PDump flags. +*/ /**************************************************************************/ +void PDumpSaveToFileVirtual(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc, + DLLIST_NODE *psRegionList, + const IMG_CHAR *pszFileName, + IMG_UINT32 uiPDumpFlags); + +/*************************************************************************/ /*! +@Function PDumpSaveToFileVirtualNoValidate +@Description Emits SAB PDump command to file. +@Input psDeviceNode Pointer to the device node. +@Input psMemDesc Pointer to the memory descriptor. +@Input uiOffset Offset within memory region. +@Input uiSize Size of memory region. +@Input pszFileName Output file name. +@Input uiFileOffset Offset within file. +@Input uiPDumpFlags PDump flags. 
+*/ /**************************************************************************/ +void PDumpSaveToFileVirtualNoValidate(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 uiSize, + const IMG_CHAR *pszFileName, + IMG_UINT32 uiFileOffset, + IMG_UINT32 uiPDumpFlags); + +#endif /* PDUMP */ + +#endif /* RGXPDUMP_COMMON_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.c index 7b84d61b36ce..e96091ec8b23 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.c @@ -44,32 +44,19 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxpdvfs.h" #include "rgxfwutils.h" +#include "rgxpower.h" #include "rgx_options.h" #include "rgxtimecorr.h" #define USEC_TO_MSEC 1000 -static inline IMG_BOOL _PDVFSEnabled(void) -{ - PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData(); - - if (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions & - psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions & - OPTIONS_PDVFS_EN) - { - return IMG_TRUE; - } - - return IMG_FALSE; -} - PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint) { RGXFWIF_KCCB_CMD sGPCCBCmd; PVRSRV_ERROR eError; IMG_UINT32 ui32CmdKCCBSlot; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); if (!_PDVFSEnabled()) { @@ -82,7 +69,7 @@ PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui sGPCCBCmd.uCmdData.sPDVFSMaxFreqData.ui32MaxOPPPoint = ui32MaxOPPPoint; /* Submit command to the firmware. */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sGPCCBCmd, @@ -93,7 +80,7 @@ PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return eError; } @@ -104,7 +91,7 @@ PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui PVRSRV_ERROR eError; IMG_UINT32 ui32CmdKCCBSlot; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); if (!_PDVFSEnabled()) { @@ -117,7 +104,7 @@ PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui sGPCCBCmd.uCmdData.sPDVFSMinFreqData.ui32MinOPPPoint = ui32MinOPPPoint; /* Submit command to the firmware. */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sGPCCBCmd, @@ -128,136 +115,11 @@ PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return eError; } - -#if (PDVFS_COM == PDVFS_COM_HOST) -/*************************************************************************/ /*! -@Function PDVFSProcessCoreClkChangeRequest -@Description Processes a core clock rate change request. -@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. -@Input ui32CoreClockRate New core clock rate. -@Return PVRSRV_ERROR. 
-*/ /**************************************************************************/ -PVRSRV_ERROR PDVFSProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate) -{ - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig; - IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &psDevConfig->sDVFS.sDVFSDeviceCfg; - RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo; - IMG_UINT32 ui32CoreClockRateCurrent = psRGXTimingInfo->ui32CoreClockSpeed; - const IMG_OPP *psOpp = NULL; - IMG_UINT32 ui32Index; - PVRSRV_ERROR eError; - - if (!_PDVFSEnabled()) - { - /* No error message to avoid excessive messages */ - return PVRSRV_OK; - } - - PVR_DPF((PVR_DBG_MESSAGE, "Core clock rate = %u", ui32CoreClockRate)); - - /* Find the matching OPP (Exact). */ - for (ui32Index = 0; ui32Index < psDVFSDeviceCfg->ui32OPPTableSize; ui32Index++) - { - if (ui32CoreClockRate == psDVFSDeviceCfg->pasOPPTable[ui32Index].ui32Freq) - { - psOpp = &psDVFSDeviceCfg->pasOPPTable[ui32Index]; - break; - } - } - - if (! psOpp) - { - PVR_DPF((PVR_DBG_ERROR, "Frequency not present in OPP table - %u", ui32CoreClockRate)); - return PVRSRV_ERROR_INVALID_PARAMS; - } - - eError = PVRSRVDevicePreClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange failed")); - return eError; - } - - psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; - - /* Increasing frequency, change voltage first */ - if (ui32CoreClockRate > ui32CoreClockRateCurrent) - { - psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt); - } - - psDVFSDeviceCfg->pfnSetFrequency(ui32CoreClockRate); - - /* Decreasing frequency, change frequency first */ - if (ui32CoreClockRate < ui32CoreClockRateCurrent) - { - psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt); - } - - PVRSRVDevicePostClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL); - - return PVRSRV_OK; -} -#else -/*************************************************************************/ /*! -@Function PDVFSProcessCoreClkChangeNotification -@Description Processes a core clock rate change notification. -@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. -@Input ui32CoreClockRate New core clock rate. -@Return PVRSRV_ERROR. 
-*/ /**************************************************************************/ -PVRSRV_ERROR PDVFSProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate) -{ - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig; - RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo; - PVRSRV_DEV_POWER_STATE ePowerState; - PVRSRV_ERROR eError; - - eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", - __func__, PVRSRVGetErrorString(eError))); - return eError; - } - - eError = PVRSRVGetDevicePowerState(psDevInfo->psDeviceNode, &ePowerState); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire power state (%s)", - __func__, PVRSRVGetErrorString(eError))); - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - return eError; - } - - /* Guest drivers do not initialize psRGXFWIfFwSysData */ - if ((ePowerState != PVRSRV_DEV_POWER_STATE_OFF) - && ((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF))) - { - /* Update GPU frequency and timer correlation related data */ - RGXTimeCorrEnd(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS); - psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; - RGXTimeCorrBegin(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS); - } - else - { - psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; - } - - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - - return PVRSRV_OK; -} -#endif - - #if defined(RGXFW_META_SUPPORT_2ND_THREAD) /*************************************************************************/ /*! @Function RGXPDVFSCheckCoreClkRateChange @@ -278,7 +140,7 @@ void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo) if (ui32CoreClkRate != 0 && psDevInfo->ui32CoreClkRateSnapshot != ui32CoreClkRate) { psDevInfo->ui32CoreClkRateSnapshot = ui32CoreClkRate; - PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, ui32CoreClkRate); + RGX_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, ui32CoreClkRate); } } #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.h index 13a94b5a031b..f2bec1184077 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpdvfs.h @@ -47,20 +47,28 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "img_types.h" #include "rgxdevice.h" +#include "rgx_options.h" +#include "pvrsrv.h" +static inline IMG_BOOL _PDVFSEnabled(void) +{ + PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData(); + + if (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions & + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions & + OPTIONS_PDVFS_EN) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint); PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MinOPPPoint); -#if (PDVFS_COM == PDVFS_COM_HOST) -PVRSRV_ERROR PDVFSProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate); -#define PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk) PDVFSProcessCoreClkChangeRequest(devinfo, clk) -#else -PVRSRV_ERROR PDVFSProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate); -#define PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk) PDVFSProcessCoreClkChangeNotification(devinfo, clk) -#endif - #if defined(RGXFW_META_SUPPORT_2ND_THREAD) void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo); #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpower.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpower.c similarity index 51% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpower.c rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpower.c index a8101d0aca87..7f77edefe558 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpower.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpower.c @@ -51,10 +51,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxinit.h" #include "rgx_fwif_km.h" #include "rgxfwutils.h" +#include "rgxfwriscv.h" #include "pdump_km.h" #include "pvr_debug.h" #include "osfunc.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "devicemem.h" #include "devicemem_pdump.h" #include "rgxtimecorr.h" @@ -71,8 +72,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#if defined(SUPPORT_LINUX_DVFS) #include "pvr_dvfs_device.h" #endif -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -#include "oskm_apphint.h" + +#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) +#include "pvr_gpufreq.h" +#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) */ + +#if defined(SUPPORT_PDVFS) && (PDVFS_COM == PDVFS_COM_HOST) +#include "rgxpdvfs.h" #endif static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) @@ -96,47 +102,68 @@ static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) { - RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; - IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OS]; - IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OS][RGXFWIF_GPU_UTIL_STATE_NUM]; + RGXFWIF_GPU_UTIL_FW *psUtilFW; IMG_UINT64 ui64LastPeriod; IMG_UINT64 ui64LastState; + IMG_UINT64 ui64LastReducedState; IMG_UINT64 ui64LastTime; IMG_UINT64 ui64TimeNow; - RGXFWIF_DM eDM; + IMG_UINT32 ui32DriverID; + IMG_UINT64 ui64DMOSStatsCounter; - psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; - paui64DMOSLastWord = &psUtilFWCb->aaui64DMOSLastWord[0]; - paaui64DMOSStatsCounters = &psUtilFWCb->aaaui64DMOSStatsCounters[0]; + psUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFW, INVALIDATE); OSLockAcquire(psDevInfo->hGPUUtilLock); ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode)); /* Update counters to account for the time since the last update */ - ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord); - ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64GpuLastWord); + ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFW->ui64GpuLastWord); + ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFW->ui64GpuLastWord); ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); - psUtilFWCb->aui64GpuStatsCounters[ui64LastState] += ui64LastPeriod; + psUtilFW->aui64GpuStatsCounters[ui64LastState] += ui64LastPeriod; /* Update state and time of the latest update */ - psUtilFWCb->ui64GpuLastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); + psUtilFW->ui64GpuLastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); - for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) + /* convert last period into the same units as used by fw */ + ui64TimeNow = ui64TimeNow >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT; + + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - IMG_UINT32 ui32OSid; + RGXFWIF_GPU_STATS *psStats = &psUtilFW->sStats[ui32DriverID]; + RGXFWIF_DM eDM; - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + for (eDM = 0; eDM < RGXFWIF_GPU_UTIL_DM_MAX; eDM++) { - ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32OSid]); - ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32OSid]); + ui64LastState = (IMG_UINT64)RGXFWIF_GPU_UTIL_GET_STATE32(psStats->aui32DMOSLastWord[eDM]); + ui64LastTime = (IMG_UINT64)RGXFWIF_GPU_UTIL_GET_TIME32(psStats->aui32DMOSLastWord[eDM]) + + ((IMG_UINT64)psStats->aui32DMOSLastWordWrap[eDM] << 32); ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); - paaui64DMOSStatsCounters[eDM][ui32OSid][ui64LastState] += ui64LastPeriod; + /* for states statistics per DM per driver we only care about the time in Active state, + so we "combine" other states (Idle and Blocked) together */ + ui64LastReducedState = (ui64LastState == RGXFWIF_GPU_UTIL_STATE_ACTIVE) ? 
+ RGXFWIF_GPU_UTIL_STATE_ACTIVE : RGXFWIF_GPU_UTIL_STATE_INACTIVE; + ui64DMOSStatsCounter = (IMG_UINT64)psStats->aaui32DMOSStatsCounters[eDM][ui64LastReducedState] + ui64LastPeriod; + psStats->aaui32DMOSStatsCounters[eDM][ui64LastReducedState] = (IMG_UINT32)(ui64DMOSStatsCounter & IMG_UINT32_MAX); + if (ui64DMOSStatsCounter > IMG_UINT32_MAX) + { + psStats->aaui32DMOSCountersWrap[eDM][ui64LastReducedState] += (IMG_UINT32)(ui64DMOSStatsCounter >> 32); + } /* Update state and time of the latest update */ - paui64DMOSLastWord[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); + psStats->aui32DMOSLastWord[eDM] = RGXFWIF_GPU_UTIL_MAKE_WORD32((ui64TimeNow & (IMG_UINT64)IMG_UINT32_MAX), ui64LastState); + if (ui64TimeNow > IMG_UINT32_MAX) + { + if (psStats->aui32DMOSLastWordWrap[eDM] != (IMG_UINT32)(ui64TimeNow >> 32)) + { + psStats->aui32DMOSLastWordWrap[eDM] = (IMG_UINT32)(ui64TimeNow >> 32); + } + } } } + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFW, FLUSH); OSLockRelease(psDevInfo->hGPUUtilLock); } @@ -144,180 +171,262 @@ static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) - PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); - if (psDevConfig->pfnTDRGXStop == NULL) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (psDeviceNode->psDevConfig->pfnTDRGXStop == NULL) { PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); return PVRSRV_ERROR_NOT_IMPLEMENTED; } - eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); + psDevInfo->bRGXPowered = IMG_FALSE; + eError = psDeviceNode->psDevConfig->pfnTDRGXStop(psDeviceNode->psDevConfig->hSysData); #else - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - eError = RGXStop(&psDevInfo->sLayerParams); #endif return eError; } -/* - RGXPrePowerState -*/ -PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) +/*************************************************************************/ /*! +@Function RGXSendPowerOffKick +@Description Send a KCCB kick to power off the GPU FW. This function will wait + for completion of the command before exiting. + +@Input psDeviceNode The device node struct associated with the GPU. +@Input bForce A boolean indicating if the power off command + should be forced. + +@Return Failure code if the virtual address is invalid. 
+*/ /**************************************************************************/ +static PVRSRV_ERROR RGXSendPowerOffKick(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bForce) { + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_KCCB_CMD sPowCmd; + IMG_UINT32 ui32CmdKCCBSlot; PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - if ((eNewPowerState != eCurrentPowerState) && - (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + + psDevInfo = psDeviceNode->pvDevice; + + /* Send the Power off request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = bForce; + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_KCCB_CMD sPowCmd; - IMG_UINT32 ui32CmdKCCBSlot; + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", + __func__)); + return eError; + } - /* Send the Power off request to the FW */ - sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; - sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; - sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED); + /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies + * on the EventObject which is signalled in this MISR */ + return RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); +} - eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", - __func__)); - return eError; - } +/*************************************************************************/ /*! +@Function RGXFinalisePowerOff +@Description Finalises the GPU power transition to off. - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, - &sPowCmd, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", - __func__)); - return eError; - } +@Input psDeviceNode The device node struct associated with the GPU. - /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies - on the EventObject which is signalled in this MISR */ - eError = RGXPollForGPCommandCompletion(psDeviceNode, - psDevInfo->psPowSyncPrim->pui32LinAddr, - 0x1, 0xFFFFFFFF); +@Return Result code indicating the success or reason for failure. +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXFinalisePowerOff(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; - /* Check the Power state after the answer */ - if (eError == PVRSRV_OK) - { - /* Finally, de-initialise some registers. 
*/ - if (psFwSysData->ePowState == RGXFWIF_POW_OFF) - { -#if !defined(NO_HARDWARE) - IMG_UINT32 ui32idx; +#if !defined(NO_HARDWARE) && !defined(SUPPORT_SYNC_IRQ) + IMG_UINT32 ui32idx; +#endif - /* Driver takes the VZ Fw-KM connection down, preventing the - * firmware from submitting further interrupts */ - KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + psDevInfo = psDeviceNode->pvDevice; + +#if !defined(NO_HARDWARE) + /* Driver takes the VZ Fw-KM connection down, preventing the + * firmware from submitting further interrupts */ + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); + +#if defined(SUPPORT_SYNC_IRQ) + /* Wait for the pending IRQ handlers to complete. */ + OSSyncIRQ(psDeviceNode->psDevConfig->ui32IRQ); +#else #if defined(RGX_FW_IRQ_OS_COUNTERS) - ui32idx = RGXFW_HOST_OS; + ui32idx = RGXFW_HOST_DRIVER_ID; #else - for_each_irq_cnt(ui32idx) + for_each_irq_cnt(ui32idx) #endif /* RGX_FW_IRQ_OS_COUNTERS */ - { - IMG_UINT32 ui32IrqCnt; - - get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); - - /* Wait for the pending FW processor to host interrupts to come back. */ - eError = PVRSRVPollForValueKM(psDeviceNode, - (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx], - ui32IrqCnt, - 0xffffffff, - POLL_FLAG_LOG_ERROR); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Wait for pending interrupts failed (DevID %u)." MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u", - __func__, - psDeviceNode->sDevId.ui32InternalID, - ui32idx, - psDevInfo->aui32SampleIRQCount[ui32idx], - ui32IrqCnt)); - - RGX_WaitForInterruptsTimeout(psDevInfo); - } - } + { + IMG_UINT32 ui32IrqCnt; + + get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); + + /* Wait for the pending FW processor to host interrupts to come back. */ + eError = PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx], + ui32IrqCnt, + 0xffffffff, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, + NULL); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Wait for pending interrupts failed (DevID %u)." MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u", + __func__, + psDeviceNode->sDevId.ui32InternalID, + ui32idx, + psDevInfo->aui32SampleIRQCount[ui32idx], + ui32IrqCnt)); + + RGX_WaitForInterruptsTimeout(psDevInfo); +#if !defined(RGX_FW_IRQ_OS_COUNTERS) + break; +#endif + } + } +#endif /* SUPPORT_SYNC_IRQ */ #endif /* NO_HARDWARE */ - /* Update GPU frequency and timer correlation related data */ - RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); - /* Update GPU state counters */ - _RGXUpdateGPUUtilStats(psDevInfo); + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); #if defined(SUPPORT_LINUX_DVFS) - eError = SuspendDVFS(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); - return eError; - } + eError = SuspendDVFS(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); + return eError; + } #endif - psDevInfo->bRGXPowered = IMG_FALSE; + eError = RGXDoStop(psDeviceNode); + if (eError != PVRSRV_OK) + { + /* Power down failures are treated as successful since the power was removed but logged. 
*/ + PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + psDevInfo->ui32ActivePMReqNonIdle++; + eError = PVRSRV_OK; + } - eError = RGXDoStop(psDeviceNode); - if (eError != PVRSRV_OK) - { - /* Power down failures are treated as successful since the power was removed but logged. */ - PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", - __func__, PVRSRVGetErrorString(eError))); - psDevInfo->ui32ActivePMReqNonIdle++; - eError = PVRSRV_OK; - } - } - else - { - /* the sync was updated but the pow state isn't off -> the FW denied the transition */ - eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; - - if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)) - { /* It is an error for a forced request to be denied */ - PVR_DPF((PVR_DBG_ERROR, - "%s: Failure to power off during a forced power off. FW: %d", - __func__, psFwSysData->ePowState)); - } - } + return eError; +} + +/*************************************************************************/ /*! +@Function RGXPrePowerState +@Description Initial step for setting power state, to be followed by + RGXPostPowerState. + +@Input psDeviceNode The device node struct associated with the GPU. +@Input eNewPowerState The power state the GPU is to transition to. +@Input eCurrentPowerState The current power state of the GPU. +@Input ePwrFlags Flags indicating the behaviour of the transition. + +@Return Result code indicating the success or reason for failure. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPrePowerState(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_POWER_FLAGS ePwrFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + const RGXFWIF_SYSDATA *psFwSysData; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + IMG_BOOL bDeviceOk; +#endif + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psDevInfo != NULL); + + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if ((eNewPowerState == eCurrentPowerState) || + (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)) + { + return PVRSRV_OK; + } + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + bDeviceOk = (OSAtomicRead(&psDeviceNode->eHealthStatus) == PVRSRV_DEVICE_HEALTH_STATUS_OK); + if (bDeviceOk) +#endif + { + IMG_BOOL bForce = IMG_FALSE; + + if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)) + { + bForce = IMG_TRUE; } - else if (eError == PVRSRV_ERROR_TIMEOUT) + + eError = RGXSendPowerOffKick(psDeviceNode, bForce); + if (eError == PVRSRV_ERROR_TIMEOUT) { /* timeout waiting for the FW to ack the request: return timeout */ PVR_DPF((PVR_DBG_WARNING, - "%s: Timeout waiting for powoff ack from the FW", - __func__)); + "%s: Timeout waiting for powoff ack from the FW", + __func__)); + return eError; } - else + else if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Error waiting for powoff ack from the FW (%s)", - __func__, PVRSRVGetErrorString(eError))); - eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; + "%s: Error waiting for powoff ack from the FW (%s)", + __func__, PVRSRVGetErrorString(eError))); + return PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; } } - return eError; + /* Check the Power state after the answer */ + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, INVALIDATE); + if ((psFwSysData->ePowState != RGXFWIF_POW_OFF) +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + && (bDeviceOk) +#endif + ) + { + if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)) + { /* It is an 
error for a forced request to be denied */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failure to power off during a forced power off. FW: %d", + __func__, psFwSysData->ePowState)); + } + + /* the sync was updated but the pow state isn't off -> the FW denied the transition */ + return PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + } + + return RGXFinalisePowerOff(psDeviceNode); } #if defined(SUPPORT_AUTOVZ) @@ -327,22 +436,26 @@ static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNo PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT; IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS); - LOOP_UNTIL_TIMEOUT(ui32FwTimeout) + LOOP_UNTIL_TIMEOUT_US(ui32FwTimeout) { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; IMG_BOOL bGuestOnline = IMG_FALSE; - for (ui32OSid = RGXFW_GUEST_OSID_START; - ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror, + INVALIDATE); + + for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START; + ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; ui32DriverID++) { RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) - psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState; + psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState; if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) || - (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING)) + (eGuestState == RGXFW_CONNECTION_FW_GRACEFUL_OFFLOAD) || + (eGuestState == RGXFW_CONNECTION_FW_FORCED_OFFLOAD)) { bGuestOnline = IMG_TRUE; - PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid)); + PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32DriverID)); } } @@ -370,7 +483,7 @@ static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNo } OSSleepms(10); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode)) { @@ -385,13 +498,12 @@ static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNo /* RGXVzPrePowerState */ -PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXVzPrePowerState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags) { PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); @@ -399,14 +511,29 @@ PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, { /* powering down */ #if defined(SUPPORT_AUTOVZ) - if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode) && (!psDeviceNode->bAutoVzFwIsUp || psDeviceNode->bAutoVzAllowGPUPowerdown)) { + if (psDeviceNode->bAutoVzFwIsUp) + { + /* bAutoVzAllowGPUPowerdown must be TRUE here and + * bAutoVzFwIsUp=TRUE indicates that this is a powerdown event + * so send requests to the FW to disconnect all guest connections + * before GPU is powered down. */ + eError = RGXDisconnectAllGuests(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXDisconnectAllGuests"); + } + /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down. * Guest drivers regularly access hardware registers during runtime. If an attempt is made to * access a GPU register while the GPU is down, the SoC might lock up. 
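+	 * (The wait itself is _RGXWaitForGuestsToDisconnect() just below: each guest driver slot is
+	 * polled for up to 20 seconds, and the power-down proceeds only once no guest reports an
+	 * Active or Offloading (graceful or forced) connection state.)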
*/ eError = _RGXWaitForGuestsToDisconnect(psDeviceNode); PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect"); + if (psDeviceNode->bAutoVzAllowGPUPowerdown) + { + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; + } + /* Temporarily restore all power callbacks used by the driver to fully power down the GPU. * Under AutoVz, power transitions requests (e.g. on driver deinitialisation and unloading) * are generally ignored and the GPU power state is unaffected. Special power requests like @@ -415,69 +542,119 @@ PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, &RGXVzPrePowerState, &RGXVzPostPowerState, psDeviceNode->psDevConfig->pfnPrePowerState, psDeviceNode->psDevConfig->pfnPostPowerState, - &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest); + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, + &RGXCancelForcedIdleRequestAsync); } else { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)) { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, 0, RGXFWIF_OS_OFFLINE); PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); } } #endif PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + __func__, PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)? "GUEST" : "HOST", psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); } else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) { /* powering up */ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + __func__, PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)? "GUEST" : "HOST", psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); } - if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) + if (!(PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) || (psDeviceNode->bAutoVzFwIsUp))) { /* call regular device power function */ - eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); + eError = RGXPrePowerState(psDeviceNode, eNewPowerState, eCurrentPowerState, ePwrFlags); } return eError; } +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +static PVRSRV_ERROR RGXVzWaitFirmwareReady(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + PVR_LOG(("%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); + } + + LOOP_UNTIL_TIMEOUT_US(RGX_VZ_CONNECTION_TIMEOUT_US) + { + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + if (KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + PVR_LOG(("%s: Firmware Connection is Ready. 
Initialisation proceeding.", __func__)); + break; + } + else + { + OSSleepms(10); + } + } END_LOOP_UNTIL_TIMEOUT_US(); + + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Ready state.", __func__)); + return PVRSRV_ERROR_TIMEOUT; + } + + return PVRSRV_OK; +} +#endif + /* RGXVzPostPowerState */ -PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXVzPostPowerState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags) { PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); - if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) + if (!(PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) || (psDeviceNode->bAutoVzFwIsUp))) { + if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) + { + KM_SET_OS_CONNECTION(READY, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); + } /* call regular device power function */ - eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); + eError = RGXPostPowerState(psDeviceNode, eNewPowerState, eCurrentPowerState, ePwrFlags); + } + else + { + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) { /* powering down */ - PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError); + if (psDeviceNode->bAutoVzFwIsUp) + { + PVR_LOG(("%s: AutoVz Fw active, power not changed", __func__)); + return eError; + } PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + __func__, PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)? "GUEST" : "HOST", psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); #if !defined(SUPPORT_AUTOVZ_HW_REGS) @@ -485,9 +662,10 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, * in hardware scratch registers, they will be cleared on power down. When using shared * memory the connection data must be explicitly cleared by the driver. */ OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfConnectionCtl, FLUSH); #endif /* defined(SUPPORT_AUTOVZ) && !defined(SUPPORT_AUTOVZ_HW_REGS) */ - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) || (psDeviceNode->bAutoVzFwIsUp)) { #if defined(SUPPORT_AUTOVZ) /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState. 
@@ -497,6 +675,7 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, { /* Take the VZ connection down to prevent firmware from submitting further interrupts */ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } /* Power transition callbacks were not executed, update RGXPowered flag here */ psDevInfo->bRGXPowered = IMG_FALSE; @@ -506,12 +685,15 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, { /* powering up */ IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS); - volatile IMG_BOOL *pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated; + volatile IMG_BOOL *pbUpdatedFlag; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, + INVALIDATE); + pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated; PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + __func__, PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)? "GUEST" : "HOST", psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */ psDevInfo->bRGXPowered = IMG_TRUE; @@ -519,40 +701,15 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, #if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) /* Guest drivers expect the firmware to have set its end of the * connection to Ready state by now. */ - if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); - } - - LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US) - { - if (KM_FW_CONNECTION_IS(READY, psDevInfo)) - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__)); - break; - } - else - { - OSSleepms(10); - } - } END_LOOP_UNTIL_TIMEOUT(); - - if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Ready state.", __func__)); - return PVRSRV_ERROR_TIMEOUT; - } + eError = RGXVzWaitFirmwareReady(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXVzWaitFirmwareReady()"); #endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */ - /* Guests can only access the register holding the connection states, - * after the GPU is confirmed to be powered up */ - KM_SET_OS_CONNECTION(READY, psDevInfo); - OSWriteDeviceMem32WithWMB(pbUpdatedFlag, IMG_FALSE); /* Kick an initial dummy command to make the firmware initialise all * its internal guest OS data structures and compatibility information. - * Use the lower-level RGXSendCommandAndGetKCCBSlot() for the job, to make + * Use the lower-level RGXSendCommand() for the job, to make * sure only 1 KCCB command is issued to the firmware. * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with * a pre-kick cache command which can interfere with the FW-KM init handshake. 
*/ @@ -560,31 +717,52 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, RGXFWIF_KCCB_CMD sCmpKCCBCmd; sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS, NULL); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot()"); + KM_SET_OS_CONNECTION(READY, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); + + eError = RGXSendCommand(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommand()"); } } else { - KM_SET_OS_CONNECTION(READY, psDevInfo); - +#if defined(SUPPORT_AUTOVZ) /* Disable power callbacks that should not be run on virtualised drivers after the GPU * is fully initialised: system layer pre/post functions and driver idle requests. * The original device RGX Pre/Post functions are called from this Vz wrapper. */ PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, &RGXVzPrePowerState, &RGXVzPostPowerState, - NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL); -#if defined(SUPPORT_AUTOVZ) - /* During first-time boot the flag is set here, while subsequent reboots will already - * have set it earlier in RGXInit. Set to true from this point onwards in any case. */ - psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + /* AutoVz Host driver reconnecting to running Firmware */ + if (psDeviceNode->bAutoVzFwIsUp) + { + /* Firmware already running, send a KCCB command to establish the new connection */ + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; + + eError = RGXVzWaitFirmwareReady(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXVzWaitFirmwareReady()"); + + KM_SET_OS_CONNECTION(READY, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); + + eError = RGXSendCommand(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommand()"); + } + else + { + /* During first-time boot the flag is set here, while subsequent reboots will already + * have set it earlier in RGXInit. Set to true from this point on. */ + psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + } #endif } /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */ - LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US) + LOOP_UNTIL_TIMEOUT_US(RGX_VZ_CONNECTION_TIMEOUT_US) { + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) { PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__)); @@ -595,27 +773,32 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__)); OSSleepms(10); } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); if (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) { PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Active state.", __func__)); return PVRSRV_ERROR_TIMEOUT; } - /* poll on the Firmware supplying the compatibility data */ - LOOP_UNTIL_TIMEOUT(ui32FwTimeout) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - if (*pbUpdatedFlag) + /* poll on the Firmware supplying the compatibility data */ + LOOP_UNTIL_TIMEOUT_US(ui32FwTimeout) { - break; - } - OSSleepms(10); - } END_LOOP_UNTIL_TIMEOUT(); + if (*pbUpdatedFlag) + { + break; + } + OSSleepms(10); + } END_LOOP_UNTIL_TIMEOUT_US(); - PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware does not respond with compatibility data. 
", PVRSRV_ERROR_TIMEOUT); + PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware does not respond with compatibility data.", PVRSRV_ERROR_TIMEOUT); + } KM_SET_OS_CONNECTION(ACTIVE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } return PVRSRV_OK; @@ -626,22 +809,14 @@ static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) { FW_BOOT_STAGE eStage; -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { /* Boot stage temporarily stored to the register below */ eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_FW_BOOT_STAGE_REGISTER); } - else -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); - } - else -#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { IMG_BYTE *pbBootData; @@ -677,6 +852,11 @@ static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); } } +#endif + else + { + eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); + } PVR_LOG(("%s: FW reached boot stage %i/%i.", __func__, eStage, FW_BOOT_INIT_DONE)); @@ -686,6 +866,7 @@ static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; @@ -697,82 +878,19 @@ static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) } eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); -#else - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + if (eError == PVRSRV_OK) + { + psDevInfo->bRGXPowered = IMG_TRUE; + } +#else eError = RGXStart(&psDevInfo->sLayerParams); #endif return eError; } - -#if defined(NO_HARDWARE) && defined(PDUMP) - -#if 0 -#include "emu_cr_defs.h" -#else -#define EMU_CR_SYSTEM_IRQ_STATUS (0x00E0U) -/* IRQ is officially defined [8 .. 0] but here we split out the old deprecated single irq. */ -#define EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE01)) -#define EMU_CR_SYSTEM_IRQ_STATUS_OLD_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE)) -#endif - -static PVRSRV_ERROR -_ValidateIrqs(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - IMG_UINT32 ui32OSid; - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; - - /* Check if the Validation IRQ flag is set */ - if ((psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) == 0) - { - return PVRSRV_OK; - } - - PDUMPIF(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Poll for TB irq status to be set (irqs signalled)..."); - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_TB_PDUMPREG_NAME, - EMU_CR_SYSTEM_IRQ_STATUS, - ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK, - ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "... 
and then clear them"); - for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++) - { - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_IRQ_OS0_EVENT_CLEAR + ui32OSid * 0x10000, - RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL, - ui32PDumpFlags); - } - - PDUMPFI(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags); - - /* Poll on all the interrupt status registers for all OSes */ - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Validate Interrupt lines."); - - for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++) - { - PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - RGX_CR_IRQ_OS0_EVENT_STATUS + ui32OSid * 0x10000, - 0x0, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - } - - return PVRSRV_OK; -} -#endif /* defined(NO_HARDWARE) && defined(PDUMP) */ - -#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) +#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) /* * To validate the MTS unit we do the following: * - Immediately after firmware loading for each OSID @@ -782,7 +900,7 @@ _ValidateIrqs(PVRSRV_RGXDEV_INFO *psDevInfo) * - FW clears the memory location if OSid matches * - Host checks that memory location is cleared * - * See firmware/devices/rgx/rgxfw_bg.c + * See firmware/rgxfw_bg.c */ static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit, @@ -803,10 +921,10 @@ static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *ps ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS); - if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS) + if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OSIDS) { PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); - PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped)); + PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OSIDS, ui32OsRegBanksMapped)); PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); } @@ -826,6 +944,7 @@ static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *ps /* Force a read-back to memory to avoid posted writes on certain buses */ OSWriteMemoryBarrier(&psFwSysInit->ui32OSKickTest); + RGXFwSharedMemCacheOpValue(psFwSysInit->ui32OSKickTest, FLUSH); /* kick register */ ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); @@ -833,7 +952,6 @@ static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *ps ui32OSid, ui32ScheduleRegister)); OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType); - OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister); #if defined(PDUMP) PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid); @@ -849,12 +967,16 @@ static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *ps PDUMP_FLAGS_CONTINUOUS); #endif +#if !defined(NO_HARDWARE) + OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister); + /* Wait test enable bit to be unset */ if (PVRSRVPollForValueKM(psDeviceNode, - (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest, + (volatile IMG_UINT32 __iomem *)&psFwSysInit->ui32OSKickTest, 0, RGXFWIF_KICK_TEST_ENABLED_BIT, - POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR | 
POLL_FLAG_DEBUG_DUMP, + RGXFwSharedMemCacheOpExecPfn) != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", ui32OSid, @@ -873,261 +995,155 @@ static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *ps } PVR_DPF((PVR_DBG_MESSAGE, " PASS")); +#endif } PVR_LOG(("MTS passed sideband tests")); return PVRSRV_OK; } -#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */ - -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -#define SCRATCH_VALUE (0x12345678U) - -static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - void *pvAppHintState = NULL; - const IMG_BOOL bDefaultFalse = IMG_FALSE; - IMG_BOOL bRunRiscvDmiTest; - - IMG_UINT32 *pui32FWCode = NULL; - PVRSRV_ERROR eError; - - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest, - &bDefaultFalse, &bRunRiscvDmiTest); - OSFreeKMAppHintState(pvAppHintState); - - if (bRunRiscvDmiTest == IMG_FALSE) - { - return; - } - - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Error acquiring FW code memory pointer (%s)", - __func__, - PVRSRVGetErrorString(eError))); - } - - PDumpIfKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN"); - - RGXRiscvHalt(psDevInfo); - - /* - * Test RISC-V register reads/writes. - * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers - * via debug module. - */ - - /* Write RISC-V mscratch register */ - RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); - /* Read RISC-V misa register (compare against default standard value) */ - RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MISA_ADDR, RGXRISCVFW_MISA_VALUE); - /* Read RISC-V mscratch register (compare against previously written value) */ - RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); - - /* - * Test RISC-V memory reads/writes. - * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module - * (from RISC-V point of view). - */ - - if (pui32FWCode != NULL) - { - IMG_UINT32 ui32Tmp; - - /* Acquire pointer to FW code (bootloader) */ - pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32); - /* Save FW code at address (bootloader) */ - ui32Tmp = *pui32FWCode; - - /* Write FW code at address (bootloader) */ - RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); - /* Read FW code at address (bootloader + 4) (compare against value read from Host) */ - RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE + 4, *(pui32FWCode + 1)); - /* Read FW code at address (bootloader) (compare against previously written value) */ - RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); - /* Restore FW code at address (bootloader) */ - RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, ui32Tmp); - - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); - } - - /* - * Test GPU register reads/writes. - * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module - * (from RISC-V point of view). - * Note that system memory and GPU register accesses both use the same - * debug module interface, targeting different address ranges. 
- */ +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION_MTS) */ - /* Write SCRATCH0 from the Host */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, - SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS); - /* Read SCRATCH0 */ - RGXRiscvPollMem(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE); - /* Write SCRATCH0 */ - RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE); - /* Read SCRATCH0 from the Host */ - PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, - ~SCRATCH_VALUE, 0xFFFFFFFFU, - PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvResume(psDevInfo); - - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END"); - PDumpFiKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); -} -#endif /* RGXPostPowerState */ -PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPostPowerState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags) { - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; - if ((eNewPowerState != eCurrentPowerState) && - (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) + PVR_UNREFERENCED_PARAMETER(ePwrFlags); + + if ((eNewPowerState == eCurrentPowerState) || + (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)) { - PVRSRV_ERROR eError; + PDUMPCOMMENT(psDeviceNode, + "RGXPostPowerState: Current state: %d, New state: %d", + eCurrentPowerState, eNewPowerState); - if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) - { - /* Update timer correlation related data */ - RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); + return PVRSRV_OK; + } - /* Update GPU state counters */ - _RGXUpdateGPUUtilStats(psDevInfo); + /* Update timer correlation related data */ + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); - eError = RGXDoStart(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed")); - return eError; - } + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); - OSMemoryBarrier(NULL); + eError = RGXDoStart(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXDoStart", fail); + + OSMemoryBarrier(NULL); - /* - * Check whether the FW has started by polling on bFirmwareStarted flag - */ - if (PVRSRVPollForValueKM(psDeviceNode, - (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, - IMG_TRUE, - 0xFFFFFFFF, - POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); - eError = PVRSRV_ERROR_TIMEOUT; + + /* + * Check whether the FW has started by polling on bFirmwareStarted flag + */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, + INVALIDATE); + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, + IMG_TRUE, + 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, + RGXFwSharedMemCacheOpExecPfn) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); + eError = PVRSRV_ERROR_TIMEOUT; #if defined(TRACK_FW_BOOT) - RGXCheckFWBootStage(psDevInfo); + RGXCheckFWBootStage(psDevInfo); #endif - /* - * When bFirmwareStarted fails some info may be gained by doing 
the following - * debug dump but unfortunately it could be potentially dangerous if the reason - * for not booting is the GPU power is not ON. However, if we have reached this - * point the System Layer has returned without errors, we assume the GPU power - * is indeed ON. - */ - RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); - RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); - - return eError; - } + /* + * When bFirmwareStarted fails some info may be gained by doing the following + * debug dump but unfortunately it could be potentially dangerous if the reason + * for not booting is the GPU power is not ON. However, if we have reached this + * point the System Layer has returned without errors, we assume the GPU power + * is indeed ON. + */ + RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); + RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); + + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPollForValueKM(bFirmwareStarted)", fail); + } #if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, - offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), - IMG_TRUE, - 0xFFFFFFFFU, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", - eError)); - return eError; - } + PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), + IMG_TRUE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); -#if defined(NO_HARDWARE) && defined(PDUMP) - eError = _ValidateIrqs(psDevInfo); - if (eError != PVRSRV_OK) - { - return eError; - } -#endif -#endif + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", + eError)); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", fail); + } -#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) - eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); - if (eError != PVRSRV_OK) - { - return eError; - } -#endif +#endif /* defined(PDUMP) */ -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) - RGXRiscvDebugModuleTest(psDevInfo); +#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) + eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXVirtualisationPowerupSidebandTest", fail); #endif + #if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVSetFirmwareStartTime(psDeviceNode->psPowerDev, - psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp, + INVALIDATE); + PVRSRVSetFirmwareStartTime(psDeviceNode->psPowerDev, + psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp, + FLUSH); #endif - HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); - - psDevInfo->bRGXPowered = IMG_TRUE; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal, + INVALIDATE); + HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); #if defined(SUPPORT_LINUX_DVFS) - eError = 
ResumeDVFS(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS")); - return eError; - } + eError = ResumeDVFS(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "ResumeDVFS", fail); #endif - } - } PDUMPCOMMENT(psDeviceNode, "RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState); - return PVRSRV_OK; + return eError; + +fail: + psDevInfo->bRGXPowered = IMG_FALSE; + + return eError; } /* RGXPreClockSpeedChange */ -PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPreClockSpeedChange(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEV_POWER_STATE eCurrentPowerState) { PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + __maybe_unused const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - PVR_UNREFERENCED_PARAMETER(psRGXData); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz", psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, + INVALIDATE); + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && (psFwSysData->ePowState != RGXFWIF_POW_OFF)) { @@ -1141,22 +1157,24 @@ PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, /* RGXPostClockSpeedChange */ -PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPostClockSpeedChange(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEV_POWER_STATE eCurrentPowerState) { - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; PVRSRV_ERROR eError = PVRSRV_OK; IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); /* Update runtime configuration with the new value */ OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed, ui32NewClockSpeed); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed, FLUSH); + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, + INVALIDATE); if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && (psFwSysData->ePowState != RGXFWIF_POW_OFF)) { @@ -1184,6 +1202,11 @@ PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, return eError; } +#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) + GpuTraceFrequency(psDeviceNode->sDevId.ui32InternalID, + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed); +#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) */ + PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); } @@ -1192,51 +1215,71 @@ PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, } /*! 
- ****************************************************************************** +****************************************************************************** - @Function RGXDustCountChange + @Function RGXPowUnitsChange - @Description + @Description Change power units state - Does change of number of DUSTs - - @Input hDevHandle : RGX Device Node - @Input ui32NumberOfDusts : Number of DUSTs to make transition to + @Input psDeviceNode : RGX Device Node + @Input ui32PowUnits : On Rogue: Number of DUSTs to make transition to. + On Volcanic: Mask containing power state of SPUs. + Each bit corresponds to an SPU and value must be non-zero. @Return PVRSRV_ERROR : - ******************************************************************************/ -PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, - IMG_UINT32 ui32NumberOfDusts) -{ +******************************************************************************/ +PVRSRV_ERROR RGXPowUnitsChange(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PowUnits) - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; +{ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; - RGXFWIF_KCCB_CMD sDustCountChange; - IMG_UINT32 ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; + RGXFWIF_KCCB_CMD sPowUnitsChange; + IMG_UINT32 ui32AvailablePowUnits; IMG_UINT32 ui32CmdKCCBSlot; RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); + +#if defined(PVR_ARCH_VOLCANIC) + ui32AvailablePowUnits = psDevInfo->ui32AvailablePowUnitsMask; - if (ui32NumberOfDusts > ui32MaxAvailableDusts) + /** + * Validate the input. At-least one PU must be powered on and all requested + * PU's must be a subset of full PU mask. + */ + if ((ui32PowUnits == 0) || (ui32PowUnits & ~ui32AvailablePowUnits)) { - eError = PVRSRV_ERROR_INVALID_PARAMS; PVR_DPF((PVR_DBG_ERROR, - "%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u", + "%s: Invalid Power Units mask requested (0x%X). 
Value should be non-zero and sub-set of 0x%X mask", __func__, - ui32NumberOfDusts, - ui32MaxAvailableDusts, - eError)); - return eError; + ui32PowUnits, + ui32AvailablePowUnits)); + return PVRSRV_ERROR_INVALID_SPU_MASK; } +#else + ui32AvailablePowUnits = psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount; - psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts; - OSWriteMemoryBarrier(&psRuntimeCfg->ui32DefaultDustsNumInit); + if (ui32PowUnits > ui32AvailablePowUnits) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>", + __func__, + ui32PowUnits, + ui32AvailablePowUnits)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + psRuntimeCfg->ui32PowUnitsState = ui32PowUnits; + OSWriteMemoryBarrier(&psRuntimeCfg->ui32PowUnitsState); + RGXFwSharedMemCacheOpValue(psRuntimeCfg->ui32PowUnitsState, FLUSH); #if !defined(NO_HARDWARE) { const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, + INVALIDATE); if (psFwSysData->ePowState == RGXFWIF_POW_OFF) { @@ -1247,7 +1290,7 @@ PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, { eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; PVR_DPF((PVR_DBG_ERROR, - "%s: Attempt to change dust count when not IDLE", + "%s: Powered units state can not be changed when not IDLE", __func__)); return eError; } @@ -1262,25 +1305,34 @@ PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, return eError; } - sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW; - sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; - sDustCountChange.uCmdData.sPowData.uPowerReqData.ui32NumOfDusts = ui32NumberOfDusts; + sPowUnitsChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowUnitsChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; + sPowUnitsChange.uCmdData.sPowData.uPowerReqData.ui32PowUnits = ui32PowUnits; +#if defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) + sPowUnitsChange.uCmdData.sPowData.uPowerReqData.ui32RACUnits = 0; + + if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) >= 3) + { + sPowUnitsChange.uCmdData.sPowData.uPowerReqData.ui32RACUnits = + (1U << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; + } +#endif PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change Dust Count to %u", - ui32NumberOfDusts); + "Scheduling command to change power units state to 0x%X", + ui32PowUnits); eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, - &sDustCountChange, + &sPowUnitsChange, PDUMP_FLAGS_NONE, &ui32CmdKCCBSlot); if (eError != PVRSRV_OK) { PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change Dust Count failed. Error:%u", + "Scheduling command to change power units state. Error:%u", eError); PVR_DPF((PVR_DBG_ERROR, - "%s: Scheduling KCCB to change Dust Count failed. Error:%u", + "%s: Scheduling KCCB to change power units state. 
Error:%u", __func__, eError)); return eError; } @@ -1298,8 +1350,9 @@ PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, - "RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d", - psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + "%s: Poll for Kernel SyncPrim [0x%p] on DM %d", + __func__, psDevInfo->psPowSyncPrim->pui32LinAddr, + RGXFWIF_DM_GP); SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, 1, @@ -1314,24 +1367,20 @@ PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, /* @Function RGXAPMLatencyChange */ -PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, - IMG_UINT32 ui32ActivePMLatencyms, - IMG_BOOL bActivePMLatencyPersistant) +PVRSRV_ERROR RGXAPMLatencyChange(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant) { - - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; IMG_UINT32 ui32CmdKCCBSlot; PVRSRV_DEV_POWER_STATE ePowerState; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); - eError = PVRSRVPowerLock(psDeviceNode); - if (eError != PVRSRV_OK) + if (psRuntimeCfg == NULL) { - PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); - return eError; + return PVRSRV_ERROR_NOT_INITIALISED; } /* Update runtime configuration with the new values and ensure the @@ -1341,6 +1390,21 @@ PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; OSWriteMemoryBarrier(&psRuntimeCfg->bActivePMLatencyPersistant); + RGXFwSharedMemCacheOpValue(psRuntimeCfg->ui32ActivePMLatencyms, FLUSH); + RGXFwSharedMemCacheOpValue(psRuntimeCfg->bActivePMLatencyPersistant, FLUSH); + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF) + { + /* Power is off, APM latency will be read on next firmware boot */ + return PVRSRV_OK; + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock (%u)", + __func__, eError)); + return eError; + } eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); @@ -1361,9 +1425,11 @@ PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, if (eError != PVRSRV_OK) { PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change APM latency failed. Error:%u", - eError); - PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError)); + "Scheduling command to change APM latency failed. Error:%u", + eError); + PVR_DPF((PVR_DBG_ERROR, + "%s: Scheduling KCCB to change APM latency failed. 
Error:%u", + __func__, eError)); goto ErrorExit; } } @@ -1377,13 +1443,12 @@ PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, /* RGXActivePowerRequest */ -PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) +PVRSRV_ERROR RGXActivePowerRequest(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); psDevInfo->ui32ActivePMReqTotal++; @@ -1408,6 +1473,9 @@ PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) goto _RGXActivePowerRequest_PowerLock_failed; } + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, + INVALIDATE); + /* Check again for IDLE once we have the power lock */ if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) { @@ -1448,9 +1516,8 @@ PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) #define RGX_FORCED_IDLE_RETRY_COUNT 10 -PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted) +PVRSRV_ERROR RGXForcedIdleRequest(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_BOOL bDeviceOffPermitted) { - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGXFWIF_KCCB_CMD sPowCmd; PVRSRV_ERROR eError; @@ -1459,10 +1526,12 @@ PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPerm #if !defined(NO_HARDWARE) const RGXFWIF_SYSDATA *psFwSysData; #endif - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); #if !defined(NO_HARDWARE) psFwSysData = psDevInfo->psRGXFWIfFwSysData; + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, + INVALIDATE); /* Firmware already forced idle */ if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE) @@ -1473,6 +1542,8 @@ PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPerm /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */ if (psFwSysData->ePowState == RGXFWIF_POW_OFF) { + PVR_DPF((PVR_DBG_WARNING, "Firmware is powered OFF (bDeviceOffPermitted = %s)", + bDeviceOffPermitted ? "Yes" : "No")); return (bDeviceOffPermitted) ? 
PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; } #endif @@ -1541,8 +1612,12 @@ PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPerm #if !defined(NO_HARDWARE) /* Check the firmware state for idleness */ + RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, + INVALIDATE); if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) { + PVR_DPF((PVR_DBG_WARNING, "FW power state (%u) is not RGXFWIF_POW_FORCED_IDLE (%u)", + psFwSysData->ePowState, RGXFWIF_POW_FORCED_IDLE)); return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; } #endif @@ -1550,25 +1625,18 @@ PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPerm return PVRSRV_OK; } -/* - RGXCancelForcedIdleRequest -*/ -PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) +static PVRSRV_ERROR _RGXSendCancelForceIdleCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *ui32CmdKCCBSlot) { - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGXFWIF_KCCB_CMD sPowCmd; PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32CmdKCCBSlot; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", - __func__)); - goto ErrorExit; - } + PVR_LOG_GOTO_IF_ERROR(eError, + "SyncPrimSet: Failed to set Power sync prim", + ErrorExit); /* Send the IDLE request to the FW */ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; @@ -1582,8 +1650,7 @@ PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sPowCmd, PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - + ui32CmdKCCBSlot); if (eError != PVRSRV_OK) { PDUMPCOMMENT(psDeviceNode, @@ -1592,16 +1659,31 @@ PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) goto ErrorExit; } +ErrorExit: + return eError; +} +/* + RGXCancelForcedIdleRequest +*/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); + + eError = _RGXSendCancelForceIdleCommand(psDeviceNode, &ui32CmdKCCBSlot); + PVR_LOG_GOTO_IF_ERROR(eError, + "_RGXSendCancelForceIdleCommand", + ErrorExit); + /* Wait for the firmware to answer. */ eError = RGXPollForGPCommandCompletion(psDeviceNode, psDevInfo->psPowSyncPrim->pui32LinAddr, 1, 0xFFFFFFFF); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__)); - goto ErrorExit; - } + PVR_LOG_GOTO_IF_ERROR(eError, + "RGXPollForGPCommandCompletion: Timeout waiting for cancel idle request", + ErrorExit); #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, @@ -1618,58 +1700,153 @@ PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) return eError; ErrorExit: - PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state err: %u", + __func__, + eError)); return eError; } -/*! 
- ****************************************************************************** +PVRSRV_ERROR RGXCancelForcedIdleRequestAsync(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = _RGXSendCancelForceIdleCommand(psDeviceNode, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state err: %u", + __func__, + eError)); + } + + return eError; +} + +#if defined(SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY) +#if defined(SUPPORT_PDVFS) && (PDVFS_COM == PDVFS_COM_HOST) +/*************************************************************************/ /*! +@Function PDVFSProcessCoreClkChangeRequest +@Description Processes a core clock rate change request. +@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. +@Input ui32CoreClockRate New core clock rate. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &psDevConfig->sDVFS.sDVFSDeviceCfg; + RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo; + IMG_UINT32 ui32CoreClockRateCurrent = psRGXTimingInfo->ui32CoreClockSpeed; + const IMG_OPP *psOpp = NULL; + IMG_UINT32 ui32Index; + PVRSRV_ERROR eError; - @Function PVRSRVGetNextDustCount + if (!_PDVFSEnabled()) + { + /* No error message to avoid excessive messages */ + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "Core clock rate = %u", ui32CoreClockRate)); + + /* Find the matching OPP (Exact). */ + for (ui32Index = 0; ui32Index < psDVFSDeviceCfg->ui32OPPTableSize; ui32Index++) + { + if (ui32CoreClockRate == psDVFSDeviceCfg->pasOPPTable[ui32Index].ui32Freq) + { + psOpp = &psDVFSDeviceCfg->pasOPPTable[ui32Index]; + break; + } + } - @Description + if (! psOpp) + { + PVR_DPF((PVR_DBG_ERROR, "Frequency not present in OPP table - %u", ui32CoreClockRate)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = PVRSRVDevicePreClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange failed")); + return eError; + } - Calculate a sequence of dust counts to achieve full transition coverage. - We increment two counts of dusts and switch up and down between them. - It does contain a few redundant transitions. If two dust exist, the - output transitions should be as follows. + psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; - 0->1, 0<-1, 0->2, 0<-2, (0->1) - 1->1, 1->2, 1<-2, (1->2) - 2->2, (2->0), - 0->0. Repeat. + /* Increasing frequency, change voltage first */ + if (ui32CoreClockRate > ui32CoreClockRateCurrent) + { + psDVFSDeviceCfg->pfnSetVoltage(psDevConfig->hSysData, psOpp->ui32Volt); + } - Redundant transitions in brackets. 
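+	/* Ordering note: on a frequency increase the voltage has already been raised above, before the
+	 * frequency switch just below; on a decrease the frequency is dropped first and the voltage is
+	 * only lowered afterwards, so the core is never clocked faster than its supply voltage allows. */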
+ psDVFSDeviceCfg->pfnSetFrequency(psDevConfig->hSysData, ui32CoreClockRate); - @Input psDustReqState : Counter state used to calculate next dust count - @Input ui32DustCount : Number of dusts in the core + /* Decreasing frequency, change frequency first */ + if (ui32CoreClockRate < ui32CoreClockRateCurrent) + { + psDVFSDeviceCfg->pfnSetVoltage(psDevConfig->hSysData, psOpp->ui32Volt); + } - @Return PVRSRV_ERROR + PVRSRVDevicePostClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL); - ******************************************************************************/ -IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount) + return PVRSRV_OK; +} +#else +/*************************************************************************/ /*! +@Function PDVFSProcessCoreClkChangeNotification +@Description Processes a core clock rate change notification. +@Input psDevInfo A pointer to PVRSRV_RGXDEV_INFO. +@Input ui32CoreClockRate New core clock rate. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate) { - if (psDustReqState->bToggle) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig; + RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + + if (eError != PVRSRV_OK) { - psDustReqState->ui32DustCount2++; + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", + __func__, PVRSRVGetErrorString(eError))); + return eError; } - if (psDustReqState->ui32DustCount2 > ui32DustCount) + eError = PVRSRVGetDevicePowerState(psDevInfo->psDeviceNode, &ePowerState); + + if (eError != PVRSRV_OK) { - psDustReqState->ui32DustCount1++; - psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1; + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire power state (%s)", + __func__, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + return eError; } - if (psDustReqState->ui32DustCount1 > ui32DustCount) + /* Guest drivers do not initialize psRGXFWIfFwSysData */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, + INVALIDATE); + if ((ePowerState != PVRSRV_DEV_POWER_STATE_OFF) + && ((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF))) + { + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS); + psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; + RGXTimeCorrBegin(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS); + } + else { - psDustReqState->ui32DustCount1 = 0; - psDustReqState->ui32DustCount2 = 0; + psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate; } - psDustReqState->bToggle = !psDustReqState->bToggle; + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - return (psDustReqState->bToggle) ? 
psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2; + return PVRSRV_OK; } +#endif +#endif /* SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY */ + + /****************************************************************************** End of file (rgxpower.c) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpower.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpower.h similarity index 70% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpower.h rename to drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpower.h index a6cd3f2b2d10..206f44db6cba 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpower.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxpower.h @@ -59,14 +59,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. does necessary preparation before power state transition - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input eNewPowerState : New power state @Input eCurrentPowerState : Current power state @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPrePowerState(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags); @@ -80,14 +80,14 @@ PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, does necessary preparation after power state transition - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input eNewPowerState : New power state @Input eCurrentPowerState : Current power state @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPostPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags); @@ -101,14 +101,14 @@ PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, does necessary preparation before power state transition on a vz driver - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input eNewPowerState : New power state @Input eCurrentPowerState : Current power state @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXVzPrePowerState(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags); @@ -122,14 +122,14 @@ PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, does necessary preparation after power state transition on a vz driver - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input eNewPowerState : New power state @Input eCurrentPowerState : Current power state @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXVzPostPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEV_POWER_STATE eNewPowerState, PVRSRV_DEV_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags); @@ -143,13 +143,13 @@ PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, Does processing required before an RGX clock speed change. 
- @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input eCurrentPowerState : Power state of the device @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEV_POWER_STATE eCurrentPowerState); /*! @@ -161,31 +161,66 @@ PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, Does processing required after an RGX clock speed change. - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input eCurrentPowerState : Power state of the device @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXPostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEV_POWER_STATE eCurrentPowerState); +#if defined(SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY) +#if defined(SUPPORT_PDVFS) && (PDVFS_COM == PDVFS_COM_HOST) /*! ****************************************************************************** - @Function RGXDustCountChange + @Function RGXProcessCoreClkChangeRequest - @Description Change of number of DUSTs + @Input psDevInfo : RGX Device Info + @Input ui32CoreClockRate : New clock frequency to send to system layer. - @Input hDevHandle : RGX Device Node - @Input ui32NumberOfDusts : Number of DUSTs to make transition to + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate); +#define RGX_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk) RGXProcessCoreClkChangeRequest(devinfo, clk) + +#else +/*! +****************************************************************************** + + @Function RGXProcessCoreClkChangeNotification + + @Input psDevInfo : RGX Device Info + @Input ui32CoreClockRate : New clock frequency. @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, - IMG_UINT32 ui32NumberOfDusts); +PVRSRV_ERROR RGXProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate); +#define RGX_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk) RGXProcessCoreClkChangeNotification(devinfo, clk) +#endif +#endif /* SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY */ + +/*! +****************************************************************************** + + @Function RGXPowUnitsChange + + @Description Change power units state + + @Input psDeviceNode : RGX Device Node + @Input ui32PowUnits : On Rogue: Number of DUSTs to make transition to. + On Volcanic: Mask containing power state of SPUs. + Each bit corresponds to an SPU and value must be non-zero. + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPowUnitsChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 ui32PowUnits); /*! ****************************************************************************** @@ -200,14 +235,14 @@ PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, power management activity. If bPersistent is NOT set, APM latency will return back to system default on power up. 
- @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input ui32ActivePMLatencyms : Number of milliseconds to wait @Input bActivePMLatencyPersistant : Set to ensure new value is not reset @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, +PVRSRV_ERROR RGXAPMLatencyChange(PPVRSRV_DEVICE_NODE psDeviceNode, IMG_UINT32 ui32ActivePMLatencyms, IMG_BOOL bActivePMLatencyPersistant); @@ -218,12 +253,12 @@ PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, @Description Initiate a handshake with the FW to power off the GPU - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); +PVRSRV_ERROR RGXActivePowerRequest(PPVRSRV_DEVICE_NODE psDeviceNode); /*! ****************************************************************************** @@ -232,7 +267,7 @@ PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); @Description Initiate a handshake with the FW to idle the GPU - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Input bDeviceOffPermitted : Set to indicate device state being off is not erroneous. @@ -240,7 +275,8 @@ PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted); +PVRSRV_ERROR RGXForcedIdleRequest(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bDeviceOffPermitted); /*! ****************************************************************************** @@ -249,38 +285,27 @@ PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPerm @Description Send a request to cancel idle to the firmware. - @Input hDevHandle : RGX Device Node + @Input psDeviceNode : RGX Device Node @Return PVRSRV_ERROR : ******************************************************************************/ -PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle); +PVRSRV_ERROR RGXCancelForcedIdleRequest(PPVRSRV_DEVICE_NODE psDeviceNode); /*! ****************************************************************************** - @Function PVRSRVGetNextDustCount - - @Description - - Calculate a sequence of dust counts to achieve full transition coverage. - We increment two counts of dusts and switch up and down between them. - It does contain a few redundant transitions. If two dust exist, the - output transitions should be as follows. - - 0->1, 0<-1, 0->2, 0<-2, (0->1) - 1->1, 1->2, 1<-2, (1->2) - 2->2, (2->0), - 0->0. Repeat. + @Function RGXCancelForcedIdleRequestAsync - Redundant transitions in brackets. + @Description Send a request to cancel idle to the firmware. + Does not wait for response from FW. 
- @Input psDustReqState : Counter state used to calculate next dust count - @Input ui32DustCount : Number of dusts in the core + @Input psDeviceNode : RGX Device Node - @Return PVRSRV_ERROR + @Return PVRSRV_ERROR : ******************************************************************************/ -IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount); +PVRSRV_ERROR RGXCancelForcedIdleRequestAsync(PPVRSRV_DEVICE_NODE psDeviceNode); + #endif /* RGXPOWER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxregconfig.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxregconfig.c index ef39bea255eb..c2f681c70f12 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxregconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxregconfig.c @@ -57,7 +57,7 @@ PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, #if defined(SUPPORT_USER_REGISTER_CONFIGURATION) PVRSRV_ERROR eError = PVRSRV_OK; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegConfig; RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType; PVR_UNREFERENCED_PARAMETER(psDevConnection); @@ -83,6 +83,8 @@ PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, return eError; #else PVR_UNREFERENCED_PARAMETER(psDevConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui8RegCfgType); PVR_DPF((PVR_DBG_ERROR, "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", @@ -101,11 +103,11 @@ PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, PVRSRV_ERROR eError = PVRSRV_OK; RGXFWIF_KCCB_CMD sRegCfgCmd; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegConfig; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); OSLockAcquire(psRegCfg->hLock); @@ -154,6 +156,10 @@ PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, return eError; #else PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32RegAddr); + PVR_UNREFERENCED_PARAMETER(ui64RegValue); + PVR_UNREFERENCED_PARAMETER(ui64RegMask); PVR_DPF((PVR_DBG_ERROR, "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", @@ -169,11 +175,11 @@ PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, PVRSRV_ERROR eError = PVRSRV_OK; RGXFWIF_KCCB_CMD sRegCfgCmd; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegConfig; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); OSLockAcquire(psRegCfg->hLock); @@ -210,12 +216,14 @@ PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, return eError; #else + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_DPF((PVR_DBG_ERROR, "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", __func__)); - PVR_UNREFERENCED_PARAMETER(psConnection); - return PVRSRV_ERROR_FEATURE_DISABLED; #endif } @@ -227,11 +235,11 @@ PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, PVRSRV_ERROR eError = PVRSRV_OK; RGXFWIF_KCCB_CMD sRegCfgCmd; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegConfig; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); OSLockAcquire(psRegCfg->hLock); @@ -259,6 +267,7 @@ PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, return eError; #else PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_DPF((PVR_DBG_ERROR, "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", @@ -274,11 +283,11 @@ PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, PVRSRV_ERROR eError = PVRSRV_OK; RGXFWIF_KCCB_CMD sRegCfgCmd; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegConfig; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); OSLockAcquire(psRegCfg->hLock); @@ -305,10 +314,13 @@ PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, return eError; #else + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_DPF((PVR_DBG_ERROR, "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", __func__)); - PVR_UNREFERENCED_PARAMETER(psConnection); return PVRSRV_ERROR_FEATURE_DISABLED; #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.c index e29a4e3ab683..8eace4b16c81 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.c @@ -51,6 +51,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "physmem.h" #include "ri_server.h" #include "pvr_ricommon.h" +#include "pvrsrv.h" static void RGXShaderReadHeader(OS_FW_IMAGE *psShaderFW, RGX_SHADER_HEADER *psHeader) @@ -111,14 +112,12 @@ _GetShaderFileName(PVRSRV_DEVICE_NODE * psDeviceNode, PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; OSSNPrintf(pszShaderFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, - "%s." RGX_BVNC_STR_FMTSPEC, - RGX_SH_FILENAME, + RGX_SH_FILENAME "." RGX_BVNC_STR_FMTSPEC, psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); OSSNPrintf(pszShaderpFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, - "%s." RGX_BVNC_STRP_FMTSPEC, - RGX_SH_FILENAME, + RGX_SH_FILENAME "." 
RGX_BVNC_STRP_FMTSPEC, psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); } @@ -135,6 +134,9 @@ PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) IMG_CHAR aszShaderpFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; const IMG_CHAR *pszShaderFilenameStr = aszShaderFilenameStr; size_t uiNumBytes; + PVRSRV_DEVICE_NODE *psHostDevNode = PVRSRVGetPVRSRVData()->psHostMemDeviceNode; + size_t uiLog2PageSize = OSGetPageShift(); + IMG_DEVMEM_SIZE_T uiTQUSCMemSize; PVRSRV_ERROR eError; _GetShaderFileName(psDeviceNode, aszShaderFilenameStr, aszShaderpFilenameStr); @@ -170,17 +172,27 @@ PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) goto failed_firmware; } - ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; + ui32NumPages = (sHeader.ui32SizeFragment / IMG_PAGE2BYTES32(uiLog2PageSize)) + 1; + + uiTQUSCMemSize = ui32NumPages * IMG_PAGE2BYTES32(uiLog2PageSize); + + if (uiTQUSCMemSize > RGXFWIF_KM_USC_TQ_SHADER_CODE_MAX_SIZE_BYTES) + { + PVR_DPF((PVR_DBG_ERROR, "%s: TQ shaders out of space ("IMG_DEVMEM_SIZE_FMTSPEC" > 0x%X)", + __func__, uiTQUSCMemSize, RGXFWIF_KM_USC_TQ_SHADER_CODE_MAX_SIZE_BYTES)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + goto failed_firmware; + } PDUMPCOMMENT(psDeviceNode, "Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages); eError = PhysmemNewRamBackedPMR(NULL, psDeviceNode, - (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + uiTQUSCMemSize, 1, 1, &ui32MappingTable, - RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + uiLog2PageSize, PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT @@ -219,21 +231,25 @@ PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) goto failed_uscpmr; } - ui32NumPages = (sHeader.ui32SizeClientMem / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; + PDUMPCOMMENT(psDeviceNode, "Load TDM USC PMR Block (Size "IMG_SIZE_FMTSPECX")", RGXShaderUSCMemSize(psShaderFW)); + PMRPDumpLoadMem(psDevInfo->hTQUSCSharedMem, 0, RGXShaderUSCMemSize(psShaderFW), PDUMP_FLAGS_CONTINUOUS, false); + + ui32NumPages = (sHeader.ui32SizeClientMem / IMG_PAGE2BYTES32(uiLog2PageSize)) + 1; PDUMPCOMMENT(psDeviceNode, "Allocate TDM Client PMR Block (Pages %08X)", ui32NumPages); eError = PhysmemNewRamBackedPMR(NULL, - psDeviceNode, - (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + psHostDevNode, + (IMG_DEVMEM_SIZE_T)ui32NumPages * IMG_PAGE2BYTES32(uiLog2PageSize), 1, 1, &ui32MappingTable, - RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + uiLog2PageSize, PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT - | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER, + | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER + | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL), sizeof("tqclipmr"), "tqclipmr", PVR_SYS_ALLOC_PID, @@ -276,9 +292,9 @@ PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) return PVRSRV_OK; failed_clipmr: - PMRUnrefPMR(psDevInfo->hTQCLISharedMem); + (void) PMRUnrefPMR(psDevInfo->hTQCLISharedMem); failed_uscpmr: - PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); + (void) PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); failed_firmware: OSUnloadFirmware(psShaderFW); failed_init: @@ -287,15 +303,12 @@ PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) void PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE * psDeviceNode, - PMR ** ppsCLIPMRMem, - PMR ** ppsUSCPMRMem) + PMR ** ppsCLIPMRMem) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - 
PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); - *ppsUSCPMRMem = psDevInfo->hTQUSCSharedMem; *ppsCLIPMRMem = psDevInfo->hTQCLISharedMem; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.h index 7676ede51b7f..4ba50fc3fc86 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxshader.h @@ -66,12 +66,10 @@ PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); @Description Get handle to ready allocated shader PMR memory @Input psDeviceNode Device node @Output ppsCLIPMRMem Shader data used by CPU client side. -@Output ppsUSCPMRMem Shader usc code used by GPU. */ /**************************************************************************/ void PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE *psDeviceNode, - PMR **ppsCLIPMRMem, - PMR **ppsUSCPMRMem); + PMR **ppsCLIPMRMem); /*************************************************************************/ /*! @Function PVRSRVTQUnLoadShaders diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxsyncutils.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxsyncutils.h index 2133da85e78a..bfc46937f185 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxsyncutils.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxsyncutils.h @@ -46,7 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "sync_server.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgx_fwif_km.h" typedef struct _RGX_SYNC_DATA_ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.c index e541360cac87..dd9d05ee18b2 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.c @@ -45,7 +45,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "rgxccb.h" #include "rgxutils.h" -#include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "rgxtdmtransfer.h" #include "rgx_tq_shared.h" #include "rgxmem.h" @@ -59,7 +59,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_memallocflags.h" #include "rgxhwperf.h" #include "ospvr_gputrace.h" -#include "htbuffer.h" #include "rgxshader.h" #include "pdump_km.h" @@ -72,9 +71,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_buffer_sync.h" #endif -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -#include "validation_soc.h" -#endif #if defined(SUPPORT_WORKLOAD_ESTIMATION) #include "rgxworkest.h" @@ -82,6 +78,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxtimerquery.h" + /* Enable this to dump the compiled list of UFOs prior to kick call */ #define ENABLE_TDM_UFO_DUMP 0 @@ -125,6 +122,7 @@ static PVRSRV_ERROR _CreateTDMTransferContext( SERVER_MMU_CONTEXT * psServerMMUContext, DEVMEM_MEMDESC * psFWMemContextMemDesc, IMG_INT32 i32Priority, + IMG_INT32 ui32MaxDeadlineMS, RGX_COMMON_CONTEXT_INFO * psInfo, RGX_SERVER_TQ_TDM_DATA * psTDMData, IMG_UINT32 ui32CCBAllocSizeLog2, @@ -163,7 +161,7 @@ static PVRSRV_ERROR _CreateTDMTransferContext( ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2, ui32ContextFlags, i32Priority, - UINT_MAX, /* max deadline MS */ + ui32MaxDeadlineMS, ui64RobustnessAddress, psInfo, &psTDMData->psServerCommonContext); @@ -198,27 +196,21 @@ static PVRSRV_ERROR _DestroyTDMTransferContext( psTDMData->psServerCommonContext, RGXFWIF_DM_TDM, PDUMP_FLAGS_CONTINUOUS); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); /* ... it has so we can free it's resources */ FWCommonContextFree(psTDMData->psServerCommonContext); + psTDMData->psServerCommonContext = NULL; #if defined(SUPPORT_BUFFER_SYNC) pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); psTDMData->psBufferSyncContext = NULL; #endif - return PVRSRV_OK; + return eError; } /* @@ -234,6 +226,7 @@ PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( IMG_UINT32 ui32PackedCCBSizeU88, IMG_UINT32 ui32ContextFlags, IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext) { RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext; @@ -318,6 +311,7 @@ PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( hMemCtxPrivData, psFWMemContextMemDesc, i32Priority, + ui32MaxDeadlineMS, &sInfo, &psTransferContext->sTDMData, U32toU8_Unpack1(ui32PackedCCBSizeU88), @@ -330,7 +324,10 @@ PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); + } #endif SyncAddrListInit(&psTransferContext->sSyncAddrListFence); @@ -363,10 +360,11 @@ PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, - PMR ** ppsCLIPMRMem, - PMR ** ppsUSCPMRMem) + PMR ** ppsCLIPMRMem) { - PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem); return PVRSRV_OK; } @@ -381,35 +379,41 @@ PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem) PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext) { PVRSRV_ERROR eError; - PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFWIF_FWTDMCONTEXT *psFWTransferContext; - IMG_UINT32 ui32WorkEstCCBSubmitted; + PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, - (void **)&psFWTransferContext); - if (eError != PVRSRV_OK) +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware transfer context (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + RGXFWIF_FWTDMCONTEXT *psFWTransferContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; - ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; + eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, + (void 
**)&psFWTransferContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware transfer context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } - DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); + RGXFwSharedMemCacheOpValue(psFWTransferContext->ui32WorkEstCCBSubmitted, INVALIDATE); + ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; - /* Check if all of the workload estimation CCB commands for this workload are read */ - if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) - { - PVR_DPF((PVR_DBG_WARNING, - "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", - __func__, ui32WorkEstCCBSubmitted, - psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); + DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); - return PVRSRV_ERROR_RETRY; + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); + + return PVRSRV_ERROR_RETRY; + } } #endif @@ -431,7 +435,10 @@ PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psT } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData); + } #endif if (psTransferContext->psFWFrameworkMemDesc) @@ -463,6 +470,7 @@ PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psT /* * PVRSRVSubmitTQ3DKickKM */ +/* Old bridge call for backwards compatibility. 
*/ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, IMG_UINT32 ui32PDumpFlags, @@ -483,6 +491,50 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( IMG_UINT32 ui32TDMCharacteristic1, IMG_UINT32 ui32TDMCharacteristic2, IMG_UINT64 ui64DeadlineInus) +{ + return PVRSRVRGXTDMSubmitTransfer3KM(psTransferContext, + ui32PDumpFlags, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset, + paui32ClientUpdateValue, + iCheckFence, + iUpdateTimeline, + piUpdateFence, + szUpdateFenceName, + PVRSRV_NO_FENCE, + ui32FWCommandSize, + pui8FWCommand, + ui32ExtJobRef, + ui32SyncPMRCount, + paui32SyncPMRFlags, + ppsSyncPMRs, + ui32TDMCharacteristic1, + ui32TDMCharacteristic2, + ui64DeadlineInus); +} + +PVRSRV_ERROR PVRSRVRGXTDMSubmitTransfer3KM( + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateSyncOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iExportFenceToSignal, + IMG_UINT32 ui32FWCommandSize, + IMG_UINT8 * pui8FWCommand, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 * paui32SyncPMRFlags, + PMR ** ppsSyncPMRs, + IMG_UINT32 ui32TDMCharacteristic1, + IMG_UINT32 ui32TDMCharacteristic2, + IMG_UINT64 ui64DeadlineInus) { PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; RGX_CCB_CMD_HELPER_DATA *psCmdHelper; @@ -499,7 +551,6 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( IMG_UINT64 ui64FBSCEntryMask = 0; - IMG_UINT32 ui32CmdOffset = 0; IMG_BOOL bCCBStateOpen; PRGXFWIF_TIMESTAMP_ADDR pPreAddr; @@ -509,6 +560,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( IMG_UINT64 uiCheckFenceUID = 0; IMG_UINT64 uiUpdateFenceUID = 0; #if defined(SUPPORT_WORKLOAD_ESTIMATION) + IMG_UINT32 ui32CmdOffset = 0; RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0}; IMG_UINT32 ui32TDMWorkloadDataRO = 0; IMG_UINT32 ui32TDMCmdHeaderOffset = 0; @@ -524,6 +576,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( #endif PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT psExportFenceSyncCheckpoint = NULL; PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; IMG_UINT32 ui32FenceSyncCheckpointCount = 0; IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; @@ -555,7 +608,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( } /* Ensure the string is null-terminated (Required for safety) */ - szUpdateFenceName[31] = '\0'; + szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; if (ui32SyncPMRCount != 0) { @@ -693,6 +746,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( ui32IntClientUpdateCount++; } #else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_UNREFERENCED_PARAMETER(paui32SyncPMRFlags); PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); eError = PVRSRV_ERROR_INVALID_PARAMS; goto fail_populate_sync_addr_list; @@ -739,6 +793,38 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( goto fail_create_output_fence; } + /* Resolve the iExportFenceToSignal (if required) */ + if (iExportFenceToSignal != PVRSRV_NO_FENCE) + { + IMG_UINT32 iii; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncCheckpointResolveExportFence(iExportFenceToSignal=%d), ui32FenceSyncCheckpointCount=%d", __func__, iExportFenceToSignal, ui32FenceSyncCheckpointCount)); + eError = 
SyncCheckpointResolveExportFence(iExportFenceToSignal,
+		                                         psTransferContext->psDeviceNode->hSyncCheckpointContext,
+		                                         &psExportFenceSyncCheckpoint,
+		                                         ui32PDumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%s) psExportFenceSyncCheckpoint=<%p>", __func__, PVRSRVGetErrorString(eError), psExportFenceSyncCheckpoint));
+			goto fail_resolve_export_fence;
+		}
+
+		/* Check that the export fence was not also included as part of the
+		 * check fence (which is an error and would lead to a stalled kick).
+		 */
+		for (iii=0; iii<ui32FenceSyncCheckpointCount; iii++)
+		{
+			if (apsFenceSyncCheckpoints[iii] == psExportFenceSyncCheckpoint)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ERROR apsFenceSyncCheckpoints[%d]=<%p>, FWAddr=0x%x", __func__, iii, apsFenceSyncCheckpoints[iii], SyncCheckpointGetFirmwareAddr(apsFenceSyncCheckpoints[iii])));
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: ERROR psExportFenceSyncCheckpoint=<%p>", __func__, psExportFenceSyncCheckpoint));
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, " %s - iCheckFence includes iExportFenceToSignal", PVRSRVGetErrorString(eError)));
+				goto fail_check_fence_includes_export_fence;
+			}
+		}
+	}
+
 	/* Append the sync prim update for the timeline (if required) */
 	if (psFenceTimelineUpdateSync)
 	{
@@ -858,6 +944,34 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
 #endif
 	}
 
+	if (psExportFenceSyncCheckpoint)
+	{
+		/* Append the update (from export fence) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Transfer CDM Update (&psTransferContext->sSyncAddrListUpdate=<%p>, psExportFenceSyncCheckpoint=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)psExportFenceSyncCheckpoint));
+		SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+		                              1,
+		                              &psExportFenceSyncCheckpoint);
+		if (!pauiIntUpdateUFOAddress)
+		{
+			pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+		}
+		ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+		if (ui32IntClientUpdateCount > 0)
+		{
+			IMG_UINT32 iii;
+			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+			{
+				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%u](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+				pui32Tmp++;
+			}
+		}
+#endif
+	}
+
 #if (ENABLE_TDM_UFO_DUMP == 1)
 	PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
 	{
@@ -915,17 +1029,20 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
 #endif
 
 #if defined(SUPPORT_WORKLOAD_ESTIMATION)
-	sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1;
-	sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2;
-
-	/* Prepare workload estimation */
-	WorkEstPrepare(psDeviceNode->pvDevice,
-	               &psTransferContext->sWorkEstData,
-	               &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM,
-	               eType,
-	               &sWorkloadCharacteristics,
-	               ui64DeadlineInus,
-	               &sWorkloadKickDataTransfer);
+	if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode))
+	{
+		sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1;
+		sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2;
+
+		/* Prepare workload estimation */
+		WorkEstPrepare(psDeviceNode->pvDevice,
+		               &psTransferContext->sWorkEstData,
+		               &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM,
+		               eType,
+		               &sWorkloadCharacteristics,
+		               ui64DeadlineInus,
+		               &sWorkloadKickDataTransfer);
+	}
 #endif
 
 	/*
@@ -957,6 +1074,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
 	                             pszCommandName,
 	                             bCCBStateOpen,
 	                             psCmdHelper);
+
} /* @@ -979,7 +1097,27 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( Only do the command helper release (which takes the server sync operations if the acquire succeeded */ - ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + } +#endif RGXCmdHelperReleaseCmdCCB(1, psCmdHelper, "TQ_TDM", @@ -987,22 +1125,25 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following is used to determine the offset of the command header containing - the workload estimation data so that can be accessed when the KCCB is read */ - ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper); - ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); - /* This checks if the command would wrap around at the end of the CCB and - * therefore would start at an offset of 0 rather than the current command - * offset */ - if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck) - { - ui32TDMWorkloadDataRO = ui32CmdOffset; - } - else - { - ui32TDMWorkloadDataRO = 0; + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck) + { + ui32TDMWorkloadDataRO = ui32CmdOffset; + } + else + { + ui32TDMWorkloadDataRO = 0; + } } #endif @@ -1025,9 +1166,12 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( /* Add the Workload data into the KCCB kick */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Store the offset to the CCCB command header so that it can be referenced - * when the KCCB command reaches the FW */ - sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset; + } #endif /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */ @@ -1047,9 +1191,9 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( NO_DEADLINE, NO_CYCEST); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError = RGXScheduleCommand(psDeviceNode->pvDevice, + eError = 
RGXScheduleCommandWithoutPowerLock(psDeviceNode->pvDevice, RGXFWIF_DM_TDM, & sTDMKCCBCmd, ui32PDumpFlags); @@ -1058,7 +1202,9 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); + + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); if (eError != PVRSRV_OK) { @@ -1081,6 +1227,11 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); } SyncCheckpointNoHWUpdateTimelines(NULL); + if (psExportFenceSyncCheckpoint) + { + SyncCheckpointSignalNoHW(psExportFenceSyncCheckpoint); + SyncCheckpointNoHWSignalExportFence(iExportFenceToSignal); + } #endif /* defined(NO_HARDWARE) */ #if defined(SUPPORT_BUFFER_SYNC) @@ -1129,6 +1280,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( as only the client CCB release will modify the client CCB */ fail_2dcmdacquire: +fail_acquirepowerlock: fail_3dcmdacquire: #if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED) fail_invalfbsc: @@ -1143,10 +1295,12 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( pui32IntAllocatedUpdateValues = NULL; } fail_alloc_update_values_mem: - -/* fail_pdumpcheck: */ -/* fail_cmdtype: */ - + if (psExportFenceSyncCheckpoint) + { + SyncCheckpointRollbackExportFence(iExportFenceToSignal); + } +fail_check_fence_includes_export_fence: +fail_resolve_export_fence: if (iUpdateFence != PVRSRV_NO_FENCE) { SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); @@ -1171,7 +1325,6 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( #endif /* defined(SUPPORT_BUFFER_SYNC) */ fail_populate_sync_addr_list: - PVR_ASSERT(eError != PVRSRV_OK); OSFreeMem(psCmdHelper); fail_allochelper: @@ -1180,6 +1333,7 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); } OSLockRelease(psTransferContext->hLock); + PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -1197,7 +1351,7 @@ PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice, RGXFWIF_DM_TDM, @@ -1208,7 +1362,7 @@ PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError != PVRSRV_OK) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.h index dc75be3f3866..b0f003cf3ad3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtdmtransfer.h @@ -49,7 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxdevice.h" #include "rgxfwutils.h" #include "rgx_fwif_resetframework.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" #include "sync_server.h" @@ -68,14 +68,14 @@ PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( IMG_UINT32 ui32PackedCCBSizeU88, IMG_UINT32 ui32ContextFlags, IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext); PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, - PMR ** ppsCLIPMRMem, - PMR ** ppsUSCPMRMem); + PMR ** ppsCLIPMRMem); PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem); @@ -105,6 +105,28 @@ PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( IMG_UINT32 ui32TDMCharacteristic2, IMG_UINT64 ui64DeadlineInus); +PVRSRV_ERROR PVRSRVRGXTDMSubmitTransfer3KM( + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateSyncOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iExportFenceToSignal, + IMG_UINT32 ui32FWCommandSize, + IMG_UINT8 * pui8FWCommand, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 * pui32SyncPMRFlags, + PMR ** ppsSyncPMRs, + IMG_UINT32 ui32TDMCharacteristic1, + IMG_UINT32 ui32TDMCharacteristic2, + IMG_UINT64 ui64DeadlineInus); + PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, IMG_UINT32 ui32PDumpFlags); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.c index 584dbf1e3f64..bea5cd1c98ef 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.c @@ -46,6 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxfwutils.h" #include "htbserver.h" #include "pvrsrv_apphint.h" +#include "rgxpower.h" /****************************************************************************** * @@ -85,17 +86,30 @@ static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_ERROR_INVALID_PARAMS; } - RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode, - RGXTIMECORR_EVENT_CLOCK_CHANGE); + PVR_DPF((PVR_DBG_WARNING, + "Setting time correlation clock from \"%s\" to \"%s\"", + apszClocks[psDevInfo->ui32ClockSource], + apszClocks[ui32Value])); - PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"", - apszClocks[psDevInfo->ui32ClockSource], - apszClocks[ui32Value])); + /* PVRSRVPowerLock() fails only when power is off. */ + if (PVRSRVPowerLock((PVRSRV_DEVICE_NODE *) psDeviceNode) == PVRSRV_OK) + { + RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode, + RGXTIMECORR_EVENT_CLOCK_CHANGE); + + psDevInfo->ui32ClockSource = ui32Value; - psDevInfo->ui32ClockSource = ui32Value; + RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode, + RGXTIMECORR_EVENT_CLOCK_CHANGE); - RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode, - RGXTIMECORR_EVENT_CLOCK_CHANGE); + PVRSRVPowerUnlock((PVRSRV_DEVICE_NODE *)psDeviceNode); + } + else + { + /* Set the new clock source without updating the time correlation + * data. 
This is going to be accounted for during the next power up. */ + psDevInfo->ui32ClockSource = ui32Value; + } return PVRSRV_OK; } @@ -116,8 +130,9 @@ static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) { - PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock, - _SetClock, psDeviceNode, NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_SecondaryOSClockSource, + _GetClock, _SetClock, psDeviceNode, + NULL); } /* @@ -147,17 +162,48 @@ IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode) return OSDivide64r64(RGXTimeCorrGetClockns64(psDeviceNode), 1000, &rem); } +static IMG_UINT64 RGXTimeGetDeviceTimestampInTicks(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_UINT64 ui64DeviceTimestamp; + + /* In case of powerlock acquire fails or device is powered off, return + * last device power off timestamp plus ticks corresponding to device off time. */ + ui64DeviceTimestamp = psDeviceNode->ui64LastDeviceOffTimestamp + + RGXTimeCorrDeltaOSNsToDeltaCR(psDeviceNode, + OSClockns64() - psDeviceNode->ui64LastDeviceOffHostTimestampNs); + + if (PVRSRV_OK == PVRSRVPowerLock(psDeviceNode)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + + if (PVRSRVIsDevicePowered(psDeviceNode)) + { + ui64DeviceTimestamp = RGXReadHWTimerReg(psDevInfo) + psGpuUtilFW->i64DeviceTimestampOffset; + } + PVRSRVPowerUnlock(psDeviceNode); + } + + return ui64DeviceTimestamp; +} + void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_TIME_CORR *psTimeCorrs, IMG_UINT32 ui32NumOut) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; - IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount; + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + IMG_UINT32 ui32CurrentIndex; + + RGXFwSharedMemCacheOpValue(psGpuUtilFW->ui32TimeCorrSeqCount, INVALIDATE); + ui32CurrentIndex = psGpuUtilFW->ui32TimeCorrSeqCount; + RGXFwSharedMemCacheOpExec(&psGpuUtilFW->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex - ui32NumOut)], + sizeof(psGpuUtilFW->sTimeCorr[0]) * ui32NumOut, + PVRSRV_CACHE_OP_INVALIDATE); while (ui32NumOut--) { - *(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)]; + *(psTimeCorrs++) = psGpuUtilFW->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)]; ui32CurrentIndex--; } } @@ -226,51 +272,75 @@ static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo) static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; - IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1; - RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)]; + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + IMG_UINT32 ui32NewSeqCount; + RGXFWIF_TIME_CORR *psTimeCorr; + RGXFWIF_TIME_CORR sTimeCorr = {0}; + + RGXFwSharedMemCacheOpValue(psGpuUtilFW->ui32TimeCorrSeqCount, INVALIDATE); + ui32NewSeqCount = psGpuUtilFW->ui32TimeCorrSeqCount + 1; + RGXFwSharedMemCacheOpValue(psGpuUtilFW->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)], INVALIDATE); + psTimeCorr = 
&psGpuUtilFW->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)]; /* * The following reads must be done as close together as possible, because * they represent the same current time sampled from different clock sources. */ -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - PVR_DPF((PVR_DBG_ERROR, - "_RGXMakeTimeCorrData: System Monotonic Clock not available.")); - PVR_ASSERT(0); + if (OSClockMonotonicns64(&sTimeCorr.ui64OSMonoTimeStamp) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "_RGXMakeTimeCorrData: System Monotonic Clock not available.")); + PVR_ASSERT(0); + } } -#endif - psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); - psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode); - psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo); - psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); - if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0) + sTimeCorr.ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); + sTimeCorr.ui64OSTimeStamp = + ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource == RGXTIMECORR_CLOCK_MONO + ? sTimeCorr.ui64OSMonoTimeStamp + : RGXTimeCorrGetClockns64(psDeviceNode); + sTimeCorr.ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo); + sTimeCorr.ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(sTimeCorr.ui32CoreClockSpeed); + + if (sTimeCorr.ui64CRDeltaToOSDeltaKNs == 0) { #if defined(PVRSRV_TIMER_CORRELATION_HISTORY) _DumpTimerCorrelationHistory(psDevInfo); #endif /* Revert to original clock speed (error already printed) */ - psTimeCorr->ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); - psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); + sTimeCorr.ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); + sTimeCorr.ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(sTimeCorr.ui32CoreClockSpeed); } + OSCachedMemCopy(psTimeCorr, &sTimeCorr, sizeof(sTimeCorr)); /* Make sure the values are written to memory before updating the index of the current entry */ OSWriteMemoryBarrier(psTimeCorr); + RGXFwSharedMemCacheOpPtr(psTimeCorr, FLUSH); + /* Update the index of the current entry in the timer correlation array */ - psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount; + psGpuUtilFW->ui32TimeCorrSeqCount = ui32NewSeqCount; + RGXFwSharedMemCacheOpValue(psGpuUtilFW->ui32TimeCorrSeqCount, FLUSH); + + + if (RGXTIMECORR_EVENT_POWER == eEvent && psDeviceNode->ui64LastDeviceOffTimestamp) + { + /* Capture effective timestamp offset on device power on */ + psGpuUtilFW->i64DeviceTimestampOffset = psDeviceNode->ui64LastDeviceOffTimestamp - RGXReadHWTimerReg(psDevInfo) + + RGXTimeCorrDeltaOSNsToDeltaCR(psDeviceNode, + OSClockns64() - psDeviceNode->ui64LastDeviceOffHostTimestampNs); + } PVR_DPF((PVR_DBG_MESSAGE, "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, " "CR %" IMG_UINT64_FMTSPEC ", GPU freq. 
%u Hz (given as %u Hz)", _EventToString(eEvent), - psTimeCorr->ui64OSTimeStamp, - psTimeCorr->ui64CRTimeStamp, - RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), + sTimeCorr.ui64OSTimeStamp, + sTimeCorr.ui64CRTimeStamp, + RGXFWIF_ROUND_TO_KHZ(sTimeCorr.ui32CoreClockSpeed), _RGXGetSystemLayerGPUClockSpeed(psDeviceNode))); /* @@ -280,9 +350,9 @@ static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_E * partition marker is written. */ HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER, - psTimeCorr->ui64OSTimeStamp, - psTimeCorr->ui64CRTimeStamp, - psTimeCorr->ui32CoreClockSpeed); + sTimeCorr.ui64OSTimeStamp, + sTimeCorr.ui64CRTimeStamp, + sTimeCorr.ui32CoreClockSpeed); } static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, @@ -291,14 +361,19 @@ static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, #if !defined(NO_HARDWARE) && !defined(VIRTUAL_PLATFORM) && defined(DEBUG) #define SCALING_FACTOR (10) PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; - IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); - RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index]; + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + IMG_UINT32 ui32Index; + RGXFWIF_TIME_CORR *psTimeCorr; IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp; IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff; IMG_INT64 i64Diff; IMG_UINT32 ui32Ratio, ui32Remainder; + RGXFwSharedMemCacheOpValue(psGpuUtilFW->ui32TimeCorrSeqCount, INVALIDATE); + ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFW->ui32TimeCorrSeqCount); + RGXFwSharedMemCacheOpValue(psGpuUtilFW->sTimeCorr[ui32Index], INVALIDATE); + psTimeCorr = &psGpuUtilFW->sTimeCorr[ui32Index]; + /* * The following reads must be done as close together as possible, because * they represent the same current time sampled from different clock sources. @@ -482,10 +557,11 @@ static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, { #if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION) GPU_FREQ_TRACKING_DATA *psTrackingData; - IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed; + IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed, ui32SysGPUClockSpeed; IMG_INT32 i32Diff; IMG_UINT32 ui32Remainder; + ui32SysGPUClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); /* * Find out what the GPU frequency was in the last period. * This should return a value very close to the frequency passed by the system layer. 
@@ -511,7 +587,7 @@ static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " "more than 1 MHz difference between old and new value " "(%u Hz -> %u Hz over %" IMG_UINT64_FMTSPEC " us)", - _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + ui32SysGPUClockSpeed, _EventToString(eEvent), RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), @@ -522,13 +598,46 @@ static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_DPF((PVR_DBG_MESSAGE, "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us", - _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + ui32SysGPUClockSpeed, _EventToString(eEvent), RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), psGpuDVFSTable->ui64CalibrationOSTimediff)); } + if (eEvent == RGXTIMECORR_EVENT_PERIODIC) + { + PVRSRV_DEV_POWER_STATE ePowerState; + + i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32SysGPUClockSpeed); + + /* + * Notify the Firmware about unexpected frequency differences observed during + * periodic frequency calibration events only. + * Other events like PDVFS and power transitions are already likely to call + * the pre/post clock callback directly to set frequencies as needed. + * Platforms without PDVFS or APM need a method to correct the Firmware's + * internal timing measurements. + */ + if (((i32Diff < -1000000) || (i32Diff > 1000000)) && + (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) == PVRSRV_OK)) + { + PVRSRV_ERROR eError = RGXPreClockSpeedChange((IMG_HANDLE)psDeviceNode, ePowerState); + + PVR_LOG_IF_ERROR(eError, "RGXPreClockSpeedChange"); + if (eError == PVRSRV_OK) + { + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + + /* Update the internal core frequency variable and notify the Firmware of the change */ + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed = RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed); + + eError = RGXPostClockSpeedChange((IMG_HANDLE)psDeviceNode, ePowerState); + PVR_LOG_IF_ERROR(eError, "RGXPostClockSpeedChange"); + } + } + } + /* Reset time deltas to avoid recalibrating the same frequency over and over again */ psGpuDVFSTable->ui64CalibrationCRTimediff = 0; psGpuDVFSTable->ui64CalibrationOSTimediff = 0; @@ -557,7 +666,7 @@ void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDeviceNode); _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable); _RGXMakeTimeCorrData(psDeviceNode, eEvent); @@ -568,7 +677,7 @@ void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDeviceNode); _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable); @@ -576,6 +685,15 @@ void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) { _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent); } + + if (RGXTIMECORR_EVENT_POWER == eEvent) + { + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = 
psDevInfo->psRGXFWIfGpuUtilFW; + + /* Capture effective device power off timestamp */ + psDeviceNode->ui64LastDeviceOffTimestamp = RGXReadHWTimerReg(psDevInfo) + psGpuUtilFW->i64DeviceTimestampOffset; + psDeviceNode->ui64LastDeviceOffHostTimestampNs = OSClockns64(); + } } void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle) @@ -585,7 +703,7 @@ void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle) RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; IMG_UINT64 ui64TimeNow = RGXTimeCorrGetClockus64(psDeviceNode); PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDeviceNode); if (psGpuDVFSTable == NULL) { @@ -628,19 +746,44 @@ RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDev PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_CLOCK_TYPE eClockType) { - return _SetClock(psDeviceNode, NULL, eClockType); + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO* psRGXDevNode = psDeviceNode->pvDevice; + RGXTIMECORR_CLOCK_TYPE eLastClock = psRGXDevNode->ui32ClockSource; + + eError = _SetClock(psDeviceNode, NULL, eClockType); + PVR_LOG_RETURN_IF_ERROR(eError, "_SetClock"); + + psRGXDevNode->ui32LastClockSource = eLastClock; + return eError; } PVRSRV_ERROR -PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT64 * pui64Time) +PVRSRVRGXCurrentTime(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8TimestampType, + IMG_UINT64 *pui64Time) { + RGX_QUERY_TIMESTAMP_TYPE eTimestampType = (RGX_QUERY_TIMESTAMP_TYPE)ui8TimestampType; + PVRSRV_ERROR eError = PVRSRV_OK; + PVR_UNREFERENCED_PARAMETER(psConnection); - *pui64Time = RGXTimeCorrGetClockns64(psDeviceNode); + switch (eTimestampType) + { + case RGX_QUERY_HOST_TIMESTAMP: + *pui64Time = RGXTimeCorrGetClockns64(psDeviceNode); + break; + + case RGX_QUERY_DEVICE_TIMESTAMP: + *pui64Time = RGXTimeGetDeviceTimestampInTicks(psDeviceNode); + break; - return PVRSRV_OK; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + return eError; } /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.h index e1cfff9b7abc..9758e61909c8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimecorr.h @@ -48,6 +48,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "device.h" #include "osfunc.h" #include "connection_server.h" +#include "rgxdevice.h" typedef enum { @@ -117,6 +118,41 @@ static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpee RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder); } +/*! 
+****************************************************************************** + + @Function RGXTimeCorrDeltaOSNsToDeltaCR + + @Description Convert OS timestamp difference in ns to device tick difference + + @Input psDeviceNode, ui64DeltaOSNs + + @Return 0 on failure, deltaCR otherwise + +******************************************************************************/ +static inline IMG_UINT64 RGXTimeCorrDeltaOSNsToDeltaCR(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64DeltaOSNs) +{ + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW; + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_TIME_CORR *psTimeCorr; + IMG_UINT32 ui32Remainder; + + PVR_ASSERT(psDeviceNode && psDeviceNode->pvDevice); + psDevInfo = psDeviceNode->pvDevice; + psGpuUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + PVR_ASSERT(psGpuUtilFW); + psTimeCorr = &psGpuUtilFW->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFW->ui32TimeCorrSeqCount)]; + + if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui64CRDeltaToOSDeltaKNs is 0", __func__)); + return 0; + } + + return OSDivide64r64(ui64DeltaOSNs << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, + psTimeCorr->ui64CRDeltaToOSDeltaKNs, &ui32Remainder); +} + /*! ****************************************************************************** @@ -257,16 +293,24 @@ void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_TIME_CORR *psTimeCorrs, IMG_UINT32 ui32NumOut); -/**************************************************************************/ /*! -@Function PVRSRVRGXCurrentTime -@Description Returns the current state of the device timer -@Input psDevData Device data. -@Out pui64Time -@Return PVRSRV_OK on success. -*/ /***************************************************************************/ -PVRSRV_ERROR -PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT64 * pui64Time); +/*! 
+****************************************************************************** + @Function PVRSRVRGXCurrentTime + + @Description Server-side implementation of RGXCurrentTime + + @Input psConnection : Connection handle + @Input psDeviceNode : RGX Device Node + @Input ui8TimestampType : Timestamp type + @Output pui64Time : Timestamp + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXCurrentTime(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8TimestampType, + IMG_UINT64 *pui64Time); #endif /* RGXTIMECORR_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.c index d5d11bff9129..82900863ed26 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.c @@ -73,6 +73,8 @@ PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection, psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL; psDevInfo->pui64EndTimeById[ui32QueryId] = 0UL; OSWriteMemoryBarrier(&psDevInfo->pui64EndTimeById[ui32QueryId]); + RGXFwSharedMemCacheOpValue(psDevInfo->pui64StartTimeById[ui32QueryId], FLUSH); + RGXFwSharedMemCacheOpValue(psDevInfo->pui64EndTimeById[ui32QueryId], FLUSH); /* save of the active query index */ psDevInfo->ui32ActiveQueryId = ui32QueryId; @@ -135,6 +137,8 @@ PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, #endif ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId]; + + RGXFwSharedMemCacheOpValue(psDevInfo->pui32CompletedById[ui32QueryId], INVALIDATE); ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId]; /* if there was no kick since the Begin() on this id we return 0-s as Begin cleared @@ -143,6 +147,8 @@ PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, */ if (ui32Completed >= ui32Scheduled) { + RGXFwSharedMemCacheOpValue(psDevInfo->pui64StartTimeById[ui32QueryId], INVALIDATE); + RGXFwSharedMemCacheOpValue(psDevInfo->pui64EndTimeById[ui32QueryId], INVALIDATE); * pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId]; * pui64EndTime = psDevInfo->pui64EndTimeById[ui32QueryId]; @@ -164,31 +170,6 @@ PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, /****************************************************************************** NOT BRIDGED/EXPORTED FUNCS ******************************************************************************/ -/* writes a time stamp command in the client CCB */ -void -RGXWriteTimestampCommand(void ** ppvPtr, - RGXFWIF_CCB_CMD_TYPE eCmdType, - PRGXFWIF_TIMESTAMP_ADDR pAddr) -{ - RGXFWIF_CCB_CMD_HEADER * psHeader; - PRGXFWIF_TIMESTAMP_ADDR * psTimestampAddr; - - psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppvPtr); - - PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP - || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP); - - psHeader->eCmdType = eCmdType; - psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1); - - (*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); - - psTimestampAddr = (PRGXFWIF_TIMESTAMP_ADDR *) *ppvPtr; - psTimestampAddr->ui32Addr = pAddr.ui32Addr; - - (*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, psHeader->ui32CmdSize); -} - void RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.h 
b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.h index 81898860dc23..7a43e7966f88 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxtimerquery.h @@ -103,12 +103,6 @@ PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, NON BRIDGED/EXPORTED interface ******************************************************************************/ -/* write the timestamp cmd from the helper*/ -void -RGXWriteTimestampCommand(void ** ppvCmd, - RGXFWIF_CCB_CMD_TYPE eCmdType, - PRGXFWIF_TIMESTAMP_ADDR pAddr); - /* get the relevant data from the Kick to the helper*/ void RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.c index 866fd014a44d..09fb841e0e60 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.c @@ -51,7 +51,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv.h" #include "sync_internal.h" #include "rgxfwutils.h" - +#include "rgxlayer.h" +#include "rgxmmudefs_km.h" +#include "rgxta3d.h" PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, const void *pvPrivateData, @@ -216,6 +218,333 @@ inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM) } } +PHYS_HEAP_POLICY RGXPhysHeapGetLMAPolicy(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags, PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PHYS_HEAP_POLICY ui32Policy; + + if (OSIsMapPhysNonContigSupported()) + { + ui32Policy = PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG; + + if (BITMASK_ANY(ui32UsageFlags, + (PHYS_HEAP_USAGE_FW_SHARED | + PHYS_HEAP_USAGE_FW_PRIVATE | + PHYS_HEAP_USAGE_FW_PREMAP_PT | + PHYS_HEAP_USAGE_FW_CODE | + PHYS_HEAP_USAGE_FW_PRIV_DATA))) + { + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Guest Firmware heaps are always premapped */ + ui32Policy = PHYS_HEAP_POLICY_DEFAULT; + } +#if defined(RGX_PREMAP_FW_HEAPS) + else if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode)) + { + /* All Firmware heaps are premapped under AutoVz*/ + ui32Policy = PHYS_HEAP_POLICY_DEFAULT; + } +#endif + } + + if (BITMASK_ANY(ui32UsageFlags, PHYS_HEAP_USAGE_FW_PREMAP)) + { + ui32Policy = PHYS_HEAP_POLICY_DEFAULT; + } + } + else + { + ui32Policy = PHYS_HEAP_POLICY_DEFAULT; + } + + return ui32Policy; +} + +IMG_BOOL RGXIsErrorAndDeviceRecoverable(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_ERROR *peError) +{ + IMG_BOOL bRecoverable = IMG_TRUE; + + if (*peError == PVRSRV_OK) + { + /* No recovery required */ + return IMG_FALSE; + } + + if (!PVRSRVIsStatusRecoverable(OSAtomicRead(&psDeviceNode->eHealthStatus))) + { + bRecoverable = IMG_FALSE; + } + else + { + RGXUpdateHealthStatus(psDeviceNode, IMG_FALSE); + + if (!PVRSRVIsStatusRecoverable(OSAtomicRead(&psDeviceNode->eHealthStatus))) + { + bRecoverable = IMG_FALSE; + } + } + + if (bRecoverable && !PVRSRVIsRetryError(*peError)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Device is recoverable. Changing error type (%s) to retry.", + __func__, PVRSRVGetErrorString(*peError))); + *peError = PVRSRV_ERROR_RETRY; + } + + if (!bRecoverable && PVRSRVIsRetryError(*peError)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Device is not recoverable. 
Error type should not be retry (%s).",
+                 __func__, PVRSRVGetErrorString(*peError)));
+        *peError = PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    return bRecoverable;
+}
+
+/*
+ * Function that returns the MList Size required for a given max PB size.
+ *
+ * The maximum MList size required always depends on the maximum PB Size
+ * chosen and must also take into account the additional pages that will
+ * be provided by a local PB.
+ */
+IMG_UINT32 RGXCalcMListSize(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_UINT64 ui64MaxLocalPBSize,
+                            IMG_UINT64 ui64MaxGlobalPBSize)
+{
+    IMG_UINT32 ui32PTEPages = 0, ui32PDEPages = 0, ui32PCEPages = 0, ui32MListSize = 0;
+    IMG_UINT32 ui32NumOfPipes = 1;
+    IMG_UINT64 ui64TotalPages = 0;
+    PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+    PVR_UNREFERENCED_PARAMETER(psDevInfo);
+    /*
+     * Assert if the size of the PB exceeds the maximum theoretical limit.
+     * RGX_PM_MAX_PB_VIRT_ADDR_SPACE represents the 16G address space.
+     */
+    PVR_ASSERT(ui64MaxLocalPBSize+ui64MaxGlobalPBSize <= RGX_PM_MAX_PB_VIRT_ADDR_SPACE);
+
+    /* Calculate the total number of pages, which is the number of Page Table entries */
+    ui64TotalPages = ((ui64MaxLocalPBSize+ui64MaxGlobalPBSize)/RGX_BIF_PM_PHYSICAL_PAGE_SIZE);
+
+    /* Calculate the total number of pages required for the PTEs (minimum of 1) */
+    ui32PTEPages = (IMG_UINT32)(ui64TotalPages/RGX_MMUCTRL_ENTRIES_PT_VALUE);
+    if (ui32PTEPages == 0U)
+    {
+        ui32PTEPages = 1;
+    }
+
+    /* Calculate the total number of pages required to hold the PDEs (minimum of 1) */
+    ui32PDEPages = ui32PTEPages/RGX_MMUCTRL_ENTRIES_PD_VALUE;
+    if (ui32PDEPages == 0U)
+    {
+        ui32PDEPages = 1;
+    }
+
+    /* Calculate the total number of pages required to hold the PCEs (minimum of 1) */
+    ui32PCEPages = ui32PDEPages/RGX_MMUCTRL_ENTRIES_PC_VALUE;
+    if (ui32PCEPages == 0U)
+    {
+        ui32PCEPages = 1;
+    }
+
+    /* Calculate the maximum number of TA/VCE pipes */
+#if defined(RGX_FEATURE_SCALABLE_TE_ARCH_IDX)
+    {
+        IMG_UINT32 ui32Val = RGX_GET_FEATURE_VALUE(psDevInfo, RGX_FEATURE_SCALABLE_TE_ARCH);
+        if (ui32Val > ui32NumOfPipes)
+        {
+            ui32NumOfPipes = ui32Val;
+        }
+    }
+#endif
+
+#if defined(RGX_FEATURE_SCALABLE_VCE_IDX)
+    {
+        IMG_UINT32 ui32Val = RGX_GET_FEATURE_VALUE(psDevInfo, RGX_FEATURE_SCALABLE_VCE);
+        if (ui32Val > ui32NumOfPipes)
+        {
+            ui32NumOfPipes = ui32Val;
+        }
+    }
+#endif
+
+    /*
+     * Calculate the MList size considering that the total number of pages in the PB
+     * are shared among all the PM address spaces...
+     */
+    ui32MListSize = (ui32PCEPages + ui32PDEPages + ui32PTEPages) *
+                    RGX_NUM_PM_ADDR_SPACES * ui32NumOfPipes * RGX_MLIST_ENTRY_STRIDE;
+
+    /* Round it off to the nearest page granularity */
+    ui32MListSize = PVR_ALIGN(ui32MListSize, RGX_BIF_PM_PHYSICAL_PAGE_SIZE);
+
+    return ui32MListSize;
+}
+
+/*
+ * Critical PMRs are PMRs created by the client that might contain physical page addresses.
+ * We need to validate if they were allocated with proper flags.
+ */
+PVRSRV_ERROR ValidateCriticalPMR(PMR* psPMR, IMG_DEVMEM_SIZE_T ui64MinSize)
+{
+    PVRSRV_ERROR eError;
+    PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+
+    IMG_BOOL bCPUCacheSnoop =
+        (PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) &&
+         psDevNode->pfnGetDeviceSnoopMode(psDevNode) == PVRSRV_DEVICE_SNOOP_CPU_ONLY);
+
+    PMR_FLAGS_T uiFlags = PMR_Flags(psPMR);
+
+    /* Critical PMR cannot be user CPU mappable */
+    if (PVRSRV_CHECK_CPU_READABLE(uiFlags) ||
+        PVRSRV_CHECK_CPU_WRITEABLE(uiFlags))
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "Critical PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")",
+                 uiFlags));
+        PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error);
+    }
+
+    /* Critical PMR must not be user CPU cacheable (unless snooping is on) */
+    if (!bCPUCacheSnoop &&
+        (PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) ||
+         PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) ||
+         PVRSRV_CHECK_CPU_CACHED(uiFlags)))
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "Critical PMR allows CPU caching (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")",
+                 uiFlags));
+        PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error);
+    }
+
+    /* Critical PMRs must be allocated with PMMETA_PROTECT */
+    if ((uiFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)) == 0)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: Critical PMR must have PMMETA_PROTECT set",
+                 __func__));
+        PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error);
+    }
+
+#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION)
+    if (PVRSRV_CHECK_OS_LINUX_MOVABLE(uiFlags))
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: Critical PMR must not have OS_LINUX_MOVABLE set",
+                 __func__));
+        PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error);
+    }
+#endif
+
+    if (PMR_LogicalSize(psPMR) < ui64MinSize)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "%s: Critical PMR doesn't have sufficient size",
+                 __func__));
+        PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, return_error);
+    }
+
+    return PVRSRV_OK;
+return_error:
+    return eError;
+}
+
+/* Check if all global freelists have the same size and if all local freelists have the same size. */
+PVRSRV_ERROR ValidateFreeListSizes(RGX_FREELIST* apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS],
+                                   IMG_UINT32* pui32LocalFLMaxPages,
+                                   IMG_UINT32* pui32GlobalFLMaxPages)
+{
+    IMG_UINT32 i,j;
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    IMG_UINT32 ui32GlobalFLMaxPages = apsFreeLists[RGXFW_GLOBAL_FREELIST]->ui32MaxFLPages;
+    IMG_UINT32 ui32LocalFLMaxPages = apsFreeLists[RGXFW_LOCAL_FREELIST]->ui32MaxFLPages;
+    IMG_UINT32 ui32NumFLPerGD = RGXMKIF_NUM_RTDATA_FREELISTS/RGXMKIF_NUM_GEOMDATAS;
+
+    for (i=0; i<RGXMKIF_NUM_GEOMDATAS; i++)
+    {
+        /* Check if all local freelists have the same size */
+        if (apsFreeLists[i*ui32NumFLPerGD + RGXFW_LOCAL_FREELIST]->ui32MaxFLPages != ui32LocalFLMaxPages)
+        {
+            eError = PVRSRV_ERROR_INVALID_PARAMS;
+        }
+
+        /* Check if all global freelists have the same size */
+        for (j=RGXFW_GLOBAL_FREELIST; j<ui32NumFLPerGD; j++)
+        {
+            if (apsFreeLists[i*ui32NumFLPerGD + j]->ui32MaxFLPages != ui32GlobalFLMaxPages)
+            {
+                eError = PVRSRV_ERROR_INVALID_PARAMS;
+            }
+        }
+    }
+
+    *pui32LocalFLMaxPages = ui32LocalFLMaxPages;
+    *pui32GlobalFLMaxPages = ui32GlobalFLMaxPages;
+
+    return eError;
+}
+
+PVRSRV_ERROR
+AcquireValidateRefCriticalBuffer(PVRSRV_DEVICE_NODE* psDevNode,
+                                 DEVMEMINT_RESERVATION* psReservation,
+                                 IMG_DEVMEM_SIZE_T ui64MinSize,
+                                 PMR** ppsPMR,
+                                 IMG_DEV_VIRTADDR* psDevVAddr)
+{
+    PVRSRV_ERROR eError;
+
+    /* Obtain reference to reservation object */
+    if (!DevmemIntReservationAcquire(psReservation))
+    {
+        eError = PVRSRV_ERROR_REFCOUNT_OVERFLOW;
+        PVR_LOG_GOTO_IF_ERROR_VA(eError, ReturnError,
+                                 "%s: Failed to acquire reservation for critical buffer", __func__);
+    }
+
+    
eError = DevmemIntGetReservationData(psReservation, ppsPMR, psDevVAddr); + PVR_LOG_GOTO_IF_ERROR_VA(eError, RollbackReservation, + "%s: Error from DevmemIntGetReservationData for critical buffer: %s", + __func__, PVRSRVGetErrorString(eError)); + + + /* Check buffer sizes and flags are as required */ + eError = ValidateCriticalPMR(*ppsPMR, ui64MinSize); + PVR_LOG_GOTO_IF_ERROR_VA(eError, RollbackReservation, + "%s: Validation of critical PMR failed: %s", + __func__, PVRSRVGetErrorString(eError)); + + /* If no error on validation ref the PMR */ + (void) PMRRefPMR(*ppsPMR); + + return PVRSRV_OK; + +RollbackReservation: + DevmemIntReservationRelease(psReservation); +ReturnError: + return eError; +} + +void UnrefAndReleaseCriticalBuffer(DEVMEMINT_RESERVATION* psReservation) +{ + PVRSRV_ERROR eError; + PMR* psPMR; + IMG_DEV_VIRTADDR sDummy; + /* Skip error check. If this function is called it means we already + Acquired a reservation and confirmed that mapping exists. */ + eError = DevmemIntGetReservationData(psReservation, &psPMR, &sDummy); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, + "Error when trying to obtain reservation data in %s", __func__); + + eError = PMRUnrefPMR(psPMR); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, + "Error on PMR unref in %s", __func__); + + DevmemIntReservationRelease(psReservation); +} + /****************************************************************************** End of file (rgxutils.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.h index 670986323d2b..cf5046433826 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxutils.h @@ -43,9 +43,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "device.h" #include "rgxdevice.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" #include "pvrsrv.h" +#include "pvrsrv_error.h" +#include "rgxta3d.h" /*! ****************************************************************************** @@ -179,7 +181,131 @@ PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, ******************************************************************************/ const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM); +/*************************************************************************/ /*! + +@Function RGXPhysHeapGetLMAPolicy + +@Description Returns the optimal LMA allocation policy based on a heap's + usage flags + +@Input ui32UsageFlags Flags specifying a heap's intended use +@Input psDeviceNode The device node. + +@Return PHYS_HEAP_POLICY The recommended LMA policy + +*/ /**************************************************************************/ +PHYS_HEAP_POLICY RGXPhysHeapGetLMAPolicy(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags, PVRSRV_DEVICE_NODE *psDeviceNode); + #define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) bitmask & eKickTypeDM ? RGXStringifyKickTypeDM(eKickTypeDM) : "" + +/*************************************************************************/ /*! +@Function RGXIsErrorAndDeviceRecoverable +@Description This function is used to check if device (and firmware) is in + a state that can be recovered from without a full reset of the + device. +@Input psDeviceNode The device node. +@Input peError Pointer to error. Can be changed to retry type. 
+@Return IMG_BOOL Return true if device is recoverable. +*/ /**************************************************************************/ +IMG_BOOL RGXIsErrorAndDeviceRecoverable(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_ERROR *peError); + +/* + * To avoid repeated calls and avoid double frees, the error value is set to PVRSRV_OK + * if RGXIsErrorAndDeviceRecoverable is false. + */ +#define RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, eError, cleanupFunc) \ + do \ + { \ + if (RGXIsErrorAndDeviceRecoverable(psDeviceNode, &eError)) \ + { \ + return eError; \ + } \ + else if (eError != PVRSRV_OK) \ + { \ + PVR_LOG(("%s: Unexpected error from " #cleanupFunc "(%s)", \ + __func__, \ + PVRSRVGetErrorString(eError))); \ + /* Device is dead. \ + * Change error type to make callers destroy the resource handle. \ + * This is to prevent repeated calls to this function. \ + */ \ + eError = PVRSRV_OK; \ + } \ + } while (false) + +/*************************************************************************/ /*! +@Function RGXCalcMListSize +@Description Function that calculates the MList Size required for + given local and global PB sizes. +@Input psDeviceNode The device node. +@Input ui64MaxLocalPBSize Maximum local PB size in bytes +@Input ui64MaxGlobalPBSize Maximum global PB size in bytes + +@Return IMG_UINT32 Returns size of the mlist in bytes aligned to + RGX_BIF_PM_PHYSICAL_PAGE_SIZE. +*/ /**************************************************************************/ +IMG_UINT32 RGXCalcMListSize(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT64 ui64MaxLocalPBSize, + IMG_UINT64 ui64MaxGlobalPBSize); + +/*************************************************************************/ /*! +@Function ValidateCriticalPMR +@Description Validate if critical PMR has proper flags and size. +@Input psPMR Pointer to PMR to validate. +@Input ui64MinSize Minimum size that PMR needs to have + +@Return PVRSRV_ERROR PVRSRV_OK if validation successful. + Appropriate error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR ValidateCriticalPMR(PMR* psPMR, IMG_DEVMEM_SIZE_T ui64MinSize); + +/*************************************************************************/ /*! +@Function ValidateFreeListSizes +@Description Helper function for RGXCreateHWRTDataSet + For the freelist array passed to RGXCreateHWRTDataSet, validate + if all global freelists have the same size and if all local + freelists have the same size. Return the sizes in output params. + +@Output pui32LocalFLMaxPages Max number of pages for local freelist +@Output pui32GlobalFLMaxPages Max number of pages for global freelist + +@Return PVRSRV_ERROR PVRSRV_OK if validation successful. + Appropriate error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR ValidateFreeListSizes(RGX_FREELIST* apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_UINT32* pui32LocalFLMaxPages, + IMG_UINT32* pui32GlobalFLMaxPages); + +/*************************************************************************/ /*! +@Function AcquireValidateRefCriticalBuffer +@Description Helper function for RGXCreateHWRTDataSet + Acquire the reservation validate if the underlying PMR is + appropriate for use as critical buffer and ref it. +@Input psDevNode The device node. +@Input psReservation The reservation describing the critical buffer +@Input ui64MinSize Minimum size that buffer needs to have +@Output ppsPMR Pointer to be written with the PMR on success. 
+@Output psDevVAddr The device vaddress to be written on success. + +@Return PVRSRV_ERROR PVRSRV_OK if validation successful. + Appropriate error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR AcquireValidateRefCriticalBuffer(PVRSRV_DEVICE_NODE* psDevNode, + DEVMEMINT_RESERVATION* psReservation, + IMG_DEVMEM_SIZE_T ui64MinSize, + PMR** ppsPMR, + IMG_DEV_VIRTADDR* psDevVAddr); + + +/*************************************************************************/ /*! +@Function UnrefAndReleaseCriticalBuffer +@Description Helper function for RGXCreateHWRTDataSet + Unref the critical buffer and release the reservation object. +@Input psReservation The reservation describing the critical buffer + +*/ /**************************************************************************/ +void UnrefAndReleaseCriticalBuffer(DEVMEMINT_RESERVATION* psReservation); + /****************************************************************************** End of file (rgxutils.h) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxworkest.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxworkest.c index 666f490f1ab9..1143910295c5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxworkest.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rgxworkest.c @@ -50,6 +50,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "device.h" #include "hash.h" #include "pvr_debug.h" +#if defined(SUPPORT_SOC_TIMER) +#include "rgxtimecorr.h" +#endif #define ROUND_DOWN_TO_NEAREST_1024(number) (((number) >> 10) << 10) @@ -243,20 +246,24 @@ void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) RGXFWIF_WORKEST_FWCCB_CMD *psFwCCBCmd; IMG_UINT8 *psFWCCB = psDevInfo->psWorkEstFirmwareCCB; RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; + RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psWorkEstFirmwareCCBCtlLocal; - while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + RGXFwSharedMemCacheOpPtr(psFWCCBCtl, INVALIDATE); + while (psFWCCBCtlLocal->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) { PVRSRV_ERROR eError; /* Point to the next command */ - psFwCCBCmd = (RGXFWIF_WORKEST_FWCCB_CMD *)((uintptr_t)psFWCCB + psFWCCBCtl->ui32ReadOffset * sizeof(RGXFWIF_WORKEST_FWCCB_CMD)); + psFwCCBCmd = (RGXFWIF_WORKEST_FWCCB_CMD *)((uintptr_t)psFWCCB + psFWCCBCtlLocal->ui32ReadOffset * sizeof(RGXFWIF_WORKEST_FWCCB_CMD)); eError = WorkEstRetire(psDevInfo, psFwCCBCmd); PVR_LOG_IF_ERROR(eError, "WorkEstCheckFirmwareCCB: WorkEstRetire failed"); /* Update read offset */ - psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + psFWCCBCtlLocal->ui32ReadOffset = (psFWCCBCtlLocal->ui32ReadOffset + 1) & psFWCCBCtlLocal->ui32WrapMask; + psFWCCBCtl->ui32ReadOffset = psFWCCBCtlLocal->ui32ReadOffset; } + RGXFwSharedMemCacheOpValue(psFWCCBCtl->ui32ReadOffset, FLUSH); } PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo, @@ -303,8 +310,15 @@ PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo, #if defined(SUPPORT_SOC_TIMER) psDevConfig = psDevInfo->psDeviceNode->psDevConfig; - PVR_LOG_RETURN_IF_FALSE(psDevConfig->pfnSoCTimerRead, "SoC timer not available", eError); - ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData); + if (psDevConfig->pfnSoCTimerRead) + { + ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData); + } 
+ else + { + /* Fallback to OS clock */ + ui64CurrentSoCTime = 0; + } #endif eError = OSClockMonotonicus64(&ui64CurrentTime); @@ -322,8 +336,22 @@ PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO *psDevInfo, { /* Rounding is done to reduce multiple deadlines with minor spread flooding the fw workload array. */ #if defined(SUPPORT_SOC_TIMER) - IMG_UINT64 ui64TimeDelta = (ui64DeadlineInus - ui64CurrentTime) * SOC_TIMER_FREQ; - psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64CurrentSoCTime + ui64TimeDelta); + if (psDevConfig->pfnSoCTimerRead) + { + RGX_DATA *psRGXData = (RGX_DATA*)psDevConfig->hDevData; + RGX_TIMING_INFORMATION *psRGXTimingInfo = psRGXData->psRGXTimingInfo; + IMG_UINT32 ui32Remainder; + IMG_UINT64 ui64TimeDelta = + OSDivide64r64((ui64DeadlineInus - ui64CurrentTime) * psRGXTimingInfo->ui32SOCClockSpeed, SECONDS_TO_MICROSECONDS, &ui32Remainder); + + psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64CurrentSoCTime + ui64TimeDelta); + PVR_DPF((PVR_DBG_MESSAGE, "Current SOC time: %llu. New deadline: %llu SOC ticks", + ui64CurrentSoCTime, psWorkEstKickData->ui64Deadline)); + } + else + { + psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64DeadlineInus); + } #else psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64DeadlineInus); #endif @@ -418,6 +446,7 @@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_WORKLOAD *pasWorkloadHashKeys; IMG_UINT32 ui32HashArrayWO; IMG_UINT64 *pui64CyclesTaken; + IMG_UINT64 ui64ActualCyclesTaken; WORKEST_RETURN_DATA *psReturnData; WORKEST_HOST_DATA *psWorkEstHostData; @@ -431,6 +460,8 @@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, "WorkEstRetire: Missing return command", PVRSRV_ERROR_INVALID_PARAMS); + RGXFwSharedMemCacheOpPtr(psReturnCmd, INVALIDATE); + if (psReturnCmd->ui16ReturnDataIndex >= RETURN_DATA_ARRAY_SIZE) { PVR_DPF((PVR_DBG_ERROR, "WorkEstRetire: Handle reference out-of-bounds:" @@ -450,6 +481,11 @@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, "WorkEstRetire: Missing host data", unlock_workest); + /* Skip if cycle data unavailable */ + PVR_LOG_GOTO_IF_FALSE(psReturnCmd->ui32CyclesTaken, + "WorkEstRetire: Cycle data not available", + unlock_workest); + /* Retrieve/validate completed workload matching data */ psWorkloadMatchingData = psReturnData->psWorkloadMatchingData; PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData, @@ -482,11 +518,14 @@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, (void) HASH_Remove(psWorkloadMatchingData->psHashTable, (uintptr_t)psWorkloadHashKey); } + ui64ActualCyclesTaken = (IMG_UINT64)psReturnCmd->ui16CyclesTakenHigh << 32U; + ui64ActualCyclesTaken += psReturnCmd->ui32CyclesTaken; + if (pui64CyclesTaken == NULL) { /* There is no existing entry for this workload characteristics, * store it */ - paui64WorkloadHashData[ui32HashArrayWO] = psReturnCmd->ui32CyclesTaken; + paui64WorkloadHashData[ui32HashArrayWO] = ui64ActualCyclesTaken; pasWorkloadHashKeys[ui32HashArrayWO] = *psWorkloadCharacteristics; } else @@ -494,7 +533,7 @@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, /* Found prior entry for workload characteristics, average with * completed; also reset the old value to 0 so it is known to be * invalid */ - paui64WorkloadHashData[ui32HashArrayWO] = (*pui64CyclesTaken + psReturnCmd->ui32CyclesTaken)/2; + paui64WorkloadHashData[ui32HashArrayWO] = (*pui64CyclesTaken + ui64ActualCyclesTaken)/2; pasWorkloadHashKeys[ui32HashArrayWO] = *psWorkloadCharacteristics; *pui64CyclesTaken = 0; } @@ -509,6 +548,13 
@@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_LOG(("WorkEstRetire: HASH_Insert failed")); } +#if defined(DEBUG) + /* Zero the current entry in the return data table. + * Helps detect invalid ReturnDataIndex values from the + * firmware before the hash table is corrupted. */ + memset(psReturnData, 0, sizeof(WORKEST_RETURN_DATA)); +#endif + psWorkloadMatchingData->ui32HashArrayWO = (ui32HashArrayWO + 1) & WORKLOAD_HASH_WRAP_MASK; OSLockRelease(psWorkloadMatchingData->psHashLock); @@ -522,7 +568,12 @@ PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo, unlock_workest: OSLockRelease(psDevInfo->hWorkEstLock); - psWorkEstHostData->ui32WorkEstCCBReceived++; + + PVR_ASSERT(psWorkEstHostData); + if (psWorkEstHostData) + { + psWorkEstHostData->ui32WorkEstCCBReceived++; + } return PVRSRV_ERROR_INVALID_PARAMS; } @@ -575,8 +626,6 @@ void _WorkEstDeInit(PVRSRV_RGXDEV_INFO *psDevInfo, /* Remove the hash lock */ WorkEstHashLockDestroy(psWorkloadMatchingData->psHashLock); - - return; } void WorkEstInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.c index 176dcc062248..93738fdf6f9a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.c @@ -50,14 +50,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "allocmem.h" #include "cache_km.h" #include "osfunc.h" +#include "os_apphint.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvrversion.h" #include "pvr_debug.h" #include "srvkm.h" #include "rgxutils.h" #include "tlstream.h" -#include "rgxfwutils.h" +#include "rgxfwriscv.h" #include "pvrsrv.h" #include "services_km.h" @@ -66,36 +67,24 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "devicemem_utils.h" #include "rgx_fwif_km.h" #include "rgx_fwif_sf.h" -#include "rgxfw_log_helper.h" -#include "fwtrace_string.h" -#include "rgxfwimageutils.h" -#include "fwload.h" +#include "debug_common.h" #include "rgxta3d.h" +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) #include "rgxkicksync.h" +#endif #include "rgxcompute.h" #include "rgxtransfer.h" #include "rgxtdmtransfer.h" #include "rgxtimecorr.h" #include "rgx_options.h" #include "rgxinit.h" +#include "rgxlayer_impl.h" #include "devicemem_history_server.h" -#include "info_page.h" -#include "rgx_bvnc_defs_km.h" - -#define PVR_DUMP_FIRMWARE_INFO(x) \ - PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ - PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ - PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ - (x).ui32DDKBuild, \ - ((x).ui32BuildOptions & OPTIONS_DEBUG_EN) ? 
"debug":"release", \ - (x).ui32BuildOptions); #define DD_SUMMARY_INDENT "" -#define DD_NORMAL_INDENT " " #define RGX_DEBUG_STR_SIZE (150U) -#define MAX_FW_DESCRIPTION_LENGTH (500U) #define RGX_CR_BIF_CAT_BASE0 (0x1200U) #define RGX_CR_BIF_CAT_BASE1 (0x1208U) @@ -163,60 +152,6 @@ static const IMG_CHAR *const pszBIFNames[] = #undef X }; -typedef struct _IMG_FLAGS2DESC_ -{ - IMG_UINT32 uiFlag; - const IMG_CHAR *pszLabel; -} IMG_FLAGS2DESC; - -static const IMG_FLAGS2DESC asCswOpts2Description[] = -{ - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, - {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, -}; - -static const IMG_FLAGS2DESC asMisc2Description[] = -{ - {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, - {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, - {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"}, - {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, - {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, - {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, - {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, - {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, - {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, - {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, - {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, - {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, - {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, - {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, - {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, - {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, - {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, - {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"}, - {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, - {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, - {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, - {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"}, -}; - -static const IMG_FLAGS2DESC asFwOsCfg2Description[] = -{ - {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " TA;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio TA;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, -}; - static const IMG_FLAGS2DESC asHwrState2Description[] = { {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, @@ -224,147 +159,30 @@ static const IMG_FLAGS2DESC asHwrState2Description[] = {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, {RGXFWIF_HWR_FW_FAULT, " FW fault;"}, - {RGXFWIF_HWR_RESTART_REQUESTED, " Restarting;"}, + {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"}, }; static const IMG_FLAGS2DESC asDmState2Description[] = { - {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, - {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, - {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, - {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, - {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, - {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, 
" innocent lockup;"}, - {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, - {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, - {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"}, - {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, -}; - -const IMG_CHAR * const gapszMipsPermissionPTFlags[4] = -{ - " ", - "XI ", - "RI ", - "RIXI" -}; - -const IMG_CHAR * const gapszMipsCoherencyPTFlags[8] = -{ - "C", - "C", - " ", - "C", - "C", - "C", - "C", - " " -}; - -const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8] = -{ - " ", - " G", - " V ", - " VG", - "D ", - "D G", - "DV ", - "DVG" -}; - -#if !defined(NO_HARDWARE) -/* Translation of MIPS exception encoding */ -typedef struct _MIPS_EXCEPTION_ENCODING_ -{ - const IMG_CHAR *const pszStr; /* Error type */ - const IMG_BOOL bIsFatal; /* Error is fatal or non-fatal */ -} MIPS_EXCEPTION_ENCODING; - -static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] = -{ - {"Interrupt", IMG_FALSE}, - {"TLB modified exception", IMG_FALSE}, - {"TLB exception (load/instruction fetch)", IMG_FALSE}, - {"TLB exception (store)", IMG_FALSE}, - {"Address error exception (load/instruction fetch)", IMG_TRUE}, - {"Address error exception (store)", IMG_TRUE}, - {"Bus error exception (instruction fetch)", IMG_TRUE}, - {"Bus error exception (load/store)", IMG_TRUE}, - {"Syscall exception", IMG_FALSE}, - {"Breakpoint exception (FW assert)", IMG_FALSE}, - {"Reserved instruction exception", IMG_TRUE}, - {"Coprocessor Unusable exception", IMG_FALSE}, - {"Arithmetic Overflow exception", IMG_FALSE}, - {"Trap exception", IMG_FALSE}, - {NULL, IMG_FALSE}, - {NULL, IMG_FALSE}, - {"Implementation-Specific Exception 1 (COP2)", IMG_FALSE}, - {"CorExtend Unusable", IMG_FALSE}, - {"Coprocessor 2 exceptions", IMG_FALSE}, - {"TLB Read-Inhibit", IMG_TRUE}, - {"TLB Execute-Inhibit", IMG_TRUE}, - {NULL, IMG_FALSE}, - {NULL, IMG_FALSE}, - {"Reference to WatchHi/WatchLo address", IMG_FALSE}, - {"Machine check", IMG_FALSE}, - {NULL, IMG_FALSE}, - {"DSP Module State Disabled exception", IMG_FALSE}, - {NULL, IMG_FALSE}, - {NULL, IMG_FALSE}, - {NULL, IMG_FALSE}, - /* Can only happen in MIPS debug mode */ - {"Parity error", IMG_FALSE}, - {NULL, IMG_FALSE} -}; - -static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode) -{ - if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) - { - PVR_DPF((PVR_DBG_WARNING, - "Only %lu exceptions available in MIPS, %u is not a valid exception code", - (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); - return NULL; - } - - return apsMIPSExcCodes[ui32ExcCode].pszStr; -} -#endif - -typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_ -{ - IMG_UINT32 ui32Mask; - const IMG_CHAR * pszExplanation; -} RGXMIPSFW_C0_DEBUG_TBL_ENTRY; - -#if !defined(NO_HARDWARE) -static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] = -{ - { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" }, - { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" }, - { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" }, - { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" }, - { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" }, - { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" }, - { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" }, - { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" }, - { 
RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" }, - { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception inhibit controls exception occurred" }, - { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" }, - { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" }, - { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" }, - { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction fetch Bus Error exception pending" }, - { (IMG_UINT32)RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" } + {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, + {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, + {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, + {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, + {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, + {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, + {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, + {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, + {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"}, + {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, }; -#endif static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = { "offline", "ready", "active", - "offloading" + "offloading", + "cooldown" }; #if defined(PVR_ENABLE_PHR) @@ -372,383 +190,9 @@ static const IMG_FLAGS2DESC asPHRConfig2Description[] = { {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"}, {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"}, - {BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset "}, }; #endif -static PVRSRV_ERROR -RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, - IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) -{ - IMG_UINT32 ui32RegValue, ui32NumPolls = 0; - PVRSRV_ERROR eError; - - do - { - eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue); - if (eError != PVRSRV_OK) - { - return eError; - } - } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); - - return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; -} - -static PVRSRV_ERROR -RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) -{ - PVRSRV_ERROR eError; - - /* Core Read Ready? */ - eError = RGXPollMetaRegThroughSP(psDevInfo, - META_CR_TXUXXRXRQ_OFFSET, - META_CR_TXUXXRXRQ_DREADY_BIT, - META_CR_TXUXXRXRQ_DREADY_BIT); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); - - /* Set the reg we are interested in reading */ - eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, - ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr"); - - /* Core Read Done? 
*/ - eError = RGXPollMetaRegThroughSP(psDevInfo, - META_CR_TXUXXRXRQ_OFFSET, - META_CR_TXUXXRXRQ_DREADY_BIT, - META_CR_TXUXXRXRQ_DREADY_BIT); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); - - /* Read the value */ - return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); -} - -#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) -static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_DEV_VIRTADDR *psFWAddr, - void *pvHostCodeAddr, - IMG_UINT32 ui32MaxLen, - const IMG_CHAR *pszDesc, - IMG_UINT32 ui32StartOffset) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32Value = 0; - IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; - IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset); - IMG_UINT32 i; - -#if defined(EMULATOR) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - return PVRSRV_OK; - } -#endif - - ui32MaxLen -= ui32StartOffset; - ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ - - for (i = 0; i < ui32MaxLen; i++) - { - eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); - return eError; - } - -#if defined(EMULATOR) - if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -#endif - { - PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); - - if (pui32FWCode[i] != ui32Value) - { - PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", - __func__, pszDesc, - (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); - return PVRSRV_ERROR_FW_IMAGE_MISMATCH; - } - } - - ui32FWCodeDevVAAddr += 4; - } - - PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc); - return PVRSRV_OK; -} -#endif - -static PVRSRV_ERROR _ValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) - PVRSRV_ERROR eError; - IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; - OS_FW_IMAGE *psRGXFW = NULL; - const IMG_BYTE *pbRGXFirmware = NULL; - IMG_UINT32 *pui32CodeMemoryPointer; - RGXFWIF_DEV_VIRTADDR sFWAddr; - IMG_UINT32 ui32StartOffset = 0; - RGX_LAYER_PARAMS sLayerParams; - sLayerParams.psDevInfo = psDevInfo; - -#if defined(EMULATOR) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator"); - return PVRSRV_OK; - } -#endif - - if (psDevInfo->pvRegsBaseKM == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); - return PVRSRV_ERROR_BAD_MAPPING; - } - - /* Load FW from system for code verification */ - pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); - if (pui32HostFWCode == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed in allocating memory for FW code. " - "So skipping FW code verification", - __func__)); - return PVRSRV_ERROR_OUT_OF_MEMORY; - } - - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); - if (pui32HostFWCoremem == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed in allocating memory for FW core code. 
" - "So skipping FW code verification", - __func__)); - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto freeHostFWCode; - } - } - - /* Load FW image */ - eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", - __func__, PVRSRVGetErrorString(eError))); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto cleanup_initfw; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware, - (void*) pui32HostFWCode, NULL, - (void*) pui32HostFWCoremem, NULL, NULL); - } - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, - pui32HostFWCode, NULL, - NULL, NULL); - } - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, - pui32HostFWCode, NULL, - pui32HostFWCoremem, NULL); - } - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); - goto cleanup_initfw; - } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Error in acquiring MIPS FW code memory area (%s)", - __func__, - PVRSRVGetErrorString(eError))); - goto cleanup_initfw; - } - - if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes) == 0) - { - PVR_DUMPDEBUG_LOG("Match between Host and MIPS views of the FW code" ); - } - else - { - IMG_UINT32 ui32Count = 10; /* Show only the first 10 mismatches */ - IMG_UINT32 ui32Offset; - - PVR_DUMPDEBUG_LOG("Mismatch between Host and MIPS views of the FW code"); - for (ui32Offset = 0; (ui32Offset*4 < psDevInfo->ui32FWCodeSizeInBytes) || (ui32Count == 0); ui32Offset++) - { - if (pui32HostFWCode[ui32Offset] != pui32CodeMemoryPointer[ui32Offset]) - { - PVR_DUMPDEBUG_LOG("At %d bytes, code should be 0x%x but it is instead 0x%x", - ui32Offset*4, pui32HostFWCode[ui32Offset], pui32CodeMemoryPointer[ui32Offset]); - ui32Count--; - } - } - } - - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); - } - else - { - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - /* starting checking after BOOT LOADER config */ - sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; - - ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET; - } - else - { - /* Use bootloader code remap which is always configured before the FW is started */ - sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; - } - - eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, - psDevInfo, &sFWAddr, - pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, - "FW code", ui32StartOffset); - if (eError != PVRSRV_OK) - { - goto cleanup_initfw; - } - - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); - } - else - { - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); - - /* Core must be halted while issuing abstract commands */ - eError = RGXRiscvHalt(psDevInfo); - PVR_GOTO_IF_ERROR(eError, cleanup_initfw); - } - - eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, - psDevInfo, &sFWAddr, - pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, - "FW coremem code", 0); - - if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eError = RGXRiscvResume(psDevInfo); - PVR_GOTO_IF_ERROR(eError, cleanup_initfw); - } - } - } - -cleanup_initfw: - if (psRGXFW) - { - OSUnloadFirmware(psRGXFW); - } - - if (pui32HostFWCoremem) - { - OSFreeMem(pui32HostFWCoremem); - } -freeHostFWCode: - if (pui32HostFWCode) - { - OSFreeMem(pui32HostFWCode); - } - return eError; -#else - PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); - PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); - PVR_UNREFERENCED_PARAMETER(psDevInfo); - return PVRSRV_OK; -#endif -} - -#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) - IMG_PBYTE pbCodeMemoryPointer; - PVRSRV_ERROR eError; - RGXFWIF_DEV_VIRTADDR sFWAddr; - - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); - if (eError != PVRSRV_OK) - { - return eError; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; - } - else - { - PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); - sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; - }; - - eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); - if (eError != PVRSRV_OK) - { - goto releaseFWCodeMapping; - } - - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer); - if (eError != PVRSRV_OK) - { - goto releaseFWCoreCodeMapping; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); - } - else - { - PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); - } - - eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, - psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); - } - -releaseFWCoreCodeMapping: - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); - } -releaseFWCodeMapping: - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); - - return eError; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - return PVRSRV_OK; -#endif -} -#endif - /*! ******************************************************************************* @@ -1569,7 +1013,6 @@ static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, } - /*! ******************************************************************************* @@ -1581,7 +1024,7 @@ static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, @Input ui32MMULevel - MMU level - @Return IMG_CHAR* to the sting describing the MMU level that faulted. + @Return IMG_CHAR* to the string describing the MMU level that faulted. ******************************************************************************/ static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) @@ -1603,4223 +1046,1533 @@ static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) /*! ******************************************************************************* - @Function _RGXDecodeMMUReqTags + @Function _RGXDumpRGXBIFBank @Description - Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and - RGX_CR_MMU_FAULT_STATUS regs. 
- - @Input ui32TagID - Tag ID value - @Input ui32TagSB - Tag Sideband data - @Input bRead - Read flag - @Output ppszTagID - Decoded string from the Tag ID - @Output ppszTagSB - Decoded string from the Tag SB - @Output pszScratchBuf - Buffer provided to the function to generate the debug strings - @Input ui32ScratchBufSize - Size of the provided buffer + Dump BIF Bank state in human readable form. + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input eBankID - BIF identifier + @Input ui64MMUStatus - MMU Status register value + @Input ui64ReqStatus - BIF request Status register value @Return void ******************************************************************************/ -static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32TagID, - IMG_UINT32 ui32TagSB, - IMG_BOOL bRead, - IMG_CHAR **ppszTagID, - IMG_CHAR **ppszTagSB, - IMG_CHAR *pszScratchBuf, - IMG_UINT32 ui32ScratchBufSize) +static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXDBG_BIF_ID eBankID, + IMG_UINT64 ui64MMUStatus, + IMG_UINT64 ui64ReqStatus, + const IMG_CHAR *pszIndent) { - IMG_INT32 i32SideBandType = -1; - IMG_CHAR *pszTagID = "-"; - IMG_CHAR *pszTagSB = "-"; - - PVR_ASSERT(ppszTagID != NULL); - PVR_ASSERT(ppszTagSB != NULL); - - - switch (ui32TagID) + if (ui64MMUStatus == 0x0) { - case 0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break; - case 1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break; - case 2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break; - case 3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break; - case 4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break; - case 5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break; - case 6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break; - case 7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break; - case 8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break; - case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break; - case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break; - case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break; - case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break; - case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break; - case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break; - case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break; - case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break; - case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break; - case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break; - case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break; - case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break; - case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break; - case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break; - case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break; - case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break; - case 44: pszTagID = "VDMS (Black Pearl 2)"; 
i32SideBandType = RGXDBG_VDMS; break; - case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break; - case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break; - case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break; - case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break; - case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break; - case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break; - case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break; - case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break; - case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break; - case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break; - case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break; - case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break; - case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break; - case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break; + PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]); } - if (('-' == pszTagID[0]) && '\n' == pszTagID[1]) + else { + IMG_UINT32 ui32PageSize; + IMG_UINT32 ui32PC = + (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; - if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) || - (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3)) - { - switch (ui32TagID) - { - case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break; - case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break; - case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break; - case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break; - case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break; - case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break; - case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break; - case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break; - } - - if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539)) - { - switch (ui32TagID) - { - case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; - case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; - case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; - case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; - case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; - case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; - case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; - case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; - case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; - case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; - case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; - case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; - case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; - case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; - 
case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; - case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; - case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; - case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; - case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; - case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; - case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; - case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; - case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; - case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; - } - }else - { - switch (ui32TagID) - { - case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; - case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; - case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; - case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; - case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; - case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; - case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; - case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; - case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; - case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; - case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; - case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; - case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; - case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; - case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; - case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; - case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; - case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; - case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; - case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; - case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; - case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; - case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; - case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; - } - } - }else - { - switch (ui32TagID) - { - case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; - case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; - case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; - case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break; - case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break; - case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; - case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; - case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; - case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; - case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; - case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = 
RGXDBG_PBE; break; - case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break; - case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break; - case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; - case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; - case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; - case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; - case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; - case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; - case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break; - case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break; - case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; - case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; - case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; - case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; - case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; - case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; - case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break; - case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break; - case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; - case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; - case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; - } - } - - } - - switch (i32SideBandType) - { - case RGXDBG_META: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "DCache - Thread 0"; break; - case 0x1: pszTagSB = "ICache - Thread 0"; break; - case 0x2: pszTagSB = "JTag - Thread 0"; break; - case 0x3: pszTagSB = "Slave bus - Thread 0"; break; - case 0x4: pszTagSB = "DCache - Thread 1"; break; - case 0x5: pszTagSB = "ICache - Thread 1"; break; - case 0x6: pszTagSB = "JTag - Thread 1"; break; - case 0x7: pszTagSB = "Slave bus - Thread 1"; break; - } - break; - } - - case RGXDBG_TLA: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "Pixel data"; break; - case 0x1: pszTagSB = "Command stream data"; break; - case 0x2: pszTagSB = "Fence or flush"; break; - } - break; - } - - case RGXDBG_VDMM: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "Control Stream - Read Only"; break; - case 0x1: pszTagSB = "PPP State - Read Only"; break; - case 0x2: pszTagSB = "Indices - Read Only"; break; - case 0x4: pszTagSB = "Call Stack - Read/Write"; break; - case 0x6: pszTagSB = "DrawIndirect - Read Only"; break; - case 0xA: pszTagSB = "Context State - Write Only"; break; - } - break; - } - - case RGXDBG_CDM: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "Control Stream"; break; - case 0x1: pszTagSB = "Indirect Data"; break; - case 0x2: pszTagSB = "Event Write"; break; - case 0x3: pszTagSB = "Context State"; break; - } - break; - } - - case RGXDBG_IPP: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "Macrotile Header"; break; - case 0x1: pszTagSB = "Region Header"; break; - } - break; - } - - case RGXDBG_PM: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "PMA_TAFSTACK"; break; - case 0x1: pszTagSB = "PMA_TAMLIST"; break; - case 0x2: pszTagSB = "PMA_3DFSTACK"; break; - case 0x3: pszTagSB = "PMA_3DMLIST"; break; - case 0x4: pszTagSB = "PMA_PMCTX0"; break; - case 0x5: pszTagSB = "PMA_PMCTX1"; 
break; - case 0x6: pszTagSB = "PMA_MAVP"; break; - case 0x7: pszTagSB = "PMA_UFSTACK"; break; - case 0x8: pszTagSB = "PMD_TAFSTACK"; break; - case 0x9: pszTagSB = "PMD_TAMLIST"; break; - case 0xA: pszTagSB = "PMD_3DFSTACK"; break; - case 0xB: pszTagSB = "PMD_3DMLIST"; break; - case 0xC: pszTagSB = "PMD_PMCTX0"; break; - case 0xD: pszTagSB = "PMD_PMCTX1"; break; - case 0xF: pszTagSB = "PMD_UFSTACK"; break; - case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; - case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; - case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; - case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; - case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; - case 0x15: pszTagSB = "PMA_3DUFSTACK"; break; - case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; - case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; - case 0x18: pszTagSB = "PMA_TAVFP"; break; - case 0x19: pszTagSB = "PMD_3DVFP"; break; - case 0x1A: pszTagSB = "PMD_TAVFP"; break; - } - break; - } - - case RGXDBG_TILING: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "PSG Control Stream TP0"; break; - case 0x1: pszTagSB = "TPC TP0"; break; - case 0x2: pszTagSB = "VCE0"; break; - case 0x3: pszTagSB = "VCE1"; break; - case 0x4: pszTagSB = "PSG Control Stream TP1"; break; - case 0x5: pszTagSB = "TPC TP1"; break; - case 0x8: pszTagSB = "PSG Region Header TP0"; break; - case 0xC: pszTagSB = "PSG Region Header TP1"; break; - } - break; - } + /* Bank 0 & 1 share the same fields */ + PVR_DUMPDEBUG_LOG("%s%s - FAULT:", + pszIndent, + pszBIFNames[eBankID]); - case RGXDBG_VDMS: + /* MMU Status */ { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "Context State - Write Only"; break; - } - break; - } + IMG_UINT32 ui32MMUDataType = + (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT; - case RGXDBG_IPF: - { - switch (ui32TagSB) - { - case 0x00: - case 0x20: pszTagSB = "CPF"; break; - case 0x01: pszTagSB = "DBSC"; break; - case 0x02: - case 0x04: - case 0x06: - case 0x08: - case 0x0A: - case 0x0C: - case 0x0E: - case 0x10: pszTagSB = "Control Stream"; break; - case 0x03: - case 0x05: - case 0x07: - case 0x09: - case 0x0B: - case 0x0D: - case 0x0F: - case 0x11: pszTagSB = "Primitive Block"; break; - } - break; - } + IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0; + IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0; - case RGXDBG_ISP: - { - switch (ui32TagSB) - { - case 0x00: pszTagSB = "ZLS read/write"; break; - case 0x20: pszTagSB = "Occlusion query read/write"; break; - } - break; - } + ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; - case RGXDBG_TPF: - { - switch (ui32TagSB) - { - case 0x0: pszTagSB = "TPF0: Primitive Block"; break; - case 0x1: pszTagSB = "TPF0: Depth Bias"; break; - case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; - case 0x3: pszTagSB = "CPF - Tables"; break; - case 0x4: pszTagSB = "TPF1: Primitive Block"; break; - case 0x5: pszTagSB = "TPF1: Depth Bias"; break; - case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; - case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; - case 0x8: pszTagSB = "TPF2: Primitive Block"; break; - case 0x9: pszTagSB = "TPF2: Depth Bias"; break; - case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; - case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; - case 0xC: pszTagSB = "TPF3: Primitive Block"; break; - case 0xD: pszTagSB = "TPF3: Depth Bias"; break; - 
case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; - case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; - } - break; + PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d%s%s%s.", + pszIndent, + ui64MMUStatus, + ui32PC, + (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC), + ui32PageSize, + (bROFault)?", Read Only fault":"", + (bProtFault)?", PM/META protection fault":"", + _RGXDecodeMMULevel(ui32MMUDataType)); } - case RGXDBG_FBCDC: + /* Req Status */ { - /* - * FBC faults on a 4-cluster phantom does not always set SB - * bit 5, but since FBC is write-only and FBDC is read-only, - * we can set bit 5 if this is a write fault, before decoding. - */ - if (bRead == IMG_FALSE) - { - ui32TagSB |= 0x20; - } + IMG_CHAR *pszTagID; + IMG_CHAR *pszTagSB; + IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; + IMG_BOOL bRead; + IMG_UINT32 ui32TagSB, ui32TagID; + IMG_UINT64 ui64Addr; - switch (ui32TagSB) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) { - case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break; - case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break; - case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break; - case 0x20: pszTagSB = "FBC Request, originator ZLS"; break; - case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break; - case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break; - case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break; - case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break; - case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break; - case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break; - case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break; - case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break; - case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break; + bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0; + ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT; + ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT; } - break; - } - - case RGXDBG_MCU: - { - IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7; - IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7; - IMG_UINT32 ui32Group = ui32TagSB & 0x3; - - IMG_CHAR* pszGroup = ""; - - switch (ui32Group) + else { - case 0x0: pszGroup = "Banks 0-1"; break; - case 0x1: pszGroup = "Banks 2-3"; break; - case 0x2: pszGroup = "Banks 4-5"; break; - case 0x3: pszGroup = "Banks 6-7"; break; + bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0; + ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT; + ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT; } + ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) << + RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT; - OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, - "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup); - pszTagSB = pszScratchBuf; - break; - } + _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, 
ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE); - default: - { - OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB); - pszTagSB = pszScratchBuf; - break; + PVR_DUMPDEBUG_LOG("%s * Request (0x%016" IMG_UINT64_FMTSPECx + "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".", + pszIndent, + ui64ReqStatus, + pszTagID, + pszTagSB, + (bRead)?"Reading from":"Writing to", + ui64Addr); } } - - *ppszTagID = pszTagID; - *ppszTagSB = pszTagSB; -} - - -static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, - IMG_UINT64 *pui64Seconds, - IMG_UINT64 *pui64Nanoseconds) -{ - IMG_UINT32 ui32Remainder; - - *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); - *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); } +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT mismatch!"); +static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT), + "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK), 
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ -{ - DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, - DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, - DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, - DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, -} DEVICEMEM_HISTORY_QUERY_INDEX; - - -/*! -******************************************************************************* - - @Function _PrintDevicememHistoryQueryResult - - @Description - - Print details of a single result from a DevicememHistory query - - @Input pfnDumpDebugPrintf - Debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psFaultProcessInfo - The process info derived from the page fault - @Input psResult - The DevicememHistory result to be printed - @Input ui32Index - The index of the result - - @Return void - -******************************************************************************/ -static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - RGXMEM_PROCESS_INFO *psFaultProcessInfo, - DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, - IMG_UINT32 ui32Index, - const IMG_CHAR* pszIndent) -{ - IMG_UINT32 ui32Remainder; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; - - ConvertOSTimestampToSAndNS(psResult->ui64When, - &ui64Seconds, - &ui64Nanoseconds); - - if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) - { - PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC - " Size: " IMG_DEVMEM_SIZE_FMTSPEC - " Operation: %s Modified: %" IMG_UINT64_FMTSPEC - " us ago (OS time %" IMG_UINT64_FMTSPEC - ".%09" IMG_UINT64_FMTSPEC " s)", - pszIndent, - ui32Index, - psResult->szString, - psResult->sBaseDevVAddr.uiAddr, - psResult->uiSize, - psResult->bMap ? "Map": "Unmap", - OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), - ui64Seconds, - ui64Nanoseconds); - } - else - { - PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC - " Size: " IMG_DEVMEM_SIZE_FMTSPEC - " Operation: %s Modified: %" IMG_UINT64_FMTSPEC - " us ago (OS time %" IMG_UINT64_FMTSPEC - ".%09" IMG_UINT64_FMTSPEC - ") PID: %u (%s)", - pszIndent, - ui32Index, - psResult->szString, - psResult->sBaseDevVAddr.uiAddr, - psResult->uiSize, - psResult->bMap ? 
"Map": "Unmap", - OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), - ui64Seconds, - ui64Nanoseconds, - psResult->sProcessInfo.uiPID, - psResult->sProcessInfo.szProcessName); - } - - if (!psResult->bRange) - { - PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); - } - else - { - PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", - pszIndent, - psResult->ui32StartPage, - psResult->ui32StartPage + psResult->ui32PageCount - 1, - psResult->sMapStartAddr.uiAddr, - psResult->sMapEndAddr.uiAddr, - psResult->bAll ? "(whole allocation) " : "", - psResult->bMap ? "mapped": "unmapped"); - } -} - -/*! -******************************************************************************* - - @Function _PrintDevicememHistoryQueryOut - - @Description - - Print details of all the results from a DevicememHistory query - - @Input pfnDumpDebugPrintf - Debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psFaultProcessInfo - The process info derived from the page fault - @Input psQueryOut - Storage for the query results - - @Return void - -******************************************************************************/ -static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - RGXMEM_PROCESS_INFO *psFaultProcessInfo, - DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, - const IMG_CHAR* pszIndent) -{ - IMG_UINT32 i; - - if (psQueryOut->ui32NumResults == 0) - { - PVR_DUMPDEBUG_LOG("%s No results", pszIndent); - } - else - { - for (i = 0; i < psQueryOut->ui32NumResults; i++) - { - _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, - psFaultProcessInfo, - &psQueryOut->sResults[i], - i, - pszIndent); - } - } -} - -/* table of HW page size values and the equivalent */ -static const unsigned int aui32HWPageSizeTable[][2] = -{ - { 0, PVRSRV_4K_PAGE_SIZE }, - { 1, PVRSRV_16K_PAGE_SIZE }, - { 2, PVRSRV_64K_PAGE_SIZE }, - { 3, PVRSRV_256K_PAGE_SIZE }, - { 4, PVRSRV_1M_PAGE_SIZE }, - { 5, PVRSRV_2M_PAGE_SIZE } -}; - -/*! -******************************************************************************* - - @Function _PageSizeHWToBytes - - @Description - - Convert a HW page size value to its size in bytes - - @Input ui32PageSizeHW - The HW page size value - - @Return IMG_UINT32 The page size in bytes - -******************************************************************************/ -static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) -{ - if (ui32PageSizeHW > 5) - { - /* This is invalid, so return a default value as we cannot ASSERT in this code! */ - return PVRSRV_4K_PAGE_SIZE; - } - - return aui32HWPageSizeTable[ui32PageSizeHW][1]; -} - -/*! -******************************************************************************* - - @Function _GetDevicememHistoryData - - @Description - - Get the DevicememHistory results for the given PID and faulting device virtual address. - The function will query DevicememHistory for information about the faulting page, as well - as the page before and after. 
- - @Input psDeviceNode - The device which this allocation search should be made on - @Input uiPID - The process ID to search for allocations belonging to - @Input sFaultDevVAddr - The device address to search for allocations at/before/after - @Input asQueryOut - Storage for the query results - @Input ui32PageSizeBytes - Faulted page size in bytes - - @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault - -******************************************************************************/ -static IMG_BOOL _GetDevicememHistoryData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PID uiPID, - IMG_DEV_VIRTADDR sFaultDevVAddr, - DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], - IMG_UINT32 ui32PageSizeBytes) -{ - DEVICEMEM_HISTORY_QUERY_IN sQueryIn; - IMG_BOOL bAnyHits = IMG_FALSE; - - /* if the page fault originated in the firmware then the allocation may - * appear to belong to any PID, because FW allocations are attributed - * to the client process creating the allocation, so instruct the - * devicemem_history query to search all available PIDs - */ - if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) - { - sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; - } - else - { - sQueryIn.uiPID = uiPID; - } - - sQueryIn.psDevNode = psDeviceNode; - /* Query the DevicememHistory for all allocations in the previous page... */ - sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes; - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING], - ui32PageSizeBytes, IMG_TRUE)) - { - bAnyHits = IMG_TRUE; - } - - /* Query the DevicememHistory for any record at the exact address... */ - sQueryIn.sDevVAddr = sFaultDevVAddr; - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], - ui32PageSizeBytes, IMG_FALSE)) - { - bAnyHits = IMG_TRUE; - } - else - { - /* If not matched then try matching any record in the faulting page... */ - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], - ui32PageSizeBytes, IMG_TRUE)) - { - bAnyHits = IMG_TRUE; - } - } - - /* Query the DevicememHistory for all allocations in the next page... */ - sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT], - ui32PageSizeBytes, IMG_TRUE)) - { - bAnyHits = IMG_TRUE; - } - - return bAnyHits; -} - -/* stored data about one page fault */ -typedef struct _FAULT_INFO_ -{ - /* the process info of the memory context that page faulted */ - RGXMEM_PROCESS_INFO sProcessInfo; - IMG_DEV_VIRTADDR sFaultDevVAddr; - MMU_FAULT_DATA sMMUFaultData; - DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; - /* the CR timer value at the time of the fault, recorded by the FW. - * used to differentiate different page faults - */ - IMG_UINT64 ui64CRTimer; - /* time when this FAULT_INFO entry was added. used for timing - * reference against the map/unmap information - */ - IMG_UINT64 ui64When; - IMG_UINT32 ui32FaultInfoFlags; -} FAULT_INFO; - -/* history list of page faults. 
- * Keeps the first `n` page faults and the last `n` page faults, like the FW - * HWR log - */ -typedef struct _FAULT_INFO_LOG_ -{ - IMG_UINT32 ui32Head; - /* the number of faults in this log need not correspond exactly to - * the HWINFO number of the FW, as the FW HWINFO log may contain - * non-page fault HWRs - */ - FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; -} FAULT_INFO_LOG; - -#define FAULT_INFO_PROC_INFO (0x1U) -#define FAULT_INFO_DEVMEM_HIST (0x2U) - -static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; - -static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, - FAULT_INFO *psInfo, - RGXMEM_PROCESS_INFO *psProcInfo) -{ - IMG_UINT32 i, j; - - for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) - { - for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) - { - IMG_BOOL bFound; - - RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; - bFound = RGXPCPIDToProcessInfo(psDevInfo, - psProcInfo->uiPID, - psProcInfo); - if (!bFound) - { - OSStringLCopy(psProcInfo->szProcessName, - "(unknown)", - sizeof(psProcInfo->szProcessName)); - } - } - } -} - -/*! -******************************************************************************* - - @Function _PrintFaultInfo - - @Description - - Print all the details of a page fault from a FAULT_INFO structure - - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psInfo - The page fault occurrence to print - - @Return void - -******************************************************************************/ -static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - FAULT_INFO *psInfo, - const IMG_CHAR* pszIndent) -{ - IMG_UINT32 i; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; - - ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); - - if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) - { - IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
- 0 : psInfo->sProcessInfo.uiPID; - - PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC - ", PID: %u " - "(%s, unregistered: %u) OS time: " - "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, - pszIndent, - psInfo->sFaultDevVAddr.uiAddr, - uiPID, - psInfo->sProcessInfo.szProcessName, - psInfo->sProcessInfo.bUnregistered, - ui64Seconds, - ui64Nanoseconds); - } - else - { - PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); - } - - if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) - { - for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) - { - const IMG_CHAR *pszWhich = NULL; - - switch (i) - { - case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: - pszWhich = "Preceding page"; - break; - case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: - pszWhich = "Faulted page"; - break; - case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: - pszWhich = "Next page"; - break; - } - - PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); - _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, - &psInfo->sProcessInfo, - &psInfo->asQueryOut[i], - pszIndent); - } - } - else - { - PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); - } -} - -static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, - FAULT_INFO *psInfo, - IMG_DEV_VIRTADDR sFaultDevVAddr, - IMG_DEV_PHYADDR sPCDevPAddr, - IMG_UINT64 ui64CRTimer, - IMG_UINT32 ui32PageSizeBytes) -{ - IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; - RGXMEM_PROCESS_INFO sProcessInfo; - - psInfo->ui32FaultInfoFlags = 0; - psInfo->sFaultDevVAddr = sFaultDevVAddr; - psInfo->ui64CRTimer = ui64CRTimer; - psInfo->ui64When = OSClockns64(); - - if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) - { - /* Check if this is PM fault */ - if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) - { - bIsPMFault = IMG_TRUE; - bFound = IMG_TRUE; - sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; - OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); - sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; - sProcessInfo.bUnregistered = IMG_FALSE; - } - else - { - /* look up the process details for the faulting page catalogue */ - bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); - } - - if (bFound) - { - IMG_BOOL bHits; - - psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; - psInfo->sProcessInfo = sProcessInfo; - - if (bIsPMFault) - { - bHits = IMG_TRUE; - } - else - { - /* get any DevicememHistory data for the faulting address */ - bHits = _GetDevicememHistoryData(psDevInfo->psDeviceNode, - sProcessInfo.uiPID, - sFaultDevVAddr, - psInfo->asQueryOut, - ui32PageSizeBytes); - - if (bHits) - { - psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; - - /* if the page fault was caused by the firmware then get information about - * which client application created the related allocations. - * - * Fill in the process info data for each query result. - */ - - if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) - { - _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); - } - } - } - } - } -} - -/*! -******************************************************************************* - - @Function _DumpFaultAddressHostView - - @Description - - Dump FW HWR fault status in human readable form. 
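The host-view dump described above walks the MMU levels from the top downwards, printing the entry resolved at each level and stopping at the first level that could not be resolved. A simplified, self-contained sketch of that walk, with invented level names and entry values standing in for the driver's MMU_LEVEL_DATA:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct level_data {
    const char *name;        /* "PCE", "PDE" or "PTE" */
    uint32_t    index;       /* index the walk used at this level */
    uint32_t    num_entries; /* number of entries in the table at this level */
    uint64_t    entry;       /* entry value found, 0 if the lookup failed */
};

int main(void)
{
    /* Hypothetical walk: catalogue and directory entries found, PT lookup failed. */
    struct level_data walk[] = {
        { "PCE", 2,   1024, 0x00000000fe001005ULL },
        { "PDE", 17,  512,  0x00000000fe044003ULL },
        { "PTE", 700, 512,  0 },
    };

    for (size_t i = 0; i < sizeof(walk) / sizeof(walk[0]); i++) {
        if (walk[i].entry != 0) {
            printf("%s for index %u = 0x%016" PRIx64 "\n",
                   walk[i].name, walk[i].index, walk[i].entry);
        } else {
            printf("%s index (%u) out of bounds (%u)\n",
                   walk[i].name, walk[i].index, walk[i].num_entries);
            break;
        }
    }
    return 0;
}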
- - @Input ui32Index - Index of global Fault info - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Return void - -******************************************************************************/ -static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const IMG_CHAR* pszIndent) -{ - MMU_LEVEL eTopLevel; - const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" }; - const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" }; - - eTopLevel = psFaultData->eTopLevel; - - if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN) - { - PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent); - return; - } - else if (psFaultData->eType == MMU_FAULT_TYPE_PM) - { - PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address); - } - else - { - MMU_LEVEL eCurrLevel; - PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST); - - for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--) - { - MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel]; - if (psMMULevelData->ui64Address) - { - if (psMMULevelData->uiBytesPerEntry == 4) - { - PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s", - pszIndent, - szPageLevel[eCurrLevel], - psMMULevelData->ui32Index, - (IMG_UINT) psMMULevelData->ui64Address, - psMMULevelData->psDebugStr); - } - else - { - PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s", - pszIndent, - szPageLevel[eCurrLevel], - psMMULevelData->ui32Index, - psMMULevelData->ui64Address, - psMMULevelData->psDebugStr); - } - } - else - { - PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)", - pszIndent, - szPageError[eCurrLevel], - psMMULevelData->ui32Index, - psMMULevelData->ui32NumOfEntries); - break; - } - } - } - -} - -/*! -******************************************************************************* - - @Function _RGXDumpRGXBIFBank - - @Description - - Dump BIF Bank state in human readable form. 
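The MMU and request status decoding in this dump routine relies on the CLRMSK/SHIFT idiom used throughout the register headers: a field's CLRMSK is the register value with that field's bits cleared, so the field is read back with (value & ~CLRMSK) >> SHIFT. A standalone sketch of the idiom with invented mask and shift values, not the real RGX_CR_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Invented example field: bits 4..11 of a 64-bit status register. */
#define EXAMPLE_STATUS_PC_CLRMSK   (~0x0000000000000FF0ULL)
#define EXAMPLE_STATUS_PC_SHIFT    (4)
/* Invented single-bit flag at bit 12. */
#define EXAMPLE_STATUS_FAULT_RO_EN (0x0000000000001000ULL)

int main(void)
{
    uint64_t status = 0x0000000000001230ULL; /* example register value */

    uint32_t pc       = (uint32_t)((status & ~EXAMPLE_STATUS_PC_CLRMSK) >>
                                   EXAMPLE_STATUS_PC_SHIFT);
    int      ro_fault = (status & EXAMPLE_STATUS_FAULT_RO_EN) != 0;

    printf("PC = %u%s\n", pc, ro_fault ? ", Read Only fault" : "");
    return 0;
}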
- - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - @Input eBankID - BIF identifier - @Input ui64MMUStatus - MMU Status register value - @Input ui64ReqStatus - BIF request Status register value - @Return void - -******************************************************************************/ -static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - RGXDBG_BIF_ID eBankID, - IMG_UINT64 ui64MMUStatus, - IMG_UINT64 ui64ReqStatus, - const IMG_CHAR *pszIndent) -{ - if (ui64MMUStatus == 0x0) - { - PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]); - } - else - { - IMG_UINT32 ui32PageSize; - IMG_UINT32 ui32PC = - (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; - - /* Bank 0 & 1 share the same fields */ - PVR_DUMPDEBUG_LOG("%s%s - FAULT:", - pszIndent, - pszBIFNames[eBankID]); - - /* MMU Status */ - { - IMG_UINT32 ui32MMUDataType = - (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT; - - IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0; - IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0; - - ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; - - PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d%s%s%s.", - pszIndent, - ui64MMUStatus, - ui32PC, - (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC), - ui32PageSize, - (bROFault)?", Read Only fault":"", - (bProtFault)?", PM/META protection fault":"", - _RGXDecodeMMULevel(ui32MMUDataType)); - } - - /* Req Status */ - { - IMG_CHAR *pszTagID; - IMG_CHAR *pszTagSB; - IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; - IMG_BOOL bRead; - IMG_UINT32 ui32TagSB, ui32TagID; - IMG_UINT64 ui64Addr; - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) - { - bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0; - ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT; - ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT; - } - else - { - bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0; - ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT; - ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT; - } - ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) << - RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT; - - _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE); - - PVR_DUMPDEBUG_LOG("%s * Request (0x%016" IMG_UINT64_FMTSPECx - "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".", - pszIndent, - ui64ReqStatus, - pszTagID, - pszTagSB, - (bRead)?"Reading from":"Writing to", - ui64Addr); - } - } -} 
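The static_assert blocks in this hunk exist because the FW-core fault registers are expected to lay out their TAG and ADDRESS fields exactly like the BIF bank 0 registers, which lets one decode path serve both. That kind of compile-time layout check can be sketched as follows; the macro names are placeholders rather than the real RGX_CR_* definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder field definitions for two register variants that must match. */
#define VARIANT_A_TAG_ID_CLRMSK (~0x00000000000003FFULL)
#define VARIANT_A_TAG_ID_SHIFT  (0)
#define VARIANT_B_TAG_ID_CLRMSK (~0x00000000000003FFULL)
#define VARIANT_B_TAG_ID_SHIFT  (0)

/* Fails the build, not the runtime, if the layouts ever diverge. */
static_assert(VARIANT_A_TAG_ID_CLRMSK == VARIANT_B_TAG_ID_CLRMSK,
              "TAG_ID_CLRMSK mismatch!");
static_assert(VARIANT_A_TAG_ID_SHIFT == VARIANT_B_TAG_ID_SHIFT,
              "TAG_ID_SHIFT mismatch!");

/* One decode helper reused for both variants, made safe by the checks above. */
static uint32_t decode_tag_id(uint64_t req_status)
{
    return (uint32_t)((req_status & ~VARIANT_A_TAG_ID_CLRMSK) >>
                      VARIANT_A_TAG_ID_SHIFT);
}

int main(void)
{
    printf("tag id = %u\n", decode_tag_id(0x123ULL));
    return 0;
}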
-static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT mismatch!"); -static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT), - "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT mismatch!"); - -/*! -******************************************************************************* - - @Function _RGXDumpRGXMMUFaultStatus - - @Description - - Dump MMU Fault status in human readable form. - - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - @Input ui64MMUStatus - MMU Status register value - @Input pszMetaOrCore - string representing call is for META or MMU core - @Return void - -******************************************************************************/ -static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 ui64MMUStatus, - const IMG_PCHAR pszMetaOrCore, - const IMG_CHAR *pszIndent) -{ - if (ui64MMUStatus == 0x0) - { - PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore); - } - else - { - IMG_UINT32 ui32PC = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; - IMG_UINT64 ui64Addr = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) << 4; /* align shift */ - IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT; - IMG_UINT32 ui32SideBand = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT; - IMG_UINT32 ui32MMULevel = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT; - IMG_BOOL bRead = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0; - IMG_BOOL bFault = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0; - IMG_BOOL bROFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2; - IMG_BOOL bProtFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3; - IMG_CHAR 
aszScratch[RGX_DEBUG_STR_SIZE]; - IMG_CHAR *pszTagID; - IMG_CHAR *pszTagSB; - - _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE); - - PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore); - PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECx ", %s (%s)%s%s%s%s.", - pszIndent, - ui64MMUStatus, - ui32PC, - (bRead)?"Reading from":"Writing to", - ui64Addr, - pszTagID, - pszTagSB, - (bFault)?", Fault":"", - (bROFault)?", Read Only fault":"", - (bProtFault)?", PM/META protection fault":"", - _RGXDecodeMMULevel(ui32MMULevel)); - - } -} -static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), - "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); - - - -#if !defined(NO_HARDWARE) -static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState) -{ - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - IMG_UINT32 ui32RegRead; - IMG_UINT32 eError = PVRSRV_OK; - IMG_UINT32 *pui32NMIMemoryPointer; - IMG_UINT32 volatile *pui32SyncFlag; - IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset; - - /* Map the FW data area to the kernel */ - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, - (void **)&pui32NMIMemoryPointer); - if (eError != PVRSRV_OK) - { - 
PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to acquire NMI shared memory area (%s)", - __func__, - PVRSRVGetErrorString(eError))); - goto map_error_fail; - } - - /* Calculate offset to the boot/NMI data page */ - uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA)); - - /* Jump to the NMI shared data area within the page above */ - pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE); - - /* Acquire the NMI operations lock */ - OSLockAcquire(psDevInfo->hNMILock); - - /* Make sure the synchronisation flag is set to 0 */ -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - pui32SyncFlag = &psDevInfo->psRGXFWIfSysInit->sMIPSState.ui32Sync; - PVR_UNREFERENCED_PARAMETER(pui32NMIMemoryPointer); -#else - pui32SyncFlag = &pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET]; -#endif - *pui32SyncFlag = 0; - - /* Readback performed as a part of memory barrier */ - OSWriteMemoryBarrier(pui32SyncFlag); - - /* Enable NMI issuing in the MIPS wrapper */ - OSWriteHWReg64(pvRegsBaseKM, - RGX_CR_MIPS_WRAPPER_NMI_ENABLE, - RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN); - (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); - - /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */ - ui32RegRead = OSReadHWReg32(pvRegsBaseKM, - RGX_CR_MIPS_EXCEPTION_STATUS); - if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)) - { - - eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; - goto fail; - } - ui32RegRead = 0; - - /* Issue NMI */ - OSWriteHWReg32(pvRegsBaseKM, - RGX_CR_MIPS_WRAPPER_NMI_EVENT, - RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN); - (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_EVENT); - - - /* Wait for NMI Taken to be asserted */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - ui32RegRead = OSReadHWReg32(pvRegsBaseKM, - RGX_CR_MIPS_EXCEPTION_STATUS); - if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0) - { - eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; - goto fail; - } - ui32RegRead = 0; - - /* Allow the firmware to proceed */ - *pui32SyncFlag = 1; - - /* Readback performed as a part of memory barrier */ - OSWriteMemoryBarrier(pui32SyncFlag); - - /* Wait for the FW to have finished the NMI routine */ - ui32RegRead = OSReadHWReg32(pvRegsBaseKM, - RGX_CR_MIPS_EXCEPTION_STATUS); - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - ui32RegRead = OSReadHWReg32(pvRegsBaseKM, - RGX_CR_MIPS_EXCEPTION_STATUS); - if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) - { - eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; - goto fail; - } - ui32RegRead = 0; - - /* Copy state */ -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - OSDeviceMemCopy(psMIPSState, &psDevInfo->psRGXFWIfSysInit->sMIPSState, sizeof(*psMIPSState)); -#else - OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState)); -#endif - - --(psMIPSState->ui32ErrorEPC); - --(psMIPSState->ui32EPC); - - /* Disable NMI issuing in the MIPS wrapper */ - OSWriteHWReg32(pvRegsBaseKM, - RGX_CR_MIPS_WRAPPER_NMI_ENABLE, - 0); - (void) OSReadHWReg64(pvRegsBaseKM, 
RGX_CR_MIPS_WRAPPER_NMI_ENABLE); - -fail: - /* Release the NMI operations lock */ - OSLockRelease(psDevInfo->hNMILock); - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); -map_error_fail: - return eError; -} - -/* Print decoded information from cause register */ -static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - IMG_UINT32 ui32Cause, - IMG_UINT32 ui32ErrorState) -{ -#define INDENT " " - const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause); - const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode); - - if (ui32ErrorState != 0 && - pszException != NULL) - { - PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException); - } - - if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING) - { - PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending"); - } - - if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV)) - { - PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector"); - } - - if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING) - { - PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending"); - } - - /* Unusable Coproc exception */ - if (ui32ExcCode == 11) - { - PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause)); - } - -#undef INDENT -} - -static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode) -{ - if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) - { - PVR_DPF((PVR_DBG_WARNING, - "Only %lu exceptions available in MIPS, %u is not a valid exception code", - (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); - return IMG_FALSE; - } - - return apsMIPSExcCodes[ui32ExcCode].bIsFatal; -} - -static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - IMG_UINT32 ui32Debug, - IMG_UINT32 ui32DEPC) -{ - const IMG_CHAR *pszDException = NULL; - IMG_UINT32 i; -#define INDENT " " - - if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM)) - { - return; - } - - PVR_DUMPDEBUG_LOG("DEBUG :"); - - pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)); - - if (pszDException != NULL) - { - PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException); - } - - for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i) - { - const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i]; - - if (ui32Debug & psDebugEntry->ui32Mask) - { - PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation); - } - } -#undef INDENT - PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC); -} - -static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry, - const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, - const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, - IMG_UINT64 *pui64PA0Start, - IMG_UINT64 *pui64PA0End, - IMG_UINT64 *pui64PA1Start, - IMG_UINT64 *pui64PA1End) -{ - IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; - IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask); - - if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0) - { - /* Dummy values to fail the range checks later */ - *pui64PA0Start = -1ULL; - *pui64PA0End = -1ULL; - } - else if (bUseRemapOutput) - { - *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; - *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; - } - else - { - *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); - *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; - } - - if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0) - { - /* Dummy values to fail the range checks later */ - *pui64PA1Start = -1ULL; - *pui64PA1End = -1ULL; - } - else if (bUseRemapOutput) - { - *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; - *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; - } - else - { - *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); - *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; - } -} - -static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const RGX_MIPS_TLB_ENTRY *psTLB, - const RGX_MIPS_REMAP_ENTRY *psRemap) -{ - IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ; - IMG_UINT64 ui64PA0EndI, ui64PA1EndI, ui64PA0EndJ, ui64PA1EndJ; - IMG_UINT32 i, j; - -#define RANGES_OVERLAP(start0,end0,start1,end1) ((start0) < (end1) && (start1) < (end0)) - - for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++) - { - _GetMipsTLBPARanges(&psTLB[i], - psRemap ? &psRemap[i] : NULL, - psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, - &ui64PA0StartI, &ui64PA0EndI, - &ui64PA1StartI, &ui64PA1EndI); - - for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++) - { - _GetMipsTLBPARanges(&psTLB[j], - psRemap ? &psRemap[j] : NULL, - psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, - &ui64PA0StartJ, &ui64PA0EndJ, - &ui64PA1StartJ, &ui64PA1EndJ); - - if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) || - RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) || - RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) || - RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ) ) - { - PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i , j); - } - } - } -} - -static inline IMG_UINT32 _GetMIPSRemapRegionSize(IMG_UINT32 ui32RegionSizeEncoding) -{ - return 1U << ((ui32RegionSizeEncoding + 1U) << 1U); -} - -static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const RGX_MIPS_TLB_ENTRY *psTLBEntry, - const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, - const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, - IMG_UINT32 ui32Index) -{ - IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; - IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); - IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); - IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0; - IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0; - - if (bDumpRemapEntries) - { - /* RemapAddrIn is always 4k aligned and on 32 bit */ - ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12; - ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12; - - /* RemapAddrOut is always 4k aligned and on 32 or 36 bit */ - ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; - ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; - - /* If TLB and remap entries match, then merge them else, print them separately */ - if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn && - (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn) - { - ui64PA0 = ui64Remap0AddrOut; - ui64PA1 = ui64Remap1AddrOut; - bDumpRemapEntries = IMG_FALSE; - } - } - - PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, " - "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s", - ui32Index, - psTLBEntry->ui32TLBHi, - RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask), - ui64PA0, - gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)], - gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)], - gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)], - ui64PA1, - gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)], - gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)], - gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]); - - if (bDumpRemapEntries) - { - PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, - ui32Index, - ui32Remap0AddrIn, - _GetMIPSRemapRegionSize(psRemapEntry0->ui32RemapRegionSize), - ui64Remap0AddrOut); - - PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, - ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES, - ui32Remap1AddrIn, - _GetMIPSRemapRegionSize(psRemapEntry1->ui32RemapRegionSize), - ui64Remap1AddrOut); - } -} - -#endif /* !defined(NO_HARDWARE) */ - -static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause) -{ - switch (ui32Mcause) - { -#define X(value, fatal, description) \ - case value: \ - if (fatal) \ - return description; \ - return NULL; - - RGXRISCVFW_MCAUSE_TABLE -#undef X - - default: - PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); - return NULL; - } -} - -/* - Appends flags strings to a null-terminated string buffer - each flag - description string starts with a space. -*/ -static void _Flags2Description(IMG_CHAR *psDesc, - IMG_UINT32 ui32DescSize, - const IMG_FLAGS2DESC *psConvTable, - IMG_UINT32 ui32TableSize, - IMG_UINT32 ui32Flags) -{ - IMG_UINT32 ui32Idx; - - for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) - { - if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag) - { - OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); - } - } -} - -/* - Writes flags strings to an uninitialised buffer. 
-*/ -static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) -{ - const IMG_CHAR szCswLabel[] = "Ctx switch options:"; - size_t uLabelLen = sizeof(szCswLabel) - 1; - const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; - - OSStringLCopy(psDesc, szCswLabel, ui32DescSize); - - _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags); - _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags); -} - -static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) -{ - const IMG_CHAR szCswLabel[] = "Ctx switch:"; - size_t uLabelLen = sizeof(szCswLabel) - 1; - const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; - - OSStringLCopy(psDesc, szCswLabel, ui32DescSize); - - _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags); -} - - -/*! -******************************************************************************* - - @Function _RGXDumpFWAssert - - @Description - - Dump FW assert strings when a thread asserts. - - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer - - @Return void - -******************************************************************************/ -static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl) -{ - const IMG_CHAR *pszTraceAssertPath; - const IMG_CHAR *pszTraceAssertInfo; - IMG_INT32 ui32TraceAssertLine; - IMG_UINT32 i; - - for (i = 0; i < RGXFW_THREAD_NUM; i++) - { - pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; - pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; - ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; - - /* print non-null assert strings */ - if (*pszTraceAssertInfo) - { - PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)", - i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine); - } - } -} - -/*! -******************************************************************************* - - @Function _RGXDumpFWFaults - - @Description - - Dump FW assert strings when a thread asserts. 
- - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psFwSysData - RGX FW shared system data - - @Return void - -******************************************************************************/ -static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const RGXFWIF_SYSDATA *psFwSysData) -{ - if (psFwSysData->ui32FWFaults > 0) - { - IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; - IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; - IMG_UINT32 ui32Index; - - if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) - { - ui32StartFault = 0; - } - - for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) - { - const RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; - - /* Split OS timestamp in seconds and nanoseconds */ - ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); - - PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)", - ui32Index+1, psFaultInfo->sFaultBuf.szInfo, - psFaultInfo->sFaultBuf.szPath, - psFaultInfo->sFaultBuf.ui32LineNum); - PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, - psFaultInfo->ui32Data, - psFaultInfo->ui64CRTimer, - ui64Seconds, ui64Nanoseconds); - } - } -} - -static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const RGXFWIF_SYSDATA *psFwSysData) -{ - IMG_UINT32 i; - for (i = 0; i < RGXFW_THREAD_NUM; i++) - { - if (psFwSysData->aui32CrPollAddr[i]) - { - PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", - i, - ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), - psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, - psFwSysData->aui32CrPollMask[i]); - } - } - -} - -static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const RGXFWIF_SYSDATA *psFwSysData, - const RGXFWIF_HWRINFOBUF *psHWRInfoBuf, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - IMG_BOOL bAnyLocked = IMG_FALSE; - IMG_UINT32 dm, i; - IMG_UINT32 ui32LineSize; - IMG_CHAR *pszLine, *pszTemp; - const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "TA", "3D", "CDM", "RAY", "TA2", "TA3", "TA4"}; - const IMG_CHAR szMsgHeader[] = "Number of HWR: "; - const IMG_CHAR szMsgFalse[] = "FALSE("; - IMG_CHAR *pszLockupType = ""; - const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ - const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; - IMG_UINT32 ui32HWRRecoveryFlags; - IMG_UINT32 ui32ReadIndex; - - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) - { - apszDmNames[RGXFWIF_DM_TDM] = "2D"; - } - - for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) - { - if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || - psHWRInfoBuf->aui32HwrDmOverranCount[dm]) - { - bAnyLocked = IMG_TRUE; - break; - } - } - - if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) - { - /* No HWR situation, print nothing */ - return; - } - - if (PVRSRV_VZ_MODE_IS(GUEST)) - { - IMG_BOOL bAnyHWROccured = IMG_FALSE; - - for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) - { - if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || - 
psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || - psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) - { - bAnyHWROccured = IMG_TRUE; - break; - } - } - - if (!bAnyHWROccured) - { - return; - } - } - - ui32LineSize = sizeof(IMG_CHAR) * ( - ui32MsgHeaderCharCount + - (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ + - 10/*UINT32 max num of digits*/ + - 1/*slash*/ + - 10/*UINT32 max num of digits*/ + - 3/*right parenthesis + comma + space*/)) + - ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 - /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ - ); - - pszLine = OSAllocMem(ui32LineSize); - if (pszLine == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Out of mem allocating line string (size: %d)", - __func__, - ui32LineSize)); - return; - } - - OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); - pszTemp = pszLine + ui32MsgHeaderCharCount; - - for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) - { - pszTemp += OSSNPrintf(pszTemp, - 4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 - /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */, - "%s(%u/%u+%u), ", - apszDmNames[dm], - psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], - psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], - psHWRInfoBuf->aui32HwrDmOverranCount[dm]); - } - - OSStringLCat(pszLine, szMsgFalse, ui32LineSize); - pszTemp += ui32MsgFalseCharCount; - - for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) - { - pszTemp += OSSNPrintf(pszTemp, - 10 + 1 + 1 /* UINT32 max num + comma + \0 */, - (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"), - psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); - } - - PVR_DUMPDEBUG_LOG("%s", pszLine); - - OSFreeMem(pszLine); - - /* Print out per HWR info */ - for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) - { - if (dm == RGXFWIF_DM_GP) - { - PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); - } - else - { - if (!PVRSRV_VZ_MODE_IS(GUEST)) - { - IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm]; - IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; - sPerDmHwrDescription[0] = '\0'; - - if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING) - { - OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); - } - else - { - _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, - asDmState2Description, ARRAY_SIZE(asDmState2Description), - ui32HWRRecoveryFlags); - } - PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, ui32HWRRecoveryFlags, sPerDmHwrDescription); - } - else - { - PVR_DUMPDEBUG_LOG("DM %d", dm); - } - } - - ui32ReadIndex = 0; - for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) - { - IMG_BOOL bPMFault = IMG_FALSE; - IMG_UINT32 ui32PC; - IMG_UINT32 ui32PageSize = 0; - IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; - const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; - - if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) - { - IMG_CHAR aui8RecoveryNum[10+10+1]; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; - IMG_BOOL bPageFault = IMG_FALSE; - IMG_DEV_VIRTADDR sFaultDevVAddr; - - /* Split OS timestamp in seconds and nanoseconds */ - ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); - - ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; - if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } - else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } - else if 
(ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } - else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } - else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } - else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; } - - OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) - { - PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", - aui8RecoveryNum, - psHWRInfo->ui32CoreID, - psHWRInfo->ui32PID, - psHWRInfo->szProcName, - psHWRInfo->ui32FrameNum, - psHWRInfo->ui32ActiveHWRTData, - psHWRInfo->ui32EventStatus, - pszLockupType); - } - else - { - PVR_DUMPDEBUG_LOG(" %s PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", - aui8RecoveryNum, - psHWRInfo->ui32PID, - psHWRInfo->szProcName, - psHWRInfo->ui32FrameNum, - psHWRInfo->ui32ActiveHWRTData, - psHWRInfo->ui32EventStatus, - pszLockupType); - } - pszTemp = &aui8RecoveryNum[0]; - while (*pszTemp != '\0') - { - *pszTemp++ = ' '; - } - - /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ - if (!PVRSRV_VZ_MODE_IS(GUEST)) - { - PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, - aui8RecoveryNum, - psHWRInfo->ui64CRTimer, - ui64Seconds, - ui64Nanoseconds, - (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); - } - else - { - PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd, - aui8RecoveryNum, - psHWRInfo->ui64CRTimer, - (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); - } - - if (psHWRInfo->ui64CRTimeHWResetFinish != 0) - { - if (psHWRInfo->ui64CRTimeFreelistReady != 0) - { - /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. 
*/ - if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady) - { - PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, - aui8RecoveryNum, - (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, - (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, - (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, - (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); - } - else - { - PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = , TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, - aui8RecoveryNum, - (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, - (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, - (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); - } - } - else - { - PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, - aui8RecoveryNum, - (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, - (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, - (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); - } - } - - switch (psHWRInfo->eHWRType) - { - case RGX_HWRTYPE_BIF0FAULT: - case RGX_HWRTYPE_BIF1FAULT: - { - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) - { - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType), - psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, - psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, - DD_NORMAL_INDENT); - - bPageFault = IMG_TRUE; - sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); - ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; - bPMFault = (ui32PC >= 8); - ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; - } - } - break; - case RGX_HWRTYPE_TEXASBIF0FAULT: - { - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) - { - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, - psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, - psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, - DD_NORMAL_INDENT); - - bPageFault = IMG_TRUE; - sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); - ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; - bPMFault = (ui32PC >= 8); - ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> - RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; - } - } - } - break; - - case RGX_HWRTYPE_ECCFAULT: - { - 
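/*
 * Illustrative sketch, not part of the patch: the fault decoding above and
 * below repeatedly extracts a bit-field from a status register with the
 * idiom (value & ~FIELD_CLRMSK) >> FIELD_SHIFT, where the CLRMSK constant
 * has the field's bits cleared and every other bit set. A stand-alone
 * example with hypothetical mask names:
 *
 *   #include <stdint.h>
 *
 *   #define FIELD_SHIFT   4U
 *   #define FIELD_CLRMSK  0xFFFFFFFFFFFFFF0FULL   // field occupies bits [7:4]
 *
 *   static uint32_t extract_field(uint64_t reg)
 *   {
 *       return (uint32_t)((reg & ~FIELD_CLRMSK) >> FIELD_SHIFT);
 *   }
 *
 *   // extract_field(0x000000A3) == 0xA
 */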
PVR_DUMPDEBUG_LOG(" ECC fault GPU=0x%08x", psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU); - } - break; - - case RGX_HWRTYPE_MMUFAULT: - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, - psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], - "Core", - DD_NORMAL_INDENT); - - bPageFault = IMG_TRUE; - sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; - sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; - sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; - sFaultDevVAddr.uiAddr <<= 4; /* align shift */ - ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; -#if defined(SUPPORT_TRUSTED_DEVICE) - ui32PC = ui32PC - 1; -#endif - bPMFault = (ui32PC <= 8); - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; - } - } - break; - - case RGX_HWRTYPE_MMUMETAFAULT: - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, - psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], - "Meta", - DD_NORMAL_INDENT); - - bPageFault = IMG_TRUE; - sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; - sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; - sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; - sFaultDevVAddr.uiAddr <<= 4; /* align shift */ - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; - } - } - break; - - case RGX_HWRTYPE_POLLFAILURE: - { - PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", - psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, - ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), - psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, - psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, - psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); - } - break; - - case RGX_HWRTYPE_MIPSTLBFAULT: - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo; - - /* This is not exactly what the MMU code does, but the result should be the same */ - const IMG_UINT32 ui32UnmappedEntry = - ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED; - - PVR_DUMPDEBUG_LOG(" MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X" - " (page PA 0x%" IMG_UINT64_FMTSPECx", V %u)", - psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr, - ui32EntryLo, - RGXMIPSFW_TLB_GET_PA(ui32EntryLo), - ui32EntryLo & RGXMIPSFW_TLB_VALID ? 
1 : 0); - - if (ui32EntryLo == ui32UnmappedEntry) - { - PVR_DUMPDEBUG_LOG(" Potential use-after-free detected"); - } - } - } - break; - - case RGX_HWRTYPE_MMURISCVFAULT: - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, - psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, - psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, - DD_NORMAL_INDENT); - - bPageFault = IMG_TRUE; - bPMFault = IMG_FALSE; - sFaultDevVAddr.uiAddr = - (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & - ~RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK); - ui32PageSize = - (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & - ~RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK) >> - RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT; - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; - } - } - break; - - case RGX_HWRTYPE_OVERRUN: - case RGX_HWRTYPE_UNKNOWNFAILURE: - { - /* Nothing to dump */ - } - break; - - default: - { - PVR_DUMPDEBUG_LOG(" Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType); - } - break; - } - - if (bPageFault) - { - - FAULT_INFO *psInfo; - - OSLockAcquire(psDevInfo->hDebugFaultInfoLock); - - /* Find the matching Fault Info for this HWRInfo */ - psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; - - /* if they do not match, we need to update the psInfo */ - if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || - (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr)) - { - MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; - - psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; - - if (bPMFault) - { - /* PM fault and we dump PC details only */ - psFaultData->eTopLevel = MMU_LEVEL_0; - psFaultData->eType = MMU_FAULT_TYPE_PM; - psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr; - } - else - { - RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData); - } - - _RecordFaultInfo(psDevInfo, psInfo, - sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer, - _PageSizeHWToBytes(ui32PageSize)); - - } - - _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); - - if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) - { - _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); - } - - OSLockRelease(psDevInfo->hDebugFaultInfoLock); - } - - } - - if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) - ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; - else - ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; - } - } -} - -#if !defined(NO_HARDWARE) - -/*! -******************************************************************************* - - @Function _CheckForPendingPage - - @Description - - Check if the MMU indicates it is blocked on a pending page - - @Input psDevInfo - RGX device info - - @Return IMG_BOOL - IMG_TRUE if there is a pending page - -******************************************************************************/ -static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - IMG_UINT32 ui32BIFMMUEntry; - - ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY); - - if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) - { - return IMG_TRUE; - } - else - { - return IMG_FALSE; - } -} - -/*! 
-******************************************************************************* - - @Function _GetPendingPageInfo - - @Description - - Get information about the pending page from the MMU status registers - - @Input psDevInfo - RGX device info - @Output psDevVAddr - The device virtual address of the pending MMU address translation - @Output pui32CatBase - The page catalog base - @Output pui32DataType - The MMU entry data type - - @Return void - -******************************************************************************/ -static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, - IMG_UINT32 *pui32CatBase, - IMG_UINT32 *pui32DataType) -{ - IMG_UINT64 ui64BIFMMUEntryStatus; - - ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS); - - psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); - - *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >> - RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT; - - *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >> - RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT; -} - -#endif - -void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_BOOL bRGXPoweredON) -{ - IMG_CHAR *pszState, *pszReason; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - IMG_UINT32 ui32OSid; - const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; - /* space for the current clock speed and 3 previous */ - RGXFWIF_TIME_CORR asTimeCorrs[4]; - IMG_UINT32 ui32NumClockSpeedChanges; - -#if defined(NO_HARDWARE) - PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); -#else - if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - - IMG_UINT64 ui64RegValMMUStatus; - - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS); - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Core", DD_SUMMARY_INDENT); - - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Meta", DD_SUMMARY_INDENT); - } - else - { - IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus; - - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS); - ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS); - - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); - - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF))) - { - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS); - ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS); - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); - } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS); - ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, 
RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS); - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); - } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) - { - IMG_UINT32 ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0; - - if (ui32PhantomCnt > 1) - { - IMG_UINT32 ui32Phantom; - for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++) - { - /* This can't be done as it may interfere with the FW... */ - /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/ - - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); - ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); - - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); - } - }else - { - ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); - ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); - _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); - } - } - } +#if !defined(NO_HARDWARE) +static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause) +{ + switch (ui32Mcause) + { +#define X(value, fatal, description) \ + case value: \ + if (fatal) \ + return description; \ + return NULL; - if (_CheckForPendingPage(psDevInfo)) - { - IMG_UINT32 ui32CatBase; - IMG_UINT32 ui32DataType; - IMG_DEV_VIRTADDR sDevVAddr; + RGXRISCVFW_MCAUSE_TABLE +#undef X - PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); + default: + PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); + return NULL; + } +} +#endif /* !defined(NO_HARDWARE) */ - _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType); - if (ui32CatBase >= 8) - { - PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); - } - else - { - IMG_DEV_PHYADDR sPCDevPAddr; - MMU_FAULT_DATA sFaultData; +static const IMG_FLAGS2DESC asHWErrorState[] = +{ + {RGX_HW_ERR_NA, "N/A"}, + {RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL, "Primitive ID failure during DM kill."}, +}; - sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase)); +/* + * Translate ID code to descriptive string. + * Returns on the first match. + */ +static void _ID2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32ID) +{ + IMG_UINT32 ui32Idx; - PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC - " on cat base %u. 
PC Addr = 0x%" IMG_UINT64_FMTSPECx, - sDevVAddr.uiAddr, - ui32CatBase, - sPCDevPAddr.uiAddr); - RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); - _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); - } + for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) + { + if (ui32ID == psConvTable[ui32Idx].uiFlag) + { + OSStringSafeCopy(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + return; } } -#endif /* NO_HARDWARE */ +} - /* Firmware state */ - switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)) - { - case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break; - default: pszState = "UNKNOWN"; break; - } - switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason)) - { - case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; - case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break; - case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break; - case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break; - case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break; - case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break; - case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break; - case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break; - case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break; - default: pszReason = " - Unknown reason"; break; - } +/*! +******************************************************************************* -#if !defined(NO_HARDWARE) - /* Determine the type virtualisation support used */ -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - if (!PVRSRV_VZ_MODE_IS(NATIVE)) - { -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -#if defined(SUPPORT_AUTOVZ) -#if defined(SUPPORT_AUTOVZ_HW_REGS) - PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support"); -#else - PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory"); -#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ -#else - PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation"); -#endif /* defined(SUPPORT_AUTOVZ) */ -#else - PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); -#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ - } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + @Function _RGXDumpFWAssert -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) - if (!PVRSRV_VZ_MODE_IS(NATIVE)) - { - RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo); - RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo); + @Description - PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)", - ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"), - (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"), - (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? 
(apszFwOsStateName[eOsState]) : ("invalid")); + Dump FW assert strings when a thread asserts. - } -#endif + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer -#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - if (!PVRSRV_VZ_MODE_IS(NATIVE)) - { - IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); - IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); + @Return void - PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", - ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); - } -#endif -#endif /* !defined(NO_HARDWARE) */ +******************************************************************************/ +static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl) +{ + const IMG_CHAR *pszTraceAssertPath; + const IMG_CHAR *pszTraceAssertInfo; + IMG_INT32 ui32TraceAssertLine; + IMG_UINT32 i; - if (!PVRSRV_VZ_MODE_IS(GUEST)) + for (i = 0; i < RGXFW_THREAD_NUM; i++) { - IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; - IMG_BOOL bOsIsolationEnabled = IMG_FALSE; + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf, INVALIDATE); + pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; + pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; + ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; - if (psFwSysData == NULL) + /* print non-null assert strings */ + if (*pszTraceAssertInfo) { - /* can't dump any more information */ - PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); - return; + PVR_DUMPDEBUG_LOG("FW-T%d Assert: %.*s (%.*s:%d)", + i, RGXFW_TRACE_BUFFER_ASSERT_SIZE, pszTraceAssertInfo, + RGXFW_TRACE_BUFFER_ASSERT_SIZE, pszTraceAssertPath, ui32TraceAssertLine); } + } +} - sHwrStateDescription[0] = '\0'; - - _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE, - asHwrState2Description, ARRAY_SIZE(asHwrState2Description), - psFwSysData->ui32HWRStateFlags); - PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); - PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)", - pszPowStateName[psFwSysData->ePowState], - (psDevInfo->pvAPMISRData)?"enabled":"disabled", - psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, - psDevInfo->ui32ActivePMReqDenied, - psDevInfo->ui32ActivePMReqNonIdle, - psDevInfo->ui32ActivePMReqRetry, - psDevInfo->ui32ActivePMReqTotal - - psDevInfo->ui32ActivePMReqOk - - psDevInfo->ui32ActivePMReqDenied - - psDevInfo->ui32ActivePMReqRetry - - psDevInfo->ui32ActivePMReqNonIdle, - psDevInfo->ui32ActivePMReqTotal, - psRuntimeCfg->ui32ActivePMLatencyms); +/*! +******************************************************************************* - ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); - RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); + @Function _RGXDumpFWFaults - PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " - "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). 
" - "FW frequency: %u.%03u MHz.", - ui32NumClockSpeedChanges, - asTimeCorrs[0].ui32CoreClockSpeed / 1000000, - (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, - asTimeCorrs[0].ui64OSTimeStamp, - psRuntimeCfg->ui32CoreClockSpeed / 1000000, - (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); - if (ui32NumClockSpeedChanges > 0) - { - PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " - "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", - asTimeCorrs[1].ui32CoreClockSpeed / 1000000, - (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, - asTimeCorrs[2].ui32CoreClockSpeed / 1000000, - (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, - asTimeCorrs[3].ui32CoreClockSpeed / 1000000, - (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, - asTimeCorrs[1].ui64OSTimeStamp, - asTimeCorrs[2].ui64OSTimeStamp, - asTimeCorrs[3].ui64OSTimeStamp); - } + @Description - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) - { - RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; - IMG_BOOL bMTSEnabled = IMG_FALSE; + Dump FW assert strings when a thread asserts. -#if !defined(NO_HARDWARE) - if (bRGXPoweredON) - { - bMTSEnabled = (RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? - IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0); - } -#endif + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFwSysData - RGX FW shared system data - PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid, - apszFwOsStateName[sFwRunFlags.bfOsState], - (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", - (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", - psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid], - (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "", - (bMTSEnabled) ? 
"MTS on;" : "MTS off;" - ); + @Return void - bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS; - } +******************************************************************************/ +static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGXFWIF_SYSDATA *psFwSysData) +{ + if (psFwSysData->ui32FWFaults > 0) + { + IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; + IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; + IMG_UINT32 ui32Index; -#if defined(PVR_ENABLE_PHR) + if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) { - IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; - - sPHRConfigDescription[0] = '\0'; - _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, - asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), - BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode)); - - PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, sPHRConfigDescription); + ui32StartFault = 0; } -#endif - if (bRGXPoweredON && RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) { - if (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) > 1U) - { - PVR_DUMPDEBUG_LOG("RGX MC Configuration: 0x%X (1:primary, 0:secondary)", psFwSysData->ui32McConfig); - } - } + const RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; - if (bOsIsolationEnabled) - { - PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); - } + /* Split OS timestamp in seconds and nanoseconds */ + RGXConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); - _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); - _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); - _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); - } - else - { - PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); - PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); + PVR_DUMPDEBUG_LOG("FW Fault %d: %.*s (%.*s:%d)", + ui32Index+1, RGXFW_TRACE_BUFFER_ASSERT_SIZE, psFaultInfo->sFaultBuf.szInfo, + RGXFW_TRACE_BUFFER_ASSERT_SIZE, psFaultInfo->sFaultBuf.szPath, + psFaultInfo->sFaultBuf.ui32LineNum); + PVR_DUMPDEBUG_LOG(" Data = 0x%016"IMG_UINT64_FMTSPECx", CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + psFaultInfo->ui64Data, + psFaultInfo->ui64CRTimer, + ui64Seconds, ui64Nanoseconds); + } } +} - _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); +static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGXFWIF_SYSDATA *psFwSysData) +{ + IMG_UINT32 i; -#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) - /* Dump all non-zero values in lines of 8... 
*/ + for (i = 0; i < RGXFW_THREAD_NUM; i++) { - IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; - const IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; - IMG_UINT32 ui32Index1, ui32Index2; - - PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); - for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) + if (psFwSysData->aui32CrPollAddr[i]) { - IMG_UINT32 ui32OrOfValues = 0; - IMG_CHAR *pszBuf = pszLine; - - /* Print all values in this line and skip if all zero... */ - for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++) - { - ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2]; - OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]); - pszBuf += 9; /* write over the '\0' */ - } - - if (ui32OrOfValues != 0) - { - PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine); - } + PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", + i, + ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[i]); } - PVR_DUMPDEBUG_LOG("STATS[END]"); } -#endif + } -static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) +static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + const RGXFWIF_SYSDATA *psFwSysData, + const RGXFWIF_HWRINFOBUF *psHWRInfoBuf, + PVRSRV_RGXDEV_INFO *psDevInfo) { -/* List of extra META Slave Port debug registers */ -#define RGX_META_SP_EXTRA_DEBUG \ - X(RGX_CR_META_SP_MSLVCTRL0) \ - X(RGX_CR_META_SP_MSLVCTRL1) \ - X(RGX_CR_META_SP_MSLVDATAX) \ - X(RGX_CR_META_SP_MSLVIRQSTATUS) \ - X(RGX_CR_META_SP_MSLVIRQENABLE) \ - X(RGX_CR_META_SP_MSLVIRQLEVEL) - - IMG_UINT32 ui32Idx; - IMG_UINT32 ui32RegVal; - IMG_UINT32 ui32RegAddr; - - const IMG_UINT32 aui32DebugRegAddr[] = { -#define X(A) A, - RGX_META_SP_EXTRA_DEBUG -#undef X - }; - - const IMG_CHAR* apszDebugRegName[] = { -#define X(A) #A, - RGX_META_SP_EXTRA_DEBUG -#undef X - }; - - PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); + IMG_BOOL bAnyLocked = IMG_FALSE; + IMG_UINT32 dm, i; + IMG_UINT32 ui32LineSize; + IMG_CHAR *pszLine, *pszTemp; + const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "GEOM", "3D", "CDM", "RAY", "GEOM2", "GEOM3", "GEOM4"}; + const IMG_CHAR szMsgHeader[] = "Number of HWR: "; + const IMG_CHAR szMsgFalse[] = "FALSE("; + IMG_CHAR *pszLockupType = ""; + const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ + const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; + IMG_UINT32 ui32HWRRecoveryFlags; + IMG_UINT32 ui32ReadIndex; - /* dump set of Slave Port debug registers */ - for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) { - const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; - - ui32RegAddr = aui32DebugRegAddr[ui32Idx]; - ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); - PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); + apszDmNames[RGXFWIF_DM_TDM] = "2D"; } -} - -/* - * Array of all the Firmware Trace log IDs used to convert the trace data. 
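Illustrative sketch, not part of the patch: both the META slave-port register
dump above and this trace-log table are built with the X-macro idiom, where a
single list macro is expanded several times with different definitions of X
to produce parallel arrays that stay in lockstep. A stand-alone example with
hypothetical names:

    #define REG_LIST \
        X(REG_CTRL)  \
        X(REG_STATUS)

    enum { REG_CTRL = 0x10, REG_STATUS = 0x14 };

    static const unsigned int aui32RegAddr[] = {
    #define X(r) r,
        REG_LIST
    #undef X
    };

    static const char *apszRegName[] = {
    #define X(r) #r,
        REG_LIST
    #undef X
    };

Adding a register to REG_LIST automatically extends both the address and the
name tables, which is why the same pattern is used for the RISC-V mcause table
and the firmware trace SFID list in this file.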
- */ -typedef struct _TRACEBUF_LOG_ { - RGXFW_LOG_SFids eSFId; - const IMG_CHAR *pszName; - const IMG_CHAR *pszFmt; - IMG_UINT32 ui32ArgNum; -} TRACEBUF_LOG; - -static const TRACEBUF_LOG aLogDefinitions[] = -{ -#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, - RGXFW_LOG_SFIDLIST -#undef X -}; - -#define NARGS_MASK ~(0xF<<16) -static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; - IMG_BOOL bIntegrityOk = IMG_TRUE; - /* - * For every log ID, check the format string and number of arguments is valid. - */ - while (psLogDef->eSFId != RGXFW_SF_LAST) + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { - const TRACEBUF_LOG *psLogDef2; - const IMG_CHAR *pszString; - IMG_UINT32 ui32Count; - - /* - * Check the number of arguments matches the number of '%' in the string and - * check that no string uses %s which is not supported as it requires a - * pointer to memory that is not going to be valid. - */ - pszString = psLogDef->pszFmt; - ui32Count = 0; - - while (*pszString != '\0') - { - if (*pszString++ == '%') - { - ui32Count++; - if (*pszString == 's') - { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", - psLogDef->pszName, *pszString); - } - else if (*pszString == '%') - { - /* Double % is a printable % sign and not a format string... */ - ui32Count--; - } - } - } - - if (ui32Count != psLogDef->ui32ArgNum) + if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || + psHWRInfoBuf->aui32HwrDmOverranCount[dm]) { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", - psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); + bAnyLocked = IMG_TRUE; + break; } + } - /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ - if (ui32Count > 20) - { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.", - psLogDef->pszName, ui32Count); - } + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) + { + /* No HWR situation, print nothing */ + return; + } - /* Check the id number is unique (don't take into account the number of arguments) */ - ui32Count = 0; - psLogDef2 = &aLogDefinitions[0]; + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + IMG_BOOL bAnyHWROccurred = IMG_FALSE; - while (psLogDef2->eSFId != RGXFW_SF_LAST) + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { - if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) - { - ui32Count++; - } - psLogDef2++; + if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) + { + bAnyHWROccurred = IMG_TRUE; + break; + } } - if (ui32Count != 1) + if (!bAnyHWROccurred) { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", - psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); + return; } - - /* Move to the next log ID... */ - psLogDef++; } - return bIntegrityOk; -} - -typedef struct { - IMG_UINT16 ui16Mask; - const IMG_CHAR *pszStr; -} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ - - -/*! 
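For illustration only, not part of the patch: the integrity check above walks
each trace format string, counting '%' conversions while treating "%%" as a
literal percent sign and rejecting "%s", since a string conversion would need
a pointer into memory that is not valid on the host. A stand-alone version of
that counting loop, with hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>

    static bool count_format_args(const char *fmt, uint32_t *pui32Count)
    {
        uint32_t count = 0;

        while (*fmt != '\0')
        {
            if (*fmt++ == '%')
            {
                if (*fmt == '%')
                {
                    fmt++;             // "%%" prints '%', consumes no argument
                }
                else if (*fmt == 's')
                {
                    return false;      // unsupported: requires a host pointer
                }
                else
                {
                    count++;
                }
            }
        }

        *pui32Count = count;
        return true;
    }

The caller then compares the count against the argument number encoded in the
log definition and against the 20-argument limit hard-coded in
RGXDumpFirmwareTrace().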
-******************************************************************************* - - @Function RGXPrepareExtraDebugInfo - - @Description - - Prepares debug info string by decoding ui16DebugInfo value passed +/* + + + + + + + */ +#define FWHWRINFO_DM_STR_SIZE (5U + 10U + 1U + 10U + 1U + 10U + 3U) - @Input pszBuffer - pointer to debug info string buffer - - @Return void + ui32LineSize = sizeof(IMG_CHAR) * ( + ui32MsgHeaderCharCount + + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount * FWHWRINFO_DM_STR_SIZE) + + ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 + /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ + ); -******************************************************************************/ -static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) -{ - const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = + pszLine = OSAllocMem(ui32LineSize); + if (pszLine == NULL) { -#define X(a, b) {a, b}, - RGXFWT_DEBUG_INFO_MSKSTRLIST -#undef X - }; - - IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); - IMG_UINT32 i; - IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + ui32LineSize)); + return; + } - /* Add prepend string */ - OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); + OSStringSafeCopy(pszLine, szMsgHeader, ui32LineSize); + pszTemp = pszLine + ui32MsgHeaderCharCount; - /* Add debug info strings */ - for (i = 0; i < ui32NumFields; i++) + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { - if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) - { - if (bHasExtraDebugInfo) - { - OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ - } - OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); - bHasExtraDebugInfo = IMG_TRUE; - } + pszTemp += OSSNPrintf(pszTemp, + FWHWRINFO_DM_STR_SIZE, + "%s(%u/%u+%u), ", + apszDmNames[dm], + psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], + psHWRInfoBuf->aui32HwrDmOverranCount[dm]); } - /* Add append string */ - OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); -} - -void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; + OSStringLCat(pszLine, szMsgFalse, ui32LineSize); + pszTemp += ui32MsgFalseCharCount; - /* Check that the firmware trace is correctly defined... */ - if (!bIntegrityCheckPassed) + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { - bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); - if (!bIntegrityCheckPassed) - { - return; - } + pszTemp += OSSNPrintf(pszTemp, + 10 + 1 + 1 /* UINT32 max num + comma + \0 */, + (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"), + psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); } - /* Dump FW trace information... */ - if (psRGXFWIfTraceBufCtl != NULL) - { - IMG_UINT32 tid; - IMG_UINT32 ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; + PVR_DUMPDEBUG_LOG("%s", pszLine); - PVR_DUMPDEBUG_LOG("Device ID: %u", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + OSFreeMem(pszLine); - /* Print the log type settings... 
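Illustrative note, not part of the patch: FWHWRINFO_DM_STR_SIZE above replaces
the removed inline size arithmetic and budgets one "%s(%u/%u+%u), " fragment
per DM, which appears to break down as 5 characters for the DM name and
opening parenthesis, three UINT32 counters of up to 10 digits each, 1 for the
'/', 1 for the '+', and 3 for the closing "), ", i.e.
5 + 10 + 1 + 10 + 1 + 10 + 3 = 40 characters passed as the size to OSSNPrintf
for each DM.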
*/ - if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + /* Print out per HWR info */ + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (dm == RGXFWIF_DM_GP) { - PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", - ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), - RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) - ); + PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); } else { - PVR_DUMPDEBUG_LOG("Debug log type: none"); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm]; + IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; + sPerDmHwrDescription[0] = '\0'; + + if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING) + { + OSStringSafeCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); + } + else + { + DebugCommonFlagStrings(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, + asDmState2Description, ARRAY_SIZE(asDmState2Description), + ui32HWRRecoveryFlags); + } + PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%.*s)", dm, ui32HWRRecoveryFlags, + RGX_DEBUG_STR_SIZE, sPerDmHwrDescription); + } + else + { + PVR_DUMPDEBUG_LOG("DM %d", dm); + } } - /* Print the decoded log for each thread... */ - for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) + ui32ReadIndex = 0; + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) { - volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32WrapCount); - volatile IMG_UINT32 *pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); - IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; - IMG_UINT32 ui32HostWrapCount = *pui32FWWrapCount; - IMG_UINT32 ui32HostTracePtr = *pui32FWTracePtr; - IMG_UINT32 ui32Count = 0; - - if (pui32TraceBuf == NULL) + IMG_BOOL bPMFault = IMG_FALSE; + IMG_UINT32 ui32PC; + IMG_UINT32 ui32PageSize = 0; + IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; + const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; + + if (ui32ReadIndex >= RGXFWIF_HWINFO_MAX) { - /* trace buffer not yet allocated */ - continue; + PVR_DUMPDEBUG_LOG("HWINFO index error: %u", ui32ReadIndex); + break; } - while (ui32Count < ui32TraceBufSizeInDWords) - { - IMG_UINT32 ui32Data, ui32DataToId; + if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) + { + IMG_CHAR aui8RecoveryNum[10+10+1]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + IMG_BOOL bPageFault = IMG_FALSE; + IMG_DEV_VIRTADDR sFaultDevVAddr; + + /* Split OS timestamp in seconds and nanoseconds */ + RGXConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; + if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; } + + OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) 
+ { + PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %.*s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + aui8RecoveryNum, + psHWRInfo->ui32CoreID, + psHWRInfo->ui32PID, + RGXFW_PROCESS_NAME_LEN, psHWRInfo->szProcName, + psHWRInfo->ui32FrameNum, + psHWRInfo->ui32ActiveHWRTData, + psHWRInfo->ui32EventStatus, + pszLockupType); + } + else + { + PVR_DUMPDEBUG_LOG(" %s PID = %u / %.*s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + aui8RecoveryNum, + psHWRInfo->ui32PID, + RGXFW_PROCESS_NAME_LEN, psHWRInfo->szProcName, + psHWRInfo->ui32FrameNum, + psHWRInfo->ui32ActiveHWRTData, + psHWRInfo->ui32EventStatus, + pszLockupType); + } + + if (psHWRInfo->eHWErrorCode != RGX_HW_ERR_NA) + { + IMG_CHAR sHWDebugInfo[RGX_DEBUG_STR_SIZE] = ""; + + _ID2Description(sHWDebugInfo, RGX_DEBUG_STR_SIZE, asHWErrorState, ARRAY_SIZE(asHWErrorState), + psHWRInfo->eHWErrorCode); + PVR_DUMPDEBUG_LOG(" HW error code = 0x%X: %s", + psHWRInfo->eHWErrorCode, sHWDebugInfo); + } + + pszTemp = &aui8RecoveryNum[0]; + while (*pszTemp != '\0') + { + *pszTemp++ = ' '; + } + + /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + ui64Seconds, + ui64Nanoseconds, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + + if (psHWRInfo->ui64CRTimeHWResetFinish != 0) + { + if (psHWRInfo->ui64CRTimeFreelistReady != 0) + { + /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. 
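Illustrative note, not part of the patch: the CyclesElapsed figure above and
the PreResetTimeInCycles/HWResetTimeInCycles figures below multiply CR timer
deltas by 256, on the assumption that the CR timer advances once every 256 GPU
clock cycles. A minimal sketch of the conversion, with hypothetical names:

    #include <stdint.h>

    #define CRTIME_TICK_IN_CYCLES 256ULL

    static uint64_t crtimer_delta_to_cycles(uint64_t ui64Start, uint64_t ui64End)
    {
        return (ui64End - ui64Start) * CRTIME_TICK_IN_CYCLES;
    }

The freelist-reconstruction time that follows is reported only when
ui64CRTimeHWResetFinish is less than ui64CRTimeFreelistReady; otherwise active
power management has invalidated the freelist timestamp and only the reset
times are printed.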
*/ + if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady) + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = , TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); + } + } + else + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); + } + } + +#if defined(HW_ERN_65104_BIT_MASK) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 65104)) + { + PVR_DUMPDEBUG_LOG(" Active PDS DM USCs = 0x%08x", psHWRInfo->ui32PDSActiveDMUSCs); + } +#endif - /* Find the first valid log ID, skipping whitespace... */ - do +#if defined(HW_ERN_69700_BIT_MASK) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 69700)) { - ui32Data = pui32TraceBuf[ui32HostTracePtr]; - ui32DataToId = idToStringID(ui32Data, SFs); + PVR_DUMPDEBUG_LOG(" DMs stalled waiting on PDS Store space = 0x%08x", psHWRInfo->ui32PDSStalledDMs); + } +#endif - /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */ - if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) + switch (psHWRInfo->eHWRType) + { + case RGX_HWRTYPE_BIF0FAULT: + case RGX_HWRTYPE_BIF1FAULT: { - PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType), + psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); + ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; + bPMFault = (ui32PC >= 8); + ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; } - - /* Update the trace pointer... 
*/ - ui32HostTracePtr++; - if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) + break; + case RGX_HWRTYPE_TEXASBIF0FAULT: { - ui32HostTracePtr = 0; - ui32HostWrapCount++; - } - ui32Count++; - } while ((RGXFW_SF_LAST == ui32DataToId) && - ui32Count < ui32TraceBufSizeInDWords); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) + { + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, + DD_NORMAL_INDENT); - if (ui32Count < ui32TraceBufSizeInDWords) - { - IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; - IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; - IMG_UINT64 ui64Timestamp; - IMG_UINT16 ui16DebugInfo; + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); + ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; + bPMFault = (ui32PC >= 8); + ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; + } + } + break; - /* If we hit the ASSERT message then this is the end of the log... */ - if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) + case RGX_HWRTYPE_ECCFAULT: { - PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u", - psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo, - psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath, - psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum); - break; + PVR_DUMPDEBUG_LOG(" ECC fault GPU=0x%08x", psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU); } + break; - ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | - (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]); - - ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); - ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; - - /* - * Print the trace string and provide up to 20 arguments which - * printf function will be able to use. We have already checked - * that no string uses more than this. 
- */ - OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); - - /* Check and append any extra debug info available */ - if (ui16DebugInfo) + case RGX_HWRTYPE_MMUFAULT: { - /* Prepare debug info string */ - RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); - - /* Append debug info string */ - OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); } + break; - PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)], - pui32TraceBuf[(ui32HostTracePtr + 2) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 3) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 4) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 5) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 6) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 7) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 8) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 9) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]); - - /* Update the trace pointer... */ - ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data); - if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) + case RGX_HWRTYPE_MMUMETAFAULT: { - ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords; - ui32HostWrapCount++; } - ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); + break; - /* Has the FW trace buffer overtaken the host pointer during the last line printed??? */ - if ((*pui32FWWrapCount > ui32HostWrapCount) || - ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr))) + case RGX_HWRTYPE_POLLFAILURE: { - /* Move forward to the oldest entry again... */ - PVR_DUMPDEBUG_LOG(". . ."); - ui32HostWrapCount = *pui32FWWrapCount; - ui32HostTracePtr = *pui32FWTracePtr; + PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", + psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, + ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); } - } - } - } - } -} - -#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) -void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - - /* Print the power monitoring counters... 
*/ - if (psFwSysData != NULL) - { - const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer; - IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer; - IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords; - IMG_UINT32 ui32Count = 0; - IMG_UINT64 ui64Timestamp; - - if (pui32TraceBuf == NULL) - { - /* power monitoring buffer not yet allocated */ - return; - } - - if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER) - { - PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available.")); - return; - } - ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 | - (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]); - - /* Update the trace pointer... */ - ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords; - ui32Count = (ui32Count + 3); - - PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x", - pui32TraceBuf, - ui32TracePtr, - ui32PowerMonBufSizeInDWords)); - - while (ui32Count < ui32PowerMonBufSizeInDWords) - { - /* power monitoring data is (register, value) dword pairs */ - PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x", - ui64Timestamp, - pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords], - pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords], - pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords], - pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]); - - if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID || - pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID) - { - /* end of buffer */ - break; - } - - /* Update the trace pointer... 
*/ - ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords; - ui32Count = (ui32Count + 4); - } - } -} -#endif - -static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) -{ - switch (eDevState) - { - case PVRSRV_DEVICE_STATE_INIT: - return "Initialising"; - case PVRSRV_DEVICE_STATE_ACTIVE: - return "Active"; - case PVRSRV_DEVICE_STATE_DEINIT: - return "De-initialising"; - case PVRSRV_DEVICE_STATE_BAD: - return "Bad"; - case PVRSRV_DEVICE_STATE_UNDEFINED: - PVR_ASSERT(!"Device has undefined state"); - __fallthrough; - default: - return "Unknown"; - } -} - -static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) -{ - switch (ePowerState) - { - case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; - case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; - case PVRSRV_DEV_POWER_STATE_ON: return "ON"; - default: return "UNKNOWN"; - } -} - -/* Helper macros to emit data */ -#define REG32_FMTSPEC "%-30s: 0x%08X" -#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECx -#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); -#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); -#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); -#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); -#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); - -#if !defined(NO_HARDWARE) -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) -static RGX_MIPS_REMAP_ENTRY RGXDecodeMIPSRemap(IMG_UINT64 ui64RemapReg) -{ - RGX_MIPS_REMAP_ENTRY sRemapInfo; - - sRemapInfo.ui32RemapAddrIn = - (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK) - >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT; - - sRemapInfo.ui32RemapAddrOut = - (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK) - >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT; - - sRemapInfo.ui32RemapRegionSize = - (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK) - >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT; - - return sRemapInfo; -} -#endif - -static void RGXDumpMIPSState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - RGX_MIPS_STATE sMIPSState = {0}; - PVRSRV_ERROR eError; - - eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState); - PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----"); - if (eError != PVRSRV_OK) - { - PVR_DUMPDEBUG_LOG("MIPS extra debug not available"); - } - else - { - DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC); - DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister); - DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister); - _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, - sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState); - DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister); - DDLOGVAL32("EPC", sMIPSState.ui32EPC); - DDLOGVAL32("SP", sMIPSState.ui32SP); - DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr); - _RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, - sMIPSState.ui32Debug, sMIPSState.ui32DEPC); - - { - IMG_UINT32 ui32Idx; - - IMG_BOOL bCheckBRN63553WA = - RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) && - (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN)); - - IMG_BOOL bUseRemapRanges = 
RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - RGX_MIPS_REMAP_ENTRY *psMipsRemaps = NULL; + break; - if (bUseRemapRanges) - { - psMipsRemaps = OSAllocMem(sizeof(RGX_MIPS_REMAP_ENTRY) * RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES); - PVR_LOG_RETURN_VOID_IF_FALSE(psMipsRemaps != NULL, "psMipsRemaps alloc failed."); - } -#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + case RGX_HWRTYPE_MIPSTLBFAULT: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo; - PVR_DUMPDEBUG_LOG("TLB :"); + /* This is not exactly what the MMU code does, but the result should be the same */ + const IMG_UINT32 ui32UnmappedEntry = + ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED; - for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++) - { -#if !defined(ENABLE_SECURE_MIPS_DEBUG_WA) - RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL; - RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL; -#endif - if (bUseRemapRanges) - { -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - psMipsRemaps[ui32Idx] = - RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx]); + PVR_DUMPDEBUG_LOG(" MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X" + " (page PA 0x%" IMG_UINT64_FMTSPECx", V %u)", + psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr, + ui32EntryLo, + RGXMIPSFW_TLB_GET_PA(ui32EntryLo), + ui32EntryLo & RGXMIPSFW_TLB_VALID ? 1 : 0); - psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] = - RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]); -#else - psRemapEntry0 = &sMIPSState.asRemap[ui32Idx]; - psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16]; + if (ui32EntryLo == ui32UnmappedEntry) + { + PVR_DUMPDEBUG_LOG(" Potential use-after-free detected"); + } + } + } + break; #endif - } - _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, - pvDumpDebugFile, - &sMIPSState.asTLB[ui32Idx], -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - (bUseRemapRanges) ? &psMipsRemaps[ui32Idx] : NULL, - (bUseRemapRanges) ? 
&psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, -#else - psRemapEntry0, - psRemapEntry1, -#endif - ui32Idx); + case RGX_HWRTYPE_MMURISCVFAULT: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, + psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, + DD_NORMAL_INDENT); - if (bCheckBRN63553WA) - { - const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx]; + bPageFault = IMG_TRUE; + bPMFault = IMG_FALSE; + sFaultDevVAddr.uiAddr = + (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & + ~RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK); + ui32PageSize = + (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & + ~RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK) >> + RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT; + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; + } + } + break; - #define BRN63553_TLB_IS_NUL(X) (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0)) + case RGX_HWRTYPE_OVERRUN: + case RGX_HWRTYPE_UNKNOWNFAILURE: + { + /* Nothing to dump */ + } + break; - if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1)) + default: { - PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0."); + PVR_DUMPDEBUG_LOG(" Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType); } + break; } - } - - /* This implicitly also checks for overlaps between memory and regbank addresses */ - _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf, - pvDumpDebugFile, - sMIPSState.asTLB, -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - bUseRemapRanges ? psMipsRemaps : NULL); -#else - bUseRemapRanges ? sMIPSState.asRemap : NULL); -#endif - if (bUseRemapRanges) - { - /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */ - if (sMIPSState.ui32UnmappedAddress) + if (bPageFault) { - PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X", - sMIPSState.ui32UnmappedAddress); + RGXDumpFaultInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psHWRInfo, + ui32ReadIndex, &sFaultDevVAddr, &sPCDevPAddr, bPMFault, ui32PageSize); } - } -#if defined(ENABLE_SECURE_MIPS_DEBUG_WA) - if (psMipsRemaps != NULL) - { - OSFreeMem(psMipsRemaps); - } -#endif - } - /* Check FW code corruption in case of known errors */ - if (_IsFWCodeException(RGXMIPSFW_C0_CAUSE_EXCCODE(sMIPSState.ui32CauseRegister))) - { - eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); } + + if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) + ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; + else + ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; } } - PVR_DUMPDEBUG_LOG("--------------------------------"); } -#endif - -static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - RGXRISCVFW_STATE sRiscvState; - const IMG_CHAR *pszException; - PVRSRV_ERROR eError; - - DDLOG64(FWCORE_MEM_CAT_BASE0); - DDLOG64(FWCORE_MEM_CAT_BASE1); - DDLOG64(FWCORE_MEM_CAT_BASE2); - DDLOG64(FWCORE_MEM_CAT_BASE3); - DDLOG64(FWCORE_MEM_CAT_BASE4); - DDLOG64(FWCORE_MEM_CAT_BASE5); - DDLOG64(FWCORE_MEM_CAT_BASE6); - DDLOG64(FWCORE_MEM_CAT_BASE7); - - /* Limit dump to what is currently being used */ - 
DDLOG64(FWCORE_ADDR_REMAP_CONFIG4); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG5); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG6); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG12); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG13); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG14); - - DDLOG32(FWCORE_MEM_FAULT_MMU_STATUS); - DDLOG64(FWCORE_MEM_FAULT_REQ_STATUS); - DDLOG32(FWCORE_MEM_MMU_STATUS); - DDLOG32(FWCORE_MEM_READS_EXT_STATUS); - DDLOG32(FWCORE_MEM_READS_INT_STATUS); - - PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); - -#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) - if (RGXRiscvIsHalted(psDevInfo)) - { - /* Avoid resuming the RISC-V FW as most operations - * on the debug module require a halted core */ - PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)"); - return PVRSRV_OK; - } -#endif - - eError = RGXRiscvHalt(psDevInfo); - PVR_GOTO_IF_ERROR(eError, _RISCVDMError); - -#define X(name, address) \ - eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ - PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ - DDLOGVAL32(#name, sRiscvState.name); - RGXRISCVFW_DEBUG_DUMP_REGISTERS -#undef X - - eError = RGXRiscvResume(psDevInfo); - PVR_GOTO_IF_ERROR(eError, _RISCVDMError); - - pszException = _GetRISCVException(sRiscvState.mcause); - if (pszException != NULL) - { - PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); - - eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); - } - } - return PVRSRV_OK; -_RISCVDMError: - PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); - return eError; -} +#if !defined(NO_HARDWARE) -PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - IMG_UINT32 ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; - IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles; - IMG_UINT32 ui32RegVal; - IMG_BOOL bFirmwarePerf; - IMG_BOOL bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE); - IMG_BOOL bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT); - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - PVRSRV_ERROR eError; +/*! +******************************************************************************* - PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); - PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); - PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); + @Function _CheckForPendingPage - /* Check if firmware perf was set at Init time */ - bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); + @Description - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG)) - { - DDLOG64(CORE_ID); - } - else - { - DDLOG32(CORE_ID); - } - DDLOG32(CORE_REVISION); - DDLOG32(DESIGNER_REV_FIELD1); - DDLOG32(DESIGNER_REV_FIELD2); - DDLOG64(CHANGESET_NUMBER); - if (ui32Meta) - { - DDLOG32(META_SP_MSLVIRQSTATUS); - } + Check if the MMU indicates it is blocked on a pending page + MMU4 does not support pending pages, so return false. 
- if (bMulticore) - { - DDLOG32(MULTICORE_SYSTEM); - DDLOG32(MULTICORE_GPU); - } + @Input psDevInfo - RGX device info - DDLOG64(CLK_CTRL); - DDLOG64(CLK_STATUS); - DDLOG64(CLK_CTRL2); - DDLOG64(CLK_STATUS2); + @Return IMG_BOOL - IMG_TRUE if there is a pending page - if (bS7Infra) - { - DDLOG64(CLK_XTPLUS_CTRL); - DDLOG64(CLK_XTPLUS_STATUS); - } - DDLOG32(EVENT_STATUS); - DDLOG64(TIMER); - if (bS7Infra) - { - DDLOG64(MMU_FAULT_STATUS); - DDLOG64(MMU_FAULT_STATUS_META); - } - else - { - DDLOG32(BIF_FAULT_BANK0_MMU_STATUS); - DDLOG64(BIF_FAULT_BANK0_REQ_STATUS); - DDLOG32(BIF_FAULT_BANK1_MMU_STATUS); - DDLOG64(BIF_FAULT_BANK1_REQ_STATUS); - } - DDLOG32(BIF_MMU_STATUS); - DDLOG32(BIF_MMU_ENTRY); - DDLOG64(BIF_MMU_ENTRY_STATUS); +******************************************************************************/ +static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) + /* MMU4 doesn't support pending pages */ + return (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) < 4) && + (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY) & RGX_CR_MMU_ENTRY_PENDING_EN); +#else + IMG_UINT32 ui32BIFMMUEntry; - if (bS7Infra) - { - DDLOG32(BIF_JONES_OUTSTANDING_READ); - DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ); - DDLOG32(BIF_DUST_OUTSTANDING_READ); - } - else - { - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))) - { - DDLOG32(BIF_STATUS_MMU); - DDLOG32(BIF_READS_EXT_STATUS); - DDLOG32(BIF_READS_INT_STATUS); - } - DDLOG32(BIFPM_STATUS_MMU); - DDLOG32(BIFPM_READS_EXT_STATUS); - DDLOG32(BIFPM_READS_INT_STATUS); - } + ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) + if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) { - DDLOG64(CONTEXT_MAPPING0); - DDLOG64(CONTEXT_MAPPING1); - DDLOG64(CONTEXT_MAPPING2); - DDLOG64(CONTEXT_MAPPING3); - DDLOG64(CONTEXT_MAPPING4); + return IMG_TRUE; } else { - DDLOG64(BIF_CAT_BASE_INDEX); - DDLOG64(BIF_CAT_BASE0); - DDLOG64(BIF_CAT_BASE1); - DDLOG64(BIF_CAT_BASE2); - DDLOG64(BIF_CAT_BASE3); - DDLOG64(BIF_CAT_BASE4); - DDLOG64(BIF_CAT_BASE5); - DDLOG64(BIF_CAT_BASE6); - DDLOG64(BIF_CAT_BASE7); + return IMG_FALSE; } +#endif +} - DDLOG32(BIF_CTRL_INVAL); - DDLOG32(BIF_CTRL); - - DDLOG64(BIF_PM_CAT_BASE_VCE0); - DDLOG64(BIF_PM_CAT_BASE_TE0); - DDLOG64(BIF_PM_CAT_BASE_ALIST0); - DDLOG64(BIF_PM_CAT_BASE_VCE1); - DDLOG64(BIF_PM_CAT_BASE_TE1); - DDLOG64(BIF_PM_CAT_BASE_ALIST1); +/*! +******************************************************************************* - if (bMulticore) - { - DDLOG32(MULTICORE_GEOMETRY_CTRL_COMMON); - DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); - DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); - } + @Function _GetPendingPageInfo - DDLOG32(PERF_TA_PHASE); - DDLOG32(PERF_TA_CYCLE); - DDLOG32(PERF_3D_PHASE); - DDLOG32(PERF_3D_CYCLE); + @Description - ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE); - ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE); - ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE); - ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? 
(ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0; - DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles); - DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles); + Get information about the pending page from the MMU status registers - DDLOG32(PERF_COMPUTE_PHASE); - DDLOG32(PERF_COMPUTE_CYCLE); + @Input psDevInfo - RGX device info + @Output psDevVAddr - The device virtual address of the pending MMU address translation + @Output pui32CatBase - The page catalog base - DDLOG32(PM_PARTIAL_RENDER_ENABLE); + @Return void - DDLOG32(ISP_RENDER); - DDLOG64(TLA_STATUS); - DDLOG64(MCU_FENCE); +******************************************************************************/ +static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 *pui32CatBase) +{ + IMG_UINT64 ui64BIFMMUEntryStatus; - DDLOG32(VDM_CONTEXT_STORE_STATUS); - DDLOG64(VDM_CONTEXT_STORE_TASK0); - DDLOG64(VDM_CONTEXT_STORE_TASK1); - DDLOG64(VDM_CONTEXT_STORE_TASK2); - DDLOG64(VDM_CONTEXT_RESUME_TASK0); - DDLOG64(VDM_CONTEXT_RESUME_TASK1); - DDLOG64(VDM_CONTEXT_RESUME_TASK2); + ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS); - DDLOG32(ISP_CTL); - DDLOG32(ISP_STATUS); - DDLOG32(MTS_INTCTX); - DDLOG32(MTS_BGCTX); - DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); - DDLOG32(MTS_SCHEDULE); - DDLOG32(MTS_GPU_INT_STATUS); + psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); - DDLOG32(CDM_CONTEXT_STORE_STATUS); - DDLOG64(CDM_CONTEXT_PDS0); - DDLOG64(CDM_CONTEXT_PDS1); - DDLOG64(CDM_TERMINATE_PDS); - DDLOG64(CDM_TERMINATE_PDS1); + *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >> + RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT; +} - if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025)) - { - DDLOG64(CDM_CONTEXT_LOAD_PDS0); - DDLOG64(CDM_CONTEXT_LOAD_PDS1); - } +#endif - if (bS7Infra) - { - DDLOG32(JONES_IDLE); - } +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON) +{ + const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + IMG_UINT32 ui32DriverID; + const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + /* space for the current clock speed and 3 previous */ + RGXFWIF_TIME_CORR asTimeCorrs[4]; + IMG_UINT32 ui32NumClockSpeedChanges; - DDLOG32(SIDEKICK_IDLE); + /* Should invalidate all reads below including when passed to functions. 
*/ + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfRuntimeCfg, INVALIDATE); - if (!bS7Infra) +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); +#else + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - DDLOG32(SLC_IDLE); - DDLOG32(SLC_STATUS0); - DDLOG64(SLC_STATUS1); - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS)) { - DDLOG64(SLC_STATUS2); - } + IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus; - DDLOG32(SLC_CTRL_BYPASS); - DDLOG64(SLC_CTRL_MISC); - } - else - { - DDLOG32(SLC3_IDLE); - DDLOG64(SLC3_STATUS); - DDLOG32(SLC3_FAULT_STOP_STATUS); - } + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE) && - RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) - { - DDLOG32(SAFETY_EVENT_STATUS__ROGUEXE); - DDLOG32(MTS_SAFETY_EVENT_ENABLE__ROGUEXE); - } + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) - { - DDLOG32(FWCORE_WDT_CTRL); - } + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF))) + { + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS); + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } - if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) - { - DDLOG32(SCRATCH0); - DDLOG32(SCRATCH1); - DDLOG32(SCRATCH2); - DDLOG32(SCRATCH3); - DDLOG32(SCRATCH4); - DDLOG32(SCRATCH5); - DDLOG32(SCRATCH6); - DDLOG32(SCRATCH7); - DDLOG32(SCRATCH8); - DDLOG32(SCRATCH9); - DDLOG32(SCRATCH10); - DDLOG32(SCRATCH11); - DDLOG32(SCRATCH12); - DDLOG32(SCRATCH13); - DDLOG32(SCRATCH14); - DDLOG32(SCRATCH15); - } + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS); + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } - if (ui32Meta) - { - IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) + { + IMG_UINT32 ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0; - /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); + if (ui32PhantomCnt > 1) + { + IMG_UINT32 ui32Phantom; + for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++) + { + /* This can't be done as it may interfere with the FW... 
*/ + /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/ - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T0 TXENABLE", ui32RegVal); - if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) - { - bIsT0Enabled = IMG_TRUE; - } + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T0 TXSTATUS", ui32RegVal); + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } + }else + { + ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); + ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); - /* check for FW fault */ - if (((ui32RegVal >> 20) & 0x3) == 0x2) - { - bIsFWFaulted = IMG_TRUE; + _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); + } + } } - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T0 TXDEFR", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T0 PC", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T0 PCX", ui32RegVal); + if (_CheckForPendingPage(psDevInfo)) + { + IMG_UINT32 ui32CatBase; + IMG_DEV_VIRTADDR sDevVAddr; - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T0 SP", ui32RegVal); + PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); - if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) - { - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T1 TXENABLE", ui32RegVal); + _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase); - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T1 TXSTATUS", ui32RegVal); + if (ui32CatBase >= 8) + { + PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); + } + else + { + IMG_DEV_PHYADDR sPCDevPAddr; + MMU_FAULT_DATA sFaultData; + IMG_BOOL bIsValid = IMG_TRUE; - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T1 TXDEFR", ui32RegVal); +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + IMG_UINT64 ui64CBaseMapping; + IMG_UINT32 ui32CBaseMapCtxReg; - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T1 PC", ui32RegVal); + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + { + ui32CBaseMapCtxReg = 
RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T1 PCX", ui32RegVal); + OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase); - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T1 SP", ui32RegVal); - } + ui64CBaseMapping = OSReadUncheckedHWReg64(psDevInfo->pvSecureRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1); + sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK) + >> RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT) + << RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT); + bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_EN); + } + else + { + ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT; - if (bFirmwarePerf) - { - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("PERF_COUNT0", ui32RegVal); + OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase); - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("PERF_COUNT1", ui32RegVal); - } + ui64CBaseMapping = OSReadUncheckedHWReg64(psDevInfo->pvSecureRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING); + sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK) + >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) + << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT); + bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN); + } +#else + sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase)); +#endif - if (bIsT0Enabled & bIsFWFaulted) - { - eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC + " on cat base %u. PC Addr = 0x%" IMG_UINT64_FMTSPECx " is %s", + sDevVAddr.uiAddr, + ui32CatBase, + sPCDevPAddr.uiAddr, + bIsValid ? 
"valid":"invalid"); + RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); + RGXDumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); } } - else if (bIsFWFaulted) - { - PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); - } } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - DDLOG32(MIPS_ADDR_REMAP1_CONFIG1); - DDLOG64(MIPS_ADDR_REMAP1_CONFIG2); - DDLOG32(MIPS_ADDR_REMAP2_CONFIG1); - DDLOG64(MIPS_ADDR_REMAP2_CONFIG2); - DDLOG32(MIPS_ADDR_REMAP3_CONFIG1); - DDLOG64(MIPS_ADDR_REMAP3_CONFIG2); - DDLOG32(MIPS_ADDR_REMAP4_CONFIG1); - DDLOG64(MIPS_ADDR_REMAP4_CONFIG2); - DDLOG32(MIPS_ADDR_REMAP5_CONFIG1); - DDLOG64(MIPS_ADDR_REMAP5_CONFIG2); - DDLOG64(MIPS_WRAPPER_CONFIG); - DDLOG32(MIPS_EXCEPTION_STATUS); +#endif /* NO_HARDWARE */ #if !defined(NO_HARDWARE) - RGXDumpMIPSState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -#endif + /* Determine the type virtualisation support used */ +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) + { +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +#if defined(SUPPORT_AUTOVZ) +#if defined(SUPPORT_AUTOVZ_HW_REGS) + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support"); +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory"); +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation"); +#endif /* defined(SUPPORT_AUTOVZ) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ } +#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) { - eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - PVR_RETURN_IF_ERROR(eError); - } + RGXFWIF_CONNECTION_FW_STATE eFwState; + RGXFWIF_CONNECTION_OS_STATE eOsState; - return PVRSRV_OK; + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); -_METASPError: - PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); - _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + eFwState = KM_GET_FW_CONNECTION(psDevInfo); + eOsState = KM_GET_OS_CONNECTION(psDevInfo); - return eError; -} + PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)", + ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"), + (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"), + (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? (apszFwOsStateName[eOsState]) : ("invalid")); -#undef REG32_FMTSPEC -#undef REG64_FMTSPEC -#undef DDLOG32 -#undef DDLOG64 -#undef DDLOG32_DPX -#undef DDLOG64_DPX -#undef DDLOGVAL32 + } +#endif -/*! 
-******************************************************************************* +#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) + { + IMG_UINT32 ui32FwAliveTS; + IMG_UINT32 ui32OsAliveTS; - @Function RGXDebugRequestProcess + KM_ALIVE_TOKEN_CACHEOP(Fw, INVALIDATE); + KM_ALIVE_TOKEN_CACHEOP(Os, INVALIDATE); - @Description + ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); + ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); - This function will print out the debug for the specified level of verbosity + PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", + ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); + } +#endif +#endif /* !defined(NO_HARDWARE) */ - @Input pfnDumpDebugPrintf - Optional replacement print function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - @Input ui32VerbLevel - Verbosity level + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; + IMG_BOOL bDriverIsolationEnabled = IMG_FALSE; + IMG_UINT32 ui32HostIsolationGroup; - @Return void + sHwrStateDescription[0] = '\0'; -******************************************************************************/ -static -void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32VerbLevel) -{ - PVRSRV_ERROR eError; - PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - PVRSRV_DEV_POWER_STATE ePowerState; - IMG_BOOL bRGXPoweredON; - IMG_UINT8 ui8FwOsCount; - RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; - IMG_BOOL bPwrLockAlreadyHeld; - - bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); - if (!bPwrLockAlreadyHeld) - { - /* Only acquire the power-lock if not already held by the calling context */ - eError = PVRSRVPowerLock(psDeviceNode); - if (eError != PVRSRV_OK) + DebugCommonFlagStrings(sHwrStateDescription, RGX_DEBUG_STR_SIZE, + asHwrState2Description, ARRAY_SIZE(asHwrState2Description), + psFwSysData->ui32HWRStateFlags); + PVR_DUMPDEBUG_LOG("RGX HWR State 0x%08x:%s", psFwSysData->ui32HWRStateFlags, sHwrStateDescription); + PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)", + (psFwSysData->ePowState < ARRAY_SIZE(pszPowStateName) ? pszPowStateName[psFwSysData->ePowState] : "???"), + (psDevInfo->pvAPMISRData)?"enabled":"disabled", + psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqDenied, + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqRetry, + psDevInfo->ui32ActivePMReqTotal - + psDevInfo->ui32ActivePMReqOk - + psDevInfo->ui32ActivePMReqDenied - + psDevInfo->ui32ActivePMReqRetry - + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqTotal, + psRuntimeCfg->ui32ActivePMLatencyms); + + ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); + RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); + + PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " + "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). 
" + "FW frequency: %u.%03u MHz.", + ui32NumClockSpeedChanges, + asTimeCorrs[0].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[0].ui64OSTimeStamp, + psRuntimeCfg->ui32CoreClockSpeed / 1000000, + (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); + if (ui32NumClockSpeedChanges > 0) { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return; + PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " + "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", + asTimeCorrs[1].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[2].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[3].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[1].ui64OSTimeStamp, + asTimeCorrs[2].ui64OSTimeStamp, + asTimeCorrs[3].ui64OSTimeStamp); } - } - ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + ui32HostIsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID]; - eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Error retrieving RGX power state. No debug info dumped.", - __func__)); - goto Exit; - } + FOREACH_SUPPORTED_DRIVER(ui32DriverID) + { + RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID]; + IMG_UINT32 ui32IsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]; + IMG_BOOL bMTSEnabled = IMG_FALSE; - if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || - (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) - { - PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", - (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount); - } +#if !defined(NO_HARDWARE) + if (bRGXPoweredON) + { + bMTSEnabled = ( +#if defined(FIX_HW_BRN_64502_BIT_MASK) + RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || +#endif + !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? + IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32DriverID)) != 0); + } +#endif - PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %u; Isolation group: %u; Time Slice%s: %u%% (%ums); %s", ui32DriverID, + apszFwOsStateName[sFwRunFlags.bfOsState], + (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", + (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", + psDevInfo->psRGXFWIfRuntimeCfg->ai32DriverPriority[ui32DriverID], + ui32IsolationGroup, + (psDevInfo->psRGXFWIfRuntimeCfg->aui32TSPercentage[ui32DriverID] != 0) ? "" : "*", + psFwSysData->aui32TSMirror[ui32DriverID], + (psFwSysData->aui32TSMirror[ui32DriverID] * + psDevInfo->psRGXFWIfRuntimeCfg->ui32TSIntervalMs / 100), + (bMTSEnabled) ? 
"MTS on;" : "MTS off;" + ); - bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); + if (ui32IsolationGroup != ui32HostIsolationGroup) + { + bDriverIsolationEnabled = IMG_TRUE; + } - PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); - PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo); - PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, - psDevInfo->sDevFeatureCfg.ui32V, - psDevInfo->sDevFeatureCfg.ui32N, - psDevInfo->sDevFeatureCfg.ui32C, - PVR_ARCH_NAME); - PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState)); - PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); - if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) - { - PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); - } - else - { - PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); - } + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) + { + /* don't print guest information on native mode drivers */ + break; + } + } - RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); +#if defined(PVR_ENABLE_PHR) + { + IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; - /* Dump out the kernel CCB. */ - { - const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + sPHRConfigDescription[0] = '\0'; + DebugCommonFlagStrings(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, + asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), + BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode)); - if (psKCCBCtl != NULL) - { - PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", - psKCCBCtl->ui32WriteOffset, - psKCCBCtl->ui32ReadOffset); + PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %.*s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, RGX_DEBUG_STR_SIZE, sPHRConfigDescription); } - } +#endif - /* Dump out the firmware CCB. */ - { - const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl; + if (bRGXPoweredON && RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + if (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) > 1U) + { + PVR_DUMPDEBUG_LOG("RGX MC Configuration: 0x%X (1:primary, 0:secondary)", psFwSysData->ui32McConfig); + } + } - if (psFCCBCtl != NULL) + if (bDriverIsolationEnabled) { - PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X", - psFCCBCtl->ui32WriteOffset, - psFCCBCtl->ui32ReadOffset); + PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); } - } -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Dump out the Workload estimation CCB. 
*/ + _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); + _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + } + else { - const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; - - if (psWorkEstCCBCtl != NULL) - { - PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", - psWorkEstCCBCtl->ui32WriteOffset, - psWorkEstCCBCtl->ui32ReadOffset); - } + PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); + PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); } -#endif + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, INVALIDATE); + _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); - if (psFwOsData != NULL) +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) + /* Dump all non-zero values in lines of 8... */ { - /* Dump the KCCB commands executed */ - PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d", - psFwOsData->ui32KCCBCmdsExecuted); - -#if defined(PVRSRV_STALLED_CCB_ACTION) - /* Dump the number of times we have performed a forced UFO update, - * and (if non-zero) the timestamp of the most recent occurrence/ - */ - PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d", - psFwOsData->ui32ForcedUpdatesRequested); - if (psFwOsData->ui32ForcedUpdatesRequested > 0) + IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; + const IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; + IMG_UINT32 ui32Index1, ui32Index2; + + PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); + for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) { - IMG_UINT8 ui8Idx; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; + IMG_UINT32 ui32OrOfValues = 0; + IMG_CHAR *pszBuf = pszLine; - if (psFwOsData->ui64LastForcedUpdateTime > 0ULL) - { - ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds); - PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")", - ui64Seconds, ui64Nanoseconds); - } - else - { - PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)"); - } - /* Dump SLR log */ - if (psFwOsData->sSLRLogFirst.aszCCBName[0]) + /* Print all values in this line and skip if all zero... 
*/
+ for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++)
 {
- ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
- PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
- "} Fence found on context 0x%x '%s' has %d UFOs",
- ui64Seconds, ui64Nanoseconds,
- psFwOsData->sSLRLogFirst.ui32FWCtxAddr,
- psFwOsData->sSLRLogFirst.aszCCBName,
- psFwOsData->sSLRLogFirst.ui32NumUFOs);
+ ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2];
+ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]);
+ pszBuf += 9; /* write over the '\0' */
 }
- for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES;ui8Idx++)
- {
- if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0])
- {
- ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
- PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
- "] Fence found on context 0x%x '%s' has %d UFOs",
- ui64Seconds, ui64Nanoseconds,
- psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr,
- psFwOsData->sSLRLog[ui8Idx].aszCCBName,
- psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs);
- }
- }
- }
-#else
- PVR_DUMPDEBUG_LOG("RGX SLR: Disabled");
+ PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine);
 }
 }
-#endif
+ PVR_DUMPDEBUG_LOG("STATS[END]");
+ }
 #endif
+}
+
+#if !defined(NO_HARDWARE)
+PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+ RGXRISCVFW_STATE sRiscvState;
+ const IMG_CHAR *pszException;
+ PVRSRV_ERROR eError;
- /* Dump the error counts */
- PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d",
- psDevInfo->sErrorCounts.ui32WGPErrorCount,
- psDevInfo->sErrorCounts.ui32TRPErrorCount);
+ DDLOG64(FWCORE_MEM_CAT_BASE0);
+ DDLOG64(FWCORE_MEM_CAT_BASE1);
+ DDLOG64(FWCORE_MEM_CAT_BASE2);
+ DDLOG64(FWCORE_MEM_CAT_BASE3);
+ DDLOG64(FWCORE_MEM_CAT_BASE4);
+ DDLOG64(FWCORE_MEM_CAT_BASE5);
+ DDLOG64(FWCORE_MEM_CAT_BASE6);
+ DDLOG64(FWCORE_MEM_CAT_BASE7);
- /* Dump the IRQ info for threads or OS IDs */
-#if defined(RGX_FW_IRQ_OS_COUNTERS)
- /* only Host has access to registers containing IRQ counters */
- if (!PVRSRV_VZ_MODE_IS(GUEST))
+ /* Limit dump to what is currently being used */
+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX)
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4)
+ {
+ DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4);
+ DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4);
+ DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4);
+ DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4);
+ DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4);
+ DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4);
+ }
+ else
 #endif
- {
- IMG_UINT32 ui32idx;
+ {
+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG4);
+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG5);
+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG6);
+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG12);
+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG13);
+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG14);
+ }
- for_each_irq_cnt(ui32idx)
- {
- IMG_UINT32 ui32IrqCnt;
+ DDLOG32(FWCORE_MEM_FAULT_MMU_STATUS);
+ DDLOG64(FWCORE_MEM_FAULT_REQ_STATUS);
+ DDLOG32(FWCORE_MEM_MMU_STATUS);
+ DDLOG32(FWCORE_MEM_READS_EXT_STATUS);
+ DDLOG32(FWCORE_MEM_READS_INT_STATUS);
- get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
- if (ui32IrqCnt)
- {
- PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt);
-#if defined(RGX_FW_IRQ_OS_COUNTERS)
- if (ui32idx == RGXFW_HOST_OS)
-#endif - { - PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]); - } - } - } - } + PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); + +#if defined(SUPPORT_RISCV_GDB) + if (RGXRiscvIsHalted(psDevInfo)) + { + /* Avoid resuming the RISC-V FW as most operations + * on the debug module require a halted core */ + PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)"); + return PVRSRV_OK; } +#endif + + eError = RGXRiscvHalt(psDevInfo); + PVR_GOTO_IF_ERROR(eError, _RISCVDMError); + +#define X(name, address) \ + eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ + PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ + DDLOGVAL32(#name, sRiscvState.name); + + RGXRISCVFW_DEBUG_DUMP_REGISTERS +#undef X + + eError = RGXRiscvResume(psDevInfo); + PVR_GOTO_IF_ERROR(eError, _RISCVDMError); - /* Dump the FW Sys config flags on the Host */ - if (!PVRSRV_VZ_MODE_IS(GUEST)) + pszException = _GetRISCVException(sRiscvState.mcause); + if (pszException != NULL) { - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); - if (!psFwSysData) + eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); - goto Exit; + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); } - - _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); - PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); } - /* Dump the FW OS config flags */ - { - IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + return PVRSRV_OK; - if (!psFwOsData) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); - goto Exit; - } +_RISCVDMError: + PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); - _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); - PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); - } + return eError; +} - if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) - { +void RGXDumpCoreRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: RGXDumpRGXRegisters failed (%s)", - __func__, - PVRSRVGetErrorString(eError))); - } + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG)) + { + DDLOG64(CORE_ID__PBVNC); } else { - PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? 
"Guest Mode of operation" : "RGX power is down"); + DDLOG32(CORE_ID); + DDLOG32(CORE_REVISION); } + DDLOG32(DESIGNER_REV_FIELD1); + DDLOG32(DESIGNER_REV_FIELD2); + DDLOG64(CHANGESET_NUMBER); +} - PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); +void RGXDumpMulticoreRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) - { - IMG_INT tid; - /* Dump FW trace information */ - if (psRGXFWIfTraceBufCtl != NULL) - { - for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) - { - IMG_UINT32 i; - IMG_BOOL bPrevLineWasZero = IMG_FALSE; - IMG_BOOL bLineIsAllZeros = IMG_FALSE; - IMG_UINT32 ui32CountLines = 0; - IMG_UINT32 *pui32TraceBuffer; - IMG_CHAR *pszLine; - - if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) - { - PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", - ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), - RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) - ); - } - else - { - PVR_DUMPDEBUG_LOG("Debug log type: none"); - } + DDLOG32(MULTICORE_SYSTEM); + DDLOG32(MULTICORE_GPU); +} - pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; +void RGXDumpClkRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - /* Skip if trace buffer is not allocated */ - if (pui32TraceBuffer == NULL) - { - PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); - continue; - } + DDLOG64(CLK_CTRL); + DDLOG64(CLK_STATUS); + DDLOG64(CLK_CTRL2); + DDLOG64(CLK_STATUS2); +} -/* Max number of DWords to be printed per line, in debug dump output */ -#define PVR_DD_FW_TRACEBUF_LINESIZE 30U - /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. 
*/ - pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); - if (pszLine == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Out of mem allocating line string (size: %d)", - __func__, - 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); - goto Exit; - } +void RGXDumpMMURegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); - PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); - PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords); + { + DDLOG32(BIF_FAULT_BANK0_MMU_STATUS); + DDLOG64(BIF_FAULT_BANK0_REQ_STATUS); + DDLOG32(BIF_FAULT_BANK1_MMU_STATUS); + DDLOG64(BIF_FAULT_BANK1_REQ_STATUS); + } + DDLOG32(BIF_MMU_STATUS); + DDLOG32(BIF_MMU_ENTRY); + DDLOG64(BIF_MMU_ENTRY_STATUS); - for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) - { - IMG_UINT32 k = 0; - IMG_UINT32 ui32Line = 0x0; - IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); - IMG_CHAR *pszBuf = pszLine; + { + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))) + { + DDLOG32(BIF_STATUS_MMU); + DDLOG32(BIF_READS_EXT_STATUS); + DDLOG32(BIF_READS_INT_STATUS); + } + DDLOG32(BIFPM_STATUS_MMU); + DDLOG32(BIFPM_READS_EXT_STATUS); + DDLOG32(BIFPM_READS_INT_STATUS); + } - for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) - { - if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords) - { - /* Stop reading when the index goes beyond trace buffer size. This condition is - * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not - * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ - break; - } + { + DDLOG64(BIF_CAT_BASE_INDEX); + DDLOG64(BIF_CAT_BASE0); + DDLOG64(BIF_CAT_BASE1); + DDLOG64(BIF_CAT_BASE2); + DDLOG64(BIF_CAT_BASE3); + DDLOG64(BIF_CAT_BASE4); + DDLOG64(BIF_CAT_BASE5); + DDLOG64(BIF_CAT_BASE6); + DDLOG64(BIF_CAT_BASE7); + } - ui32Line |= pui32TraceBuffer[i + k]; + DDLOG32(BIF_CTRL_INVAL); + DDLOG32(BIF_CTRL); +} - /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ - OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); - pszBuf += 9; /* write over the '\0' */ - } +void RGXDumpDMRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles; - bLineIsAllZeros = (ui32Line == 0x0); + DDLOG64(BIF_PM_CAT_BASE_VCE0); + DDLOG64(BIF_PM_CAT_BASE_TE0); + DDLOG64(BIF_PM_CAT_BASE_ALIST0); + DDLOG64(BIF_PM_CAT_BASE_VCE1); + DDLOG64(BIF_PM_CAT_BASE_TE1); + DDLOG64(BIF_PM_CAT_BASE_ALIST1); - if (bLineIsAllZeros) - { - if (bPrevLineWasZero) - { - ui32CountLines++; - } - else - { - bPrevLineWasZero = IMG_TRUE; - ui32CountLines = 1; - PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); - } - } - else - { - if (bPrevLineWasZero && ui32CountLines > 1) - { - PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); - } - bPrevLineWasZero = IMG_FALSE; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + DDLOG32(MULTICORE_GEOMETRY_CTRL_COMMON); + DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); + DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); + } - PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); - } + DDLOG32(PERF_TA_PHASE); + DDLOG32(PERF_TA_CYCLE); + DDLOG32(PERF_3D_PHASE); + DDLOG32(PERF_3D_CYCLE); - } - if (bPrevLineWasZero) - { - PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); - } + ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE); + ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE); + ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE); + ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? (ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0; + DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles); + DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles); - PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); + DDLOG32(PERF_COMPUTE_PHASE); + DDLOG32(PERF_COMPUTE_CYCLE); - OSFreeMem(pszLine); - } - } + DDLOG32(PM_PARTIAL_RENDER_ENABLE); - { - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) - { - PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); - } - else - { - PVR_DUMPDEBUG_LOG("------[ FWCtxs Next CMD ]------"); - } + DDLOG32(ISP_RENDER); + DDLOG64(TLA_STATUS); + DDLOG64(MCU_FENCE); - DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DDLOG32(VDM_CONTEXT_STORE_STATUS); + DDLOG64(VDM_CONTEXT_STORE_TASK0); + DDLOG64(VDM_CONTEXT_STORE_TASK1); + DDLOG64(VDM_CONTEXT_STORE_TASK2); + DDLOG64(VDM_CONTEXT_RESUME_TASK0); + DDLOG64(VDM_CONTEXT_RESUME_TASK1); + DDLOG64(VDM_CONTEXT_RESUME_TASK2); - DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DDLOG32(ISP_CTL); + DDLOG32(ISP_STATUS); - DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DDLOG32(CDM_CONTEXT_STORE_STATUS); + DDLOG64(CDM_CONTEXT_PDS0); + DDLOG64(CDM_CONTEXT_PDS1); + DDLOG64(CDM_TERMINATE_PDS); + DDLOG64(CDM_TERMINATE_PDS1); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) - { - DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); - } - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) - { - DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); - } - } + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025)) + { + DDLOG64(CDM_CONTEXT_LOAD_PDS0); + DDLOG64(CDM_CONTEXT_LOAD_PDS1); } +} - PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); +void RGXDumpSLCRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; -Exit: - if (!bPwrLockAlreadyHeld) { - PVRSRVPowerUnlock(psDeviceNode); + DDLOG32(SLC_IDLE); + DDLOG32(SLC_STATUS0); + DDLOG64(SLC_STATUS1); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS)) + { + DDLOG64(SLC_STATUS2); + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + DDLOG64(SLC_CTRL_BYPASS); + } + else + { + DDLOG32(SLC_CTRL_BYPASS); + } + DDLOG64(SLC_CTRL_MISC); } } -/*! 
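
The trace-buffer printer being removed here walks the buffer in rows of PVR_DD_FW_TRACEBUF_LINESIZE DWords, formats each DWord as " %08x" (nine characters, hence the 9 * linesize + 1 allocation), and collapses consecutive all-zero rows into a single "N lines were all zero" message. Below is a simplified, self-contained sketch of that formatting and run-length collapsing, with a smaller row size and plain printf in place of the driver's logging macro.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_LINESIZE 4u   /* DWords per printed row (the driver uses 30) */

static void demo_dump(const uint32_t *buf, uint32_t ndwords)
{
    char line[9 * DEMO_LINESIZE + 1];   /* " %08x" per DWord + '\0' */
    uint32_t zero_run = 0;

    for (uint32_t i = 0; i < ndwords; i += DEMO_LINESIZE) {
        uint32_t accum = 0;
        char *p = line;

        for (uint32_t k = 0; k < DEMO_LINESIZE && (i + k) < ndwords; k++) {
            accum |= buf[i + k];
            p += snprintf(p, 10, " %08x", buf[i + k]);
        }

        if (accum == 0) {
            zero_run++;                 /* defer printing zero rows */
            continue;
        }
        if (zero_run) {
            printf("[...]: %u lines were all zero\n", zero_run);
            zero_run = 0;
        }
        printf("[%08x]:%s\n", (unsigned)(i * sizeof(uint32_t)), line);
    }
    if (zero_run)
        printf("[END]: %u lines were all zero\n", zero_run);
}

int main(void)
{
    uint32_t buf[14] = { 0x1, 0x2, 0, 0, 0, 0, 0, 0, 0, 0, 0xdead, 0xbeef, 0, 0 };
    demo_dump(buf, 14);
    return 0;
}
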
- ****************************************************************************** +void RGXDumpMiscRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - @Function RGXDebugRequestNotify + DDLOG32(SIDEKICK_IDLE); - @Description Dump the debug data for RGX + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE) && + RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) + { + DDLOG32(SAFETY_EVENT_STATUS__ROGUEXE); + DDLOG32(MTS_SAFETY_EVENT_ENABLE__ROGUEXE); + } - ******************************************************************************/ -static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, - IMG_UINT32 ui32VerbLevel, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) + { + DDLOG32(FWCORE_WDT_CTRL); + } - /* Only action the request if we've fully init'ed */ - if (psDevInfo->bDevInit2Done) + if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) { - RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); + DDLOG32(SCRATCH0); + DDLOG32(SCRATCH1); + DDLOG32(SCRATCH2); + DDLOG32(SCRATCH3); + DDLOG32(SCRATCH4); + DDLOG32(SCRATCH5); + DDLOG32(SCRATCH6); + DDLOG32(SCRATCH7); + DDLOG32(SCRATCH8); + DDLOG32(SCRATCH9); + DDLOG32(SCRATCH10); + DDLOG32(SCRATCH11); + DDLOG32(SCRATCH12); + DDLOG32(SCRATCH13); + DDLOG32(SCRATCH14); + DDLOG32(SCRATCH15); } } +#endif /* !defined(NO_HARDWARE) */ -PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify, - psDevInfo->psDeviceNode, - RGXDebugRequestNotify, - DEBUG_REQUEST_RGX, - psDevInfo); -} +#undef REG32_FMTSPEC +#undef REG64_FMTSPEC +#undef DDLOG32 +#undef DDLOG64 +#undef DDLOG32_DPX +#undef DDLOG64_DPX +#undef DDLOGVAL32 -PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +void RGXDumpAllContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) { - if (psDevInfo->hDbgReqNotify) + DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) + DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); +#endif + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) { - return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify); + DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) + { + DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); } - - /* No notifier registered */ - return PVRSRV_OK; } /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.h deleted file mode 100644 index f163997ac562..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdebug.h +++ /dev/null @@ -1,229 +0,0 @@ -/*************************************************************************/ /*! 
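
RGXDebugInit and RGXDebugDeinit, moved out of this file, simply register and unregister RGXDebugRequestNotify as a device debug-request notifier, and the callback bails out unless bDevInit2Done is set so a dump is never attempted on a half-initialised device. A minimal sketch of that register-then-guard pattern follows, with hypothetical names standing in for the pvr_notifier API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical debug-request notifier; the real driver registers its
 * callback via PVRSRVRegisterDeviceDbgRequestNotify. */
typedef void (*dbg_notify_fn)(void *handle, unsigned verb_level);

struct demo_device {
    bool init_done;            /* only dump once fully initialised */
    dbg_notify_fn notify;
    void *notify_handle;
};

static void demo_debug_notify(void *handle, unsigned verb_level)
{
    struct demo_device *dev = handle;

    if (!dev->init_done)       /* mirror the bDevInit2Done guard */
        return;
    printf("dumping debug data at verbosity %u\n", verb_level);
}

static void demo_register_notify(struct demo_device *dev,
                                 dbg_notify_fn fn, void *handle)
{
    dev->notify = fn;
    dev->notify_handle = handle;
}

int main(void)
{
    struct demo_device dev = { .init_done = false };

    demo_register_notify(&dev, demo_debug_notify, &dev);
    dev.notify(dev.notify_handle, 1);   /* ignored: not initialised yet */
    dev.init_done = true;
    dev.notify(dev.notify_handle, 2);   /* now dumps */
    return 0;
}
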
-@File -@Title RGX debug header file -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Header for the RGX debugging functions -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#if !defined(RGXDEBUG_H) -#define RGXDEBUG_H - -#include "pvrsrv_error.h" -#include "img_types.h" -#include "device.h" -#include "pvr_notifier.h" -#include "pvrsrv.h" -#include "rgxdevice.h" - -/** - * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in - * LISR for each RGX FW thread. - * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. - */ - -#if defined(RGX_FW_IRQ_OS_COUNTERS) -#define for_each_irq_cnt(ui32idx) \ - for (ui32idx = 0; ui32idx < RGX_NUM_OS_SUPPORTED; ui32idx++) - -#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ - do { \ - extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS]; \ - ui32Dest = PVRSRV_VZ_MODE_IS(GUEST) ? 
0 : OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \ - } while (false) - -#define MSG_IRQ_CNT_TYPE "OS" - -#else - -#define for_each_irq_cnt(ui32idx) \ - for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++) - -#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ - ui32Dest = (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx] - -#define MSG_IRQ_CNT_TYPE "Thread" -#endif /* RGX_FW_IRQ_OS_COUNTERS */ - -static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo) -{ -#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG) - IMG_UINT32 ui32idx; - - for_each_irq_cnt(ui32idx) - { - IMG_UINT32 ui32IrqCnt; - - get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo); - - PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE - " %u FW IRQ count = %u", ui32idx, ui32IrqCnt)); - -#if defined(RGX_FW_IRQ_OS_COUNTERS) - if (ui32idx == RGXFW_HOST_OS) -#endif - { - PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u", - (psRgxDevInfo)->aui32SampleIRQCount[ui32idx])); - } - } -#endif /* PVRSRV_NEED_PVR_DPF */ -} - -extern const IMG_CHAR * const gapszMipsPermissionPTFlags[4]; -extern const IMG_CHAR * const gapszMipsCoherencyPTFlags[8]; -extern const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8]; -/*! -******************************************************************************* - - @Function RGXDumpRGXRegisters - - @Description - - Dumps an extensive list of RGX registers required for debugging - - @Input pfnDumpDebugPrintf - Optional replacement print function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - - @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise - -******************************************************************************/ -PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* - - @Function RGXDumpFirmwareTrace - - @Description Dumps the decoded version of the firmware trace buffer. - - Dump useful debugging info - - @Input pfnDumpDebugPrintf - Optional replacement print function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - - @Return void - -******************************************************************************/ -void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo); - -#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) -void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -/*! -******************************************************************************* - - @Function ValidateFWOnLoad - - @Description Compare the Firmware image as seen from the CPU point of view - against the same memory area as seen from the firmware point - of view after first power up. - - @Input psDevInfo - Device Info - - @Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -/*! 
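
The removed header's for_each_irq_cnt and get_irq_cnt_val macros pick, at compile time, between iterating per-OS IRQ counter registers (RGX_FW_IRQ_OS_COUNTERS builds) and per-firmware-thread counters, so RGXDEBUG_PRINT_IRQ_COUNT is written once against either backend. A toy version of that compile-time switch is shown below; the counter values are fabricated rather than read from hardware.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical build switch standing in for RGX_FW_IRQ_OS_COUNTERS:
 * one variant iterates per-OS counters, the other per-FW-thread counters. */
/* #define DEMO_IRQ_OS_COUNTERS */

#define DEMO_NUM_OS      8
#define DEMO_NUM_THREADS 2

#if defined(DEMO_IRQ_OS_COUNTERS)
#define demo_for_each_irq(idx)  for ((idx) = 0; (idx) < DEMO_NUM_OS; (idx)++)
#define DEMO_IRQ_CNT_TYPE       "OS"
#else
#define demo_for_each_irq(idx)  for ((idx) = 0; (idx) < DEMO_NUM_THREADS; (idx)++)
#define DEMO_IRQ_CNT_TYPE       "Thread"
#endif

int main(void)
{
    /* Fabricated counter storage; the driver reads these from registers
     * or from the FW-shared interrupt count array. */
    uint32_t counts[DEMO_NUM_OS] = { 3, 1, 0, 0, 0, 0, 0, 0 };
    uint32_t idx;

    demo_for_each_irq(idx)
        printf(DEMO_IRQ_CNT_TYPE " %u FW IRQ count = %u\n", idx, counts[idx]);

    return 0;
}
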
-******************************************************************************* - - @Function RGXDumpRGXDebugSummary - - @Description - - Dump a summary in human readable form with the RGX state - - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - @Input bRGXPoweredON - IMG_TRUE if RGX device is on - - @Return void - -******************************************************************************/ -void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_BOOL bRGXPoweredON); - -/*! -******************************************************************************* - - @Function RGXDebugInit - - @Description - - Setup debug requests, calls into PVRSRVRegisterDeviceDbgRequestNotify - - @Input psDevInfo RGX device info - @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error - -******************************************************************************/ -PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* - - @Function RGXDebugDeinit - - @Description - - Remove debug requests, calls into PVRSRVUnregisterDeviceDbgRequestNotify - - @Output phNotify Points to debug notifier handle - @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error - -******************************************************************************/ -PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo); - -#endif /* RGXDEBUG_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdevice.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdevice.h index 9a4cfb8f9cf3..95e0a0b53cca 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdevice.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxdevice.h @@ -69,7 +69,7 @@ typedef struct { *****************************************************************************/ #define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ #define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ -#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject dust requests every TA/3D kick */ +#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject power units state change every DM kick */ #define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ #define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN (0x10) /*!< Used for validation to enable SPU power state mask change */ #define RGXKM_DEVICE_STATE_MASK (0x1F) @@ -97,13 +97,33 @@ typedef struct { #define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */ #define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */ + /*! ****************************************************************************** * Global flags for driver validation *****************************************************************************/ -#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable KZ signature check. Signatures must match */ -#define RGX_VAL_KZ_SIG_CHECK_ERR_EN (0x20U) /*!< Enable KZ signature check. 
Signatures must not match */ -#define RGX_VAL_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */ +#define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN (0x2U) /*!< Enable FBDC signature check. Signatures must match */ +#define RGX_VAL_FBDC_SIG_CHECK_ERR_EN (0x4U) /*!< Enable FBDC signature check. Signatures must not match */ +#define RGX_VAL_GPUSTATEPIN_EN (0x8U) /*!< Enable GPU state pin check */ +#define RGX_VAL_WGP_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable WGP signature check. Signatures must match */ +#define RGX_VAL_WGP_SIG_CHECK_ERR_EN (0x20U) /*!< Enable WGP signature check. Signatures must not match */ +#define RGX_VAL_TRP_SIG_CHECK_NOERR_EN (0x40U) /*!< Enable TRP signature check. Signatures must match */ +#define RGX_VAL_TRP_SIG_CHECK_ERR_EN (0x80U) /*!< Enable TRP signature check. Signatures must not match */ + +/*! + ****************************************************************************** + * HWPerf L2 Stream ID type definition. + *****************************************************************************/ +typedef IMG_UINT32 RGX_HWPERF_L2_STREAM_ID; +/* HWPerf stream for Client HWPerf access. */ +#define RGX_HWPERF_L2_STREAM_HWPERF 0U +#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) +/* HWPerf stream for FTrace HWPerf access. */ +#define RGX_HWPERF_L2_STREAM_FTRACE 1U +#define RGX_HWPERF_L2_STREAM_LAST 2U +#else +#define RGX_HWPERF_L2_STREAM_LAST 1U +#endif typedef struct _GPU_FREQ_TRACKING_DATA_ { @@ -160,6 +180,15 @@ typedef struct _RGX_GPU_DVFS_TABLE_ * GPU utilisation statistics *****************************************************************************/ +typedef struct _RGXFWIF_TEMP_GPU_UTIL_STATS_ +{ + IMG_UINT64 aaaui64DMOSTmpCounters[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; + IMG_UINT64 aaui64DMOSTmpLastWord[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 aaui64DMOSTmpLastState[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 aaui64DMOSTmpLastPeriod[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 aaui64DMOSTmpLastTime[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; +} RGXFWIF_TEMP_GPU_UTIL_STATS; + typedef struct _RGXFWIF_GPU_UTIL_STATS_ { IMG_BOOL bValid; /* If TRUE, statistics are valid. 
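
The RGX_HWPERF_L2_STREAM_ID type introduced here is a plain IMG_UINT32 with #define'd stream IDs, and RGX_HWPERF_L2_STREAM_LAST (2 when the Linux FTrace stream exists, 1 otherwise) sizes the per-stream arrays added to the device info later in this patch; the FW filter is then described as the OR of the per-stream filters. A small sketch of that sizing-and-aggregation scheme with demo names:

#include <stdint.h>
#include <stdio.h>

/* Demo version of the stream-ID scheme: a plain integer type plus
 * #define'd IDs, with _LAST used to size the per-stream arrays.
 * The FTrace stream only exists on Linux-like builds. */
typedef uint32_t demo_l2_stream_id;
#define DEMO_L2_STREAM_HWPERF 0u
#if defined(__linux__)
#define DEMO_L2_STREAM_FTRACE 1u
#define DEMO_L2_STREAM_LAST   2u
#else
#define DEMO_L2_STREAM_LAST   1u
#endif

int main(void)
{
    /* Per-stream event filters; the aggregate FW filter is the OR of all. */
    uint64_t filter[DEMO_L2_STREAM_LAST] = { 0 };
    uint64_t fw_filter = 0;

    filter[DEMO_L2_STREAM_HWPERF] = 0x00ffull;
#if defined(__linux__)
    filter[DEMO_L2_STREAM_FTRACE] = 0x0f00ull;
#endif

    for (demo_l2_stream_id id = 0; id < DEMO_L2_STREAM_LAST; id++)
        fw_filter |= filter[id];

    printf("aggregate FW filter = 0x%llx\n", (unsigned long long)fw_filter);
    return 0;
}
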
@@ -169,12 +198,13 @@ typedef struct _RGXFWIF_GPU_UTIL_STATS_ IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ - IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS active statistic */ - IMG_UINT64 aaui64DMOSStatBlocked[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS blocked statistic */ - IMG_UINT64 aaui64DMOSStatIdle[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS idle statistic */ - IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */ + IMG_UINT64 aaui64DMOSStatInactive[RGXFWIF_GPU_UTIL_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of idle and blocked stats */ + IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_GPU_UTIL_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS active statistic */ + IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_GPU_UTIL_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */ IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ + + RGXFWIF_TEMP_GPU_UTIL_STATS sTempGpuStats; /* Temporary data used to calculate the per-DM per-OS statistics */ } RGXFWIF_GPU_UTIL_STATS; @@ -188,12 +218,6 @@ typedef struct _RGX_REG_CONFIG_ typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; -typedef struct -{ - IMG_UINT32 ui32DustCount1; - IMG_UINT32 ui32DustCount2; - IMG_BOOL bToggle; -} RGX_DUST_STATE; typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ { @@ -205,7 +229,10 @@ typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ IMG_UINT32 ui32C; IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; IMG_UINT32 ui32MAXDMCount; - IMG_UINT32 ui32MAXDustCount; + IMG_UINT32 ui32MAXPowUnitCount; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + IMG_UINT32 ui32MAXRACCount; +#endif IMG_UINT32 ui32SLCSizeInBytes; IMG_PCHAR pszBVNCString; }PVRSRV_DEVICE_FEATURE_CONFIG; @@ -279,6 +306,15 @@ typedef union _RGX_WORKLOAD_ IMG_UINT32 ui32Characteristic1; IMG_UINT32 ui32Characteristic2; } sTransfer; + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + struct + { + IMG_UINT32 ui32DispatchSize; + IMG_UINT32 ui32AccStructSize; + } sRay; +#endif + } RGX_WORKLOAD; /*! @@ -329,6 +365,13 @@ typedef struct _WORKEST_HOST_DATA_ { WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ } sTransfer; + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + struct + { + WORKLOAD_MATCHING_DATA sDataRDM; /*!< matching data for RDM commands */ + } sRay; +#endif } uWorkloadMatchingData; /* @@ -358,6 +401,12 @@ typedef struct _WORKEST_RETURN_DATA_ #endif +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) +#define RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES 4 +#endif + + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) typedef struct { #if defined(PDUMP) @@ -366,6 +415,7 @@ typedef struct PG_HANDLE sPages; IMG_DEV_PHYADDR sPhysAddr; } RGX_MIPS_ADDRESS_TRAMPOLINE; +#endif /*! @@ -378,6 +428,16 @@ typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_ IMG_UINT32 ui32TRPErrorCount; /*!< count of the number of TRP checksum errors */ } PVRSRV_RGXDEV_ERROR_COUNTS; +/*! 
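
In the reworked RGXFWIF_GPU_UTIL_STATS the cumulative counter is the sum of the active, blocked and idle statistics, so a utilisation figure falls out as active divided by cumulative once bValid is set. The sketch below shows only that arithmetic, using illustrative field names rather than the driver's structure.

#include <stdint.h>
#include <stdio.h>

/* Simplified view of the utilisation counters: cumulative is the sum of
 * active + idle + blocked time, so utilisation is active / cumulative. */
struct demo_gpu_util {
    int      valid;
    uint64_t active;
    uint64_t idle;
    uint64_t blocked;
    uint64_t cumulative;   /* active + idle + blocked */
};

static unsigned demo_util_percent(const struct demo_gpu_util *s)
{
    if (!s->valid || s->cumulative == 0)
        return 0;
    return (unsigned)((s->active * 100u) / s->cumulative);
}

int main(void)
{
    struct demo_gpu_util s = {
        .valid = 1, .active = 750, .idle = 200, .blocked = 50,
    };
    s.cumulative = s.active + s.idle + s.blocked;

    printf("GPU utilisation: %u%%\n", demo_util_percent(&s));  /* 75% */
    return 0;
}
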
+ ****************************************************************************** + * RGX Debug dump firmware trace log type + *****************************************************************************/ +typedef IMG_UINT32 RGX_FWT_LOGTYPE; +#define RGX_FWT_LOGTYPE_NONE 0U +#define RGX_FWT_LOGTYPE_BINARY 1U +#define RGX_FWT_LOGTYPE_DECODED 2U +#define RGX_FWT_LOGTYPE_PARTIAL 3U + /*! ****************************************************************************** * RGX Device info @@ -398,6 +458,11 @@ typedef struct _PVRSRV_RGXDEV_INFO_ /* Kernel mode linear address of device registers */ void __iomem *pvRegsBaseKM; +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + /* Kernel mode linear address of device registers */ + void __iomem *pvSecureRegsBaseKM; +#endif + IMG_HANDLE hRegMapping; /* System physical address of device registers */ @@ -418,6 +483,7 @@ typedef struct _PVRSRV_RGXDEV_INFO_ /* Kernel CCB */ DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ + RGXFWIF_CCB_CTL *psKernelCCBCtlLocal; /*!< cpu local copy of Kernel CCB control */ DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ @@ -426,12 +492,14 @@ typedef struct _PVRSRV_RGXDEV_INFO_ /* Firmware CCB */ DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ + RGXFWIF_CCB_CTL *psFirmwareCCBCtlLocal; /*!< cpu local copy of Firmware CCB control */ DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ /* Workload Estimation Firmware CCB */ DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ + RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtlLocal; /*!< cpu local copy of Workload Estimation Firmware CCB control */ DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ @@ -445,6 +513,11 @@ typedef struct _PVRSRV_RGXDEV_INFO_ IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ + /* multicore configuration information */ + IMG_UINT32 ui32MultiCoreNumCores; /* total cores primary + secondaries. 
0 for non-multi core */ + IMG_UINT32 ui32MultiCorePrimaryId; /* primary core id for this device */ + IMG_UINT64 *pui64MultiCoreCapabilities; /* capabilities for each core */ + /* if we don't preallocate the pagetables we must insert newly allocated page tables dynamically @@ -459,7 +532,10 @@ typedef struct _PVRSRV_RGXDEV_INFO_ IMG_UINT32 ui32FWCodeSizeInBytes; DEVMEM_MEMDESC *psRGXFWDataMemDesc; IMG_DEV_VIRTADDR sFWDataDevVAddrBase; + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) RGX_MIPS_ADDRESS_TRAMPOLINE *psTrampoline; +#endif DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; @@ -479,8 +555,13 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc; IMG_UINT32 ui32Sig3DChecksSize; - DEVMEM_MEMDESC *psRGXFWSigTDM2DChecksMemDesc; - IMG_UINT32 ui32SigTDM2DChecksSize; + DEVMEM_MEMDESC *psRGXFWSigTDMChecksMemDesc; + IMG_UINT32 ui32SigTDMChecksSize; + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + DEVMEM_MEMDESC *psRGXFWSigRDMChecksMemDesc; + IMG_UINT32 ui32SigRDMChecksSize; +#endif IMG_BOOL bDumpedKCCBCtlAlready; @@ -491,6 +572,8 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ + IMG_PUINT32 apui32TraceBuffer[RGXFW_THREAD_NUM]; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< CPU local copy of FW Trace buffer size in dwords */ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */ @@ -510,12 +593,13 @@ typedef struct _PVRSRV_RGXDEV_INFO_ IMG_UINT32 ui32ClockSource; IMG_UINT32 ui32LastClockSource; - DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; - RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; + DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCtlMemDesc; + RGXFWIF_GPU_UTIL_FW *psRGXFWIfGpuUtilFW; DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; IMG_BYTE *psRGXFWIfHWPerfBuf; IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ + IMG_UINT32 ui32RGXL2HWPerfBufSize; /* in bytes */ DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; @@ -534,9 +618,14 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; - /* Additional guest firmware memory context info */ - DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED]; - DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED]; +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + DEVMEM_MEMDESC *psRGXFWIfActiveContextBufDesc; + RGXFWIF_ACTIVE_CONTEXT_BUF_DATA *psRGXFWIfActiveContextBuf; +#endif + + /* Premapped firmware memory context info */ + DEVMEM_HEAP *psPremappedFwRawHeap[RGX_NUM_DRIVERS_SUPPORTED]; + DEVMEM_MEMDESC *psPremappedFwRawMemDesc[RGX_NUM_DRIVERS_SUPPORTED]; #if defined(SUPPORT_WORKLOAD_ESTIMATION) /* Array to store data needed for workload estimation when a workload @@ -549,7 +638,8 @@ typedef struct _PVRSRV_RGXDEV_INFO_ #if defined(SUPPORT_PDVFS) /** * Host memdesc and pointer to memory containing core clock rate in Hz. - * Firmware updates the memory on changing the core clock rate over GPIO. + * Firmware (PDVFS) updates the memory on changing the core clock rate over + * GPIO. 
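
The new psKernelCCBCtlLocal, psFirmwareCCBCtlLocal and psWorkEstFirmwareCCBCtlLocal fields keep a CPU-local copy of each CCB control structure alongside the FW-shared mapping. The sketch below only illustrates the general snapshot idea (refresh the local copy once, then compute against ordinary cached memory); the real RGXFWIF_CCB_CTL layout and the driver's refresh policy are not shown in this hunk, and all names here are invented.

#include <stdint.h>
#include <stdio.h>

/* Illustrative ring-buffer control block as it might sit in FW-shared
 * memory; this is not the driver's actual layout. */
struct demo_ccb_ctl {
    uint32_t write_offset;
    uint32_t read_offset;
    uint32_t wrap_mask;
};

/* Keep a host-local copy next to the shared mapping, so hot paths read
 * plain cached memory and the shared view is touched only on refresh. */
struct demo_ccb {
    volatile struct demo_ccb_ctl *shared;  /* FW-shared mapping */
    struct demo_ccb_ctl           local;   /* CPU-local snapshot */
};

static void demo_ccb_refresh(struct demo_ccb *ccb)
{
    ccb->local.write_offset = ccb->shared->write_offset;
    ccb->local.read_offset  = ccb->shared->read_offset;
    ccb->local.wrap_mask    = ccb->shared->wrap_mask;
}

static uint32_t demo_ccb_space(const struct demo_ccb *ccb)
{
    const struct demo_ccb_ctl *c = &ccb->local;
    return (c->read_offset - c->write_offset - 1u) & c->wrap_mask;
}

int main(void)
{
    struct demo_ccb_ctl shared = { .write_offset = 8, .read_offset = 4,
                                   .wrap_mask = 63 };
    struct demo_ccb ccb = { .shared = &shared };

    demo_ccb_refresh(&ccb);
    printf("free slots: %u\n", demo_ccb_space(&ccb));
    return 0;
}
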
* Note: Shared memory needs atomic access from Host driver and firmware, * hence size should not be greater than memory transaction granularity. * Currently it is chosen to be 32 bits. @@ -571,10 +661,12 @@ typedef struct _PVRSRV_RGXDEV_INFO_ * and loss/freeing of FW & Host resources while in * use in another thread e.g. MSIR. */ - IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */ - IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */ - IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */ - IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */ + IMG_UINT64 ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_LAST]; /*! Event filter for FW events (settable by AppHint) */ + IMG_HANDLE hHWPerfStream[RGX_HWPERF_L2_STREAM_LAST]; /*! TL Stream buffer (L2) for firmware event stream */ + IMG_UINT32 ui32L2BufMaxPacketSize[RGX_HWPERF_L2_STREAM_LAST]; /*! Max allowed packet size in FW HWPerf TL (L2) buffer */ + IMG_BOOL bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_LAST]; /*! Flag to indicate if copying HWPerf data is suspended */ + IMG_UINT64 ui64HWPerfFwFilter; /*! Event filter for FW events created from OR-ing ui64HWPerfFilter values. */ + IMG_UINT32 uiHWPerfStreamCount; /*! Value indicating if any of the HWPerf streams has been created */ IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */ POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ @@ -675,7 +767,7 @@ typedef struct _PVRSRV_RGXDEV_INFO_ POS_LOCK hGPUUtilLock; /* Register configuration */ - RGX_REG_CONFIG sRegCongfig; + RGX_REG_CONFIG sRegConfig; IMG_BOOL bRGXPowered; DLLIST_NODE sMemoryContextList; @@ -708,12 +800,14 @@ typedef struct _PVRSRV_RGXDEV_INFO_ POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) POS_LOCK hNMILock; /*!< Lock to protect NMI operations */ +#endif -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32ValidationFlags; /*!< Validation flags for host driver */ +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + IMG_UINT32 ui32AvailablePowUnitsMask; + IMG_UINT32 ui32AvailableRACMask; #endif - RGX_DUST_STATE sDustReqState; RGX_LAYER_PARAMS sLayerParams; @@ -737,31 +831,16 @@ typedef struct _PVRSRV_RGXDEV_INFO_ IMG_UINT32 ui32FirmwareGcovSize; #endif -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) - struct - { - IMG_UINT64 ui64timerGray; - IMG_UINT64 ui64timerBinary; - IMG_UINT64 *pui64uscTimers; - } sRGXTimerValues; +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) + /* Value to store for each page size range config register in MMU4 */ + IMG_UINT64 aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES]; #endif -#if defined(SUPPORT_VALIDATION) - struct - { - IMG_UINT64 ui64RegVal; - struct completion sRegComp; - } sFwRegs; -#endif + IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ - IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ - IMG_UINT32 ui32SLRSkipFWAddr; -#endif #if defined(SUPPORT_SECURITY_VALIDATION) DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; @@ -792,12 +871,6 @@ typedef struct _PVRSRV_RGXDEV_INFO_ IMG_UINT32 
ui32HostSafetyEventMask;/*!< mask of the safety events handled by the driver */ RGX_CONTEXT_RESET_REASON eLastDeviceError; /*!< device error reported to client */ -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32ECCRAMErrInjModule; - IMG_UINT32 ui32ECCRAMErrInjInterval; -#endif - - IMG_UINT32 ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */ #if defined(SUPPORT_SECURE_ALLOC_KM) PMR *psGenHeapSecMem; /*!< An allocation of secure memory mapped to @@ -805,6 +878,22 @@ typedef struct _PVRSRV_RGXDEV_INFO_ created and mapped at driver init. It's used for various purposes. See rgx_fwif_km.h for all use cases. */ #endif + +#if defined(SUPPORT_SECURE_CONTEXT_SWITCH) + DEVMEM_MEMDESC *psRGXFWScratchBufMemDesc; +#endif + + RGX_FWT_LOGTYPE eDebugDumpFWTLogType; + + RGX_FW_INFO_HEADER sFWInfoHeader; +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + IMG_UINT32 ui32TFBCLossyGroup; /*!< TFBCCompressionControlGroup + setting for those cores which support + this feature. */ +#endif + RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; /*!< GPU usage statistics */ + POS_LOCK hGpuUtilStatsLock; + } PVRSRV_RGXDEV_INFO; @@ -814,6 +903,9 @@ typedef struct _RGX_TIMING_INFORMATION_ /*! GPU default core clock speed in Hz */ IMG_UINT32 ui32CoreClockSpeed; + /*! Default SOC clock speed in Hz */ + IMG_UINT32 ui32SOCClockSpeed; + /*! Active Power Management: GPU actively requests the host driver to be powered off */ IMG_BOOL bEnableActivePM; @@ -831,11 +923,16 @@ typedef struct _RGX_DATA_ RGX_TIMING_INFORMATION *psRGXTimingInfo; } RGX_DATA; +typedef enum _RGX_QUERY_TIMESTAMP_TYPE_ +{ + RGX_QUERY_HOST_TIMESTAMP, + RGX_QUERY_DEVICE_TIMESTAMP, +} RGX_QUERY_TIMESTAMP_TYPE; + /* RGX PDUMP register bank name (prefix) */ #define RGX_PDUMPREG_NAME "RGXREG" -#define RGX_TB_PDUMPREG_NAME "EMUREG" #endif /* RGXDEVICE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.c index 94c05510fc05..2f65106d359e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxfwutils.c @@ -53,7 +53,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_fwif_km.h" #include "pdump_km.h" #include "osfunc.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "cache_km.h" #include "allocmem.h" #include "physheap.h" @@ -64,6 +64,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_debug.h" #include "pvr_notifier.h" #include "rgxfwutils.h" +#include "rgxfwcmnctx.h" +#include "rgxfwriscv.h" #include "rgx_options.h" #include "rgx_fwif_alignchecks.h" #include "rgx_fwif_resetframework.h" @@ -71,21 +73,25 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "fwtrace_string.h" #include "rgxheapconfig.h" #include "pvrsrv.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgxhwperf.h" #include "rgxccb.h" #include "rgxcompute.h" #include "rgxtransfer.h" -#include "rgxpower.h" #include "rgxtdmtransfer.h" +#include "rgxpower.h" #if defined(SUPPORT_DISPLAY_CLASS) #include "dc_server.h" #endif #include "rgxmem.h" #include "rgxmmudefs_km.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) #include "rgxmipsmmuinit.h" +#endif #include "rgxta3d.h" +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) #include "rgxkicksync.h" +#endif #include "rgxutils.h" #include "rgxtimecorr.h" #include "sync_internal.h" @@ -94,7 +100,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "sync_checkpoint_external.h" #include "tlstream.h" #include "devicemem_server_utils.h" -#include "htbuffer.h" +#include "htbserver.h" #include "rgx_bvnc_defs_km.h" #include "info_page.h" @@ -103,7 +109,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifdef __linux__ #include /* sprintf */ -#include "rogue_trace_events.h" #else #include #include @@ -120,10 +125,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxpdvfs.h" #endif -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -#include "rgxsoctimer.h" +#if defined(SUPPORT_FW_OPP_TABLE) && defined(CONFIG_OF) +#include "pvr_dvfs_common.h" #endif + #include "vz_vmm_pvz.h" #include "rgx_heaps.h" @@ -136,34 +142,30 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXFW_HWPERF_L1_SIZE_MIN (16U) #define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB #define RGXFW_HWPERF_L1_SIZE_MAX (12288U) +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) +#if defined(DEBUG) +/* Catch the use of auto-increment when meta_registers_unpacked_accesses feature is + * present in case we ever use it. No WA exists so it must not be used */ +#define CHECK_HWBRN_68777(v) \ + do { \ + PVR_ASSERT(((v) & RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN) == 0); \ + } while (0) +#else +#define CHECK_HWBRN_68777(v) +#endif +#endif /* Firmware CCB length */ #if defined(NO_HARDWARE) && defined(PDUMP) #define RGXFWIF_FWCCB_NUMCMDS_LOG2 (10) -#elif defined(SUPPORT_PDVFS) +#elif defined(SUPPORT_PDVFS) || defined(SUPPORT_WORKLOAD_ESTIMATION) #define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8) #else #define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5) #endif #if defined(RGX_FW_IRQ_OS_COUNTERS) -const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS] = {IRQ_COUNTER_STORAGE_REGS}; -#endif - -/* - * Maximum length of time a DM can run for before the DM will be marked - * as out-of-time. CDM has an increased value due to longer running kernels. - * - * These deadlines are increased on FPGA, EMU and VP due to the slower - * execution time of these platforms. PDUMPS are also included since they - * are often run on EMU, FPGA or in CSim. 
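
The deadline macros removed from this file (RGXFWIF_MAX_WORKLOAD_DEADLINE_MS and the larger CDM variant) are selected per build: FPGA, emulator, virtual-platform and PDUMP builds get much larger caps, and FWCommonContextAllocate later clamps the caller's ui32MaxDeadlineMS with MIN() against the cap for its data master. Below is a standalone sketch of that select-and-clamp logic, reusing the removed values but with stand-in macro names.

#include <stdio.h>
#include <stdint.h>

/* Demo equivalents of the per-build workload deadline caps: slow platforms
 * (FPGA/emulator/virtual platform/PDUMP) get larger limits. */
#if defined(DEMO_SLOW_PLATFORM)
#define DEMO_MAX_WORKLOAD_DEADLINE_MS      (480000u)
#define DEMO_MAX_CDM_WORKLOAD_DEADLINE_MS  (1000000u)
#else
#define DEMO_MAX_WORKLOAD_DEADLINE_MS      (40000u)
#define DEMO_MAX_CDM_WORKLOAD_DEADLINE_MS  (600000u)
#endif

#define DEMO_MIN(a, b) (((a) < (b)) ? (a) : (b))

/* Clamp a caller-supplied deadline to the cap for its data master. */
static uint32_t demo_clamp_deadline(uint32_t requested_ms, int is_compute)
{
    uint32_t cap = is_compute ? DEMO_MAX_CDM_WORKLOAD_DEADLINE_MS
                              : DEMO_MAX_WORKLOAD_DEADLINE_MS;
    return DEMO_MIN(requested_ms, cap);
}

int main(void)
{
    printf("%u\n", demo_clamp_deadline(90000u, 0)); /* clamped to 40000 */
    printf("%u\n", demo_clamp_deadline(90000u, 1)); /* kept at 90000 */
    return 0;
}
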
- */ -#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP) -#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000) -#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000) -#else -#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (40000) -#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (600000) +const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS] = {IRQ_COUNTER_STORAGE_REGS}; #endif /* Workload Estimation Firmware CCB length */ @@ -181,6 +183,18 @@ typedef struct PVRSRV_RGXDEV_INFO *psDevInfo; } RGX_DEFERRED_KCCB_CMD; +typedef struct +{ + IMG_INT32 i32Priority; + IMG_UINT32 ui32IsolationGroups; + IMG_UINT32 ui32TSPercentage; +}RGX_QOS_DEFAULTS; + +#define RGX_QOS_DEFAULTS_INIT(osid) \ + {RGX_DRIVERID_##osid##_DEFAULT_PRIORITY,\ + RGX_DRIVERID_##osid##_DEFAULT_ISOLATION_GROUP,\ + RGX_DRIVERID_##osid##_DEFAULT_TIME_SLICE} + #if defined(PDUMP) /* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the * PID filter example entries @@ -190,65 +204,16 @@ static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32), "generates WRW commands for loading the PID values"); #endif +#if (RGXFW_MAX_NUM_OSIDS > 1) +static_assert(RGX_DRIVER_DEFAULT_TIME_SLICES_SUM <= PVRSRV_VZ_TIME_SLICE_MAX, "Invalid driverid time slice aggregate"); +#endif + static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo); static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo); -#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) -static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit) -{ - PVRSRV_ERROR eError; - DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc; - IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE( - RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); - - PVR_DPF_ENTERED; - - eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap, - 1, - ui32CacheLineSize, - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwSLC3FenceWA", - ppsSLC3FenceMemDesc); - if (eError != PVRSRV_OK) - { - PVR_DPF_RETURN_RC(eError); - } - - /* We need to map it so the heap for this allocation is set */ - eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc, - psDevInfo->psFirmwareMainHeap, - &psFwSysInit->sSLC3FenceDevVAddr); - if (eError != PVRSRV_OK) - { - DevmemFree(*ppsSLC3FenceMemDesc); - *ppsSLC3FenceMemDesc = NULL; - } - - PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc); -} - -static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo) -{ - DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc; - - if (psSLC3FenceMemDesc) - { - DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc); - DevmemFree(psSLC3FenceMemDesc); - } -} -#endif static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value) { - /* Ensure any uncached/WC memory writes are flushed from CPU write buffers - * before kicking MTS. - */ - OSWriteMemoryBarrier(NULL); - /* This should *NOT* happen. Try to trace what caused this and avoid a NPE * with the Write/Read at the foot of the function. 
*/ @@ -548,10 +513,9 @@ static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, DEVMEM_MEMDESC** ppsBufferMemDesc, IMG_UINT32 ui32CounterDataBufferSize, - RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl, - const IMG_CHAR* pszBufferName) + RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl) { - PVRSRV_ERROR eError; + PVRSRV_ERROR eError; eError = RGXSetupFwAllocation(psDevInfo, (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | @@ -565,13 +529,14 @@ static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation"); psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2; + RGXFwSharedMemCacheOpValue(psCounterDumpCtl->ui32SizeInDwords, FLUSH); return PVRSRV_OK; } #endif /*! - ****************************************************************************** +******************************************************************************* @Function RGXFWSetupAlignChecks @Description This functions allocates and fills memory needed for the aligns checks of the UM and KM structures shared with the @@ -581,7 +546,7 @@ static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, The UM array is passed from the user side. Now the firmware is - is responsible for filling this part of the memory. If that + responsible for filling this part of the memory. If that happens the check of the UM structures will be performed by the host driver on client's connect. If the macro is not defined the client driver fills the memory @@ -627,11 +592,15 @@ static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode, paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM); *paui32AlignChecks = 0; + + OSWriteMemoryBarrier(paui32AlignChecks); + RGXFwSharedMemCacheOpExec(paui32AlignChecks - (ARRAY_SIZE(aui32RGXFWAlignChecksKM) + 1), + ui32RGXFWAlignChecksTotal, + PVRSRV_CACHE_OP_FLUSH); } - OSWriteMemoryBarrier(paui32AlignChecks); - DevmemPDumpLoadMem(psDevInfo->psRGXFWAlignChecksMemDesc, + DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc, 0, ui32RGXFWAlignChecksTotal, PDUMP_FLAGS_CONTINUOUS); @@ -666,7 +635,6 @@ PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc); psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { IMG_UINT32 ui32Offset; @@ -687,9 +655,12 @@ PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; /* Honour the SLC cache flags */ - eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode); + eError = DevmemDeviceCacheMode(uiDevFlags, &uiGPUCacheMode); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode); + /* + * Choose Meta virtual address based on Meta and SLC cacheability. 
+ */ ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS; if (bCachedInMETA) @@ -709,22 +680,23 @@ PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, { ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED; } + ppDest->ui32Addr = ui32Offset; } - else -#endif - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); PVR_GOTO_IF_ERROR(eError, failDevVAAcquire); ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF); } +#endif else { IMG_UINT32 ui32Offset; IMG_BOOL bCachedInRISCV; - PVRSRV_MEMALLOCFLAGS_T uiDevFlags; + PVRSRV_MEMALLOCFLAGS_T uiDevFlags; eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); @@ -769,18 +741,16 @@ PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, return PVRSRV_OK; -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) failDevCacheMode: DevmemReleaseDevVirtAddr(psSrc); -#endif failDevVAAcquire: return eError; } void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, - DEVMEM_MEMDESC *psSrcMemDesc, - RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, - IMG_UINT32 uiOffset) + DEVMEM_MEMDESC *psSrcMemDesc, + RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, + IMG_UINT32 uiOffset) { PVRSRV_ERROR eError; IMG_DEV_VIRTADDR sDevVirtAddr; @@ -801,468 +771,20 @@ void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc) DevmemReleaseDevVirtAddr(psSrc); } -struct _RGX_SERVER_COMMON_CONTEXT_ { - PVRSRV_RGXDEV_INFO *psDevInfo; - DEVMEM_MEMDESC *psFWCommonContextMemDesc; - PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; - SERVER_MMU_CONTEXT *psServerMMUContext; - DEVMEM_MEMDESC *psFWMemContextMemDesc; - DEVMEM_MEMDESC *psFWFrameworkMemDesc; - DEVMEM_MEMDESC *psContextStateMemDesc; - RGX_CLIENT_CCB *psClientCCB; - DEVMEM_MEMDESC *psClientCCBMemDesc; - DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; - IMG_BOOL bCommonContextMemProvided; - IMG_UINT32 ui32ContextID; - DLLIST_NODE sListNode; - RGX_CONTEXT_RESET_REASON eLastResetReason; - IMG_UINT32 ui32LastResetJobRef; - IMG_INT32 i32Priority; - RGX_CCB_REQUESTOR_TYPE eRequestor; -}; - -/*************************************************************************/ /*! 
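
For META firmware, RGXSetFirmwareAddress builds the FW virtual address by adding RGXFW_SEGMMU_DATA_BASE_ADDRESS and OR-ing in flag bits chosen from the META and SLC cacheability of the allocation. The sketch below shows only the shape of that encoding; the bit values are invented and the real code has further cases (for example the VIVT SLC handling) that are omitted here.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits for a segment-MMU style FW address: the real
 * RGXFW_SEGMMU_DATA_* values are not reproduced, only the idea of
 * encoding cacheability into the upper bits of the FW virtual address. */
#define DEMO_SEGMMU_DATA_BASE           0x40000000u
#define DEMO_SEGMMU_DATA_META_CACHED    0x80000000u
#define DEMO_SEGMMU_DATA_META_UNCACHED  0x00000000u
#define DEMO_SEGMMU_DATA_SLC_CACHED     0x20000000u
#define DEMO_SEGMMU_DATA_SLC_UNCACHED   0x00000000u

static uint32_t demo_fw_addr(uint32_t offset, int cached_in_meta, int cached_in_slc)
{
    uint32_t addr = offset + DEMO_SEGMMU_DATA_BASE;

    /* Pick the firmware-core view first, then the SLC view. */
    addr |= cached_in_meta ? DEMO_SEGMMU_DATA_META_CACHED
                           : DEMO_SEGMMU_DATA_META_UNCACHED;
    addr |= cached_in_slc  ? DEMO_SEGMMU_DATA_SLC_CACHED
                           : DEMO_SEGMMU_DATA_SLC_UNCACHED;
    return addr;
}

int main(void)
{
    printf("0x%08x\n", demo_fw_addr(0x1000u, 1, 0)); /* 0xc0001000 */
    printf("0x%08x\n", demo_fw_addr(0x1000u, 0, 1)); /* 0x60001000 */
    return 0;
}
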
-@Function _CheckPriority -@Description Check if priority is allowed for requestor type -@Input psDevInfo pointer to DevInfo struct -@Input i32Priority Requested priority -@Input eRequestor Requestor type specifying data master -@Return PVRSRV_ERROR PVRSRV_OK on success -*/ /**************************************************************************/ -static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_INT32 i32Priority, - RGX_CCB_REQUESTOR_TYPE eRequestor) -{ - /* Only one context allowed with real time priority (highest priority) */ - if (i32Priority == RGX_CTX_PRIORITY_REALTIME) - { - DLLIST_NODE *psNode, *psNext; - - dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) - { - RGX_SERVER_COMMON_CONTEXT *psThisContext = - IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); - - if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME && - psThisContext->eRequestor == eRequestor) - { - PVR_LOG(("Only one context with real time priority allowed")); - return PVRSRV_ERROR_INVALID_PARAMS; - } - } - } - - return PVRSRV_OK; -} - -PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, - RGXFWIF_DM eDM, - SERVER_MMU_CONTEXT *psServerMMUContext, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - DEVMEM_MEMDESC *psContextStateMemDesc, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; - RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext; - IMG_UINT32 ui32FWCommonContextOffset; - IMG_UINT8 *pui8Ptr; - PVRSRV_ERROR eError; - - /* - * Allocate all the resources that are required - */ - psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); - if (psServerCommonContext == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto fail_alloc; - } - - psServerCommonContext->psDevInfo = psDevInfo; - psServerCommonContext->psServerMMUContext = psServerMMUContext; - - if (psAllocatedMemDesc) - { - PDUMPCOMMENT(psDeviceNode, - "Using existing MemDesc for Rogue firmware %s context (offset = %d)", - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - ui32AllocatedOffset); - ui32FWCommonContextOffset = ui32AllocatedOffset; - psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; - psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; - } - else - { - /* Allocate device memory for the firmware context */ - PDUMPCOMMENT(psDeviceNode, - "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); - eError = DevmemFwAllocate(psDevInfo, - sizeof(*psFWCommonContext), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwContext", - &psServerCommonContext->psFWCommonContextMemDesc); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to allocate firmware %s context (%s)", - __func__, - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - PVRSRVGetErrorString(eError))); - goto fail_contextalloc; - } - ui32FWCommonContextOffset = 0; - psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; - } - - /* Record this context so we can refer to it if the FW needs to tell us it was reset. 
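
_CheckPriority, removed here along with the rest of the common-context code, enforces a single rule: at most one context per requestor type may hold RGX_CTX_PRIORITY_REALTIME, checked by walking the device's common-context list. A minimal sketch of that uniqueness walk over a plain linked list:

#include <stdio.h>

#define DEMO_PRIORITY_REALTIME 3

/* Minimal singly linked list of contexts; the driver walks a dllist of
 * RGX_SERVER_COMMON_CONTEXT nodes instead. */
struct demo_ctx {
    int priority;
    int requestor;           /* which data master/CCB requestor owns it */
    struct demo_ctx *next;
};

/* Reject a request for realtime priority if another context of the same
 * requestor type already holds it. */
static int demo_check_priority(const struct demo_ctx *head,
                               int priority, int requestor)
{
    if (priority != DEMO_PRIORITY_REALTIME)
        return 0;                       /* nothing to enforce */

    for (const struct demo_ctx *c = head; c != NULL; c = c->next) {
        if (c->priority == DEMO_PRIORITY_REALTIME && c->requestor == requestor)
            return -1;                  /* only one realtime context allowed */
    }
    return 0;
}

int main(void)
{
    struct demo_ctx a = { .priority = DEMO_PRIORITY_REALTIME, .requestor = 1 };
    struct demo_ctx head = { .priority = 0, .requestor = 1, .next = &a };

    printf("%d\n", demo_check_priority(&head, DEMO_PRIORITY_REALTIME, 1)); /* -1 */
    printf("%d\n", demo_check_priority(&head, DEMO_PRIORITY_REALTIME, 2)); /*  0 */
    return 0;
}
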
*/ - psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; - psServerCommonContext->ui32LastResetJobRef = 0; - psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; - - /* - * Temporarily map the firmware context to the kernel and initialise it - */ - eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, - (void **)&pui8Ptr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware %s context to CPU (%s)", - __func__, - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - PVRSRVGetErrorString(eError))); - goto fail_cpuvirtacquire; - } - - /* Allocate the client CCB */ - eError = RGXCreateCCB(psDevInfo, - ui32CCBAllocSizeLog2, - ui32CCBMaxAllocSizeLog2, - ui32ContextFlags, - psConnection, - eRGXCCBRequestor, - psServerCommonContext, - &psServerCommonContext->psClientCCB, - &psServerCommonContext->psClientCCBMemDesc, - &psServerCommonContext->psClientCCBCtrlMemDesc); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: failed to create CCB for %s context (%s)", - __func__, - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - PVRSRVGetErrorString(eError))); - goto fail_allocateccb; - } - - psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset); - psFWCommonContext->eDM = eDM; - - /* Set the firmware CCB device addresses in the firmware common context */ - eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB, - psServerCommonContext->psClientCCBMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); - - eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl, - psServerCommonContext->psClientCCBCtrlMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); - -#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) - { - RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr, - psServerCommonContext->psClientCCBMemDesc, - &psFWCommonContext->psCCB, - 0); - } -#endif - - /* Set the memory context device address */ - psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; - eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext, - psFWMemContextMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); - - /* Set the framework register updates address */ - psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc; - if (psInfo->psFWFrameworkMemDesc != NULL) - { - eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd, - psInfo->psFWFrameworkMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr); - } - else - { - /* This should never be touched in this contexts without a framework - * memdesc, but ensure it is zero so we see crashes if it is. - */ - psFWCommonContext->psRFCmd.ui32Addr = 0; - } - - eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor); - PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); - - psServerCommonContext->i32Priority = i32Priority; - psServerCommonContext->eRequestor = eRGXCCBRequestor; - - psFWCommonContext->i32Priority = i32Priority; - psFWCommonContext->ui32PrioritySeqNum = 0; - psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, - (eDM == RGXFWIF_DM_CDM ? 
- RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS : - RGXFWIF_MAX_WORKLOAD_DEADLINE_MS)); - psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress; - - /* Store a references to Server Common Context and PID for notifications back from the FW. */ - psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; - psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM(); - OSCachedMemCopy(psFWCommonContext->szProcName, psConnection->pszProcName, RGXFW_PROCESS_NAME_LEN); - - /* Set the firmware GPU context state buffer */ - psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; - if (psContextStateMemDesc) - { - eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState, - psContextStateMemDesc, - 0, - RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr); - } - - /* - * Dump the created context - */ - PDUMPCOMMENT(psDeviceNode, - "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); - DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, - ui32FWCommonContextOffset, - sizeof(*psFWCommonContext), - PDUMP_FLAGS_CONTINUOUS); - - /* We've finished the setup so release the CPU mapping */ - DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); - - /* Map this allocation into the FW */ - eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, - psServerCommonContext->psFWCommonContextMemDesc, - ui32FWCommonContextOffset, - RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr); - -#if defined(__linux__) - { - IMG_UINT32 ui32FWAddr; - switch (eDM) { - case RGXFWIF_DM_GEOM: - ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) - psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); - break; - case RGXFWIF_DM_3D: - ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) - psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); - break; - default: - ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; - break; - } - - trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - psDeviceNode->sDevId.ui32InternalID, - ui32FWAddr); - } -#endif - /*Add the node to the list when finalised */ - OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock); - dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode)); - OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock); - - *ppsServerCommonContext = psServerCommonContext; - return PVRSRV_OK; - -fail_fwcommonctxfwaddr: - if (psContextStateMemDesc) - { - RGXUnsetFirmwareAddress(psContextStateMemDesc); - } -fail_ctxstatefwaddr: -fail_checkpriority: - if (psInfo->psFWFrameworkMemDesc != NULL) - { - RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc); - } -fail_fwframeworkfwaddr: - RGXUnsetFirmwareAddress(psFWMemContextMemDesc); -fail_fwmemctxfwaddr: - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); -fail_cccbctrlfwaddr: - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); -fail_cccbfwaddr: - RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB); -fail_allocateccb: - DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); -fail_cpuvirtacquire: - if (!psServerCommonContext->bCommonContextMemProvided) - { - DevmemFwUnmapAndFree(psDevInfo, 
psServerCommonContext->psFWCommonContextMemDesc); - psServerCommonContext->psFWCommonContextMemDesc = NULL; - } -fail_contextalloc: - OSFreeMem(psServerCommonContext); -fail_alloc: - return eError; -} - -void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - - OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); - /* Remove the context from the list of all contexts. */ - dllist_remove_node(&psServerCommonContext->sListNode); - OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); - - /* - Unmap the context itself and then all its resources - */ - - /* Unmap the FW common context */ - RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); - /* Umap context state buffer (if there was one) */ - if (psServerCommonContext->psContextStateMemDesc) - { - RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc); - } - /* Unmap the framework buffer */ - if (psServerCommonContext->psFWFrameworkMemDesc) - { - RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc); - } - /* Unmap client CCB and CCB control */ - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); - /* Unmap the memory context */ - RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc); - - /* Destroy the client CCB */ - RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB); - - - /* Free the FW common context (if there was one) */ - if (!psServerCommonContext->bCommonContextMemProvided) - { - DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo, - psServerCommonContext->psFWCommonContextMemDesc); - psServerCommonContext->psFWCommonContextMemDesc = NULL; - } - /* Free the hosts representation of the common context */ - OSFreeMem(psServerCommonContext); -} - -PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->sFWCommonContextFWAddr; -} - -RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->psClientCCB; -} - -RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 *pui32LastResetJobRef) -{ - RGX_CONTEXT_RESET_REASON eLastResetReason; - - PVR_ASSERT(psServerCommonContext != NULL); - PVR_ASSERT(pui32LastResetJobRef != NULL); - - /* Take the most recent reason & job ref and reset for next time... 
*/ - eLastResetReason = psServerCommonContext->eLastResetReason; - *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; - psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; - psServerCommonContext->ui32LastResetJobRef = 0; - - if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) - { - PVR_DPF((PVR_DBG_WARNING, - "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); - } - - return eLastResetReason; -} - -PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->psDevInfo; -} - -PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, - SERVER_MMU_CONTEXT *psServerMMUContext, - PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) -{ - DLLIST_NODE *psNode, *psNext; - dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) - { - RGX_SERVER_COMMON_CONTEXT *psThisContext = - IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); - - if (psThisContext->psServerMMUContext == psServerMMUContext) - { - psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; - return PVRSRV_OK; - } - } - return PVRSRV_ERROR_INVALID_PARAMS; -} - -PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 ui32ContextFlags) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - - if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)", - __func__, ui32ContextFlags)); - eError = PVRSRV_ERROR_INVALID_PARAMS; - } - else - { - RGXSetCCBFlags(psServerCommonContext->psClientCCB, - ui32ContextFlags); - } - - return eError; -} - /*! ******************************************************************************* @Function RGXFreeCCB @Description Free the kernel or firmware CCB @Input psDevInfo @Input ppsCCBCtl + @Input ppvCCBCtlLocal @Input ppsCCBCtlMemDesc @Input ppsCCBMemDesc @Input psCCBCtlFWAddr ******************************************************************************/ static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_CCB_CTL **ppsCCBCtl, + RGXFWIF_CCB_CTL **ppsCCBCtlLocal, DEVMEM_MEMDESC **ppsCCBCtlMemDesc, IMG_UINT8 **ppui8CCB, DEVMEM_MEMDESC **ppsCCBMemDesc) @@ -1287,6 +809,11 @@ static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); *ppsCCBCtlMemDesc = NULL; } + if (*ppsCCBCtlLocal != NULL) + { + OSFreeMem(*ppsCCBCtlLocal); + *ppsCCBCtlLocal = NULL; + } } /*! 
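Note on the CCB control changes in this area: the RGXFreeCCB hunk above and the RGXSetupCCB hunks below follow a single pattern. The driver now keeps a host-local mirror of the RGXFWIF_CCB_CTL block, initialises that mirror in plain kernel memory, copies it into the firmware-shared allocation and flushes it so the FW observes the values, and releases the mirror again on teardown via OSFreeMem. The sketch below is illustrative only and is not part of the patch; the helper name InitCCBCtlMirror and the simplified error handling are assumptions, while the types and OS/RGX helpers it calls all appear in the surrounding hunks.

/* Illustrative sketch (not patch content): the local/shared CCB control
 * lifecycle used by the RGXSetupCCB and RGXFreeCCB changes. Assumes the
 * usual DDK headers; InitCCBCtlMirror is a hypothetical helper.
 */
static PVRSRV_ERROR InitCCBCtlMirror(RGXFWIF_CCB_CTL *psSharedCtl,   /* FW-shared, device-mapped copy */
                                     RGXFWIF_CCB_CTL **ppsLocalCtl,  /* host-local mirror, returned   */
                                     IMG_UINT32 ui32CCBSize)
{
	/* Build the control block in ordinary kernel memory first. */
	RGXFWIF_CCB_CTL *psLocalCtl = OSAllocZMem(sizeof(*psLocalCtl));
	if (psLocalCtl == NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psLocalCtl->ui32WrapMask = ui32CCBSize - 1;

	/* Publish the initialised state to the FW-visible copy and flush so the
	 * firmware sees it before the CCB is used (the patch uses
	 * RGXFwSharedMemCacheOpPtr for the flush step). */
	OSDeviceMemCopy(psSharedCtl, psLocalCtl, sizeof(*psLocalCtl));
	RGXFwSharedMemCacheOpPtr(psSharedCtl, FLUSH);

	/* Keep the mirror; teardown (RGXFreeCCB) releases it with OSFreeMem(). */
	*ppsLocalCtl = psLocalCtl;
	return PVRSRV_OK;
}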
@@ -1334,6 +861,7 @@ static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, ******************************************************************************/ static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_CCB_CTL **ppsCCBCtl, + RGXFWIF_CCB_CTL **ppsCCBCtlLocal, DEVMEM_MEMDESC **ppsCCBCtlMemDesc, IMG_UINT8 **ppui8CCB, DEVMEM_MEMDESC **ppsCCBMemDesc, @@ -1356,21 +884,12 @@ static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, if (unlikely(iStrLen < 0)) { - OSStringLCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN); + OSStringSafeCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN); } /* Allocate memory for the CCB control.*/ eError = RGXSetupFwAllocation(psDevInfo, - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, sizeof(RGXFWIF_CCB_CTL), szCCBCtlName, ppsCCBCtlMemDesc, @@ -1396,11 +915,15 @@ static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, /* * Initialise the CCB control. */ - psCCBCtl = *ppsCCBCtl; - psCCBCtl->ui32WriteOffset = 0; - psCCBCtl->ui32ReadOffset = 0; + psCCBCtl = OSAllocZMem(sizeof(*psCCBCtl)); + PVR_LOG_GOTO_IF_NOMEM(psCCBCtl, eError, fail); + psCCBCtl->ui32WrapMask = ui32CCBSize - 1; - psCCBCtl->ui32CmdSize = ui32CmdSize; + + OSDeviceMemCopy(*ppsCCBCtl, psCCBCtl, sizeof(*psCCBCtl)); + RGXFwSharedMemCacheOpPtr(psCCBCtl, FLUSH); + + *ppsCCBCtlLocal = psCCBCtl; /* Pdump the CCB control */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName); @@ -1414,6 +937,7 @@ static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, fail: RGXFreeCCB(psDevInfo, ppsCCBCtl, + ppsCCBCtlLocal, ppsCCBCtlMemDesc, ppui8CCB, ppsCCBMemDesc); @@ -1426,18 +950,24 @@ static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo) { PMR *psPMR; - if (psDevInfo->psRGXFaultAddressMemDesc) +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + /* Run-time check feature support */ + if (PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) +#endif { - if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK) + if (psDevInfo->psRGXFaultAddressMemDesc) { - PMRUnlockSysPhysAddresses(psPMR); + if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK) + { + PMRUnlockSysPhysAddresses(psPMR); + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); + psDevInfo->psRGXFaultAddressMemDesc = NULL; } - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); - psDevInfo->psRGXFaultAddressMemDesc = NULL; } } -static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit) +static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit) { PVRSRV_ERROR eError = PVRSRV_OK; IMG_UINT32 *pui32MemoryVirtAddr; @@ -1446,8 +976,16 @@ static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PMR *psPMR; +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + /* Run-time check feature support */ + if 
(!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + return PVRSRV_OK; + } +#endif + /* Allocate page of memory to use for page faults on non-blocking memory transactions. - * Doesn't need to be cleared as it is initialised with the 0xDEADBEE0 pattern below. */ + * Doesn't need to be cleared as it is initialised with the 0xDEADBEEF pattern below. */ psDevInfo->psRGXFaultAddressMemDesc = NULL; eError = DevmemFwAllocateExportable(psDeviceNode, ui32PageSize, @@ -1464,31 +1002,32 @@ static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, goto failFaultAddressDescAlloc; } - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, - (void **)&pui32MemoryVirtAddr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to acquire mem for fault address (%u)", - __func__, eError)); - goto failFaultAddressDescAqCpuVirt; - } if (!psDeviceNode->bAutoVzFwIsUp) { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, + (void **)&pui32MemoryVirtAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire mem for fault address (%u)", + __func__, eError)); + goto failFaultAddressDescAqCpuVirt; + } + /* fill the page with a known pattern when booting the firmware */ for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++) { - *(pui32MemoryVirtAddr + i) = 0xDEADBEE0; + *(pui32MemoryVirtAddr + i) = 0xDEADBEEF; } - } - OSWriteMemoryBarrier(pui32MemoryVirtAddr); + OSWriteMemoryBarrier(pui32MemoryVirtAddr); + RGXFwSharedMemCacheOpExec(pui32MemoryVirtAddr, ui32PageSize, PVRSRV_CACHE_OP_FLUSH); - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + } eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR); - if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1513,8 +1052,7 @@ static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, goto failFaultAddressDescLockPhys; } - eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid); - + eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid, DEVICE_USE); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1559,16 +1097,24 @@ static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) PMR *psFWInitPMR, *psFaultAddrPMR; IMG_UINT32 ui32Dstoffset; +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + /* Run-time check feature support */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + return PVRSRV_OK; + } +#endif + psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR); ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr); psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR); eError = PDumpMemLabelToMem64(psFaultAddrPMR, - psFWInitPMR, - 0, - ui32Dstoffset, - PDUMP_FLAGS_CONTINUOUS); + psFWInitPMR, + 0, + ui32Dstoffset, + PDUMP_FLAGS_CONTINUOUS); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError)); @@ -1579,40 +1125,10 @@ static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) #if defined(SUPPORT_TBI_INTERFACE) /*************************************************************************/ /*! 
-@Function RGXTBIBufferIsInitRequired +@Function RGXTBIBufferDeinit -@Description Returns true if the firmware tbi buffer is not allocated and - might be required by the firmware soon. TBI buffer allocated - on-demand to reduce RAM footprint on systems not needing - tbi. - -@Input psDevInfo RGX device info - -@Return IMG_BOOL Whether on-demand allocation(s) is/are needed - or not -*/ /**************************************************************************/ -INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - - /* The firmware expects a tbi buffer only when: - * - Logtype is "tbi" - */ - if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) - && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) - && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) - { - return IMG_TRUE; - } - - return IMG_FALSE; -} - -/*************************************************************************/ /*! -@Function RGXTBIBufferDeinit - -@Description Deinitialises all the allocations and references that are made - for the FW tbi buffer +@Description Deinitialises all the allocations and references that are made + for the FW tbi buffer @Input ppsDevInfo RGX device info @Return void @@ -1622,6 +1138,7 @@ static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; + psDevInfo->ui32RGXL2HWPerfBufSize = 0; } /*************************************************************************/ /*! @@ -1683,39 +1200,6 @@ PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) } #endif -/*************************************************************************/ /*! -@Function RGXTraceBufferIsInitRequired - -@Description Returns true if the firmware trace buffer is not allocated and - might be required by the firmware soon. Trace buffer allocated - on-demand to reduce RAM footprint on systems not needing - firmware trace. - -@Input psDevInfo RGX device info - -@Return IMG_BOOL Whether on-demand allocation(s) is/are needed - or not -*/ /**************************************************************************/ -INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - - /* The firmware expects a trace buffer only when: - * - Logtype is "trace" AND - * - at least one LogGroup is configured - * - the Driver Mode is not Guest - */ - if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) - && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) - && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) - && !PVRSRV_VZ_MODE_IS(GUEST)) - { - return IMG_TRUE; - } - - return IMG_FALSE; -} - /*************************************************************************/ /*! 
@Function RGXTraceBufferDeinit @@ -1727,17 +1211,16 @@ INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) */ /**************************************************************************/ static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) { - RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; IMG_UINT32 i; for (i = 0; i < RGXFW_THREAD_NUM; i++) { if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i]) { - if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL) + if (psDevInfo->apui32TraceBuffer[i] != NULL) { DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); - psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL; + psDevInfo->apui32TraceBuffer[i] = NULL; } DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); @@ -1768,30 +1251,33 @@ PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0"; /* Check AppHint value for module-param FWTraceBufSizeInDWords */ - OSCreateKMAppHintState(&pvAppHintState); + OSCreateAppHintState(&pvAppHintState); ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWTraceBufSizeInDWords, &ui32DefaultTraceBufSize, - &psTraceBufCtl->ui32TraceBufSizeInDWords); - OSFreeKMAppHintState(pvAppHintState); + &psDevInfo->ui32TraceBufSizeInDWords); + OSFreeAppHintState(pvAppHintState); pvAppHintState = NULL; - if (psTraceBufCtl->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS || - psTraceBufCtl->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS) + /* Write tracebuf size once to devmem */ + psTraceBufCtl->ui32TraceBufSizeInDWords = psDevInfo->ui32TraceBufSizeInDWords; + + if (psDevInfo->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS || + psDevInfo->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS) { PVR_DPF((PVR_DBG_ERROR, "%s: Requested trace buffer size (%u) out of its minimum (%u) & maximum (%u) range. 
Exiting error.", __func__, - psTraceBufCtl->ui32TraceBufSizeInDWords, + psDevInfo->ui32TraceBufSizeInDWords, RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS, RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS)); eError = PVRSRV_ERROR_OUT_OF_RANGE; goto exit_error; } - uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + uiTraceBufSizeInBytes = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++) { @@ -1811,7 +1297,7 @@ PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, pszBufferName, &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum], &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer, - (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer, + (void**)&psDevInfo->apui32TraceBuffer[ui32FwThreadNum], RFW_FWADDR_NOREF_FLAG); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); } @@ -1834,14 +1320,14 @@ PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, @Input psDevInfo RGX device info */ /*************************************************************************/ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_BOOL bEnableSignatureChecks) + RGX_INIT_APPHINTS *psApphints, + IMG_UINT32 ui32HWPerfCountersDataSize) { - IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; - IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; + IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; + IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump RGXFW Init data"); - if (!bEnableSignatureChecks) + if (!psApphints->bEnableSignatureChecks) { PDUMPCOMMENT(psDevInfo->psDeviceNode, "(to enable rgxfw signatures place the following line after the RTCONF line)"); @@ -1860,10 +1346,10 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump rgxfw hwperfctl structure"); - DevmemPDumpLoadZeroMem(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, - 0, - ui32HWPerfCountersDataSize, - PDUMP_FLAGS_CONTINUOUS); + DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc, + 0, + ui32HWPerfCountersDataSize, + PDUMP_FLAGS_CONTINUOUS); PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump rgxfw trace control structure"); @@ -1925,10 +1411,8 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "RTCONF: run-time configuration"); + /* Dump the config options so they can be edited. */ - /* Dump the config options so they can be edited. 
- * - */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the FW system config options here)"); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -1937,10 +1421,6 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN); -#if defined(SUPPORT_VALIDATION) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN); -#endif /* defined(SUPPORT_VALIDATION) */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -1950,6 +1430,12 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, +#if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX) + "( Try overlapping DM pipelines: 0x%08x)", RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES_EN); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Enable DM pipeline roadblocks: 0x%08x)", RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN); + PDUMPCOMMENT(psDevInfo->psDeviceNode, +#endif "( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY); @@ -1961,22 +1447,21 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER); - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); #if defined(SUPPORT_PDVFS) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); #endif /* defined(SUPPORT_PDVFS) */ + } #endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -1995,11 +1480,15 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); + "( Ctx Switch GEOM Enable: 0x%08x)", 
RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN); +#endif PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2008,12 +1497,50 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Lower Priority Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM); +#endif DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc, offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags), ui32FwOsCfgFlags, PDUMP_FLAGS_CONTINUOUS); +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + { + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + IMG_BOOL bRunTimeUpdate = IMG_FALSE; + IMG_UINT32 ui32DstOffset = psDevInfo->psRGXFWIfRuntimeCfgMemDesc->uiOffset + offsetof(RGXFWIF_RUNTIME_CFG, ui32PowUnitsState); + IMG_CHAR aszPowUnitsMaskRegVar[] = ":SYSMEM:$1"; + IMG_CHAR aszPowUnitsEnable[] = "RUNTIME_POW_UNITS_MASK"; + PMR *psPMR = (PMR *)(psDevInfo->psRGXFWIfRuntimeCfgMemDesc->psImport->hPMR); + + + if (bRunTimeUpdate) + { + PDUMPIF(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags); + } + + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, + "Load initial value power units mask in FW runtime configuration"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + ui32DstOffset, + psDevInfo->psRGXFWIfRuntimeCfg->ui32PowUnitsState, + ui32PDumpFlags); + + if (bRunTimeUpdate) + { + PDUMPELSE(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, "Read initial SPU mask value from HW registers"); + PDumpRegRead32ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SPU_ENABLE, aszPowUnitsMaskRegVar, ui32PDumpFlags); + PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, aszPowUnitsMaskRegVar, ui32AllPowUnitsMask, ui32PDumpFlags); + PDumpInternalVarToMemLabel(psPMR, ui32DstOffset, aszPowUnitsMaskRegVar, ui32PDumpFlags); + PDUMPFI(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags); + } + } +#endif #if defined(SUPPORT_SECURITY_VALIDATION) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2028,10 +1555,10 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( Execute FW code from secure (non-FW) memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, - offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags), - psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags, - PDUMP_FLAGS_CONTINUOUS); -#endif + offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags), + psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags, + PDUMP_FLAGS_CONTINUOUS); +#endif /* defined(SUPPORT_SECURITY_VALIDATION) */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "( PID filter 
type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)", @@ -2039,12 +1566,12 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, - offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode), - psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode, - PDUMP_FLAGS_CONTINUOUS); + offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode), + psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode, + PDUMP_FLAGS_CONTINUOUS); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))", + "( PID filter PID/DriverID list (Up to %u entries. Terminate with a zero PID))", RGXFWIF_PID_FILTER_MAX_NUM_PIDS); { IMG_UINT32 i; @@ -2060,11 +1587,10 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, const IMG_DEVMEM_OFFSET_T uiPIDOff = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID); - const IMG_DEVMEM_OFFSET_T uiOSIDOff - = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID); + const IMG_DEVMEM_OFFSET_T uiDriverIDOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32DriverID); - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "(PID and OSID pair %u)", i); + PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and DriverID pair %u)", i); PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)"); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, @@ -2072,9 +1598,9 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, 0, PDUMP_FLAGS_CONTINUOUS); - PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)"); + PDUMPCOMMENT(psDevInfo->psDeviceNode, "(DriverID)"); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, - uiOSIDOff, + uiDriverIDOff, 0, PDUMP_FLAGS_CONTINUOUS); } @@ -2086,7 +1612,11 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the log config here)"); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Log Type: set bit 0 for TRACE, reset for TBI)"); + "( Log Type: TRACE mode using shared memory buffer: 0x00000001)"); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( TBI mode via external interface or sim support: 0x00000000)"); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Note: TBI mode will hang on most hardware devices!)"); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2109,17 +1639,16 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP); - -#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) - { - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA); - } -#endif - PDUMPCOMMENT(psDevInfo->psDeviceNode, "( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( VZ Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_VZ); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( SAFETY Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SAFETY); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( VERBOSE Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_VERBOSE); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( CUSTOMER Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CUSTOMER); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( 
DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, @@ -2136,13 +1665,19 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, #if defined(SUPPORT_USER_REGISTER_CONFIGURATION) PDUMPCOMMENT(psDevInfo->psDeviceNode, - "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))", + "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), " +#if defined(RGX_FEATURE_TLA_BIT_MASK) + "tla(%d), " +#endif + "tdm(%d))", RGXFWIF_REG_CFG_TYPE_PWR_ON, RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, RGXFWIF_REG_CFG_TYPE_TA, RGXFWIF_REG_CFG_TYPE_3D, RGXFWIF_REG_CFG_TYPE_CDM, +#if defined(RGX_FEATURE_TLA_BIT_MASK) RGXFWIF_REG_CFG_TYPE_TLA, +#endif RGXFWIF_REG_CFG_TYPE_TDM); { @@ -2181,7 +1716,7 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, @Description Allocate a Guard Page at the start of a Guest's Main Heap - @Input psDevceNode + @Input psDevInfo @Return PVRSRV_ERROR ******************************************************************************/ @@ -2213,38 +1748,36 @@ static PVRSRV_ERROR RGXSetupFwGuardPage(PVRSRV_RGXDEV_INFO *psDevInfo) @Return PVRSRV_ERROR ******************************************************************************/ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 *pui32TPUTrilinearFracMask, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, - FW_PERF_CONF eFirmwarePerf) + IMG_UINT32 ui32HWPerfCountersDataSize) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + IMG_UINT32 ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; +#endif + RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; +#if defined(PDUMP) + IMG_UINT32 ui32SignatureChecksBufSize = psApphints->ui32SignatureChecksBufSize; +#endif psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); /* Sys Fw init data */ eError = RGXSetupFwAllocation(psDevInfo, - (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & - RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), - sizeof(RGXFWIF_SYSINIT), - "FwSysInitStructure", - &psDevInfo->psRGXFWIfSysInitMemDesc, - NULL, - (void**) &psDevInfo->psRGXFWIfSysInit, - RFW_FWADDR_FLAG_NONE); + (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSINIT), + "FwSysInitStructure", + &psDevInfo->psRGXFWIfSysInitMemDesc, + NULL, + (void**) &psDevInfo->psRGXFWIfSysInit, + RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); /* Setup Fault read register */ @@ -2259,48 +1792,72 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, { RGX_DATA *psRGXData = (RGX_DATA*) 
psDeviceNode->psDevConfig->hDevData; IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland; - IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || - (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); + IMG_BOOL bEnableRDPowIsland = ((psApphints->eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || + (psApphints->eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; } -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; -#if defined(SUPPORT_PDVFS) - { - RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; - IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; - - /* Pro-active DVFS depends on Workload Estimation */ - psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; - psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; - PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); - if (psDVFSDeviceCfg->pasOPPTable != NULL) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; +#endif +#if defined(SUPPORT_FW_OPP_TABLE) { - if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) + RGXFWIF_OPP_INFO *psOPPInfo; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; + + /* Pro-active DVFS depends on Workload Estimation */ + psOPPInfo = &psFwSysInitScratch->sOPPInfo; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; +#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP) && !defined(NO_HARDWARE) + if (psDVFSDeviceCfg->bDTConfig) { - PVR_DPF((PVR_DBG_ERROR, - "%s: OPP Table too large: Size = %u, Maximum size = %lu", - __func__, - psDVFSDeviceCfg->ui32OPPTableSize, - (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto fail; + /* OPP table configured from Device tree */ + eError = DVFSCopyOPPTable(psDeviceNode, + psOPPInfo, + ARRAY_SIZE(psOPPInfo->asOPPValues)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to copy OPP table to FW init buffer (%u)", eError)); + goto fail; + } } +#endif + if (!psDVFSDeviceCfg->bDTConfig) + { + PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); + + if (psDVFSDeviceCfg->pasOPPTable != NULL) + { + if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psOPPInfo->asOPPValues)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OPP Table too large: Size = %u, Maximum size = %lu", + __func__, + psDVFSDeviceCfg->ui32OPPTableSize, + (unsigned long)(ARRAY_SIZE(psOPPInfo->asOPPValues)))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail; + } + + OSDeviceMemCopy(psOPPInfo->asOPPValues, + psDVFSDeviceCfg->pasOPPTable, + sizeof(psOPPInfo->asOPPValues)); - OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, - psDVFSDeviceCfg->pasOPPTable, - sizeof(psPDVFSOPPInfo->asOPPValues)); + psOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; - psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; + } + } +#if defined(SUPPORT_PDVFS) ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; +#endif } +#endif /* defined(SUPPORT_FW_OPP_TABLE) */ } -#endif /* defined(SUPPORT_PDVFS) */ -#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ /* FW trace control structure */ eError = RGXSetupFwAllocation(psDevInfo, @@ -2317,15 +1874,16 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, if 
(!psDeviceNode->bAutoVzFwIsUp) { /* Set initial firmware log type/group(s) */ - if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) + if (psApphints->ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) { eError = PVRSRV_ERROR_INVALID_PARAMS; PVR_DPF((PVR_DBG_ERROR, "%s: Invalid initial log type (0x%X)", - __func__, ui32LogType)); + __func__, psApphints->ui32LogType)); goto fail; } - psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = psApphints->ui32LogType; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); } /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource @@ -2336,7 +1894,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(SUPPORT_AUTOVZ) /* always allocate trace buffer for AutoVz Host drivers to allow * deterministic addresses of all SysData structures */ - if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo))) + if ((PVRSRV_VZ_MODE_IS(HOST, DEVINFO, psDevInfo)) || (RGXTraceBufferIsInitRequired(psDevInfo))) #else if (RGXTraceBufferIsInitRequired(psDevInfo)) #endif @@ -2349,95 +1907,36 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail); eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & - RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), - sizeof(RGXFWIF_SYSDATA), - "FwSysData", - &psDevInfo->psRGXFWIfFwSysDataMemDesc, - &psFwSysInitScratch->sFwSysData, - (void**) &psDevInfo->psRGXFWIfFwSysData, - RFW_FWADDR_NOREF_FLAG); + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSDATA), + "FwSysData", + &psDevInfo->psRGXFWIfFwSysDataMemDesc, + &psFwSysInitScratch->sFwSysData, + (void**) &psDevInfo->psRGXFWIfFwSysData, + RFW_FWADDR_NOREF_FLAG); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); /* GPIO validation setup */ psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF; -#if defined(SUPPORT_VALIDATION) - { - IMG_INT32 ui32AppHintDefault; - IMG_INT32 ui32GPIOValidationMode; - void *pvAppHintState = NULL; - - /* Check AppHint for GPIO validation mode */ - OSCreateKMAppHintState(&pvAppHintState); - ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, - pvAppHintState, - GPIOValidationMode, - &ui32AppHintDefault, - &ui32GPIOValidationMode); - OSFreeKMAppHintState(pvAppHintState); - pvAppHintState = NULL; - - if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. 
Disabling GPIO validation.", - __func__, - ui32GPIOValidationMode, - RGXFWIF_GPIO_VAL_LAST)); - } - else - { - psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode; - } - - psFwSysInitScratch->eGPIOValidationMode = ui32GPIOValidationMode; - } -#endif #if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) eError = RGXFWSetupCounterBuffer(psDevInfo, &psDevInfo->psCounterBufferMemDesc, - PAGE_SIZE, - &psFwSysInitScratch->sCounterDumpCtl, - "CounterBuffer"); + OSGetPageSize(), + &psFwSysInitScratch->sCounterDumpCtl); PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail); -#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ - -#if defined(SUPPORT_VALIDATION) - { - IMG_UINT32 ui32EnablePollOnChecksumErrorStatus; - IMG_UINT32 ui32ApphintDefault = 0; - void *pvAppHintState = NULL; - /* Check AppHint for polling on GPU Checksum status */ - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, - pvAppHintState, - EnablePollOnChecksumErrorStatus, - &ui32ApphintDefault, - &ui32EnablePollOnChecksumErrorStatus); - OSFreeKMAppHintState(pvAppHintState); - pvAppHintState = NULL; + PVR_DPF((PVR_DBG_WARNING, "Counter buffer allocated at %p, size %zu Bytes.", psDevInfo->psCounterBufferMemDesc, OSGetPageSize())); +#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ - switch (ui32EnablePollOnChecksumErrorStatus) - { - case 0: /* no checking */ break; - case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break; - case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break; - default: - PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus)); - break; - } - } -#endif /* defined(SUPPORT_VALIDATION) */ #if defined(SUPPORT_FIRMWARE_GCOV) eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo, - &psDevInfo->psFirmwareGcovBufferMemDesc, - RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE, - &psFwSysInitScratch->sFirmwareGcovCtl, - "FirmwareGcovBuffer"); + &psDevInfo->psFirmwareGcovBufferMemDesc, + RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE, + &psFwSysInitScratch->sFirmwareGcovCtl, + "FirmwareGcovBuffer"); PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail); psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE; #endif /* defined(SUPPORT_FIRMWARE_GCOV) */ @@ -2449,58 +1948,71 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN; } - /* Setup Signature and Checksum Buffers for TDM, GEOM and 3D */ + /* Setup Signature and Checksum Buffers */ + psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL; + psDevInfo->ui32SigTDMChecksSize = 0; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + /* Buffer allocated only when feature present because all known TDM + * signature registers are dependent on this feature being present */ + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]); + PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); + psDevInfo->ui32SigTDMChecksSize = ui32SignatureChecksBufSize; + } + eError = RGXFWSetupSignatureChecks(psDevInfo, - &psDevInfo->psRGXFWSigTAChecksMemDesc, - ui32SignatureChecksBufSize, - &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]); - PVR_LOG_GOTO_IF_ERROR(eError, "TA Signature check setup", fail); + &psDevInfo->psRGXFWSigTAChecksMemDesc, + ui32SignatureChecksBufSize, + 
&psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]); + PVR_LOG_GOTO_IF_ERROR(eError, "GEOM Signature check setup", fail); psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize; eError = RGXFWSetupSignatureChecks(psDevInfo, - &psDevInfo->psRGXFWSig3DChecksMemDesc, - ui32SignatureChecksBufSize, - &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]); + &psDevInfo->psRGXFWSig3DChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]); PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail); psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize; - psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL; - psDevInfo->ui32SigTDM2DChecksSize = 0; -#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && + RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) { - /* Buffer allocated only when feature present because, all known TDM - * signature registers are dependent on this feature being present */ eError = RGXFWSetupSignatureChecks(psDevInfo, - &psDevInfo->psRGXFWSigTDM2DChecksMemDesc, - ui32SignatureChecksBufSize, - &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]); - PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); - psDevInfo->ui32SigTDM2DChecksSize = ui32SignatureChecksBufSize; + &psDevInfo->psRGXFWSigRDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_RAY]); + PVR_LOG_GOTO_IF_ERROR(eError, "RDM Signature check setup", fail); + psDevInfo->ui32SigRDMChecksSize = ui32SignatureChecksBufSize; } #endif - if (!bEnableSignatureChecks) + if (!psApphints->bEnableSignatureChecks) { psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0; psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM].sBuffer.ui32Addr = 0x0; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_RAY].sBuffer.ui32Addr = 0x0; +#endif } #endif /* defined(PDUMP) */ eError = RGXFWSetupAlignChecks(psDeviceNode, - &psFwSysInitScratch->sAlignChecks); + &psFwSysInitScratch->sAlignChecks); PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); - psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; + psFwSysInitScratch->ui32FilterFlags = psApphints->ui32FilterFlags; /* Fill the remaining bits of fw the init data */ psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE; - psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE; - psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE; - psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE; #if defined(FIX_HW_BRN_65273_BIT_MASK) if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) @@ -2511,30 +2023,17 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, } #endif -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask; - } -#endif -#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) - { - eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); - 
PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); - } -#endif #if defined(SUPPORT_PDVFS) /* Core clock rate */ eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & - RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), - sizeof(IMG_UINT32), - "FwPDVFSCoreClkRate", - &psDevInfo->psRGXFWIFCoreClkRateMemDesc, - &psFwSysInitScratch->sCoreClockRate, - (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, - RFW_FWADDR_NOREF_FLAG); + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(IMG_UINT32), + "FwPDVFSCoreClkRate", + &psDevInfo->psRGXFWIFCoreClkRateMemDesc, + &psFwSysInitScratch->sCoreClockRate, + (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, + RFW_FWADDR_NOREF_FLAG); PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail); #endif { @@ -2559,7 +2058,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, uiMemAllocFlags | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, "FwStartTimesArray", - &psDevInfo->psStartTimeMemDesc); + & psDevInfo->psStartTimeMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2655,15 +2154,19 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer; #endif /* defined(SUPPORT_TBI_INTERFACE) */ - /* Allocate shared buffer for GPU utilisation */ + /* Allocate shared buffer for GPU utilisation. + * Enable FIRMWARE_CACHED to reduce read latency in the FW. + * The FW flushes the cache after any writes. + */ eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), - sizeof(RGXFWIF_GPU_UTIL_FWCB), + sizeof(RGXFWIF_GPU_UTIL_FW), "FwGPUUtilisationBuffer", - &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, - &psFwSysInitScratch->sGpuUtilFWCbCtl, - (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, + &psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc, + &psFwSysInitScratch->sGpuUtilFWCtl, + (void**) &psDevInfo->psRGXFWIfGpuUtilFW, RFW_FWADDR_NOREF_FLAG); PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); @@ -2680,7 +2183,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(SUPPORT_USER_REGISTER_CONFIGURATION) eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), sizeof(RGXFWIF_REG_CFG), "FwRegisterConfigStructure", @@ -2691,32 +2194,62 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail); #endif - psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); +#if defined(SUPPORT_SECURE_CONTEXT_SWITCH) + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + RGXFW_SCRATCH_BUF_SIZE, + "FwScratchBuf", + &psDevInfo->psRGXFWScratchBufMemDesc, + &psFwSysInitScratch->pbFwScratchBuf, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware scratch buffer allocation", fail); +#endif + + psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(psApphints->ui32HWPerfFWBufSize); + + /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer + * 
accessed by the FW. The MISR may try to write one packet the size of the L1 + * buffer in some scenarios. When logging is enabled in the MISR, it can be seen + * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers + * are the more chance of this happening. + * Size chosen to allow MISR to write an L1 sized packet and for the client + * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. + */ + psDevInfo->ui32RGXL2HWPerfBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize + + (psDevInfo->ui32RGXFWIfHWPerfBufSize>>1); + /* Second stage initialisation or HWPerf, hHWPerfLock created in first * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ - if (psDevInfo->ui64HWPerfFilter == 0) + if (psDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF] == 0) { - psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter; - psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = + RGXHWPerfFwSetEventFilter(psDevInfo, RGX_HWPERF_L2_STREAM_HWPERF, + (IMG_UINT64) psApphints->ui32HWPerfFilter0 | + ((IMG_UINT64) psApphints->ui32HWPerfFilter1 << 32)); } else { /* The filter has already been modified. This can happen if * pvr/apphint/EnableFTraceGPU was enabled. */ - psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFwFilter; } #if !defined(PDUMP) /* Allocate if HWPerf filter has already been set. This is possible either * by setting a proper AppHint or enabling GPU ftrace events. */ - if (psDevInfo->ui64HWPerfFilter != 0) + if (psFwSysInitScratch->ui64HWPerfFilter != 0) #endif { /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources * (irrespective of HWPerf enabled or not), given that HWPerf can be * enabled during PDump playback via RTCONF at any point of time. */ - eError = RGXHWPerfInitOnDemandResources(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail); + eError = RGXHWPerfInitOnDemandL1Buffer(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandL1Buffer", fail); + + eError = RGXHWPerfInitOnDemandL2Stream(psDevInfo, RGX_HWPERF_L2_STREAM_HWPERF); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandL2Stream", fail); } eError = RGXSetupFwAllocation(psDevInfo, @@ -2733,7 +2266,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) ? 
IMG_FALSE : IMG_TRUE; - psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; + psFwSysInitScratch->eFirmwarePerf = psApphints->eFirmwarePerf; #if defined(PDUMP) /* default: no filter */ @@ -2741,28 +2274,17 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; #endif -#if defined(SUPPORT_VALIDATION) - { - IMG_UINT32 dm; - - /* TPU trilinear rounding mask override */ - for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) - { - psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; - } - } -#endif #if defined(SUPPORT_SECURITY_VALIDATION) { PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS; - PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags); + PVRSRV_SET_PHYS_HEAP_HINT(FW_PRIV_DATA, uiFlags); PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test"); eError = DevmemFwAllocateExportable(psDeviceNode, OSGetPageSize(), OSGetPageSize(), - RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, "FwExNonSecureBuffer", &psDevInfo->psRGXFWIfNonSecureBufMemDesc); PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail); @@ -2794,46 +2316,112 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psFwSysInitScratch->ui32TFBCCompressionControl = (ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >> RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT; } -#endif +#endif /* RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK */ /* Initialize FW started flag */ psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; psFwSysInitScratch->ui32MarkerVal = 1; + psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = RGX_VZ_CONNECTION_COOLDOWN_PERIOD; if (!psDeviceNode->bAutoVzFwIsUp) { - IMG_UINT32 ui32OSIndex; + IMG_UINT32 ui32DriverID; RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; /* Required info by FW to calculate the ActivePM idle timer latency */ psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; +#if defined(SUPPORT_SOC_TIMER) + psFwSysInitScratch->ui32InitialSOCClockSpeed = psRGXData->psRGXTimingInfo->ui32SOCClockSpeed; +#endif psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; /* Initialise variable runtime configuration to the system defaults */ psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; +#if defined(SUPPORT_SOC_TIMER) + psRuntimeCfg->ui32SOCClockSpeed = psFwSysInitScratch->ui32InitialSOCClockSpeed; +#endif psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms; psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE; - psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS; - if (PVRSRV_VZ_MODE_IS(NATIVE)) + if ((RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US > 0U) && (RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US < 1000U)) { - psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0; + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_LOG_GOTO_IF_ERROR(eError, + "RGXSetupFwSysData: RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US must be either 0 (disabled) or greater than 1000", + fail); } else { - for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++) - { - const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] = - {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY, 
- RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY}; + psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; + } + + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) + { + psRuntimeCfg->ai32DriverPriority[RGXFW_HOST_DRIVER_ID] = 0; + psRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID] = RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP; + psRuntimeCfg->aui32TSPercentage[RGXFW_HOST_DRIVER_ID] = (IMG_UINT8)RGX_DRIVERID_0_DEFAULT_TIME_SLICE; + } + else + { + const RGX_QOS_DEFAULTS asQosDefaults[RGXFW_MAX_NUM_OSIDS] = { + RGX_QOS_DEFAULTS_INIT(0), +#if (RGXFW_MAX_NUM_OSIDS > 1) + RGX_QOS_DEFAULTS_INIT(1), +#if (RGXFW_MAX_NUM_OSIDS > 2) + RGX_QOS_DEFAULTS_INIT(2), + RGX_QOS_DEFAULTS_INIT(3), + RGX_QOS_DEFAULTS_INIT(4), + RGX_QOS_DEFAULTS_INIT(5), + RGX_QOS_DEFAULTS_INIT(6), + RGX_QOS_DEFAULTS_INIT(7), +#if (RGXFW_MAX_NUM_OSIDS > 8) + RGX_QOS_DEFAULTS_INIT(8), + RGX_QOS_DEFAULTS_INIT(9), + RGX_QOS_DEFAULTS_INIT(10), + RGX_QOS_DEFAULTS_INIT(11), + RGX_QOS_DEFAULTS_INIT(12), + RGX_QOS_DEFAULTS_INIT(13), + RGX_QOS_DEFAULTS_INIT(14), + RGX_QOS_DEFAULTS_INIT(15), + RGX_QOS_DEFAULTS_INIT(16), + RGX_QOS_DEFAULTS_INIT(17), + RGX_QOS_DEFAULTS_INIT(18), + RGX_QOS_DEFAULTS_INIT(19), + RGX_QOS_DEFAULTS_INIT(20), + RGX_QOS_DEFAULTS_INIT(21), + RGX_QOS_DEFAULTS_INIT(22), + RGX_QOS_DEFAULTS_INIT(23), + RGX_QOS_DEFAULTS_INIT(24), + RGX_QOS_DEFAULTS_INIT(25), + RGX_QOS_DEFAULTS_INIT(26), + RGX_QOS_DEFAULTS_INIT(27), + RGX_QOS_DEFAULTS_INIT(28), + RGX_QOS_DEFAULTS_INIT(29), + RGX_QOS_DEFAULTS_INIT(30), + RGX_QOS_DEFAULTS_INIT(31), +#if (RGXFW_MAX_NUM_OSIDS > 32) +#error "Support for more than 32 OSIDs not implemented." +#endif +#endif /* RGXFW_MAX_NUM_OSIDS > 8 */ +#endif /* RGXFW_MAX_NUM_OSIDS > 2 */ +#endif /* RGXFW_MAX_NUM_OSIDS > 1 */ + }; + + FOREACH_SUPPORTED_DRIVER(ui32DriverID) + { /* Set up initial priorities between different OSes */ - psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex]; + psRuntimeCfg->ai32DriverPriority[ui32DriverID] = asQosDefaults[ui32DriverID].i32Priority; + psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = asQosDefaults[ui32DriverID].ui32IsolationGroups; + psRuntimeCfg->aui32TSPercentage[ui32DriverID] = (asQosDefaults[ui32DriverID].ui32TSPercentage <= + PVRSRV_VZ_TIME_SLICE_MAX) ? 
+ asQosDefaults[ui32DriverID].ui32TSPercentage:(0); } } + psRuntimeCfg->ui32TSIntervalMs = RGX_DRIVER_DEFAULT_TIME_SLICE_INTERVAL; #if defined(PVR_ENABLE_PHR) && defined(PDUMP) psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET; @@ -2841,26 +2429,17 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psRuntimeCfg->ui32PHRMode = 0; #endif - /* Initialize the DefaultDustsNumInit Field to Max Dusts */ - psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; + /* Initialize the PowUnitsState Field to Max Dusts */ + psRuntimeCfg->ui32PowUnitsState = psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount; /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfRuntimeCfg); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfRuntimeCfg, FLUSH); /* Setup FW coremem data */ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) { psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; - -#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) - { - RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore, - psDevInfo->psRGXFWIfCorememDataStoreMemDesc, - &psFwSysInitScratch->sCorememDataStore.pbyFWAddr, - 0); - } -#endif } psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; @@ -2870,32 +2449,40 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, { IMG_UINT64 ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(), RGXFWIF_GPU_UTIL_STATE_IDLE); RGXFWIF_DM eDM; + IMG_UINT64 ui64LastWordTimeShifted = + RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64() >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT, RGXFWIF_GPU_UTIL_STATE_IDLE); + IMG_UINT32 ui32DriverID; - psDevInfo->psRGXFWIfGpuUtilFWCb->ui64GpuLastWord = ui64LastWord; + psDevInfo->psRGXFWIfGpuUtilFW->ui64GpuLastWord = ui64LastWord; - for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - IMG_UINT32 ui32OSid; + RGXFWIF_GPU_STATS *psStats = &psDevInfo->psRGXFWIfGpuUtilFW->sStats[ui32DriverID]; - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + for (eDM = 0; eDM < RGXFWIF_GPU_UTIL_DM_MAX; eDM++) { - psDevInfo->psRGXFWIfGpuUtilFWCb->aaui64DMOSLastWord[eDM][ui32OSid] = ui64LastWord; + psStats->aui32DMOSLastWord[eDM] = (IMG_UINT32)(ui64LastWordTimeShifted & IMG_UINT32_MAX); + psStats->aui32DMOSLastWordWrap[eDM] = (IMG_UINT32)(ui64LastWordTimeShifted >> 32); } } + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFW, FLUSH); } /* init HWPERF data */ - psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0; - psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0; - psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0; + psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx = 0; + psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWIdx = 0; + psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount = 0; psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; + psDevInfo->psRGXFWIfFwSysData->ui32MemFaultCheck = 0; + // flush write buffers for psRGXFWIfFwSysData OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwSysData); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, FLUSH); /*Send through the BVNC Feature Flags*/ eError = 
RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); @@ -2903,10 +2490,13 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, /* populate the real FwOsInit structure with the values stored in the scratch copy */ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfSysInit, + FLUSH); } OSFreeMem(psFwSysInitScratch); + return PVRSRV_OK; fail: @@ -2942,7 +2532,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT)); - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { eError = RGXSetupFwGuardPage(psDevInfo); PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware heap guard pages", fail); @@ -2985,6 +2575,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* Might be uncached. Be conservative and use a DeviceMemSet */ OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, FLUSH); /* Allocate a sync for power management */ eError = SyncPrimContextCreate(psDevInfo->psDeviceNode, @@ -2997,6 +2588,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* Set up kernel CCB */ eError = RGXSetupCCB(psDevInfo, &psDevInfo->psKernelCCBCtl, + &psDevInfo->psKernelCCBCtlLocal, &psDevInfo->psKernelCCBCtlMemDesc, &psDevInfo->psKernelCCB, &psDevInfo->psKernelCCBMemDesc, @@ -3025,6 +2617,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* Set up firmware CCB */ eError = RGXSetupCCB(psDevInfo, &psDevInfo->psFirmwareCCBCtl, + &psDevInfo->psFirmwareCCBCtlLocal, &psDevInfo->psFirmwareCCBCtlMemDesc, &psDevInfo->psFirmwareCCB, &psDevInfo->psFirmwareCCBMemDesc, @@ -3053,31 +2646,42 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* flush write buffers for psRGXFWIfFwOsData */ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwOsData); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, + FLUSH); sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Set up Workload Estimation firmware CCB */ - eError = RGXSetupCCB(psDevInfo, - &psDevInfo->psWorkEstFirmwareCCBCtl, - &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, - &psDevInfo->psWorkEstFirmwareCCB, - &psDevInfo->psWorkEstFirmwareCCBMemDesc, - &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, - &sFwOsInitScratch.psWorkEstFirmwareCCB, - RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, - sizeof(RGXFWIF_WORKEST_FWCCB_CMD), - RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, - "FwWEstCCB"); - PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Set up Workload Estimation firmware CCB */ + eError = RGXSetupCCB(psDevInfo, + &psDevInfo->psWorkEstFirmwareCCBCtl, + &psDevInfo->psWorkEstFirmwareCCBCtlLocal, + &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, + &psDevInfo->psWorkEstFirmwareCCB, + &psDevInfo->psWorkEstFirmwareCCBMemDesc, + &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, + &sFwOsInitScratch.psWorkEstFirmwareCCB, + RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, + sizeof(RGXFWIF_WORKEST_FWCCB_CMD), + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + "FwWEstCCB"); + PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); + } #endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ - /* Initialise the compatibility check data */ - 
RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); - RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Initialise the compatibility check data */ + RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); + RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); + } /* populate the real FwOsInit structure with the values stored in the scratch copy */ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfOsInit, + FLUSH); return PVRSRV_OK; @@ -3099,33 +2703,24 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, @Return PVRSRV_ERROR ******************************************************************************/ PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32FwOsCfgFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 *pui32TPUTrilinearFracMask, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2) + IMG_UINT32 ui32FwOsCfgFlags) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32HWPerfCountersDataSize; eError = RGXSetupFwOsData(psDeviceNode, - ui32KCCBSizeLog2, - ui32HWRDebugDumpLimit, + psApphints->ui32KCCBSizeLog2, + psApphints->ui32HWRDebugDumpLimit, ui32FwOsCfgFlags); PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail); - if (PVRSRV_VZ_MODE_IS(GUEST)) + ui32HWPerfCountersDataSize = sizeof(RGXFWIF_HWPERF_CTL); + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* Guest drivers do not configure system-wide firmware data */ psDevInfo->psRGXFWIfSysInit = NULL; @@ -3134,19 +2729,10 @@ PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, { /* Native and Host drivers must initialise the firmware's system data */ eError = RGXSetupFwSysData(psDeviceNode, - bEnableSignatureChecks, - ui32SignatureChecksBufSize, - ui32HWPerfFWBufSizeKB, - ui64HWPerfFilter, + psApphints, ui32ConfigFlags, ui32ConfigFlagsExt, - ui32LogType, - ui32FilterFlags, - ui32JonesDisableMask, - ui32HWPerfCountersDataSize, - pui32TPUTrilinearFracMask, - eRGXRDPowerIslandConf, - eFirmwarePerf); + ui32HWPerfCountersDataSize); PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail); } @@ -3154,8 +2740,8 @@ PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(PDUMP) RGXPDumpLoadFWInitData(psDevInfo, - ui32HWPerfCountersDataSize, - bEnableSignatureChecks); + psApphints, + ui32HWPerfCountersDataSize); #endif /* PDUMP */ fail: @@ -3180,14 +2766,12 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) } #if defined(PDUMP) -#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) && - psDevInfo->psRGXFWSigTDM2DChecksMemDesc) + psDevInfo->psRGXFWSigTDMChecksMemDesc) { - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDM2DChecksMemDesc); - psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL; + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDMChecksMemDesc); + 
psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL; } -#endif if (psDevInfo->psRGXFWSigTAChecksMemDesc) { @@ -3200,6 +2784,16 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc); psDevInfo->psRGXFWSig3DChecksMemDesc = NULL; } + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && + RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1 && + psDevInfo->psRGXFWSigRDMChecksMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigRDMChecksMemDesc); + psDevInfo->psRGXFWSigRDMChecksMemDesc = NULL; + } +#endif #endif #if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) @@ -3220,15 +2814,15 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) RGXSetupFaultReadRegisterRollback(psDevInfo); - if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc) + if (psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc) { - if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL) + if (psDevInfo->psRGXFWIfGpuUtilFW != NULL) { - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); - psDevInfo->psRGXFWIfGpuUtilFWCb = NULL; + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc); + psDevInfo->psRGXFWIfGpuUtilFW = NULL; } - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); - psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL; + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc); + psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc = NULL; } if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc) @@ -3242,16 +2836,19 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL; } - if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) { - psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + { + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } } if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc) { if (psDevInfo->psRGXFWIfTraceBufCtl != NULL) { - /* first deinit/free the tracebuffer allocation */ + /* deinit/free the tracebuffer allocation */ RGXTraceBufferDeinit(psDevInfo); DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc); @@ -3307,12 +2904,6 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) } #endif -#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) - { - _FreeSLC3Fence(psDevInfo); - } -#endif #if defined(SUPPORT_PDVFS) if (psDevInfo->psRGXFWIFCoreClkRateMemDesc) { @@ -3326,6 +2917,15 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL; } #endif + +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + if (psDevInfo->psRGXFWIfActiveContextBufDesc) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc); + psDevInfo->psRGXFWIfActiveContextBufDesc = NULL; + } +#endif } /*! 
@@ -3343,22 +2943,28 @@ static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo) &psDevInfo->psKernelCCBRtnSlotsMemDesc); RGXFreeCCB(psDevInfo, &psDevInfo->psKernelCCBCtl, + &psDevInfo->psKernelCCBCtlLocal, &psDevInfo->psKernelCCBCtlMemDesc, &psDevInfo->psKernelCCB, &psDevInfo->psKernelCCBMemDesc); RGXFreeCCB(psDevInfo, &psDevInfo->psFirmwareCCBCtl, + &psDevInfo->psFirmwareCCBCtlLocal, &psDevInfo->psFirmwareCCBCtlMemDesc, &psDevInfo->psFirmwareCCB, &psDevInfo->psFirmwareCCBMemDesc); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFreeCCB(psDevInfo, - &psDevInfo->psWorkEstFirmwareCCBCtl, - &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, - &psDevInfo->psWorkEstFirmwareCCB, - &psDevInfo->psWorkEstFirmwareCCBMemDesc); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + RGXFreeCCB(psDevInfo, + &psDevInfo->psWorkEstFirmwareCCBCtl, + &psDevInfo->psWorkEstFirmwareCCBCtlLocal, + &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, + &psDevInfo->psWorkEstFirmwareCCB, + &psDevInfo->psWorkEstFirmwareCCBMemDesc); + } #endif if (psDevInfo->psPowSyncPrim != NULL) @@ -3491,6 +3097,50 @@ void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) } } +static INLINE PVRSRV_ERROR RGXUpdateLocalKCCBRoff(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; + IMG_UINT32 ui32ReadOffset; + + barrier(); /* Don't optimise order. Reads from device memory follow. */ + + /* update KCCB read offset */ + RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32ReadOffset, INVALIDATE); + ui32ReadOffset = psKCCBCtl->ui32ReadOffset; + + if (ui32ReadOffset > psKCCBCtlLocal->ui32WrapMask) + { + return PVRSRV_ERROR_KERNEL_CCB_OFFSET; + } + + psKCCBCtlLocal->ui32ReadOffset = ui32ReadOffset; + + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR RGXUpdateLocalFWCCBWoff(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + const RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; + IMG_UINT32 ui32WriteOffset; + + barrier(); /* Don't optimise order. Reads from device memory follow. 
*/ + + /* update FWCCB write offset */ + RGXFwSharedMemCacheOpValue(psFWCCBCtl->ui32WriteOffset, INVALIDATE); + ui32WriteOffset = psFWCCBCtl->ui32WriteOffset; + + if (ui32WriteOffset > psFWCCBCtlLocal->ui32WrapMask) + { + return PVRSRV_ERROR_KERNEL_CCB_OFFSET; + } + + psFWCCBCtlLocal->ui32WriteOffset = ui32WriteOffset; + + return PVRSRV_OK; +} + /****************************************************************************** FUNCTION : RGXAcquireKernelCCBSlot @@ -3502,31 +3152,33 @@ void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) RETURNS : PVRSRV_ERROR ******************************************************************************/ static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, - const RGXFWIF_CCB_CTL *psKCCBCtl, IMG_UINT32 *pui32Offset) { - IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; -#if defined(PDUMP) - const DEVMEM_MEMDESC *psKCCBCtrlMemDesc = psDevInfo->psKernelCCBCtlMemDesc; -#endif + IMG_UINT32 ui32NextWriteOffset; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; - ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; - ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + ui32NextWriteOffset = (psKCCBCtlLocal->ui32WriteOffset + 1) & psKCCBCtlLocal->ui32WrapMask; #if defined(PDUMP) /* Wait for sufficient CCB space to become available */ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, 0, - "Wait for kCCB woff=%u", ui32NextWriteOffset); - DevmemPDumpCBP(psKCCBCtrlMemDesc, + "Wait for space to write kCCB woff=%u", psKCCBCtlLocal->ui32WriteOffset); + DevmemPDumpCBP(psDevInfo->psKernelCCBCtlMemDesc, offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), - ui32NextWriteOffset, + psKCCBCtlLocal->ui32WriteOffset, 1, - (psKCCBCtl->ui32WrapMask + 1)); + (psKCCBCtlLocal->ui32WrapMask + 1)); #endif - if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset) + if (ui32NextWriteOffset == psKCCBCtlLocal->ui32ReadOffset) { - return PVRSRV_ERROR_KERNEL_CCB_FULL; + PVRSRV_ERROR eError = RGXUpdateLocalKCCBRoff(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); + + if (ui32NextWriteOffset == psKCCBCtlLocal->ui32ReadOffset) + { + return PVRSRV_ERROR_KERNEL_CCB_FULL; + } } *pui32Offset = ui32NextWriteOffset; return PVRSRV_OK; @@ -3542,18 +3194,24 @@ static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, RETURNS : PVRSRV_ERROR ******************************************************************************/ -static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc, - const RGXFWIF_CCB_CTL *psKCCBCtl) +static PVRSRV_ERROR RGXPollKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo) { - IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; + IMG_UINT32 ui32NextWriteOffset; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; - ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; - ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + ui32NextWriteOffset = (psKCCBCtlLocal->ui32WriteOffset + 1) & psKCCBCtlLocal->ui32WrapMask; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + if (ui32NextWriteOffset != psKCCBCtlLocal->ui32ReadOffset) { - - if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset) + return PVRSRV_OK; + } + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + PVRSRV_ERROR eError = RGXUpdateLocalKCCBRoff(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); + + if (ui32NextWriteOffset != psKCCBCtlLocal->ui32ReadOffset) { return PVRSRV_OK; } @@ -3568,7 +3226,7 @@ static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc } 
OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return PVRSRV_ERROR_KERNEL_CCB_FULL; } @@ -3641,6 +3299,10 @@ static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA); } + case RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA); + } case RGXFWIF_KCCB_CMD_FORCE_UPDATE: { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA); @@ -3689,26 +3351,25 @@ static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA); } - case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE: + case RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE: + case RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE: + case RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL: case RGXFWIF_KCCB_CMD_WDG_CFG: case RGXFWIF_KCCB_CMD_PHR_CFG: case RGXFWIF_KCCB_CMD_HEALTH_CHECK: - case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE: case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL: { /* No command specific data */ return offsetof(RGXFWIF_KCCB_CMD, uCmdData); } -#if defined(SUPPORT_VALIDATION) - case RGXFWIF_KCCB_CMD_RGXREG: + case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE: { - return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA); + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_DEV_VIRTADDR); } - case RGXFWIF_KCCB_CMD_GPUMAP: + case RGXFWIF_KCCB_CMD_CANCEL_WORK: { - return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_GPUMAP_DATA); + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CANCEL_WORK_DATA); } -#endif default: { /* Invalid (OR) Unused (OR) Newly added command type */ @@ -3723,10 +3384,13 @@ PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_ERROR eError; + RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], + INVALIDATE); eError = PVRSRVWaitForValueKM( (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, - RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFwSharedMemCacheOpExecPfn); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM"); #if defined(PDUMP) @@ -3758,18 +3422,26 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_ERROR eError; PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; IMG_UINT32 ui32NewWriteOffset; - IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + IMG_UINT32 ui32OldWriteOffset; IMG_UINT32 ui32CmdMemCopySize; #if !defined(PDUMP) PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32OldWriteOffset = psKCCBCtlLocal->ui32WriteOffset; + #else IMG_BOOL bContCaptureOn = PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); /* client connected or in pdump init phase */ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, uiPDumpFlags); /* Are we in capture range or continuous and not in a power transition */ + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32OldWriteOffset = psKCCBCtlLocal->ui32WriteOffset; + if (bContCaptureOn) { /* in capture range */ @@ 
-3787,12 +3459,13 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRVPollForValueKM(psDeviceNode, (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF, - POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP); + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, + NULL); /* Dump Init state of Kernel CCB control (read and write offset) */ PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "Initial state of kernel CCB Control, roff: %d, woff: %d", - psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset); + psKCCBCtl->ui32ReadOffset, psKCCBCtlLocal->ui32WriteOffset); DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, 0, @@ -3804,22 +3477,31 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, #endif #if defined(SUPPORT_AUTOVZ) - if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || - (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) && - !PVRSRV_VZ_MODE_IS(NATIVE)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" - "driver state = %u / firmware state = %u;" - "expected READY (%u/%u) or ACTIVE (%u/%u);", - __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), - RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, - RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); - eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; - goto _RGXSendCommandRaw_Exit; + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) + { + if ((likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && + (KM_OS_CONNECTION_IS(ACTIVE, psDevInfo) || KM_OS_CONNECTION_IS(READY, psDevInfo)))) || + (KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo))) + { + RGXUpdateAutoVzWdgToken(psDevInfo); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" + "driver state = %u / firmware state = %u;" + "expected READY (%u/%u) or ACTIVE (%u/%u) or in transition (%u/%u);", + __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, + RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE, + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_ACTIVE)); + eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; + goto _RGXSendCommandRaw_Exit; + } } #endif - PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize); if (!OSLockIsLocked(psDeviceNode->hPowerLock)) { PVR_DPF((PVR_DBG_ERROR, @@ -3829,7 +3511,7 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, } /* Acquire a slot in the CCB */ - eError = RGXAcquireKernelCCBSlot(psDevInfo, psKCCBCtl, &ui32NewWriteOffset); + eError = RGXAcquireKernelCCBSlot(psDevInfo, &ui32NewWriteOffset); if (eError != PVRSRV_OK) { goto _RGXSendCommandRaw_Exit; @@ -3840,8 +3522,9 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); /* Copy the command into the CCB */ - OSCachedMemCopyWMB(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize], + OSCachedMemCopy(&pui8KCCB[ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD)], psKCCBCmd, ui32CmdMemCopySize); + RGXFwSharedMemCacheOpExec(&pui8KCCB[ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD)], ui32CmdMemCopySize, PVRSRV_CACHE_OP_FLUSH); /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */ if (pui32CmdKCCBSlot) 
@@ -3852,6 +3535,8 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, * doesn't get to see stale/false value in allotted slot */ OSWriteDeviceMem32WithWMB(&psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset], RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE); + RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset], + FLUSH); #if defined(PDUMP) PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "Reset kCCB slot number %u", ui32OldWriteOffset); @@ -3864,10 +3549,16 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType)); } + /* Memory barrier before KCCB write offset update. */ + OSWriteMemoryBarrier(NULL); + + /* Move past the current command */ + psKCCBCtlLocal->ui32WriteOffset = ui32NewWriteOffset; psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset; - + /* Read-back of memory before Kick MTS */ OSWriteMemoryBarrier(&psKCCBCtl->ui32WriteOffset); + RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32WriteOffset, FLUSH); #if defined(PDUMP) if (bContCaptureOn) @@ -3880,7 +3571,7 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, uiPDumpFlags, "Dump kCCB cmd woff = %d", ui32OldWriteOffset); DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc, - ui32OldWriteOffset * psKCCBCtl->ui32CmdSize, + ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD), ui32CmdMemCopySize, uiPDumpFlags); @@ -3926,7 +3617,7 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, #if defined(NO_HARDWARE) /* keep the roff updated because fw isn't there to update it */ - psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset; + psKCCBCtl->ui32ReadOffset = psKCCBCtlLocal->ui32WriteOffset; #endif _RGXSendCommandRaw_Exit: @@ -4034,7 +3725,7 @@ PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_ dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { if (dllist_is_empty(&sCommandList)) { @@ -4069,15 +3760,14 @@ PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_ /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the * outer loop times-out, we'll still want to return KCCB_FULL to caller */ - eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, - psDevInfo->psKernelCCBCtl); + eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo); if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) { eError = PVRSRV_ERROR_KERNEL_CCB_FULL; goto cleanup_; } } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); cleanup_: /* if the local list is not empty put it back to the deferred list head @@ -4208,41 +3898,6 @@ void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) OSScheduleMISR(psDevInfo->hProcessQueuesMISR); } -#if defined(SUPPORT_VALIDATION) -PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 ui64RegVal, - IMG_UINT64 ui64Size, - IMG_UINT32 ui32Offset, - IMG_BOOL bWriteOp) -{ - RGXFWIF_KCCB_CMD sRgxRegsCmd = {0}; - IMG_UINT32 ui32kCCBCommandSlot; - PVRSRV_ERROR eError; - - sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG; - sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal; - sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size; - sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset; - sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp; - - 
eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, - RGXFWIF_DM_GP, - &sRgxRegsCmd, - PDUMP_FLAGS_CONTINUOUS, - &ui32kCCBCommandSlot); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); - - if (bWriteOp) - { - eError = RGXWaitForKCCBSlotUpdate(psDevInfo, - ui32kCCBCommandSlot, - PDUMP_FLAGS_CONTINUOUS); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); - } - - return eError; -} -#endif /*! ******************************************************************************* @@ -4257,7 +3912,7 @@ static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) PVRSRV_DEVICE_NODE *psDeviceNode = pvData; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; - PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_OFF; eError = PVRSRVPowerLock(psDeviceNode); if (eError != PVRSRV_OK) @@ -4270,40 +3925,55 @@ static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) /* Check whether it's worth waking up the GPU */ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); - if (!PVRSRV_VZ_MODE_IS(GUEST) && - (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - /* For now, guest drivers will always wake-up the GPU */ - RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; - IMG_BOOL bGPUHasWorkWaiting; - - bGPUHasWorkWaiting = - (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); - - if (!bGPUHasWorkWaiting) + /* Guests are not permitted to change the device power state */ + if ((eError != PVRSRV_OK) || (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) { - /* all queues are empty, don't wake up the GPU */ PVRSRVPowerUnlock(psDeviceNode); return; } } + else + { + if ((eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + { + RGXFWIF_GPU_UTIL_FW *psUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + IMG_BOOL bGPUHasWorkWaiting; - PDUMPPOWCMDSTART(psDeviceNode); - /* wake up the GPU */ - eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, - PVRSRV_DEV_POWER_STATE_ON, - PVRSRV_POWER_FLAGS_NONE); - PDUMPPOWCMDEND(psDeviceNode); + /* Check whether it's worth waking up the GPU */ + RGXFwSharedMemCacheOpValue(psUtilFW->ui64GpuLastWord, INVALIDATE); + bGPUHasWorkWaiting = + (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFW->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", - __func__, PVRSRVGetErrorString(eError))); + if (!bGPUHasWorkWaiting) + { + /* all queues are empty, don't wake up the GPU */ + PVRSRVPowerUnlock(psDeviceNode); + return; + } + } - PVRSRVPowerUnlock(psDeviceNode); - return; + PDUMPPOWCMDSTART(psDeviceNode); + /* wake up the GPU */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); + PDUMPPOWCMDEND(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition device to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + return; + } } + /* Memory barrier before Kick MTS */ + OSWriteMemoryBarrier(NULL); + /* uncounted kick to the FW */ HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); @@ -4319,120 +3989,64 @@ PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE "RGX_ScheduleProcessQueues"); } 
-PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, +PVRSRV_ERROR _RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_DM eKCCBType, RGXFWIF_KCCB_CMD *psKCCBCmd, IMG_UINT32 ui32PDumpFlags, - IMG_UINT32 *pui32CmdKCCBSlot) + IMG_UINT32 *pui32CmdKCCBSlot, + IMG_BOOL bCallerHasPwrLock) { PVRSRV_ERROR eError; IMG_UINT32 uiMMUSyncUpdate; - /* Don't send the command/power up request if the device is de-initialising. - * The de-init thread could destroy the device whilst the power up - * sequence below is accessing the HW registers. - */ + /* Don't send the command/power up request if device not available. */ if (unlikely((psDevInfo == NULL) || (psDevInfo->psDeviceNode == NULL) || - (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR))) { return PVRSRV_ERROR_INVALID_DEVICE; } -#if defined(SUPPORT_VALIDATION) - /* For validation, force the core to different dust count states with each kick */ - if ((eKCCBType == RGXFWIF_DM_GEOM) || (eKCCBType == RGXFWIF_DM_CDM)) + /* Don't send the command/power up request if device in deinit phase. + * The de-init thread could destroy the device whilst the power up + * sequence below is accessing the HW registers. + * Not yet safe to free resources. Caller should retry later. + */ + if (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) { - if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN) - { - IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount); - PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32NumDusts); - } + return PVRSRV_ERROR_RETRY; } - if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE) + + + if (!bCallerHasPwrLock) { - if (psDevInfo->ui32ECCRAMErrInjInterval > 0U) - { - --psDevInfo->ui32ECCRAMErrInjInterval; - } - else + /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful + in a scenario with several applications allocating resources. 
*/ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) { - IMG_UINT64 ui64ECCRegVal = 0U; + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); - psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL; - - if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_SLC) - { - PVR_LOG(("ECC RAM Error Inject SLC")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN; - } - else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_USC) - { - PVR_LOG(("ECC RAM Error Inject USC")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN; - } - else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_TPU) - { -#if defined(RGX_FEATURE_MAX_TPU_PER_SPU) - PVR_LOG(("ECC RAM Error Inject Swift TPU")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SWIFT_EN; -#else - PVR_LOG(("ECC RAM Error Inject TPU MCU L0")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN; -#endif - } - else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_RASCAL) - { -#if defined(RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN) - PVR_LOG(("ECC RAM Error Inject RASCAL")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN; -#else - PVR_LOG(("ECC RAM Error Inject USC")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN; -#endif - } - else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_MARS) - { - PVR_LOG(("ECC RAM Error Inject MARS")); - ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_MARS_EN; - } - else + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) { + eError = PVRSRV_ERROR_RETRY; } - OSWriteMemoryBarrier(NULL); - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal); - PDUMPCOMMENT(psDevInfo->psDeviceNode, "Write reg ECC_RAM_ERR_INJ"); - PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal, PDUMP_FLAGS_CONTINUOUS); - OSWriteMemoryBarrier(NULL); + goto RGXScheduleCommand_exit; } } -#endif - - /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful - in a scenario with several applications allocating resources. */ - eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); - if (unlikely(eError != PVRSRV_OK)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", - __func__, PVRSRVGetErrorString(eError))); - - /* If system is found powered OFF, Retry scheduling the command */ - if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) - { - eError = PVRSRV_ERROR_RETRY; - } - - goto RGXScheduleCommand_exit; - } - if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) + if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING)) { /* If we have the power lock the device is valid but the deinit * thread could be waiting for the lock. 
*/ - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - return PVRSRV_ERROR_INVALID_DEVICE; + eError = PVRSRV_ERROR_RETRY; + goto _PVRSRVInvalidDeviceError_Exit; } /* Ensure device is powered up before sending any commands */ @@ -4455,8 +4069,11 @@ PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; _PVRSRVSetDevicePowerStateKM_Exit: - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - +_PVRSRVInvalidDeviceError_Exit: + if (!bCallerHasPwrLock) + { + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + } RGXScheduleCommand_exit: return eError; } @@ -4467,410 +4084,367 @@ PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) { RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; + PVRSRV_ERROR eError; -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) || +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo) || (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && - KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)), + (KM_OS_CONNECTION_IS(ACTIVE, psDevInfo) || KM_OS_CONNECTION_IS(READY, psDevInfo))), "FW-KM connection is down"); #endif - while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + eError = RGXUpdateLocalFWCCBWoff(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXUpdateLocalFWCCBWoff"); + return; + } + + while (psFWCCBCtlLocal->ui32ReadOffset != psFWCCBCtlLocal->ui32WriteOffset) { /* Point to the next command */ - const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset; + const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtlLocal->ui32ReadOffset; + RGXFwSharedMemCacheOpPtr(psFwCCBCmd, INVALIDATE); + HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); switch (psFwCCBCmd->eCmdType) { - case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: - { - if (psDevInfo->bPDPEnabled) - { - PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_BACKING, - "Request to add backing to ZSBuffer"); - } - RGXProcessRequestZSBufferBacking(psDevInfo, - psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); - break; - } - - case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: - { - if (psDevInfo->bPDPEnabled) + case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: { - PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_UNBACKING, - "Request to remove backing from ZSBuffer"); + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_BACKING, "Request to add backing to ZSBuffer"); + } + RGXProcessRequestZSBufferBacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; } - RGXProcessRequestZSBufferUnbacking(psDevInfo, - psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); - break; - } - case RGXFWIF_FWCCB_CMD_FREELIST_GROW: - { - if (psDevInfo->bPDPEnabled) + case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: { - PDUMP_PANIC(psDevInfo->psDeviceNode, FREELIST_GROW, - "Request to grow the free list"); + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer"); + } + RGXProcessRequestZSBufferUnbacking(psDevInfo, + 
psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; } - RGXProcessRequestGrow(psDevInfo, - psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); - break; - } - case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: - { - if (psDevInfo->bPDPEnabled) + case RGXFWIF_FWCCB_CMD_FREELIST_GROW: { - PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION, - "Request to reconstruct free lists"); + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(psDevInfo->psDeviceNode, FREELIST_GROW, "Request to grow the free list"); + } + RGXProcessRequestGrow(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); + break; } - if (PVRSRV_VZ_MODE_IS(GUEST)) - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists", - __func__, - psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, - psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); - } - else + case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: { - PVR_ASSERT(psDevInfo->psRGXFWIfHWRInfoBufCtl); - PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", - __func__, - psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, - psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, - psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); - } + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists"); + } - RGXProcessRequestFreelistsReconstruction(psDevInfo, - psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, - psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); - break; - } + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } - case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION: - { - /* Notify client drivers */ - /* Client notification of device error will be achieved by - * clients calling UM function RGXGetLastDeviceError() */ - psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; + RGXProcessRequestFreelistsReconstruction(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); + break; + } - /* Notify system layer */ + case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION: { - PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; - const RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault = - &psFwCCBCmd->uCmdData.sCmdFWPagefault; + /* Notify client drivers */ + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; - if (psDevConfig->pfnSysDevErrorNotify) + /* Notify system layer */ { - PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; 
+ const RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault = + &psFwCCBCmd->uCmdData.sCmdFWPagefault; + + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; - sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; - sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr; + sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; + sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, - &sErrorData); + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, + &sErrorData); + } } + break; } - break; - } - - case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: - { - DLLIST_NODE *psNode, *psNext; - const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = - &psFwCCBCmd->uCmdData.sCmdContextResetNotification; - RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; - IMG_UINT32 ui32ErrorPid = 0; - - OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); - dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: { - RGX_SERVER_COMMON_CONTEXT *psThisContext = - IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = + &psFwCCBCmd->uCmdData.sCmdContextResetNotification; + IMG_UINT32 ui32ErrorPid = 0; - /* If the notification applies to all contexts update reset info - * for all contexts, otherwise only do so for the appropriate ID. - */ - if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) + FWCommonContextListSetLastResetReason(psDevInfo, + &ui32ErrorPid, + psCmdContextResetNotification); + + /* Increment error counter (if appropriate) */ + if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM) { - /* Notification applies to all contexts */ - psThisContext->eLastResetReason = psCmdContextResetNotification->eResetReason; - psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; + /* Avoid wrapping the error count (which would then + * make it appear we had far fewer errors), by limiting + * it to IMG_UINT32_MAX. + */ + if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX) + { + psDevInfo->sErrorCounts.ui32WGPErrorCount++; + } } - else + else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM) { - /* Notification applies to one context only */ - if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID) + /* Avoid wrapping the error count (which would then + * make it appear we had far fewer errors), by limiting + * it to IMG_UINT32_MAX. 
+ */ + if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX) { - psServerCommonContext = psThisContext; - psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; - psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; - ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext); - break; + psDevInfo->sErrorCounts.ui32TRPErrorCount++; } } - } - if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)", - __func__, - (IMG_UINT32)(psCmdContextResetNotification->eResetReason), - psCmdContextResetNotification->ui32ResetJobRef)); - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", - __func__, - psServerCommonContext, - psCmdContextResetNotification->ui32ServerCommonContextID, - (IMG_UINT32)(psCmdContextResetNotification->eResetReason), - psCmdContextResetNotification->ui32ResetJobRef)); - } + /* Notify system layer */ + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; - /* Increment error counter (if appropriate) */ - if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM) - { - /* Avoid wrapping the error count (which would then - * make it appear we had far fewer errors), by limiting - * it to IMG_UINT32_MAX. - */ - if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX) + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + + sErrorData.eResetReason = psCmdContextResetNotification->eResetReason; + sErrorData.pid = ui32ErrorPid; + + /* Populate error data according to reset reason */ + switch (psCmdContextResetNotification->eResetReason) + { + case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: + case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: + { + sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef; + sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM; + break; + } + default: + { + break; + } + } + + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, + &sErrorData); + } + } + + /* Notify if a page fault */ + if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) { - psDevInfo->sErrorCounts.ui32WGPErrorCount++; + DevmemIntPFNotify(psDevInfo->psDeviceNode, + psCmdContextResetNotification->ui64PCAddress, + psCmdContextResetNotification->sFaultAddress); } + break; } - else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM) + + case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: { - /* Avoid wrapping the error count (which would then - * make it appear we had far fewer errors), by limiting - * it to IMG_UINT32_MAX. 
- */ - if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX) + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + if (eError != PVRSRV_OK) { - psDevInfo->sErrorCounts.ui32TRPErrorCount++; + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); + PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); } + break; } - OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); - /* Notify system layer */ + case RGXFWIF_FWCCB_CMD_UPDATE_STATS: { - PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; + IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; - if (psDevConfig->pfnSysDevErrorNotify) + switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) { - PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; - - sErrorData.eResetReason = psCmdContextResetNotification->eResetReason; - sErrorData.pid = ui32ErrorPid; - - /* Populate error data according to reset reason */ - switch (psCmdContextResetNotification->eResetReason) + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: { - case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: - case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: - { - sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef; - sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM; - break; - } - default: - { - break; - } + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,0,pidTmp); + break; } - - psDevConfig->pfnSysDevErrorNotify(psDevConfig, - &sErrorData); + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,0,pidTmp); + break; + } +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_RAY_STORES: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,0,i32AdjustmentValue,pidTmp); + break; + } +#endif } +#endif + break; } - - /* Notify if a page fault */ - if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) +#if defined(SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY) + case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: { - DevmemIntPFNotify(psDevInfo->psDeviceNode, - psCmdContextResetNotification->ui64PCAddress, - psCmdContextResetNotification->sFaultAddress); + RGX_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, + psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); + 
break; } - break; - } - - case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: - { - PVRSRV_ERROR eError; - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); - eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); - if (eError != PVRSRV_OK) +#endif + case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); - PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); - } - break; - } - - case RGXFWIF_FWCCB_CMD_UPDATE_STATS: - { -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; - IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; - - switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) - { - case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: - { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,pidTmp); - break; - } - case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: - { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,pidTmp); - break; - } - case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: - { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,pidTmp); - break; - } - case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: - { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,pidTmp); - break; - } - case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: - { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,pidTmp); - break; - } - case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: - { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,pidTmp); - break; - } - } -#endif - break; - } - case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: - { -#if defined(SUPPORT_PDVFS) - PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, - psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); -#endif - break; - } - - case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: - { - if (psDevInfo->psRGXFWIfFwSysData != NULL && - psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) - { - PVRSRV_ERROR eError; - - /* Power down... */ - eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, - PVRSRV_SYS_POWER_STATE_OFF, PVRSRV_POWER_FLAGS_NONE); - if (eError == PVRSRV_OK) + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); + if (psDevInfo->psRGXFWIfFwSysData != NULL && + psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) { - /* Clear the FW faulted flags... */ - psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); + PVRSRV_ERROR eError; - /* Power back up again... */ + /* Power down... */ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, - PVRSRV_SYS_POWER_STATE_ON, PVRSRV_POWER_FLAGS_NONE); - - /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */ + PVRSRV_SYS_POWER_STATE_OFF, + PVRSRV_POWER_FLAGS_NONE); if (eError == PVRSRV_OK) { - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + /* Clear the FW faulted flags... 
*/ + psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags, + FLUSH); + + /* Power back up again... */ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); + + /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */ + if (eError == PVRSRV_OK) { - eError = RGXFWHealthCheckCmd(psDevInfo); - if (eError != PVRSRV_ERROR_RETRY) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + eError = RGXFWHealthCheckCmd(psDevInfo); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + } } - } - /* Notify client drivers and system layer of FW fault */ - { - PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + /* Notify client drivers and system layer of FW fault */ + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; - /* Client notification of device error will be achieved by - * clients calling UM function RGXGetLastDeviceError() */ - psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; - /* Notify system layer */ - if (psDevConfig->pfnSysDevErrorNotify) - { - PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + /* Notify system layer */ + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; - sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, - &sErrorData); + sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, + &sErrorData); + } } - } - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", - __func__, PVRSRVGetErrorString(eError))); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", + __func__, PVRSRVGetErrorString(eError))); + } } + break; } - break; - } -#if defined(SUPPORT_VALIDATION) - case RGXFWIF_FWCCB_CMD_REG_READ: - { - psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue; - complete(&psDevInfo->sFwRegs.sRegComp); - break; - } -#if defined(SUPPORT_SOC_TIMER) - case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS: - { - if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) + default: { - PVRSRV_ERROR eSOCtimerErr = RGXValidateSOCUSCTimer(psDevInfo, - PDUMP_NONE, - psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray, - psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary, - psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers); - if (PVRSRV_OK == eSOCtimerErr) - { - PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time")); - } - else - { - PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time")); - } + /* unknown command */ + PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", + __func__, psFwCCBCmd->eCmdType)); + /* Assert on magic value corruption */ + 
PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); } - break; - } -#endif -#endif - default: - { - /* unknown command */ - PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", - __func__, psFwCCBCmd->eCmdType)); - /* Assert on magic value corruption */ - PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); - } } /* Update read offset */ - psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + psFWCCBCtlLocal->ui32ReadOffset = (psFWCCBCtlLocal->ui32ReadOffset + 1) & psFWCCBCtlLocal->ui32WrapMask; + OSMemoryBarrier(NULL); + psFWCCBCtl->ui32ReadOffset = psFWCCBCtlLocal->ui32ReadOffset; + OSWriteMemoryBarrier(NULL); + + if (psFWCCBCtlLocal->ui32ReadOffset == psFWCCBCtlLocal->ui32WriteOffset) + { + eError = RGXUpdateLocalFWCCBWoff(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXUpdateLocalFWCCBWoff"); + return; + } + } } } @@ -4890,12 +4464,13 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware render context state (%u)", - __func__, eError)); + "%s: Failed to map firmware render context state (%u)", + __func__, eError)); return eError; } OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize); + RGXFwSharedMemCacheOpPtr(psRFReg, FLUSH); /* Release the CPU mapping */ DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc); @@ -4927,7 +4502,7 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, Allocate device memory for the firmware GPU framework state. Sufficient info to kick one or more DMs should be contained in this buffer */ - PDUMPCOMMENT(psDeviceNode, "Allocate Rogue firmware framework state"); + PDUMPCOMMENT(psDeviceNode, "Allocate firmware framework state"); eError = DevmemFwAllocate(psDevInfo, ui32FrameworkCommandSize, @@ -4938,8 +4513,8 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to allocate firmware framework state (%u)", - __func__, eError)); + "%s: Failed to allocate firmware framework state (%u)", + __func__, eError)); return eError; } @@ -4954,11 +4529,14 @@ PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, PVRSRV_ERROR eError = PVRSRV_OK; IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; - const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; + + eError = RGXUpdateLocalKCCBRoff(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); - ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 + - psKCCBCtl->ui32WriteOffset - - psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask; + ui32CurrentQueueLength = (psKCCBCtlLocal->ui32WrapMask+1 + + psKCCBCtlLocal->ui32WriteOffset - + psKCCBCtlLocal->ui32ReadOffset) & psKCCBCtlLocal->ui32WrapMask; ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; for (ui32MaxRetries = ui32CurrentQueueLength + 1; @@ -4971,7 +4549,7 @@ PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, * does not generate an error message. In this case, the PollForValueKM is expected to * timeout as there is work ongoing on the GPU which may take longer than the timeout period. 
*/ - eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE); + eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE, NULL); if (eError != PVRSRV_ERROR_TIMEOUT) { break; @@ -4983,17 +4561,17 @@ PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", - __func__, PVRSRVGetErrorString(eError), - pui32LinMemAddr, ui32Value)); + __func__, PVRSRVGetErrorString(eError), + pui32LinMemAddr, ui32Value)); } return eError; } PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Config, - IMG_UINT32 *pui32ConfigState, - IMG_BOOL bSetNotClear) + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32ConfigState, + IMG_BOOL bSetNotClear) { PVRSRV_ERROR eError; PVRSRV_DEV_POWER_STATE ePowerState; @@ -5003,13 +4581,15 @@ PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32kCCBCommandSlot; IMG_BOOL bWaitForFwUpdate = IMG_FALSE; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); - if (!psDevInfo) { return PVRSRV_ERROR_INVALID_PARAMS; } psDeviceNode = psDevInfo->psDeviceNode; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); psFwSysData = psDevInfo->psRGXFWIfFwSysData; if (NULL == psFwSysData) @@ -5032,6 +4612,7 @@ PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, psFwSysData->ui32ConfigFlags &= ~ui32Config; } OSWriteMemoryBarrier(&psFwSysData->ui32ConfigFlags); + RGXFwSharedMemCacheOpValue(psFwSysData->ui32ConfigFlags, FLUSH); /* return current/new value to caller */ if (pui32ConfigState) @@ -5085,6 +4666,14 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_ERROR eError; IMG_UINT32 ui32kCCBCommandSlot; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus); + + PVR_LOG_RETURN_IF_FALSE((eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_DEAD) && + (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_FAULT), + "Cleanup aborted: Device in bad state", PVRSRV_OK); +#endif + /* Clean-up commands sent during frame capture intervals must be dumped even when not in capture range... */ ui32PDumpFlags |= PDUMP_FLAGS_INTERVAL; @@ -5104,8 +4693,7 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError != PVRSRV_OK) { /* If caller may retry, fail with no error message */ - if ((eError != PVRSRV_ERROR_RETRY) && - (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + if (!PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR ,"RGXScheduleCommandAndGetKCCBSlot() failed (%s) in %s()", PVRSRVGETERRORSTRING(eError), __func__)); @@ -5153,10 +4741,10 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, * the issue doesn't go unnoticed. 
*/ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps", - eDM, - psKCCBCmd->uCmdData.sCleanupData.eCleanupType, - psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr); + "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps", + eDM, + psKCCBCmd->uCmdData.sCleanupData.eCleanupType, + psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr); eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, ui32kCCBCommandSlot * sizeof(IMG_UINT32), 0, @@ -5170,6 +4758,9 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, If the command has was run but a resource was busy, then the request will need to be retried. */ + RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot], + INVALIDATE); + if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) { if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) @@ -5194,9 +4785,9 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, RGXRequestCommonContextCleanUp */ PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - RGXFWIF_DM eDM, - IMG_UINT32 ui32PDumpFlags) + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags) { RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0}; PVRSRV_ERROR eError; @@ -5205,12 +4796,14 @@ PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, /* Force retry if this context's CCB is currently being dumped * as part of the stalled CCB debug */ - if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB) + if (psDevInfo->pvEarliestStalledClientCCB == (void*)FWCommonContextGetClientCCB(psServerCommonContext)) { PVR_DPF((PVR_DBG_WARNING, - "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>", + "%s: [%u/%d]: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psCtxClientCCB <%p>", __func__, - (void*)psServerCommonContext->psClientCCB)); + psDeviceNode->sDevId.ui32InternalID, + psDeviceNode->sDevId.i32KernelDeviceID, + psDevInfo->pvEarliestStalledClientCCB)); return PVRSRV_ERROR_RETRY; } @@ -5233,7 +4826,7 @@ PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_CLEANUP_FWCOMMONCONTEXT, ui32PDumpFlags); - if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + if ((eError != PVRSRV_OK) && !PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a memory context cleanup with error (%u)", @@ -5266,8 +4859,7 @@ PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError != PVRSRV_OK) { /* If caller may retry, fail with no error message */ - if ((eError != PVRSRV_ERROR_RETRY) && - (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + if (!PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a HWRTData cleanup with error (%u)", @@ -5302,8 +4894,7 @@ PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError != PVRSRV_OK) { /* If caller may retry, fail with no error message */ - if ((eError != PVRSRV_ERROR_RETRY) && - (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + if (!PVRSRVIsRetryError(eError)) { 
PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a memory context cleanup with error (%u)", @@ -5335,7 +4926,7 @@ PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_CLEANUP_ZSBUFFER, PDUMP_FLAGS_NONE); - if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + if ((eError != PVRSRV_OK) && !PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a memory context cleanup with error (%u)", @@ -5346,12 +4937,13 @@ PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, } PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32HCSDeadlineMs) + IMG_UINT32 ui32HCSDeadlineMs) { - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs; OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS, FLUSH); #if defined(PDUMP) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -5365,22 +4957,72 @@ PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, return PVRSRV_OK; } -PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo) +PVRSRV_ERROR RGXFWHealthCheckCmdInt(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bCallerHasPwrLock) { RGXFWIF_KCCB_CMD sCmpKCCBCmd = { 0 }; sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; - return RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GP, - &sCmpKCCBCmd, - PDUMP_FLAGS_CONTINUOUS); + if (bCallerHasPwrLock) + { + return RGXScheduleCommandWithoutPowerLock(psDevInfo, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + PDUMP_FLAGS_CONTINUOUS); + } + else + { + return RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + PDUMP_FLAGS_CONTINUOUS); + } } PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo) { - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + IMG_UINT32 ui32CBaseMapCtxReg; +#endif + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + { + ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; + /* Set the mapping context */ + RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); + (void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */ + + /* + * Catbase-0 (FW MMU context) pointing to unmapped mem to make + * FW crash from its memory context + */ + RGXWriteKernelMMUPC32(&psDevInfo->sLayerParams, + RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1, + RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT, + 0xDEADBEEF); + } + else + { + ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT; + /* Set the mapping context */ + RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); + (void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */ + /* + * Catbase-0 (FW MMU context) pointing to unmapped mem to make + * FW crash from its memory context + */ + RGXWriteKernelMMUPC32(&psDevInfo->sLayerParams, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + 0xDEADBEEF); + } +#else /* * Catbase-0 (FW 
MMU context) pointing to unmapped mem to make * FW crash from its memory context @@ -5389,22 +5031,28 @@ PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo) FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, - ((0x0 + ((0xDEADBEEF >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); +#endif + return PVRSRV_OK; } -PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSid, +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32DriverID, RGXFWIF_OS_STATE_CHANGE eOSOnlineState) { - PVRSRV_ERROR eError = PVRSRV_OK; - RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; - RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; + const RGXFWIF_SYSDATA *psFwSysData; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; - sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32DriverID = ui32DriverID; sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; #if defined(SUPPORT_AUTOVZ) @@ -5414,7 +5062,7 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSi PVR_UNREFERENCED_PARAMETER(psFwSysData); sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { /* Send the offline command regardless if power lock is held or not. * Under AutoVz this is done during regular driver deinit, store-to-ram suspend @@ -5433,61 +5081,52 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSi } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); /* Guests and Host going offline should wait for confirmation * from the Firmware of the state change. If this fails, break * the connection on the OS Driver's end as backup. */ - if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo) || (ui32DriverID == RGXFW_HOST_DRIVER_ID)) { - LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2) + LOOP_UNTIL_TIMEOUT_US(SECONDS_TO_MICROSECONDS/2) { + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); if (KM_FW_CONNECTION_IS(READY, psDevInfo)) { bConnectionDown = IMG_TRUE; break; } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (!bConnectionDown) { KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } } } #else - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { /* no reason for Guests to update their state or any other VM's. * This is the Hypervisor and Host driver's responsibility. 
*/ return PVRSRV_OK; } - else if (eOSOnlineState == RGXFWIF_OS_ONLINE) + else { - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GP, - &sOSOnlineStateCmd, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_ERROR_RETRY) break; + const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags; - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - } - else if (psFwSysData) - { - const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags = - (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + PVR_ASSERT(psFwSysData != NULL); + psFwRunFlags = (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID]; /* Attempt several times until the FW manages to offload the OS */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { IMG_UINT32 ui32kCCBCommandSlot; /* Send request */ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, - RGXFWIF_DM_GP, + RGXFWIF_DM_GP, &sOSOnlineStateCmd, PDUMP_FLAGS_CONTINUOUS, &ui32kCCBCommandSlot); @@ -5500,8 +5139,9 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSi /* read the OS state */ OSMemoryBarrier(NULL); - /* check if FW finished offloading the OSID and is stopped */ - if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) + /* check if FW finished offloading the driver and is stopped */ + if ((eOSOnlineState == RGXFWIF_OS_ONLINE && (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_READY || psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_ACTIVE)) || + (eOSOnlineState == RGXFWIF_OS_OFFLINE && psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE)) { eError = PVRSRV_OK; break; @@ -5512,11 +5152,7 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSi } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - } - else - { - eError = PVRSRV_ERROR_NOT_INITIALISED; + } END_LOOP_UNTIL_TIMEOUT_US(); } return_ : @@ -5524,166 +5160,22 @@ return_ : return eError; } -PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, - IMG_UINT32 ui32Priority) -{ - PVRSRV_ERROR eError; - RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 }; - - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); - - sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE; - psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority; - OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]); - -#if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid); - DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, - offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)), - ui32Priority , - PDUMP_FLAGS_CONTINUOUS); -#endif - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GP, - &sOSidPriorityCmd, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - return eError; -} - -PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, - CONNECTION_DATA *psConnection, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_INT32 i32Priority, - RGXFWIF_DM eDM) -{ - IMG_UINT32 ui32CmdSize; - IMG_UINT8 *pui8CmdPtr; - RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; - RGXFWIF_CCB_CMD_HEADER *psCmdHeader; - RGXFWIF_CMD_PRIORITY *psCmd; - PVRSRV_ERROR eError; - 
RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); - - eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor); - PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); - - /* - Get space for command - */ - ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); - - eError = RGXAcquireCCB(psClientCCB, - ui32CmdSize, - (void **) &pui8CmdPtr, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - if (eError != PVRSRV_ERROR_RETRY) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); - } - goto fail_ccbacquire; - } - - /* - Write the command header and command - */ - psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr; - psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; - psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); - pui8CmdPtr += sizeof(*psCmdHeader); - - psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr; - psCmd->i32Priority = i32Priority; - pui8CmdPtr += sizeof(*psCmd); - - /* - We should reserve space in the kernel CCB here and fill in the command - directly. - This is so if there isn't space in the kernel CCB we can return with - retry back to services client before we take any operations - */ - - /* - Submit the command - */ - RGXReleaseCCB(psClientCCB, - ui32CmdSize, - PDUMP_FLAGS_CONTINUOUS); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); - return eError; - } - - /* Construct the priority command. */ - sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; - sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); - sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); - sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); - sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; - -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; -#endif - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - eDM, - &sPriorityCmd, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to submit set priority command with error (%u)", - __func__, - eError)); - goto fail_cmdacquire; - } - - psContext->i32Priority = i32Priority; - - return PVRSRV_OK; - -fail_ccbacquire: -fail_checkpriority: -fail_cmdacquire: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; -} - PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PHRMode) { PVRSRV_ERROR eError; RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); + + PVR_LOG_RETURN_IF_FALSE((ui32PHRMode == RGXFWIF_PHR_MODE_OFF) || + (ui32PHRMode == RGXFWIF_PHR_MODE_RD_RESET), + "Invalid PHR Mode.", PVRSRV_ERROR_INVALID_PARAMS); sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode; OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, FLUSH); #if defined(PDUMP) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -5694,7 +5186,7 @@ PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMP_FLAGS_CONTINUOUS); 
#endif - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, RGXFWIF_DM_GP, @@ -5705,7 +5197,7 @@ PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return eError; } @@ -5716,11 +5208,12 @@ PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_ERROR eError; RGXFWIF_KCCB_CMD sCfgWdgCmd = { 0 }; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG; psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs; OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs, FLUSH); #if defined(PDUMP) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -5731,7 +5224,7 @@ PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMP_FLAGS_CONTINUOUS); #endif - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, RGXFWIF_DM_GP, @@ -5742,13 +5235,12 @@ PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return eError; } - void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious) { /* Attempt to detect and deal with any stalled client contexts. @@ -5769,15 +5261,12 @@ void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bI ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo); + ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo); +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo); - - if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) - { - ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo); - } - +#endif /* If at least one DM stalled bit is different than before */ - if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))//(psDevInfo->ui32StalledClientMask ^ ui32StalledClientMask)) + if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask)) { if (ui32StalledClientMask > 0) { @@ -5788,14 +5277,12 @@ void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bI "warn"; #endif /* Print all the stalled DMs */ - PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s%s%s", + PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s", RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP), RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D), RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA), RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D), RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM), - RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU), - RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG), RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D), RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D))); @@ -5832,21 +5319,35 @@ PVRSRV_ERROR 
RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, const RGXFWIF_SYSDATA* psFwSysData; const RGXFWIF_OSDATA* psFwOsData; const RGXFWIF_CCB_CTL* psKCCBCtl; + RGXFWIF_CCB_CTL* psKCCBCtlLocal; IMG_UINT32 ui32ThreadCount; IMG_BOOL bKCCBCmdsWaiting; + PVRSRV_ERROR eError; PVR_ASSERT(psDevNode != NULL); psDevInfo = psDevNode->pvDevice; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevNode, PVRSRV_OK); + /* If the firmware is not yet initialised or has already deinitialised, stop here */ if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || - psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT) + psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) { return PVRSRV_OK; } psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) + { + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, + INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + } + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, + INVALIDATE); psFwOsData = psDevInfo->psRGXFWIfFwOsData; /* If this is a quick update, then include the last current value... */ @@ -5862,6 +5363,40 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, psDevInfo->ui32SLRHoldoffCounter--; } + /* Take power lock, retry if it's in use in another task. */ + eError = PVRSRVPowerTryLockWaitForTimeout(psDevNode); + if (eError == PVRSRV_ERROR_TIMEOUT) + { + /* Skip health status update if timeout */ + PVR_DPF((PVR_DBG_WARNING, "%s: Power lock timeout, increase OS_POWERLOCK_TIMEOUT_US.", __func__)); + goto _RGXUpdateHealthStatus_Exit; + } + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerTryLockWaitForTimeout"); + + /* If the firmware is not yet initialised or has already deinitialised, stop here */ + if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || + psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) + { + PVRSRVPowerUnlock(psDevNode); + return PVRSRV_OK; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) + { + /* On a PCI error all reads from the PCI bar may return 0xFFFFFFFF. + This value is not valid for a core ID. 
*/ + if (psFwSysData->ui32MemFaultCheck == RGX_PCI_ERROR_VALUE_DWORD) + { + PVR_DPF((PVR_DBG_WARNING, "%s: PCI error", __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR; + PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_PCI_ERROR); + PVRSRVPowerUnlock(psDevNode); + goto _RGXUpdateHealthStatus_Exit; + } + } + /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */ if (PVRSRVIsDevicePowered(psDevNode)) { @@ -5872,61 +5407,74 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, */ for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++) { - const IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; + const IMG_CHAR* pszTraceAssertInfo; + + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf, INVALIDATE); + pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; /* Check if the FW has hit an assert... */ if (*pszTraceAssertInfo != '\0') { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)", - __func__, ui32ThreadCount, pszTraceAssertInfo, + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %.*s (%.*s:%d)", + __func__, ui32ThreadCount, RGXFW_TRACE_BUFFER_ASSERT_SIZE, + pszTraceAssertInfo, RGXFW_TRACE_BUFFER_ASSERT_SIZE, psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath, psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum)); eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED; + PVRSRVPowerUnlock(psDevNode); goto _RGXUpdateHealthStatus_Exit; } - /* - Check the threads to see if they are in the same poll locations as last time... - */ - if (bCheckAfterTimePassed) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) { - if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && - psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) + /* + Check the threads to see if they are in the same poll locations as last time... 
+ */ + if (bCheckAfterTimePassed) { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", - __func__, ui32ThreadCount, - ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), - psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, - psFwSysData->aui32CrPollMask[ui32ThreadCount])); - eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; - eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; - goto _RGXUpdateHealthStatus_Exit; + if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && + psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", + __func__, ui32ThreadCount, + ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[ui32ThreadCount])); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; + PVRSRVPowerUnlock(psDevNode); + goto _RGXUpdateHealthStatus_Exit; + } + psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; } - psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; } } - /* - Check if the FW has faulted... - */ - if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) { - PVR_DPF((PVR_DBG_WARNING, - "%s: Firmware has faulted and needs to restart", - __func__)); - eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; - if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) - { - eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; - } - else + /* + Check if the FW has faulted... + */ + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) { - eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; + PVR_DPF((PVR_DBG_WARNING, + "%s: Firmware has faulted and needs to restart", + __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; + } + else + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; + } + PVRSRVPowerUnlock(psDevNode); + goto _RGXUpdateHealthStatus_Exit; } - goto _RGXUpdateHealthStatus_Exit; } } @@ -5938,8 +5486,8 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime) { PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)", - __func__, - psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts)); + __func__, + psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts)); eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS; } @@ -5951,20 +5499,27 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, that some have executed since then. 
*/ bKCCBCmdsWaiting = IMG_FALSE; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); psKCCBCtl = psDevInfo->psKernelCCBCtl; + psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; - if (psKCCBCtl != NULL) + if (psKCCBCtl != NULL && psKCCBCtlLocal != NULL) { - if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask || - psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask) + /* update KCCB read offset */ + RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32ReadOffset, INVALIDATE); + psKCCBCtlLocal->ui32ReadOffset = psKCCBCtl->ui32ReadOffset; + + if (psKCCBCtlLocal->ui32ReadOffset > psKCCBCtlLocal->ui32WrapMask || + psKCCBCtlLocal->ui32WriteOffset > psKCCBCtlLocal->ui32WrapMask) { PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)", - __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset)); + __func__, psKCCBCtlLocal->ui32ReadOffset, psKCCBCtlLocal->ui32WriteOffset)); eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; } - if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset) + if (psKCCBCtlLocal->ui32ReadOffset != psKCCBCtlLocal->ui32WriteOffset) { bKCCBCmdsWaiting = IMG_TRUE; } @@ -5991,13 +5546,15 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, If no commands are currently pending and nothing happened since the last poll, then schedule a dummy command to ping the firmware so we know it is alive and processing. */ - if (!bKCCBCmdsWaiting) + if ((!bKCCBCmdsWaiting) && + (eNewStatus != PVRSRV_DEVICE_HEALTH_STATUS_DEAD) && + (eNewStatus != PVRSRV_DEVICE_HEALTH_STATUS_FAULT)) { /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the * PMR lock itself, because some bridge functions will take the PMR lock * before calling RGXScheduleCommand */ - PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); + PVRSRV_ERROR eError = RGXFWHealthCheckCmdWithoutPowerLock(psDevNode->pvDevice); if (eError != PVRSRV_OK) { @@ -6019,7 +5576,7 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, /* Interrupt counts check... */ - if (bCheckAfterTimePassed && psFwOsData != NULL) + if (bCheckAfterTimePassed && psFwOsData != NULL) { IMG_UINT32 ui32LISRCount = 0; IMG_UINT32 ui32FWCount = 0; @@ -6028,8 +5585,8 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, /* Add up the total number of interrupts issued, sampled/received and missed... */ #if defined(RGX_FW_IRQ_OS_COUNTERS) /* Only the Host OS has a sample count, so only one counter to check. */ - ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_OS]; - ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_OS]); + ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_DRIVER_ID]; + ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_DRIVER_ID]); #else IMG_UINT32 ui32Index; @@ -6065,6 +5622,9 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts; } + /* Release power lock before RGXCheckForStalledClientContexts */ + PVRSRVPowerUnlock(psDevNode); + /* Stalled CCB check... 
*/ @@ -6093,8 +5653,8 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus; sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, - &sErrorData); + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, + &sErrorData); } } } @@ -6105,7 +5665,7 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, _RGXUpdateHealthStatus_Exit: OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus); OSAtomicWrite(&psDevNode->eHealthReason, eNewReason); - RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason); + RGXSRV_HWPERF_DEVICE_INFO_HEALTH(psDevInfo, eNewStatus, eNewReason); /* * Attempt to service the HWPerf buffer to regularly transport idle/periodic @@ -6117,8 +5677,8 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_WARNING, "%s: " - "Error occurred when servicing HWPerf buffer (%d)", - __func__, eError)); + "Error occurred when servicing HWPerf buffer (%d)", + __func__, eError)); } } @@ -6131,11 +5691,15 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, #if defined(SUPPORT_AUTOVZ) void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo) { - if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && + (KM_OS_CONNECTION_IS(ACTIVE, psDevInfo) || KM_OS_CONNECTION_IS(READY, psDevInfo)))) { /* read and write back the alive token value to confirm to the * virtualisation watchdog that this connection is healthy */ KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); + KM_ALIVE_TOKEN_CACHEOP(Os, FLUSH); } } @@ -6149,7 +5713,8 @@ void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; if (unlikely((psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered || - psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING))) { /* If the firmware is not initialised, stop here */ return; @@ -6164,58 +5729,41 @@ void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) } } } -#endif /* SUPPORT_AUTOVZ */ -PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) +PVRSRV_ERROR RGXDisconnectAllGuests(PVRSRV_DEVICE_NODE *psDeviceNode) { - if (psCurrentServerCommonContext == NULL) - { - /* the context has already been freed so there is nothing to do here */ - return PVRSRV_OK; - } + IMG_UINT32 ui32DriverID; - return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, - psCurrentServerCommonContext->psClientCCB, - eKickTypeDM); -} + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror, + INVALIDATE); -void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - IMG_UINT32 ui32VerbLevel) -{ - if (psCurrentServerCommonContext == NULL) + for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START; + ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; + ui32DriverID++) { - /* the context has already been freed 
so there is nothing to do here */ - return; - } + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) + psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState; - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) - { - /* If high verbosity requested, dump whole CCB */ - DumpCCB(psCurrentServerCommonContext->psDevInfo, - psCurrentServerCommonContext->sFWCommonContextFWAddr, - psCurrentServerCommonContext->psClientCCB, - pfnDumpDebugPrintf, - pvDumpDebugFile); - } - else - { - /* Otherwise, only dump first command in the CCB */ - DumpFirstCCBCmd(psCurrentServerCommonContext->sFWCommonContextFWAddr, - psCurrentServerCommonContext->psClientCCB, - pfnDumpDebugPrintf, - pvDumpDebugFile); + if (eGuestState == RGXFW_CONNECTION_FW_ACTIVE || + eGuestState == RGXFW_CONNECTION_FW_READY) + { + PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, ui32DriverID, RGXFWIF_OS_OFFLINE); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); + } } + + return PVRSRV_OK; } +#endif /* SUPPORT_AUTOVZ */ PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, - IMG_UINT32 *pui32NumCleanupCtl, - RGXFWIF_DM eDM, - IMG_BOOL bKick, - RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, - RGX_ZSBUFFER_DATA *psZSBuffer, - RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) + IMG_UINT32 *pui32NumCleanupCtl, + RGXFWIF_DM eDM, + IMG_BOOL bKick, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) { PVRSRV_ERROR eError; PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; @@ -6259,7 +5807,7 @@ PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, if (psBuffer) { (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + - offsetof(RGXFWIF_PRBUFFER, sCleanupState); + offsetof(RGXFWIF_PRBUFFER, sCleanupState); psBuffer = NULL; } } @@ -6309,11 +5857,11 @@ PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) } PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, - IMG_DEV_PHYADDR *psPhyAddr, - IMG_UINT32 ui32LogicalOffset, - IMG_UINT32 ui32Log2PageSize, - IMG_UINT32 ui32NumOfPages, - IMG_BOOL *bValid) + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid) { PVRSRV_ERROR eError; @@ -6329,11 +5877,12 @@ PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, } eError = PMR_DevPhysAddr(psPMR, - ui32Log2PageSize, - ui32NumOfPages, - ui32LogicalOffset, - psPhyAddr, - bValid); + ui32Log2PageSize, + ui32NumOfPages, + ui32LogicalOffset, + psPhyAddr, + bValid, + DEVICE_USE); if (eError != PVRSRV_OK) { @@ -6362,6 +5911,7 @@ PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) { PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_OK); if (psDevInfo->bDumpedKCCBCtlAlready) { @@ -6370,11 +5920,11 @@ PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Wri /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, - "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", - psDevInfo->psKernelCCBCtl, - ui32WriteOffset, - ui32WriteOffset); + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, + "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", + psDevInfo->psKernelCCBCtl, + ui32WriteOffset, + 
ui32WriteOffset); eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), ui32WriteOffset, @@ -6411,16 +5961,13 @@ PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Wri ******************************************************************************/ PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) { -#if !defined(NO_HARDWARE) || defined(PDUMP) -#if !defined(NO_HARDWARE) IMG_UINT32 ui32BuildOptionsMismatch; IMG_UINT32 ui32BuildOptionsFW; -#endif PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -#endif - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + RGX_FW_INFO_HEADER *psFWInfoHeader; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); -#if !defined(NO_HARDWARE) if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) { PVR_DPF((PVR_DBG_ERROR, @@ -6429,42 +5976,26 @@ PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psD return PVRSRV_ERROR_NOT_INITIALISED; } - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) - { - /* No need to wait if the FW has already updated the values */ - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); -#endif + psFWInfoHeader = &psDevInfo->sFWInfoHeader; -#if defined(PDUMP) +#if !defined(NO_HARDWARE) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - PVRSRV_ERROR eError; - - PDUMPCOMMENT(psDeviceNode, "Compatibility check: client and FW build options"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions), - ui32ClientBuildOptions, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - PVR_DPF((PVR_DBG_ERROR, - "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", - __func__, - eError)); - return eError; - } + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, + INVALIDATE); + if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); } #endif -#if !defined(NO_HARDWARE) - ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions; + ui32BuildOptionsFW = psFWInfoHeader->ui32Flags; ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; if (ui32BuildOptionsMismatch != 0) @@ -6472,15 +6003,15 @@ PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psD if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " - "extra options present in client: (0x%x). Please check rgx_options.h", - ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + "extra options present in client: (0x%x). Please check rgx_options.h", + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); } if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " - "extra options present in Firmware: (0x%x). Please check rgx_options.h", - ui32BuildOptionsFW & ui32BuildOptionsMismatch )); + "extra options present in Firmware: (0x%x). 
Please check rgx_options.h", + ui32BuildOptionsFW & ui32BuildOptionsMismatch )); } return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; @@ -6489,7 +6020,6 @@ PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psD { PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); } -#endif return PVRSRV_OK; } @@ -6499,10 +6029,10 @@ PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psD @Function RGXFwRawHeapAllocMap - @Description Register firmware heap for the specified guest OSID + @Description Register firmware heap for the specified driver @Input psDeviceNode - device node - @Input ui32OSID - Guest OSID + @Input ui32DriverID - Guest driver @Input sDevPAddr - Heap address @Input ui64DevPSize - Heap size @@ -6510,7 +6040,7 @@ PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psD ******************************************************************************/ PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID, + IMG_UINT32 ui32DriverID, IMG_DEV_PHYADDR sDevPAddr, IMG_UINT64 ui64DevPSize) { @@ -6518,73 +6048,78 @@ PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH]; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID)); - PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, - PHYS_HEAP_USAGE_FW_MAIN); + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32DriverID)); + PHYS_HEAP_CONFIG *psFwHeapConfig = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_SHARED); PHYS_HEAP_CONFIG sFwHeapConfig; - PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); - if (psFwMainConfig == NULL) + if (psFwHeapConfig == NULL) { PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found.")); return PVRSRV_ERROR_NOT_SUPPORTED; } - OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); if (!ui64DevPSize || !sDevPAddr.uiAddr || - ui32OSID >= RGX_NUM_OS_SUPPORTED || + ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED || ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE) { PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName)); return PVRSRV_ERROR_INVALID_PARAMS; } - sFwHeapConfig = *psFwMainConfig; - sFwHeapConfig.sStartAddr.uiAddr = 0; - sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr; - sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + sFwHeapConfig = *psFwHeapConfig; sFwHeapConfig.eType = PHYS_HEAP_TYPE_LMA; + sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP; + sFwHeapConfig.uConfig.sLMA.sStartAddr.uiAddr = 0; + sFwHeapConfig.uConfig.sLMA.sCardBase.uiAddr = sDevPAddr.uiAddr; + sFwHeapConfig.uConfig.sLMA.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, - szRegionRAName, - &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]); - PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID); + RGXPhysHeapGetLMAPolicy(sFwHeapConfig.ui32UsageFlags, psDeviceNode), + &sFwHeapConfig, + szRegionRAName, + &psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]); + PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", 
ui32DriverID); - eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]); - PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID); + eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]); + PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32DriverID); - psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID]; + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID] = psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]; - PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID); + PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for DriverID: [%d]", ui32DriverID); -#if (RGX_NUM_OS_SUPPORTED > 1) +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) /* don't clear the heap of other guests on allocation */ - uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL); + uiRawFwHeapAllocFlags &= (ui32DriverID > RGXFW_HOST_DRIVER_ID) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL); #endif /* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */ if (psDeviceNode->bAutoVzFwIsUp) { uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); - DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE); } +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + PVR_DPF((PVR_DBG_MESSAGE, "%s: Allocation and mapping for Firmware heaps done by TEE.", __func__)); +#else eError = DevmemFwAllocate(psDevInfo, RGX_FIRMWARE_RAW_HEAP_SIZE, uiRawFwHeapAllocFlags, - psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName, - &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); + psDevInfo->psPremappedFwRawHeap[ui32DriverID]->pszName, + &psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]); PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); +#endif /* Mark this devmem heap as premapped so allocations will not require device mapping. 
*/ - DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE); - if (ui32OSID == RGXFW_HOST_OS) + if (ui32DriverID == RGXFW_HOST_DRIVER_ID) { /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly * No memory allocated from these sub-heaps will be individually mapped into the device's @@ -6601,1082 +6136,119 @@ PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, @Function RGXFwRawHeapUnmapFree - @Description Unregister firmware heap for the specified guest OSID + @Description Unregister firmware heap for the specified guest driver @Input psDeviceNode - device node - @Input ui32OSID - Guest OSID + @Input ui32DriverID ******************************************************************************/ void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID) + IMG_UINT32 ui32DriverID) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; /* remove the premap status, so the heap can be unmapped and freed */ - if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID]) + if (psDevInfo->psPremappedFwRawHeap[ui32DriverID]) { - DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE); + DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_FALSE); } - if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]) + if (psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]) { - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); - psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL; - } -} - -/*! -******************************************************************************* -@Function RGXRiscvHalt - -@Description Halt the RISC-V FW core (required for certain operations - done through Debug Module) - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW"); - - /* Send halt request (no need to select one or more harts on this RISC-V core) */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until hart is halted */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DMSTATUS, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - /* Clear halt request */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Send halt request (no need to select one or more harts on this RISC-V core) */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); - - /* Wait until hart is halted */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32), - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - 
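RGXFwRawHeapAllocMap strips PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC from the allocation flags for every driver ID above the host's, so the host never scrubs a raw heap that belongs to another guest, and afterwards marks the heap as premapped. A minimal sketch of that flag handling, using made-up flag values and a made-up host driver ID rather than the driver's real encodings:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real allocation flags and host driver ID. */
#define FLAG_GPU_READABLE    (1ull << 0)
#define FLAG_GPU_WRITEABLE   (1ull << 1)
#define FLAG_ZERO_ON_ALLOC   (1ull << 31)
#define HOST_DRIVER_ID       0u

static uint64_t raw_fw_heap_flags(uint32_t driver_id)
{
    uint64_t flags = FLAG_GPU_READABLE | FLAG_GPU_WRITEABLE | FLAG_ZERO_ON_ALLOC;

    /* Only the host's own heap is cleared on allocation; heaps of other
     * guests keep whatever the guest already placed there. */
    flags &= (driver_id > HOST_DRIVER_ID) ? ~FLAG_ZERO_ON_ALLOC : ~0ull;

    return flags;
}

int main(void)
{
    printf("host  flags: 0x%llx\n", (unsigned long long)raw_fw_heap_flags(0));
    printf("guest flags: 0x%llx\n", (unsigned long long)raw_fw_heap_flags(1));
    return 0;
}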
PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS))); - return PVRSRV_ERROR_TIMEOUT; - } - - /* Clear halt request */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -#endif - - return PVRSRV_OK; -} - -/*! -******************************************************************************* -@Function RGXRiscvIsHalted - -@Description Check if the RISC-V FW is halted - -@Input psDevInfo Pointer to device info - -@Return IMG_BOOL -******************************************************************************/ -IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if defined(NO_HARDWARE) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - /* Assume the core is always halted in nohw */ - return IMG_TRUE; -#else - - return (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS) & - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U; -#endif -} - -/*! -******************************************************************************* -@Function RGXRiscvResume - -@Description Resume the RISC-V FW core - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW"); - - /* Send resume request (no need to select one or more harts on this RISC-V core) */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until hart is resumed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DMSTATUS, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - /* Clear resume request */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Send resume request (no need to select one or more harts on this RISC-V core) */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); - - /* Wait until hart is resumed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32), - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS))); - return PVRSRV_ERROR_TIMEOUT; - } - - /* Clear resume request */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -#endif - - return PVRSRV_OK; -} - -/*! 
-******************************************************************************* -@Function RGXRiscvCheckAbstractCmdError - -@Description Check for RISC-V abstract command errors and clear them - -@Input psDevInfo Pointer to GPU device info - -@Return RGXRISCVFW_ABSTRACT_CMD_ERR -******************************************************************************/ -static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr; - -#if defined(NO_HARDWARE) && defined(PDUMP) - eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR; - - /* Check error status */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT, - ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - - /* Check error status */ - eCmdErr = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS) - & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK) - >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT; - - if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR) - { - PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr)); - - /* Clear the error (note CMDERR field is write-1-to-clear) */ - OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS, - ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]); + psDevInfo->psPremappedFwRawMemDesc[ui32DriverID] = NULL; } -#endif - - return eCmdErr; -} - -/*! -******************************************************************************* -@Function RGXRiscvReadReg - -@Description Read a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 *pui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32RegAddr); - PVR_UNREFERENCED_PARAMETER(pui32Value); - - /* Reading HW registers is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Send abstract register read command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr); - - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; - } - - if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) - { - /* Read register value */ - *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); - } - else - { - *pui32Value = 0U; - } - - return PVRSRV_OK; -#endif -} - -/*! 
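RGXRiscvReadReg assembles one abstract-command word from the command type, transfer direction, access size and register number, writes it to the COMMAND register, waits for the ABSTRACTCS busy flag to clear, then picks the result up from DATA0. The sketch below shows how such a command word is packed, following roughly the access-register layout of the RISC-V debug specification; treat the exact field positions as illustrative.

#include <stdint.h>
#include <stdio.h>

/* Access-register abstract command fields, roughly as laid out in the
 * RISC-V debug specification (exact positions are illustrative here). */
#define CMD_TYPE_ACCESS_REGISTER  (0u << 24)
#define CMD_AARSIZE_32BIT         (2u << 20)
#define CMD_TRANSFER              (1u << 17)
#define CMD_WRITE                 (1u << 16)
#define CMD_REGNO_MASK            0xFFFFu

static uint32_t make_reg_read_cmd(uint16_t regno)
{
    return CMD_TYPE_ACCESS_REGISTER | CMD_AARSIZE_32BIT | CMD_TRANSFER | regno;
}

static uint32_t make_reg_write_cmd(uint16_t regno)
{
    return make_reg_read_cmd(regno) | CMD_WRITE;
}

int main(void)
{
    uint32_t rd = make_reg_read_cmd(0x1008);   /* e.g. GPR s0 in DM numbering */
    uint32_t wr = make_reg_write_cmd(0x1008);

    printf("read  command: 0x%08x (regno 0x%04x)\n", rd, rd & CMD_REGNO_MASK);
    printf("write command: 0x%08x (write=%u)\n", wr, (wr & CMD_WRITE) ? 1u : 0u);
    return 0;
}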
-******************************************************************************* -@Function RGXRiscvPollReg - -@Description Poll for a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Poll RISC-V register 0x%x (expected 0x%08x)", - ui32RegAddr, ui32Value); - - /* Send abstract register read command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvCheckAbstractCmdError(psDevInfo); - - /* Check read value */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DATA0, - ui32Value, - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32RegAddr); - PVR_UNREFERENCED_PARAMETER(ui32Value); - - /* Polling HW registers is currently not required driverlive */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#endif -} - -/*! 
-******************************************************************************* -@Function RGXRiscvWriteReg - -@Description Write a value to the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Write RISC-V register 0x%x (value 0x%08x)", - ui32RegAddr, ui32Value); - - /* Prepare data to be written to register */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, - ui32Value, PDUMP_FLAGS_CONTINUOUS); - - /* Send abstract register write command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Prepare data to be written to register */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); - - /* Send abstract register write command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr); - - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; - } -#endif - - return PVRSRV_OK; -} - -/*! 
-******************************************************************************* -@Function RGXRiscvCheckSysBusError - -@Description Check for RISC-V system bus errors and clear them - -@Input psDevInfo Pointer to GPU device info - -@Return RGXRISCVFW_SYSBUS_ERR -******************************************************************************/ -static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXRISCVFW_SYSBUS_ERR eSBError; - -#if defined(NO_HARDWARE) && defined(PDUMP) - eSBError = RISCV_SYSBUS_NO_ERROR; - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBCS, - RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT, - ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - - eSBError = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS) - & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK) - >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT; - - if (eSBError != RISCV_SYSBUS_NO_ERROR) - { - PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError)); - - /* Clear the error (note SBERROR field is write-1-to-clear) */ - OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS, - ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK); - } -#endif - - return eSBError; -} - -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) -/*! -******************************************************************************* -@Function RGXRiscvReadAbstractMem - -@Description Read a value at the given address in RISC-V memory space - using RISC-V abstract memory commands - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(pui32Value); - - /* Reading memory is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Prepare read address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); - - /* Send abstract memory read command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); - - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; - } - - if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) - { - /* Read memory value */ - *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); - } - else - { - *pui32Value = 0U; - } - - return PVRSRV_OK; -#endif -} -#endif /* !defined(EMULATOR) */ - -/*! 
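Both RGXRiscvCheckAbstractCmdError and RGXRiscvCheckSysBusError read a multi-bit error field, warn if it is non-zero, and clear it by writing ones back over the same field, since CMDERR and SBERROR are write-1-to-clear. A small sketch of that pattern against a simulated status register; the field position is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative 3-bit error field inside a status register. */
#define ERR_SHIFT  8u
#define ERR_MASK   (0x7u << ERR_SHIFT)

/* Toy device register with write-1-to-clear semantics on the error field. */
static uint32_t status_reg = (0x3u << ERR_SHIFT);   /* pretend an error occurred */

static void reg_write(uint32_t value)
{
    /* Writing 1s over the error field clears those bits; other bits ignored. */
    status_reg &= ~(value & ERR_MASK);
}

static uint32_t check_and_clear_error(void)
{
    uint32_t err = (status_reg & ERR_MASK) >> ERR_SHIFT;

    if (err != 0)
    {
        printf("device reported error %u, clearing it\n", err);
        reg_write(ERR_MASK);     /* write 1s across the whole field */
    }
    return err;
}

int main(void)
{
    check_and_clear_error();     /* reports and clears error 3 */
    printf("error after clear: %u\n", (status_reg & ERR_MASK) >> ERR_SHIFT);
    return 0;
}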
-******************************************************************************* -@Function RGXRiscvPollAbstractMem - -@Description Poll for a value at the given address in RISC-V memory space - using RISC-V abstract memory commands - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Poll RISC-V address 0x%x (expected 0x%08x)", - ui32Addr, ui32Value); - - /* Prepare read address */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, - ui32Addr, PDUMP_FLAGS_CONTINUOUS); - - /* Send abstract memory read command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvCheckAbstractCmdError(psDevInfo); - - /* Check read value */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DATA0, - ui32Value, - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(ui32Value); - - /* Polling memory is currently not required driverlive */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#endif } -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) -/*! 
-******************************************************************************* -@Function RGXRiscvReadSysBusMem - -@Description Read a value at the given address in RISC-V memory space - using the RISC-V system bus - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) +/* + RGXReadMETAAddr +*/ +static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) { -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(pui32Value); - - /* Reading memory is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Configure system bus to read 32 bit every time a new address is provided */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_SBCS, - (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | - RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN); - - /* Perform read */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); - - /* Wait until system bus is idle */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); - return PVRSRV_ERROR_TIMEOUT; - } - - if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR) - { - /* Read value from debug system bus */ - *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0); +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM; + IMG_UINT8 __iomem *pui8RegBase = pvRegBase; + IMG_UINT32 ui32PollValue; + IMG_UINT32 ui32PollMask; + IMG_UINT32 ui32PollRegOffset; + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32WriteOffset; + IMG_UINT32 ui32WriteValue; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + { + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN; + CHECK_HWBRN_68777(ui32WriteValue); + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA; + } + else + { + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + 
ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN; + CHECK_HWBRN_68777(ui32WriteValue); + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA; + } } else { - *pui32Value = 0U; + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN; + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX; } - return PVRSRV_OK; -#endif -} -#endif /* !defined(EMULATOR) */ - -/*! -******************************************************************************* -@Function RGXRiscvPollSysBusMem - -@Description Poll for a value at the given address in RISC-V memory space - using the RISC-V system bus - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Poll RISC-V address 0x%x (expected 0x%08x)", - ui32Addr, ui32Value); - - /* Configure system bus to read 32 bit every time a new address is provided */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, - (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | - RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN, - PDUMP_FLAGS_CONTINUOUS); - - /* Perform read */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, - ui32Addr, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until system bus is idle */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBCS, - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvCheckSysBusError(psDevInfo); - - /* Check read value */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBDATA0, - ui32Value, - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(ui32Value); - - /* Polling memory is currently not required driverlive */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#endif -} - -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) -/*! 
-******************************************************************************* -@Function RGXRiscvReadMem - -@Description Read a value at the given address in RISC-V memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 *pui32Value) -{ - if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) - { - return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value); - } - - return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value); -} -#endif /* !defined(EMULATOR) */ - -/*! -******************************************************************************* -@Function RGXRiscvPollMem - -@Description Poll a value at the given address in RISC-V memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 ui32Value) -{ - if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) - { - return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value); - } - - return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value); -} - -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) -/*! -******************************************************************************* -@Function RGXRiscvWriteAbstractMem - -@Description Write a value at the given address in RISC-V memory space - using RISC-V abstract memory commands - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Write RISC-V address 0x%x (value 0x%08x)", - ui32Addr, ui32Value); - - /* Prepare write address */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, - ui32Addr, PDUMP_FLAGS_CONTINUOUS); - - /* Prepare write data */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, - ui32Value, PDUMP_FLAGS_CONTINUOUS); - - /* Send abstract register write command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Prepare write address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); - - /* Prepare write data */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); - - /* Send 
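RGXRiscvReadMem, RGXRiscvPollMem and RGXRiscvWriteMem all dispatch on the target address: anything inside the firmware core-memory window is accessed through abstract memory commands, everything else goes over the debug module's system bus. A minimal sketch of that range-based dispatch with made-up window bounds:

#include <stdint.h>
#include <stdio.h>

/* Illustrative core-memory window; the real bounds come from the FW layout. */
#define COREMEM_BASE 0x80000000u
#define COREMEM_END  0x8001FFFFu

typedef enum { ACCESS_ABSTRACT, ACCESS_SYSBUS } access_path_t;

static access_path_t pick_access_path(uint32_t addr)
{
    /* Core-local memory is accessed with abstract commands; every other
     * address is reached directly over the system bus. */
    return (addr >= COREMEM_BASE && addr <= COREMEM_END) ? ACCESS_ABSTRACT
                                                         : ACCESS_SYSBUS;
}

int main(void)
{
    printf("0x%08x -> %s\n", 0x80000100u,
           pick_access_path(0x80000100u) == ACCESS_ABSTRACT ? "abstract" : "sysbus");
    printf("0x%08x -> %s\n", 0x00001000u,
           pick_access_path(0x00001000u) == ACCESS_ABSTRACT ? "abstract" : "sysbus");
    return 0;
}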
abstract memory write command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); - - /* Wait until abstract command is completed */ + /* Wait for Slave Port to be Ready */ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), + ui32PollValue, + ui32PollMask, + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); return PVRSRV_ERROR_TIMEOUT; } -#endif - - return PVRSRV_OK; -} - -/*! -******************************************************************************* -@Function RGXRiscvWriteSysBusMem - -@Description Write a value at the given address in RISC-V memory space - using the RISC-V system bus -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Write RISC-V address 0x%x (value 0x%08x)", - ui32Addr, ui32Value); - - /* Configure system bus to read 32 bit every time a new address is provided */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, - RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT, - PDUMP_FLAGS_CONTINUOUS); - - /* Prepare write address */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, - ui32Addr, PDUMP_FLAGS_CONTINUOUS); - - /* Prepare write data and initiate write */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBDATA0, - ui32Value, PDUMP_FLAGS_CONTINUOUS); - - /* Wait until system bus is idle */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBCS, - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Configure system bus for 32 bit accesses */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_SBCS, - RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT); - - /* Prepare write address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); - - /* Prepare write data and initiate write */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value); + /* Issue the Read */ + OSWriteUncheckedHWReg32(pvRegBase, ui32WriteOffset, ui32WriteValue); + (void)OSReadUncheckedHWReg32(pvRegBase, ui32WriteOffset); - /* Wait until system bus is idle */ + /* Wait for Slave Port to be Ready: read complete */ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), + ui32PollValue, + 
ui32PollMask, + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); return PVRSRV_ERROR_TIMEOUT; } -#endif - - return PVRSRV_OK; -} -/*! -******************************************************************************* -@Function RGXRiscvWriteMem - -@Description Write a value to the given address in RISC-V memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 ui32Value) -{ - if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) - { - return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value); - } - - return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value); -} -#endif /* !defined(EMULATOR) */ - -/*! -******************************************************************************* -@Function RGXRiscvDmiOp - -@Description Acquire the powerlock and perform an operation on the RISC-V - Debug Module Interface, but only if the GPU is powered on. - -@Input psDevInfo Pointer to device info -@InOut pui64DMI Encoding of a request for the RISC-V Debug - Module with same format as the 'dmi' register - from the RISC-V debug specification (v0.13+). - On return, this is updated with the result of - the request, encoded the same way. - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 *pui64DMI) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(pui64DMI); - - /* Accessing DM registers is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; + /* Read the value */ + *pui32Value = OSReadUncheckedHWReg32(pvRegBase, ui32ReadOffset); #else -#define DMI_BASE RGX_CR_FWCORE_DMI_RESERVED00 -#define DMI_STRIDE (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00) -#define DMI_REG(r) ((DMI_BASE) + (DMI_STRIDE) * (r)) - -#define DMI_OP_SHIFT 0U -#define DMI_OP_MASK 0x3ULL -#define DMI_DATA_SHIFT 2U -#define DMI_DATA_MASK 0x3FFFFFFFCULL -#define DMI_ADDRESS_SHIFT 34U -#define DMI_ADDRESS_MASK 0xFC00000000ULL - -#define DMI_OP_NOP 0U -#define DMI_OP_READ 1U -#define DMI_OP_WRITE 2U -#define DMI_OP_RESERVED 3U - -#define DMI_OP_STATUS_SUCCESS 0U -#define DMI_OP_STATUS_RESERVED 1U -#define DMI_OP_STATUS_FAILED 2U -#define DMI_OP_STATUS_BUSY 3U - - PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - PVRSRV_DEV_POWER_STATE ePowerState; - PVRSRV_ERROR eError; - IMG_UINT64 ui64Op, ui64Address, ui64Data; - - ui64Op = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT; - ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT; - ui64Data = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT; - - eError = PVRSRVPowerLock(psDeviceNode); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)", - __func__, PVRSRVGetErrorString(eError))); - ui64Op = DMI_OP_STATUS_FAILED; - goto dmiop_update; - } - - eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)", - __func__, PVRSRVGetErrorString(eError))); 
- ui64Op = DMI_OP_STATUS_FAILED; - goto dmiop_release_lock; - } - - if (ePowerState == PVRSRV_DEV_POWER_STATE_ON) - { - switch (ui64Op) - { - case DMI_OP_NOP: - ui64Op = DMI_OP_STATUS_SUCCESS; - break; - case DMI_OP_WRITE: - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - DMI_REG(ui64Address), - (IMG_UINT32)ui64Data); - ui64Op = DMI_OP_STATUS_SUCCESS; - break; - case DMI_OP_READ: - ui64Data = (IMG_UINT64)OSReadHWReg32(psDevInfo->pvRegsBaseKM, - DMI_REG(ui64Address)); - ui64Op = DMI_OP_STATUS_SUCCESS; - break; - default: - PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op)); - ui64Op = DMI_OP_STATUS_FAILED; - break; - } - } - else - { - PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not " - "possible while the GPU is powered off", __func__)); - - ui64Op = DMI_OP_STATUS_FAILED; - } - -dmiop_release_lock: - PVRSRVPowerUnlock(psDeviceNode); - -dmiop_update: - *pui64DMI = (ui64Op << DMI_OP_SHIFT) | - (ui64Address << DMI_ADDRESS_SHIFT) | - (ui64Data << DMI_DATA_SHIFT); - - return eError; -#endif -} - -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) -/* - RGXReadMETAAddr -*/ -static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) -{ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; - IMG_UINT32 ui32Value; /* Wait for Slave Port to be Ready */ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } @@ -7693,15 +6265,14 @@ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } /* Read the value */ - ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX); - - *pui32Value = ui32Value; + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX); +#endif return PVRSRV_OK; } @@ -7711,6 +6282,70 @@ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui */ static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) { +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM; + IMG_UINT8 __iomem *pui8RegBase = pvRegBase; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA), + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + 
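RGXRiscvDmiOp exchanges requests as a single 64-bit word laid out like the 'dmi' register of the RISC-V debug specification: a 2-bit op/status field, a 32-bit data field starting at bit 2, and the register address above that. The sketch below packs and unpacks such a word using the same DMI_* shifts and masks that appear in the function.

#include <stdint.h>
#include <stdio.h>

/* Same layout as the DMI_* defines above: op in bits [1:0], data in
 * bits [33:2], address in the bits above that. */
#define DMI_OP_SHIFT       0u
#define DMI_OP_MASK        0x3ull
#define DMI_DATA_SHIFT     2u
#define DMI_DATA_MASK      0x3FFFFFFFCull
#define DMI_ADDRESS_SHIFT  34u
#define DMI_ADDRESS_MASK   0xFC00000000ull

#define DMI_OP_READ  1u
#define DMI_OP_WRITE 2u

static uint64_t dmi_pack(uint64_t op, uint64_t address, uint64_t data)
{
    return ((op      << DMI_OP_SHIFT)      & DMI_OP_MASK)      |
           ((data    << DMI_DATA_SHIFT)    & DMI_DATA_MASK)    |
           ((address << DMI_ADDRESS_SHIFT) & DMI_ADDRESS_MASK);
}

static void dmi_unpack(uint64_t word, uint64_t *op, uint64_t *address, uint64_t *data)
{
    *op      = (word & DMI_OP_MASK)      >> DMI_OP_SHIFT;
    *data    = (word & DMI_DATA_MASK)    >> DMI_DATA_SHIFT;
    *address = (word & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT;
}

int main(void)
{
    uint64_t op, addr, data;
    uint64_t req = dmi_pack(DMI_OP_WRITE, 0x10u, 0xDEADBEEFu);

    dmi_unpack(req, &op, &addr, &data);
    printf("op=%llu address=0x%llx data=0x%llx\n",
           (unsigned long long)op, (unsigned long long)addr,
           (unsigned long long)data);
    return 0;
}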
CHECK_HWBRN_68777(ui32METAAddr); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA, ui32METAAddr); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA, ui32Value); + } + else + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA), + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + CHECK_HWBRN_68777(ui32METAAddr); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA, ui32METAAddr); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA, ui32Value); + } + } + else + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); + (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT, ui32Value); + (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + } +#else IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; /* Wait for Slave Port to be Ready */ @@ -7718,7 +6353,7 @@ static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 u (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } @@ -7726,21 +6361,19 @@ static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 u /* Issue the Write */ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); +#endif return PVRSRV_OK; } -#endif PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) { -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { return RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); } -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) +#if !defined(EMULATOR) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { return RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value); @@ -7752,14 +6385,12 @@ PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32F PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) { -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { return 
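A META firmware write goes through the slave port: poll MSLVCTRL1 until both READY and GBLPORT_IDLE are set, write the target address to MSLVCTRL0, then write the value to MSLVDATAT (some paths read the register back as a write fence). The sketch below mirrors that poll-then-write ordering against a toy register block; the ready/idle bit positions are illustrative and the port is modelled as always ready.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions for the slave-port ready/idle flags. */
#define MSLVCTRL1_READY        (1u << 24)
#define MSLVCTRL1_GBLPORT_IDLE (1u << 25)

/* Toy slave port: always ready, remembers the last address/data written. */
static uint32_t mslvctrl1 = MSLVCTRL1_READY | MSLVCTRL1_GBLPORT_IDLE;
static uint32_t mslvctrl0, mslvdatat;

static int poll_slave_port_ready(void)
{
    const uint32_t mask = MSLVCTRL1_READY | MSLVCTRL1_GBLPORT_IDLE;
    int tries;

    for (tries = 0; tries < 100; tries++)
    {
        if ((mslvctrl1 & mask) == mask)
        {
            return 0;
        }
    }
    return -1;   /* would map to PVRSRV_ERROR_TIMEOUT in the driver */
}

static int meta_write(uint32_t addr, uint32_t value)
{
    if (poll_slave_port_ready() != 0)
    {
        return -1;
    }
    mslvctrl0 = addr;     /* issue the address ... */
    mslvdatat = value;    /* ... then the data completes the write */
    return 0;
}

int main(void)
{
    if (meta_write(0x00400000u, 0x12345678u) == 0)
    {
        printf("wrote 0x%08x to 0x%08x via slave port\n", mslvdatat, mslvctrl0);
    }
    return 0;
}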
RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); } -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) +#if !defined(EMULATOR) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { return RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value); @@ -7782,17 +6413,22 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, MMU_FAULT_DATA sFaultData = {0U}; MMU_CONTEXT *psFwMMUCtx = psDevInfo->psKernelMMUCtx; IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX); - IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); - IMG_UINT32 ui32OSID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE; + IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); + IMG_UINT32 ui32DriverID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE; IMG_UINT32 ui32HeapId; PHYS_HEAP *psPhysHeap; - +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /* MIPS uses the same page size as the OS, while others default to 4K pages */ IMG_UINT32 ui32FwPageSize = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? OSGetPageSize() : BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); +#else + /* default to 4K pages */ + IMG_UINT32 ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); +#endif + IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1)); - PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED), + PVR_LOG_GOTO_IF_INVALID_PARAM((ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED), eError, ErrorExit); PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) || @@ -7804,36 +6440,35 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, (ui32FwVA < ui32FwHeapEnd)), eError, ErrorExit); - ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ? - PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID); + ui32HeapId = (ui32DriverID == RGXFW_HOST_DRIVER_ID) ? + PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID); psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId]; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { - /* MIPS is equipped with a dedicated MMU */ + /* MIPS is equipped with a dedicated MMU */ RGXMipsCheckFaultAddress(psFwMMUCtx, ui32FwVA, &sFaultData); } else +#endif { IMG_UINT64 ui64FwDataBaseMask; IMG_DEV_VIRTADDR sDevVAddr; -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK | RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK | RGXFW_SEGMMU_DATA_BASE_ADDRESS); } - else -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) +#if !defined(EMULATOR) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { - ui64FwDataBaseMask = ~(RGXRISCVFW_GET_REGION_BASE(0xF)); + ui64FwDataBaseMask = ~RGXRISCVFW_REGION_MASK; } - else #endif + else { PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); } @@ -7848,9 +6483,14 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError == PVRSRV_OK) { +#if defined(RGX_FEATURE_MIPS_BIT_MASK) IMG_BOOL bValidPage = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? 
BITMASK_HAS(ui64RawPTE, RGXMIPSFW_TLB_VALID) : BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); +#else + IMG_BOOL bValidPage = BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); +#endif + if (!bValidPage) { /* don't report invalid pages */ @@ -7858,12 +6498,16 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, } else { +#if defined(RGX_FEATURE_MIPS_BIT_MASK) sDevPA.uiAddr = ui32PageOffset + ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? RGXMIPSFW_TLB_GET_PA(ui64RawPTE) : (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)); +#else + sDevPA.uiAddr = ui32PageOffset + (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK); +#endif /* Only the Host's Firmware heap is present in the Host's CPU IPA space */ - if (ui32OSID == RGXFW_HOST_OS) + if (ui32DriverID == RGXFW_HOST_DRIVER_ID) { PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA); } @@ -7893,33 +6537,26 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, return eError; } -#if defined(SUPPORT_WORKLOAD_ESTIMATION) -/*! -******************************************************************************* -@Function RGXIsValidWorkloadEstCCBCommand - -@Description Checks if command type can be used for workload estimation +PVRSRV_ERROR +RGXFWSetVzConnectionCooldownPeriod(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec) +{ + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); -@Input eType Command type to check + psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = ui32VzConnectionCooldownPeriodInSec; + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec); +#if defined(PDUMP) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "Updating the Vz reconnect request cooldown period inside RGXFWIfRuntimeCfg"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + offsetof(RGXFWIF_RUNTIME_CFG, ui32VzConnectionCooldownPeriodInSec), + ui32VzConnectionCooldownPeriodInSec, + PDUMP_FLAGS_CONTINUOUS); +#endif -@Return IMG_BOOL -******************************************************************************/ -INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType) -{ - switch (eType) - { - case RGXFWIF_CCB_CMD_TYPE_GEOM: - case RGXFWIF_CCB_CMD_TYPE_3D: - case RGXFWIF_CCB_CMD_TYPE_CDM: - case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: - return IMG_TRUE; - default: - PVR_ASSERT(IMG_FALSE); - return IMG_FALSE; - } + return PVRSRV_OK; } -#endif /****************************************************************************** End of file (rgxfwutils.c) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.c index 680614b21464..e43fc499d9db 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxhwperf.c @@ -90,37 +90,6 @@ static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); -static inline IMG_UINT32 -RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, - IMG_UINT32 ui32AllowedSize, - RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) -{ - IMG_UINT32 sizeSum = 0; - - /* Traverse the array to find how many packets will fit in the available space. 
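RGXGetFwMapping works out which driver's raw firmware heap a firmware VA falls into by subtracting the heap base and dividing by the fixed per-driver heap size, and keeps the page offset separately so it can be added back onto the physical page taken from the PTE. A standalone sketch of that address arithmetic with made-up heap constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: contiguous, equally sized per-driver raw FW heaps. */
#define FW_HEAP_BASE       0x10000000u
#define FW_RAW_HEAP_SIZE   0x02000000u   /* 32 MB per driver (made up) */
#define NUM_DRIVERS        8u
#define FW_PAGE_SIZE       4096u

static int decode_fw_va(uint32_t fw_va, uint32_t *driver_id, uint32_t *page_offset)
{
    uint32_t heap_end = FW_HEAP_BASE + NUM_DRIVERS * FW_RAW_HEAP_SIZE;

    if (fw_va < FW_HEAP_BASE || fw_va >= heap_end)
    {
        return -1;                                   /* outside every FW heap */
    }

    *driver_id   = (fw_va - FW_HEAP_BASE) / FW_RAW_HEAP_SIZE;
    *page_offset = fw_va & (FW_PAGE_SIZE - 1);       /* re-added to the PTE's PA */
    return 0;
}

int main(void)
{
    uint32_t id, off;

    if (decode_fw_va(0x14001234u, &id, &off) == 0)
    {
        printf("driver %u, page offset 0x%x\n", id, off);   /* driver 2, 0x234 */
    }
    return 0;
}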
*/ - while ( sizeSum < ui32BytesExp && - sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) - { - sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); - psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); - } - - return sizeSum; -} - -static inline void -RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, - IMG_BOOL bIsReaderConnected) -{ - if (!bIsReaderConnected) - { - PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full " - "and no reader is currently connected, suspending event collection. " - "Connect a reader or restart driver to avoid event loss.", __func__)); - psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; - } -} - /****************************************************************************** * RGX HW Performance Profiling Server API(s) *****************************************************************************/ @@ -166,7 +135,7 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R if ((pszBVNC = RGXDevBVNCString(psDevInfo))) { size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1); - OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); + OSStringSafeCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength); } else @@ -180,34 +149,20 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R { psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG; } -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG; - } -#endif #if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) { psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG; } #endif -#if defined(RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH)) { psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG; } -#endif if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE)) { psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG; } -#if defined(RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7)) - { - psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG; - } -#endif if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) { psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG; @@ -218,8 +173,11 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R } #ifdef SUPPORT_WORKLOAD_ESTIMATION - /* Not a part of BVNC feature line and so doesn't need the feature supported check */ - psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + /* Not a part of BVNC feature line and so doesn't need the feature supported check */ + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; + } #endif /* Define the HW counter block counts. 
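The BVNC string handling above measures the source with OSStringNLength, copies at most that many characters plus a terminator, and zero-fills the rest of the fixed-size destination so no stale bytes are left behind. A plain-C sketch of the same bounded copy-and-pad pattern using standard library calls; the buffer length and the example string are illustrative.

#include <stdio.h>
#include <string.h>

#define BVNC_BUF_LEN 24   /* illustrative fixed destination size */

/* Copy src into a fixed-size buffer, always NUL-terminating and
 * zero-filling the unused tail. */
static void copy_bvnc_string(char dst[BVNC_BUF_LEN], const char *src)
{
    size_t len = strnlen(src, BVNC_BUF_LEN - 1);

    memcpy(dst, src, len);
    memset(dst + len, 0, BVNC_BUF_LEN - len);   /* terminator + padding */
}

int main(void)
{
    char buf[BVNC_BUF_LEN];

    copy_bvnc_string(buf, "22.102.54.38");      /* example BVNC-style string */
    printf("BVNC: %s\n", buf);
    return 0;
}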
*/ @@ -255,6 +213,9 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks)); } + /* The GPU core count is overwritten by the FW */ + psBVNC->ui16BvncGPUCores = 0; + return PVRSRV_OK; } @@ -280,7 +241,7 @@ PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", PVRSRV_ERROR_INVALID_PARAMS); @@ -347,7 +308,7 @@ PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM firmware completed"));*/ - PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); PVR_DPF_RETURN_OK; @@ -381,7 +342,7 @@ PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); PVR_DPF_ENTERED; @@ -498,7 +459,7 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( PVR_UNREFERENCED_PARAMETER(ui32CtrlWord); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", PVRSRV_ERROR_INVALID_PARAMS); @@ -526,11 +487,11 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgDABlks.sBlockConfigs, - psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); + psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); - PVR_LOG_GOTO_IF_ERROR(eError, "DevMemAcquireCpuVirtAddr", fail2); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen); DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, @@ -556,21 +517,19 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); - PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); PVR_DPF_RETURN_OK; fail3: DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); - fail2: RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); - fail1: DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); - PVR_DPF_RETURN_RC (eError); + PVR_DPF_RETURN_RC(eError); } /****************************************************************************** @@ -608,11 +567,10 @@ PVRSRV_ERROR RGXHWPerfConfigMuxCounters( IMG_UINT32 ui32NumBlocks, RGX_HWPERF_CONFIG_MUX_CNTBLK *asBlockConfigs) { - PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError; RGX_KM_HWPERF_DEVDATA* psDevData; RGX_HWPERF_DEVICE *psHWPerfDev; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); /* Validate input argument values supplied by the caller */ if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) @@ -631,17 +589,19 @@ PVRSRV_ERROR RGXHWPerfConfigMuxCounters( 
{ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + /* Call the internal server API */ eError = PVRSRVRGXConfigMuxHWPerfCountersKM(NULL, psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs); - PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigMuxHWPerfCountersKM"); psHWPerfDev = psHWPerfDev->psNext; } - return eError; + return PVRSRV_OK; } @@ -654,8 +614,6 @@ PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( PVRSRV_ERROR eError; RGX_HWPERF_DEVICE *psHWPerfDev; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - /* Validate input arguments supplied by the caller */ PVR_LOG_RETURN_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid", PVRSRV_ERROR_INVALID_PARAMS); @@ -678,6 +636,8 @@ PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( { RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + eError = PVRSRVRGXConfigCustomCountersKM(NULL, psDevData->psRgxDevNode, ui16CustomBlockID, ui16NumCustomCounters, pui32CustomCounterIDs); @@ -706,7 +666,7 @@ GetHWPerfBlockTypeByID(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32BlockID) IMG_UINT32 ui32TableIdx = 0xFFFF; RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; /* Only used to satisfy pfnIsBlkPresent requirements. */ -#if defined(RGX_FEATURE_HWPERF_OCEANIC) +#if defined(HWPERF_UNIFIED) IMG_UINT32 uiBlockID = (IMG_UINT32)(ui32BlockID & ~(RGX_CNTBLK_ID_UNIT_ALL_MASK|RGX_CNTBLK_ID_DA_MASK)); #else IMG_UINT32 uiBlockID = (IMG_UINT32)(ui32BlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK); @@ -793,7 +753,7 @@ PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNo { RGXFWIF_HWPERF_CTL_BLK *psBlock = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfCtl); const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psBlkTypeDesc; - IMG_UINT32 i, ui32LastCountIdx = 0; + IMG_UINT32 i, ui32LastCountIdx = 0, ui8CurCountIdx = 0; RGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfig; PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode)); @@ -804,11 +764,11 @@ PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNo PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); } - if (!psBlock->bEnabled || !psBlock->bValid) + if (!psBlock->ui32Enabled || !psBlock->ui32Valid) { PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not %s", ui32BlockID, - !psBlock->bEnabled ? "enabled." : "configured.")); + !psBlock->ui32Enabled ? "enabled." 
: "configured.")); PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); } @@ -821,7 +781,8 @@ PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNo sBlockConfig.ui16BlockID = psBlock->eBlockID; sBlockConfig.ui8Mode = 0; - for (i = 0; (psBlock->uiCounterMask >> i) != 0; i++) + for (i = 0; ((psBlock->uiCounterMask >> i) != 0) && + (ui8CurCountIdx < psBlkTypeDesc->ui8NumCounters); i++) { if (psBlock->uiCounterMask & (1 << i)) { @@ -844,6 +805,7 @@ PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNo (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT) & 0x1FFF; #endif ui32LastCountIdx++; + ui8CurCountIdx++; } } @@ -916,7 +878,7 @@ PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode, } } } -#if defined(RGX_FEATURE_HWPERF_OCEANIC) +#if defined(HWPERF_UNIFIED) else if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) { RGXFWIF_HWPERF_DA_BLK *psBlock = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfCtl); @@ -987,7 +949,7 @@ PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, for (i = 0; i < RGX_HWPERF_MAX_MUX_BLKS; i++) { - if (psHWPerfCtl->sBlkCfg[i].bEnabled && psHWPerfCtl->sBlkCfg[i].bValid) + if (psHWPerfCtl->sBlkCfg[i].ui32Enabled && psHWPerfCtl->sBlkCfg[i].ui32Valid) { *pui32BlockCount += 1; @@ -1034,7 +996,7 @@ PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, } } -#if defined(RGX_FEATURE_HWPERF_OCEANIC) +#if defined(HWPERF_UNIFIED) for (i = 0; i < RGX_HWPERF_MAX_DA_BLKS; i++) { if (psHWPerfCtl->sDABlkCfg[i].uiEnabled) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.c index 895108d873ed..d6096e6a9681 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.c @@ -53,11 +53,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv_bridge_init.h" #include "rgx_bridge_init.h" #include "syscommon.h" -#include "rgx_heaps.h" +#include "rgx_heaps_server.h" #include "rgxheapconfig.h" #include "rgxpower.h" #include "tlstream.h" #include "pvrsrv_tlstreams.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +#include "pvr_ricommon.h" +#endif #include "rgxinit.h" #include "rgxbvnc.h" @@ -71,24 +74,28 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxmem.h" #include "sync_internal.h" #include "pvrsrv_apphint.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "rgxfwdbg.h" #include "info_page.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) #include "rgxfwimageutils.h" +#endif #include "rgxutils.h" #include "rgxfwutils.h" #include "rgx_fwif_km.h" #include "rgxmmuinit.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) #include "rgxmipsmmuinit.h" #include "physmem.h" +#endif #include "devicemem_utils.h" #include "devicemem_server.h" #include "physmem_osmem.h" #include "physmem_lma.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgxhwperf.h" #include "htbserver.h" @@ -97,13 +104,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgx_compat_bvnc.h" -#include "rgx_heaps.h" - #include "rgxta3d.h" #include "rgxtimecorr.h" #include "rgxshader.h" -#include "rgx_bvnc_defs_km.h" #if defined(PDUMP) #include "rgxstartstop.h" #endif @@ -119,9 +123,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxpdvfs.h" #endif -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -#include "rgxsoctimer.h" -#endif #if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) #include "pdump_physmem.h" @@ -134,10 +135,10 @@ static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode); static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); -#if (RGX_NUM_OS_SUPPORTED > 1) -static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid); +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) +static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID); static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); -#endif +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ /* Services internal heap identification used in this file only */ #define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ @@ -154,7 +155,7 @@ static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); #define VAR(x) #x -static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo, PVRSRV_DEVICE_NODE *psDeviceNode); #if !defined(NO_HARDWARE) /*************************************************************************/ /*! 
@@ -168,21 +169,21 @@ static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) { IMG_BOOL bReturnVal = IMG_FALSE; - volatile IMG_UINT32 *aui32SampleIrqCount = psDevInfo->aui32SampleIRQCount; + volatile IMG_UINT32 *pui32SampleIrqCount = psDevInfo->aui32SampleIRQCount; IMG_UINT32 ui32IrqCnt; #if defined(RGX_FW_IRQ_OS_COUNTERS) - if PVRSRV_VZ_MODE_IS(GUEST) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { bReturnVal = IMG_TRUE; } else { - get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_OS, psDevInfo); + get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_DRIVER_ID, psDevInfo); - if (ui32IrqCnt != aui32SampleIrqCount[RGXFW_THREAD_0]) + if (ui32IrqCnt != pui32SampleIrqCount[RGXFW_THREAD_0]) { - aui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt; + pui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt; bReturnVal = IMG_TRUE; } } @@ -194,9 +195,9 @@ static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo); /* treat unhandled interrupts here to align host count with fw count */ - if (aui32SampleIrqCount[ui32TID] != ui32IrqCnt) + if (pui32SampleIrqCount[ui32TID] != ui32IrqCnt) { - aui32SampleIrqCount[ui32TID] = ui32IrqCnt; + pui32SampleIrqCount[ui32TID] = ui32IrqCnt; bReturnVal = IMG_TRUE; } } @@ -214,7 +215,7 @@ static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) */ /**************************************************************************/ static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) { - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo) || (psDevInfo->ui32HostSafetyEventMask == 0)) { return 0; } @@ -240,10 +241,10 @@ static INLINE IMG_BOOL RGXSafetyEventCheck(PVRSRV_RGXDEV_INFO *psDevInfo) { IMG_UINT32 ui32EventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_STATUS); - if (BIT_ISSET(ui32EventStatus, RGX_CR_EVENT_STATUS_SAFETY_SHIFT)) + if (BIT_ISSET(ui32EventStatus, RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_SHIFT)) { /* clear the safety event */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, RGX_CR_EVENT_CLEAR_SAFETY_EN); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_EN); /* report if there is anything for the Host to handle */ bSafetyEvent = (RGXHostSafetyEvents(psDevInfo) != 0); @@ -260,85 +261,106 @@ static INLINE IMG_BOOL RGXSafetyEventCheck(PVRSRV_RGXDEV_INFO *psDevInfo) */ /**************************************************************************/ static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo) { - IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo); - RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_ERROR eError; - if (ui32HostSafetyStatus != 0) + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) { - /* clear the safety bus events handled by the Host */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE, ui32HostSafetyStatus); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire PowerLock (device: %p, error: %s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + return; + } - if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT)) - { - IMG_UINT32 ui32FaultFlag; - IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS); - IMG_UINT32 
ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT - - RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT; + if (psDevInfo->bRGXPowered) + { + RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE; + IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo); - PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW)); + if (ui32HostSafetyStatus != 0) + { + /* clear the safety bus events handled by the Host */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE, ui32HostSafetyStatus); - for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++) + if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT)) { - if (BIT_ISSET(ui32FaultFW, ui32FaultFlag)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).", - __func__, BIT(ui32FaultFlag))); - eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR; - } - else if BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).", - __func__, BIT(ui32FaultFlag))); + IMG_UINT32 ui32FaultFlag; + IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS); + IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT - + RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT; - /* Only report this if we haven't detected a more serious error */ - if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW)); + + for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++) + { + if (BIT_ISSET(ui32FaultFW, ui32FaultFlag)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).", + __func__, BIT(ui32FaultFlag))); + eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR; + } + else if BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset) { - eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK; + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).", + __func__, BIT(ui32FaultFlag))); + + /* Only report this if we haven't detected a more serious error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK; + } } } - } - - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW); - } - if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT)) - { - volatile RGXFWIF_POW_STATE ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState; + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW); + } - if (ePowState == RGXFWIF_POW_ON) + if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT)) { - PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__)); + volatile RGXFWIF_POW_STATE ePowState; + + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, + INVALIDATE); + ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState; - /* Only report this if we haven't detected a more serious error */ - if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + if (ePowState != RGXFWIF_POW_OFF) { - eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; + PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__)); + + /* Only report this if we haven't detected a more serious error */ + if (eResetReason != 
RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; + } } } - } - /* Notify client and system layer of any error */ - if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) - { - PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + /* Notify client and system layer of any error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; - /* Client notification of device error will be achieved by - * clients calling UM function RGXGetLastDeviceError() */ - psDevInfo->eLastDeviceError = eResetReason; + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = eResetReason; - /* Notify system layer of any error */ - if (psDevConfig->pfnSysDevErrorNotify) - { - PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + /* Notify system layer of any error */ + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; - sErrorData.eResetReason = eResetReason; + sErrorData.eResetReason = eResetReason; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, - &sErrorData); + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, + &sErrorData); + } } } } + + PVRSRVPowerUnlock(psDeviceNode); } static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) @@ -352,7 +374,7 @@ static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) #if defined(PVRSRV_DEBUG_LISR_EXECUTION) PVR_DPF((PVR_DBG_ERROR, - "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %llu", + "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %" IMG_UINT64_FMTSPEC, psDeviceNode->sDevId.ui32InternalID, psDeviceNode->sLISRExecutionInfo.ui32Status, psDeviceNode->sLISRExecutionInfo.ui64Clockns)); @@ -374,7 +396,7 @@ void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) { IMG_BOOL bScheduleMISR; - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { bScheduleMISR = IMG_TRUE; } @@ -402,6 +424,9 @@ static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo, { IMG_UINT32 ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg); + /* clear only the pending bit of the thread that triggered this interrupt */ + ui32IRQClearMask &= ui32IRQStatus; + if (ui32IRQStatus & ui32IRQStatusEventMsk) { /* acknowledge and clear the interrupt */ @@ -415,8 +440,7 @@ static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo, } } -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) -static IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo) +static __maybe_unused IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo) { return RGXAckHwIrq(psDevInfo, RGX_CR_META_SP_MSLVIRQSTATUS, @@ -424,8 +448,8 @@ static IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo) RGX_CR_META_SP_MSLVIRQSTATUS, RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK); } -#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) static IMG_BOOL RGXAckIrqMIPS(PVRSRV_RGXDEV_INFO *psDevInfo) { return RGXAckHwIrq(psDevInfo, @@ -434,6 +458,7 @@ static IMG_BOOL RGXAckIrqMIPS(PVRSRV_RGXDEV_INFO *psDevInfo) RGX_CR_MIPS_WRAPPER_IRQ_CLEAR, RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN); } +#endif static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) { @@ -443,9 +468,51 @@ static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) * register bank 0 */ return 
RGXAckHwIrq(psDevInfo, RGX_CR_IRQ_OS0_EVENT_STATUS, - RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN, + ~RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK, RGX_CR_IRQ_OS0_EVENT_CLEAR, - RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN); + ~RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK); +} + +static PVRSRV_ERROR RGXSetAckIrq(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, IRQ_PER_OS)) && (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo))) + { + psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; + } + else if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + psDevInfo->pfnRGXAckIrq = NULL; + } + else + { + /* native and host drivers must clear the unique GPU physical interrupt */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + psDevInfo->pfnRGXAckIrq = RGXAckIrqMIPS; + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + psDevInfo->pfnRGXAckIrq = RGXAckIrqMETA; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: GPU IRQ clearing mechanism not implemented " + "for this architecture.", __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + } + +#if defined(RGX_IRQ_HYPERV_HANDLER) + /* The hypervisor receives and acknowledges the GPU irq, then it injects an + * irq only in the recipient OS. The KM driver doesn't handle the GPU irq line */ + psDevInfo->pfnRGXAckIrq = NULL; +#endif + + return PVRSRV_OK; } static IMG_BOOL RGX_LISRHandler(void *pvData) @@ -462,6 +529,7 @@ static IMG_BOOL RGX_LISRHandler(void *pvData) get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); UPDATE_LISR_DBG_SNAPSHOT(ui32idx, ui32IrqCnt); } + UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT); UPDATE_LISR_DBG_TIMESTAMP(); #endif @@ -476,7 +544,7 @@ static IMG_BOOL RGX_LISRHandler(void *pvData) { bIrqAcknowledged = IMG_TRUE; - if (SampleIRQCount(psDevInfo) || bSafetyEvent) + if (bSafetyEvent || SampleIRQCount(psDevInfo)) { UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED); UPDATE_MISR_DBG_COUNTER(); @@ -503,11 +571,16 @@ static IMG_BOOL RGX_LISRHandler(void *pvData) } else { +#if defined(SUPPORT_AUTOVZ) /* AutoVz drivers rebooting while the firmware is active must acknowledge * and clear the hw IRQ line before the RGXInit() has finished. 
*/ - if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp && - (psDevInfo->pfnRGXAckIrq != NULL) && - psDevInfo->pfnRGXAckIrq(psDevInfo))) + if ((psDevInfo->pfnRGXAckIrq != NULL) && + psDevInfo->pfnRGXAckIrq(psDevInfo)) + { + bIrqAcknowledged = IMG_TRUE; + } + else +#endif { UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED); } @@ -559,9 +632,13 @@ static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) { PVRSRV_DEVICE_NODE *psDeviceNode = psDevice; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + const RGXFWIF_SYSDATA *psFwSysData; PVRSRV_ERROR eError = PVRSRV_OK; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, + INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE) { RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); @@ -595,40 +672,34 @@ static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) #define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE #define GPU_ACTIVE RGXFWIF_GPU_UTIL_STATE_ACTIVE #define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED +#define GPU_INACTIVE RGXFWIF_GPU_UTIL_STATE_INACTIVE #define MAX_ITERATIONS 64 +#define MAX_DIFF_TIME_NS (300000ULL) +#define MAX_DIFF_DM_TIME_NS (MAX_DIFF_TIME_NS >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT) static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_HANDLE hGpuUtilUser, RGXFWIF_GPU_UTIL_STATS *psReturnStats) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGXFWIF_GPU_STATS sStats; RGXFWIF_GPU_UTIL_STATS *psAggregateStats; - IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_OS_SUPPORTED][RGXFWIF_GPU_UTIL_STATE_NUM]; - IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_OS_SUPPORTED]; - IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_OS_SUPPORTED]; - IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_OS_SUPPORTED]; - IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_OS_SUPPORTED]; + IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; + IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_DRIVERS_SUPPORTED]; IMG_UINT64 ui64TimeNow; + IMG_UINT64 ui64TimeNowShifted; IMG_UINT32 ui32Attempts; IMG_UINT32 ui32Remainder; - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; IMG_UINT32 ui32MaxDMCount; RGXFWIF_DM eDM; - /***** (1) Initialise return stats *****/ psReturnStats->bValid = IMG_FALSE; - psReturnStats->ui64GpuStatIdle = 0; - psReturnStats->ui64GpuStatActive = 0; - psReturnStats->ui64GpuStatBlocked = 0; - psReturnStats->ui64GpuStatCumulative = 0; - - memset(psReturnStats->aaui64DMOSStatIdle, 0, sizeof(psReturnStats->aaui64DMOSStatIdle)); - memset(psReturnStats->aaui64DMOSStatActive, 0, sizeof(psReturnStats->aaui64DMOSStatActive)); - memset(psReturnStats->aaui64DMOSStatBlocked, 0, sizeof(psReturnStats->aaui64DMOSStatBlocked)); - memset(psReturnStats->aaui64DMOSStatCumulative, 0, sizeof(psReturnStats->aaui64DMOSStatCumulative)); if (hGpuUtilUser == NULL) { @@ -636,81 +707,152 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, } psAggregateStats = hGpuUtilUser; - ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount; + /* decrease by 1 to account for excluding GP DM from the statics */; + 
ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1; - /* Allocate temporary counters used in the attempts loop */ - paaui64DMOSTmpCounters = OSAllocMem(sizeof(*paaui64DMOSTmpCounters) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paaui64DMOSTmpCounters != NULL, "OSAllocMem:1", failTmpCountersAlloc); - paui64DMOSTmpLastWord = OSAllocMem(sizeof(*paui64DMOSTmpLastWord) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastWord != NULL, "OSAllocMem:2", failTmpLastWordAlloc); - paui64DMOSTmpLastState = OSAllocMem(sizeof(*paui64DMOSTmpLastState) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastState != NULL, "OSAllocMem:3", failTmpLastStateAlloc); - paui64DMOSTmpLastPeriod = OSAllocMem(sizeof(*paui64DMOSTmpLastPeriod) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastPeriod != NULL, "OSAllocMem:4", failTmpLastPeriodAlloc); - paui64DMOSTmpLastTime = OSAllocMem(sizeof(*paui64DMOSTmpLastTime) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastTime != NULL, "OSAllocMem:5", failTmpLastTimeAlloc); + /* Reset temporary counters used in the attempts loop */ + paaui64DMOSTmpCounters = &psAggregateStats->sTempGpuStats.aaaui64DMOSTmpCounters[0]; + paui64DMOSTmpLastWord = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastWord[0]; + paui64DMOSTmpLastState = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastState[0]; + paui64DMOSTmpLastPeriod = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastPeriod[0]; + paui64DMOSTmpLastTime = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastTime[0]; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFW, INVALIDATE); /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */ for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++) { - const volatile IMG_UINT64 *pui64GpuStatsCounters = &psUtilFWCb->aui64GpuStatsCounters[0]; - const volatile IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OS] = &psUtilFWCb->aaui64DMOSLastWord[0]; - const volatile IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OS][RGXFWIF_GPU_UTIL_STATE_NUM] = &psUtilFWCb->aaaui64DMOSStatsCounters[0]; - IMG_UINT64 aui64GpuTmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0}; IMG_UINT64 ui64GpuLastPeriod = 0, ui64GpuLastWord = 0, ui64GpuLastState = 0, ui64GpuLastTime = 0; - IMG_UINT32 i = 0; - /***** (2) Get latest data from shared area *****/ - OSLockAcquire(psDevInfo->hGPUUtilLock); - - /* - * First attempt at detecting if the FW is in the middle of an update. - * This should also help if the FW is in the middle of a 64 bit variable update. 
- */ - while (((ui64GpuLastWord != psUtilFWCb->ui64GpuLastWord) || - (aui64GpuTmpCounters[ui64GpuLastState] != - pui64GpuStatsCounters[ui64GpuLastState])) && - (i < MAX_ITERATIONS)) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - ui64GpuLastWord = psUtilFWCb->ui64GpuLastWord; - ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord); - aui64GpuTmpCounters[GPU_IDLE] = pui64GpuStatsCounters[GPU_IDLE]; - aui64GpuTmpCounters[GPU_ACTIVE] = pui64GpuStatsCounters[GPU_ACTIVE]; - aui64GpuTmpCounters[GPU_BLOCKED] = pui64GpuStatsCounters[GPU_BLOCKED]; + IMG_UINT64 aui64StatsCountersNew[RGXFWIF_GPU_UTIL_STATE_NUM]; + IMG_UINT64 ui64GpuLastWordNew; + RGXFWIF_GPU_STATS sStatsNew; + IMG_UINT32 i = 0; - for (eDM = 0; eDM < ui32MaxDMCount; eDM++) + ui64GpuLastWord = 0; + ui64GpuLastState = 0; + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + /* Copy data from device memory */ + memcpy(&sStatsNew, &psDevInfo->psRGXFWIfGpuUtilFW->sStats[ui32DriverID], sizeof(sStats)); + memcpy(&ui64GpuLastWordNew, &psDevInfo->psRGXFWIfGpuUtilFW->ui64GpuLastWord, sizeof(ui64GpuLastWord)); + memcpy(aui64StatsCountersNew, psDevInfo->psRGXFWIfGpuUtilFW->aui64GpuStatsCounters, sizeof(aui64StatsCountersNew)); + + /* + * First attempt at detecting if the FW is in the middle of an update. + * This should also help if the FW is in the middle of a 64 bit variable update. + * This loop must be fast. Faster than FW updates the stats. + */ + for (i = 0; i < MAX_ITERATIONS; i++) { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + IMG_UINT32 j,k; + IMG_BOOL bRetry = IMG_FALSE; + + if (i > 0) + { + /* On retry keep previous data */ + ui64GpuLastWordNew = ui64GpuLastWord; + memcpy(aui64StatsCountersNew, aui64GpuTmpCounters, sizeof(aui64StatsCountersNew)); + memcpy(&sStatsNew, &sStats, sizeof(sStatsNew)); + } + + /* Copy data from device memory */ + memcpy(&sStats, &psDevInfo->psRGXFWIfGpuUtilFW->sStats[ui32DriverID], sizeof(sStats)); + memcpy(&ui64GpuLastWord, &psDevInfo->psRGXFWIfGpuUtilFW->ui64GpuLastWord, sizeof(ui64GpuLastWord)); + memcpy(aui64GpuTmpCounters, psDevInfo->psRGXFWIfGpuUtilFW->aui64GpuStatsCounters, sizeof(aui64GpuTmpCounters)); + + /* Check for abnormal time difference between reads */ + if (RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWord) - RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWordNew) > MAX_DIFF_TIME_NS) + { + bRetry = IMG_TRUE; + continue; + } + + for (j = 0; j < RGXFWIF_GPU_UTIL_STATE_NUM; j++) + { + /* Check for abnormal time difference between reads */ + if (aui64GpuTmpCounters[j] - aui64StatsCountersNew[j] > MAX_DIFF_TIME_NS) + { + bRetry = IMG_TRUE; + break; + } + } + + if (bRetry) + { + continue; + } + + /* Check for DM counters wrapped or + abnormal time difference between reads. 
+ The DM time is shifted by RGXFWIF_DM_OS_TIMESTAMP_SHIFT */ + for (j = 0; j < RGXFWIF_GPU_UTIL_DM_MAX; j++) + { + if (sStats.aui32DMOSLastWordWrap[j] != sStatsNew.aui32DMOSLastWordWrap[j] || + RGXFWIF_GPU_UTIL_GET_TIME32(sStats.aui32DMOSLastWord[j]) - RGXFWIF_GPU_UTIL_GET_TIME32(sStatsNew.aui32DMOSLastWord[j]) > MAX_DIFF_DM_TIME_NS) + { + bRetry = IMG_TRUE; + break; + } + + for (k = 0; k < RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM; k++) + { + if (sStats.aaui32DMOSCountersWrap[j][k] != sStatsNew.aaui32DMOSCountersWrap[j][k] || + sStats.aaui32DMOSStatsCounters[j][k] - sStatsNew.aaui32DMOSStatsCounters[j][k] > MAX_DIFF_DM_TIME_NS) + { + bRetry = IMG_TRUE; + break; + } + + } + + if (bRetry) + { + break; + } + } + + if (!bRetry) { - paui64DMOSTmpLastWord[eDM][ui32OSid] = paui64DMOSLastWord[eDM][ui32OSid]; - paui64DMOSTmpLastState[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32OSid]); - paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_IDLE] = paaui64DMOSStatsCounters[eDM][ui32OSid][GPU_IDLE]; - paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_ACTIVE] = paaui64DMOSStatsCounters[eDM][ui32OSid][GPU_ACTIVE]; - paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_BLOCKED] = paaui64DMOSStatsCounters[eDM][ui32OSid][GPU_BLOCKED]; + /* Stats are good*/ + break; } } - i++; - } + OSLockRelease(psDevInfo->hGPUUtilLock); - OSLockRelease(psDevInfo->hGPUUtilLock); + ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord); - if (i == MAX_ITERATIONS) - { - PVR_DPF((PVR_DBG_WARNING, - "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); + if (i == MAX_ITERATIONS) + { + PVR_DPF((PVR_DBG_WARNING, + "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); - OSFreeMem(paaui64DMOSTmpCounters); - OSFreeMem(paui64DMOSTmpLastWord); - OSFreeMem(paui64DMOSTmpLastState); - OSFreeMem(paui64DMOSTmpLastPeriod); - OSFreeMem(paui64DMOSTmpLastTime); + return PVRSRV_ERROR_TIMEOUT; + } - return PVRSRV_ERROR_TIMEOUT; - } + for (eDM = 0; eDM < ui32MaxDMCount; eDM++) + { + paui64DMOSTmpLastWord[eDM][ui32DriverID] = + ((IMG_UINT64)sStats.aui32DMOSLastWordWrap[eDM] << 32) + sStats.aui32DMOSLastWord[eDM]; + paui64DMOSTmpLastState[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32DriverID]); + if (paui64DMOSTmpLastState[eDM][ui32DriverID] != GPU_ACTIVE) + { + paui64DMOSTmpLastState[eDM][ui32DriverID] = GPU_INACTIVE; + } + paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_INACTIVE] = (IMG_UINT64)sStats.aaui32DMOSStatsCounters[eDM][GPU_INACTIVE] + + ((IMG_UINT64)sStats.aaui32DMOSCountersWrap[eDM][GPU_INACTIVE] << 32); + paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE] = (IMG_UINT64)sStats.aaui32DMOSStatsCounters[eDM][GPU_ACTIVE] + + ((IMG_UINT64)sStats.aaui32DMOSCountersWrap[eDM][GPU_ACTIVE] << 32); + } + + } /* FOREACH_SUPPORTED_DRIVER(ui32DriverID) */ /***** (3) Compute return stats *****/ @@ -734,24 +876,22 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, psReturnStats->ui64GpuStatActive + psReturnStats->ui64GpuStatBlocked; + /* convert time into the same units as used by fw */ + ui64TimeNowShifted = ui64TimeNow >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT; for (eDM = 0; eDM < ui32MaxDMCount; eDM++) { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - paui64DMOSTmpLastTime[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32OSid]); - paui64DMOSTmpLastPeriod[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, paui64DMOSTmpLastTime[eDM][ui32OSid]); - 
paaui64DMOSTmpCounters[eDM][ui32OSid][paui64DMOSTmpLastState[eDM][ui32OSid]] += paui64DMOSTmpLastPeriod[eDM][ui32OSid]; - + paui64DMOSTmpLastTime[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32DriverID]); + paui64DMOSTmpLastPeriod[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNowShifted , paui64DMOSTmpLastTime[eDM][ui32DriverID]); + paaui64DMOSTmpCounters[eDM][ui32DriverID][paui64DMOSTmpLastState[eDM][ui32DriverID]] += paui64DMOSTmpLastPeriod[eDM][ui32DriverID]; /* Get statistics for a user since its last request */ - psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_IDLE], - psAggregateStats->aaui64DMOSStatIdle[eDM][ui32OSid]); - psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_ACTIVE], - psAggregateStats->aaui64DMOSStatActive[eDM][ui32OSid]); - psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_BLOCKED], - psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32OSid]); - psReturnStats->aaui64DMOSStatCumulative[eDM][ui32OSid] = psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid] + - psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid] + - psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid]; + psReturnStats->aaui64DMOSStatInactive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_INACTIVE], + psAggregateStats->aaui64DMOSStatInactive[eDM][ui32DriverID]); + psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE], + psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID]); + psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = psReturnStats->aaui64DMOSStatInactive[eDM][ui32DriverID] + + psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID]; } } @@ -790,11 +930,10 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, for (eDM = 0; eDM < ui32MaxDMCount; eDM++) { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - psAggregateStats->aaui64DMOSStatIdle[eDM][ui32OSid] += psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid]; - psAggregateStats->aaui64DMOSStatActive[eDM][ui32OSid] += psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid]; - psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32OSid] += psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid]; + psAggregateStats->aaui64DMOSStatInactive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatInactive[eDM][ui32DriverID]; + psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID]; } } @@ -805,28 +944,6 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder); psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder); - for (eDM = 0; eDM < ui32MaxDMCount; eDM++) - { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) - { - psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid] = OSDivide64(psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid], 1000, &ui32Remainder); - psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid] = OSDivide64(psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid], 1000, &ui32Remainder); - psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid] = 
OSDivide64(psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid], 1000, &ui32Remainder); - psReturnStats->aaui64DMOSStatCumulative[eDM][ui32OSid] = OSDivide64(psReturnStats->aaui64DMOSStatCumulative[eDM][ui32OSid], 1000, &ui32Remainder); - } - } - - OSFreeMem(paui64DMOSTmpLastTime); -failTmpLastTimeAlloc: - OSFreeMem(paui64DMOSTmpLastPeriod); -failTmpLastPeriodAlloc: - OSFreeMem(paui64DMOSTmpLastState); -failTmpLastStateAlloc: - OSFreeMem(paui64DMOSTmpLastWord); -failTmpLastWordAlloc: - OSFreeMem(paaui64DMOSTmpCounters); - -failTmpCountersAlloc: /* Check that the return stats make sense */ if (psReturnStats->ui64GpuStatCumulative == 0) { @@ -838,7 +955,23 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, * When such an event happens frequently, timers or the aggregate * stats might not be accurate... */ +#if defined(VIRTUAL_PLATFORM) + /* To avoid spamming the console logging system on emulated devices, + * we special-case so that we will only produce a single message per + * driver invocation. This should reduce the time spent logging + * information which is not relevant for very slow timers found in + * VP device configurations + */ + static IMG_BOOL bFirstTime = IMG_TRUE; + + if (bFirstTime) + { + bFirstTime = IMG_FALSE; +#endif PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data.")); +#if defined(VIRTUAL_PLATFORM) + } +#endif /* defined(VIRTUAL_PLATFORM) */ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; } @@ -901,11 +1034,16 @@ static void RGX_MISRHandler_Main (void *pvData) RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); #endif - /* Handle Safety events if necessary */ - RGXSafetyEventHandler(psDeviceNode->pvDevice); + /* Only execute SafetyEventHandler if RGX_FEATURE_SAFETY_EVENT is on */ + if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, ECC_RAMS) > 0 || + PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, WATCHDOG_TIMER)) + { + /* Handle Safety events if necessary */ + RGXSafetyEventHandler(psDeviceNode->pvDevice); + } /* Signal the global event object */ - PVRSRVSignalGlobalEO(); + PVRSRVSignalDriverWideEO(); /* Process the Firmware CCB for pending commands */ RGXCheckFirmwareCCB(psDeviceNode->pvDevice); @@ -914,8 +1052,11 @@ static void RGX_MISRHandler_Main (void *pvData) RGXTimeCorrRestartPeriodic(psDeviceNode); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Process Workload Estimation Specific commands from the FW */ - WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Process Workload Estimation Specific commands from the FW */ + WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); + } #endif if (psDevInfo->pvAPMISRData == NULL) @@ -926,7 +1067,7 @@ static void RGX_MISRHandler_Main (void *pvData) #endif /* !defined(NO_HARDWARE) */ -#if defined(PDUMP) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(PDUMP) static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_RGXDEV_INFO *psDevInfo) { @@ -935,7 +1076,7 @@ static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_DEV_PHYADDR sTmpAddr; IMG_UINT32 ui32BootConfOffset, ui32ParamOffset, i; PVRSRV_ERROR eError; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); @@ -1027,59 +1168,61 @@ static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_DEVICE_CONFIG *psDevConfig) { 
PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; + IMG_BOOL bPremappedFw; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVCFG, psDevConfig, PVRSRV_OK); /* Save information used on power transitions for later * (when RGXStart and RGXStop are executed) */ - psDevInfo->sLayerParams.psDevInfo = psDevInfo; - psDevInfo->sLayerParams.psDevConfig = psDevConfig; #if defined(PDUMP) psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; #endif -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) || defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || - RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; - if (psDevInfo->psDeviceNode->bAutoVzFwIsUp) - { - /* If AutoVz firmware is up at this stage, the driver initialised it - * during a previous life-cycle. The firmware's memory is already pre-mapped - * and the MMU page tables reside in the predetermined memory carveout. - * The Kernel MMU Context created in this life-cycle is a dummy structure - * that is not used for mapping. - * To program the Device's BIF with the correct PC address, use the base - * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ -#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) - sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + /* Rogue drivers with security support and premapped fw heaps + * always have their fw heap premapped by the TEE */ + bPremappedFw = IMG_TRUE; #else - PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig, - PHYS_HEAP_USAGE_FW_MAIN); - eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG; - PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)"); - - sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr + - (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); -#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */ - } - else + /* If AutoVz firmware is up at this stage, the driver initialised it + * during a previous life-cycle. The firmware's memory is already pre-mapped + * and the MMU page tables reside in the predetermined memory carveout. + * The Kernel MMU Context created in this life-cycle is a dummy structure + * that is not used for mapping. 
+ * To program the Device's BIF with the correct PC address, use the base + * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ + bPremappedFw = psDevInfo->psDeviceNode->bAutoVzFwIsUp; +#endif + + if (bPremappedFw) + { + IMG_DEV_PHYADDR sDevPAddr; + PHYS_HEAP *psFwPageTableHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; + + PVR_LOG_RETURN_IF_FALSE((NULL != psFwPageTableHeap), + "Firmware Page Table heap not defined.", + PVRSRV_ERROR_INVALID_HEAP); + + PhysHeapGetDevPAddr(psFwPageTableHeap, &sDevPAddr); + sKernelMMUCtxPCAddr.uiAddr = sDevPAddr.uiAddr; + psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, + &sKernelMMUCtxPCAddr); + if (eError != PVRSRV_OK) { - eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, - &sKernelMMUCtxPCAddr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); - return eError; - } + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); + return eError; } psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) else -#endif { PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR); PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); @@ -1155,6 +1298,7 @@ static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid; } +#endif #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) /* Send information used on power transitions to the trusted device as @@ -1164,25 +1308,20 @@ static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_TD_POWER_PARAMS sTDPowerParams; -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; - } -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; } -#endif - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { sTDPowerParams.sGPURegAddr = psDevInfo->sLayerParams.sGPURegAddr; sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr; sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr; sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr; } +#endif eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, &sTDPowerParams); @@ -1197,98 +1336,211 @@ static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, return eError; } +#if defined(RGX_FEATURE_AXI_ACE_BIT_MASK) /* - RGXSystemHasFBCDCVersion31 + RGXSystemGetFabricCoherency */ -static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) +PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, + IMG_CPU_PHYADDR sRegsCpuPBase, + IMG_UINT32 ui32RegsSize, + PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType, + PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode) { -#if 
defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32FBCDCVersionOverride = 0; + IMG_CHAR *aszLabels[] = {"none", "acelite", "fullace", "unknown"}; + PVRSRV_DEVICE_SNOOP_MODE eAppHintCacheSnoopingMode; + PVRSRV_DEVICE_SNOOP_MODE eDeviceCacheSnoopingMode; + IMG_UINT32 ui32AppHintFabricCoherency; + IMG_UINT32 ui32DeviceFabricCoherency; + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; +#if !defined(NO_HARDWARE) + void *pvRegsBaseKM; + IMG_BOOL bPowerDown = IMG_TRUE; + PVRSRV_ERROR eError; #endif -#if defined(FIX_HW_ERN_66622_BIT_MASK) - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622)) + if (!sRegsCpuPBase.uiAddr || !ui32RegsSize) { -#if defined(SUPPORT_VALIDATION) - void *pvAppHintState = NULL; - - IMG_UINT32 ui32AppHintDefault; - - OSCreateKMAppHintState(&pvAppHintState); - ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride, - &ui32AppHintDefault, &ui32FBCDCVersionOverride); - OSFreeKMAppHintState(pvAppHintState); - - if (ui32FBCDCVersionOverride > 0) - { - if (ui32FBCDCVersionOverride == 2) - { - return IMG_TRUE; - } - } - else -#endif - { - if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) - { - return IMG_TRUE; - } - } + PVR_DPF((PVR_DBG_ERROR, + "RGXSystemGetFabricCoherency: Invalid RGX register base/size parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; } - else -#endif - { - -#if defined(SUPPORT_VALIDATION) - if (ui32FBCDCVersionOverride == 2) - { - PVR_DPF((PVR_DBG_WARNING, - "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!", - __func__)); - } -#endif #if !defined(NO_HARDWARE) - if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: System uses FBCDC3.1 but GPU doesn't support it!", - __func__)); - } -#endif + pvRegsBaseKM = OSMapPhysToLin(sRegsCpuPBase, ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + if (!pvRegsBaseKM) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSystemGetFabricCoherency: Failed to create RGX register mapping")); + return PVRSRV_ERROR_BAD_MAPPING; } - return IMG_FALSE; -} - -/* - RGXGetTFBCLossyGroup -*/ -static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + bPowerDown = ! PVRSRVIsSystemPowered(psDevConfig->psDevNode); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT)) + /* Power-up the device as required to read the registers */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig) && bPowerDown) { - IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; - IMG_UINT32 ui32TFBCCompressionControlGroup = ui32AppHintDefault; + eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_ON); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); + } -#if defined(SUPPORT_VALIDATION) - void *pvAppHintState = NULL; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCCompressionControlGroup, - &ui32AppHintDefault, &ui32TFBCCompressionControlGroup); - OSFreeKMAppHintState(pvAppHintState); + /* AXI support within the SoC, bitfield COHERENCY_SUPPORT [1 .. 
0] + value NO_COHERENCY 0x0 {SoC does not support any form of Coherency} + value ACE_LITE_COHERENCY 0x1 {SoC supports ACE-Lite or I/O Coherency} + value FULL_ACE_COHERENCY 0x2 {SoC supports full ACE or 2-Way Coherency} */ + ui32DeviceFabricCoherency = OSReadHWReg32((void __iomem *)pvRegsBaseKM, RGX_CR_SOC_AXI); + PVR_LOG(("AXI fabric coherency (RGX_CR_SOC_AXI): 0x%x", ui32DeviceFabricCoherency)); +#if defined(DEBUG) + if (ui32DeviceFabricCoherency & ~((IMG_UINT32)RGX_CR_SOC_AXI_MASKFULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid RGX_CR_SOC_AXI value.", __func__)); + return PVRSRV_ERROR_INVALID_DEVICE; + } #endif + ui32DeviceFabricCoherency &= ~((IMG_UINT32)RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK); + ui32DeviceFabricCoherency >>= RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT; - return ui32TFBCCompressionControlGroup; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig) && bPowerDown) + { + eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); } - return 0; -} + /* UnMap Regs */ + OSUnMapPhysToLin(pvRegsBaseKM, ui32RegsSize); + + switch (ui32DeviceFabricCoherency) + { + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY: + default: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_NONE; + break; + } +#else /* !defined(NO_HARDWARE) */ + *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY; +#endif /* !defined(NO_HARDWARE) */ + + OSCreateAppHintState(&pvAppHintState); + ui32AppHintDefault = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FabricCoherencyOverride, + &ui32AppHintDefault, &ui32AppHintFabricCoherency); + OSFreeAppHintState(pvAppHintState); + +#if defined(SUPPORT_SECURITY_VALIDATION) + /* Temporarily disable coherency */ + ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY; +#endif + + /* Suppress invalid AppHint value */ + switch (ui32AppHintFabricCoherency) + { + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "Invalid FabricCoherencyOverride AppHint %d, ignoring", + ui32AppHintFabricCoherency)); + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; + break; + } + + if (ui32AppHintFabricCoherency < ui32DeviceFabricCoherency) + { + PVR_LOG(("Downgrading device fabric coherency from %s to %s", + aszLabels[ui32DeviceFabricCoherency], + aszLabels[ui32AppHintFabricCoherency])); + eDeviceCacheSnoopingMode = eAppHintCacheSnoopingMode; + } + else if (ui32AppHintFabricCoherency > ui32DeviceFabricCoherency) + { + PVR_DPF((PVR_DBG_WARNING, + 
"Cannot upgrade device fabric coherency from %s to %s, not supported by device!", + aszLabels[ui32DeviceFabricCoherency], + aszLabels[ui32AppHintFabricCoherency])); + + /* Override requested-for app-hint with actual app-hint value being used */ + ui32AppHintFabricCoherency = ui32DeviceFabricCoherency; + } + + *peCacheSnoopingMode = eDeviceCacheSnoopingMode; + return PVRSRV_OK; +} +#endif + +/* + RGXSystemHasFBCDCVersion31 +*/ +static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + +#if defined(HW_ERN_66622_BIT_MASK) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622)) +#endif + { + { + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + return IMG_TRUE; + } + } + } +#if defined(HW_ERN_66622_BIT_MASK) + else + { + +#if !defined(NO_HARDWARE) + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: System uses FBCDC3.1 but GPU doesn't support it!", + __func__)); + } +#endif + } +#endif /* defined(HW_ERN_66622_BIT_MASK) */ + + return IMG_FALSE; +} + +/* + RGXGetTFBCLossyGroup +*/ +static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + return psDevInfo->ui32TFBCLossyGroup; +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + return 0; +#endif +} /* RGXDevMMUAttributes @@ -1296,42 +1548,66 @@ static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode) static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_BOOL bKernelFWMemoryCtx) { - MMU_DEVICEATTRIBS *psMMUDevAttrs; + MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL; - if ((psDeviceNode->pfnCheckDeviceFeature) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + if (psDeviceNode->pfnCheckDeviceFeature) { - psMMUDevAttrs = bKernelFWMemoryCtx ? - psDeviceNode->psFirmwareMMUDevAttrs : - psDeviceNode->psMMUDevAttrs; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + psMMUDevAttrs = bKernelFWMemoryCtx ? 
+ psDeviceNode->psFirmwareMMUDevAttrs : + psDeviceNode->psMMUDevAttrs; + } + else +#endif + { + PVR_UNREFERENCED_PARAMETER(bKernelFWMemoryCtx); + psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; + } } - else + + return psMMUDevAttrs; +} + +/* + RGXDevSnoopMode +*/ +static PVRSRV_DEVICE_SNOOP_MODE RGXDevSnoopMode(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(psDeviceNode != NULL); + PVR_ASSERT(psDeviceNode->pvDevice != NULL); + + psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) { - psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; + return PVRSRV_DEVICE_SNOOP_CPU_ONLY; } - return psMMUDevAttrs; + return PVRSRV_DEVICE_SNOOP_NONE; } /* * RGXInitDevPart2 */ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32DeviceFlags, - IMG_UINT32 ui32HWPerfHostFilter, - RGX_ACTIVEPM_CONF eActivePMConf) + RGX_INIT_APPHINTS *psApphints) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; - - /* Assume system layer has turned power on by this point, required before powering device */ - psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + IMG_UINT32 ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; +#endif PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 2"); -#if defined(PDUMP) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(PDUMP) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { RGXPDumpBootldrData(psDeviceNode, psDevInfo); @@ -1343,7 +1619,7 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, /* Initialise Device Flags */ psDevInfo->ui32DeviceFlags = 0; - RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); + RGXSetDeviceFlags(psDevInfo, psApphints->ui32DeviceFlags, IMG_TRUE); /* Allocate DVFS Table (needs to be allocated before GPU trace events * component is initialised because there is a dependency between them) */ @@ -1352,7 +1628,7 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, if (psDevInfo->ui32HWPerfHostFilter == 0) { - RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); + RGXHWPerfHostSetEventFilter(psDevInfo, psApphints->ui32HWPerfHostFilter); } /* If HWPerf enabled allocate all resources for the host side buffer. 
*/ @@ -1366,9 +1642,12 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Initialise work estimation lock */ - eError = OSLockCreate(&psDevInfo->hWorkEstLock); - PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Initialise work estimation lock */ + eError = OSLockCreate(&psDevInfo->hWorkEstLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); + } #endif /* Initialise lists of ZSBuffers */ @@ -1392,11 +1671,13 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { eError = OSLockCreate(&psDevInfo->hNMILock); PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(NMILock)", ErrorExit); } +#endif /* Setup GPU utilisation stats update callback */ eError = OSLockCreate(&psDevInfo->hGPUUtilLock); @@ -1406,24 +1687,41 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, #endif eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; - psDevInfo->eActivePMConf = eActivePMConf; + psDevInfo->eActivePMConf = psApphints->eRGXActivePMConf; + +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + /* Validate the SPU mask and initialize to number of SPUs to power up */ + if ((psApphints->ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s:Invalid SPU mask (All=0x%X, Non Fused=0x%X). At-least one SPU must to be powered up.", + __func__, + ui32AllPowUnitsMask, + psApphints->ui32AvailablePowUnitsMask)); + PVR_LOG_GOTO_WITH_ERROR("ui32AvailablePowUnitsMask", eError, PVRSRV_ERROR_INVALID_SPU_MASK, ErrorExit); + } + + psDevInfo->ui32AvailablePowUnitsMask = psApphints->ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + + psDevInfo->ui32AvailableRACMask = psApphints->ui32AvailableRACMask & ui32AllRACMask; +#endif - /* set-up the Active Power Mgmt callback */ #if !defined(NO_HARDWARE) + /* set-up the Active Power Mgmt callback */ { RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM; - IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || - (eActivePMConf == RGX_ACTIVEPM_FORCE_ON); + IMG_BOOL bEnableAPM = ((psApphints->eRGXActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || + (psApphints->eRGXActivePMConf == RGX_ACTIVEPM_FORCE_ON); - if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE))) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) + /* The AutoVz driver enables a virtualisation watchdog not compatible with APM */ + if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode))) { - PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__)); + PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in AutoVz mode", __func__)); bEnableAPM = IMG_FALSE; } -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) - /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */ PVR_ASSERT(bEnableAPM == IMG_FALSE); #endif @@ -1441,6 +1739,8 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, } #endif + psDevInfo->eDebugDumpFWTLogType = psApphints->eDebugDumpFWTLogType; + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM, RGXQueryAPMState, 
RGXSetAPMState, @@ -1451,56 +1751,20 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, /* Register the device with the power manager */ eError = PVRSRVRegisterPowerDevice(psDeviceNode, - (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPrePowerState : &RGXVzPrePowerState, - (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState, - psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, - &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, - &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, - &RGXDustCountChange, - (IMG_HANDLE)psDeviceNode, - PVRSRV_DEV_POWER_STATE_OFF, - eDefaultPowerState); + (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) ? &RGXPrePowerState : &RGXVzPrePowerState, + (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) ? &RGXPostPowerState : &RGXVzPostPowerState, + psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, + &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, &RGXCancelForcedIdleRequestAsync, + &RGXPowUnitsChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit); eError = RGXSetPowerParams(psDevInfo, psDevConfig); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit); -#if defined(SUPPORT_VALIDATION) - { - void *pvAppHintState = NULL; - - IMG_UINT32 ui32AppHintDefault; - - OSCreateKMAppHintState(&pvAppHintState); - ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TestSLRInterval, - &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval); - PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d", - ui32AppHintDefault, psDevInfo->ui32TestSLRInterval)); - OSFreeKMAppHintState(pvAppHintState); - psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; - psDevInfo->ui32SLRSkipFWAddr = 0; - - ui32AppHintDefault = 0; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ECCRAMErrInj, &ui32AppHintDefault, &psDevInfo->ui32ECCRAMErrInjModule); - psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL; - -#if defined(PDUMP) && defined(SUPPORT_VALIDATION) - /* POL on ECC RAM GPU fault events, MARS is FW fault */ - if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE && - psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_MARS) - { - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Verify ECC fault event"); - eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, - RGX_CR_SCRATCH11, - 1U, - 0xFFFFFFFF, - PDUMP_FLAGS_DEINIT, - PDUMP_POLL_OPERATOR_EQUAL); - } -#endif - } -#endif #if defined(PDUMP) #if defined(NO_HARDWARE) @@ -1527,7 +1791,7 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW; - if (! PVRSRV_VZ_MODE_IS(GUEST)) + if (! 
PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { eError = RGXStop(&psDevInfo->sLayerParams); PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit); @@ -1551,46 +1815,8 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit); /* Register appropriate mechanism for clearing hw interrupts */ - if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, IRQ_PER_OS)) && (!PVRSRV_VZ_MODE_IS(NATIVE))) - { - psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; - } - else if (PVRSRV_VZ_MODE_IS(GUEST)) - { - psDevInfo->pfnRGXAckIrq = NULL; - } - else - { - /* native and host drivers must clear the unique GPU physical interrupt */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - psDevInfo->pfnRGXAckIrq = RGXAckIrqMIPS; - } -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - psDevInfo->pfnRGXAckIrq = RGXAckIrqMETA; - } -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; - } -#endif - else - { - PVR_DPF((PVR_DBG_ERROR, "%s: GPU IRQ clearing mechanism not implemented " - "for the this architecture.", __func__)); - PVR_LOG_GOTO_WITH_ERROR("pfnRGXAckIrq", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); - } - } - -#if defined(RGX_IRQ_HYPERV_HANDLER) - /* The hypervisor receives and acknowledges the GPU irq, then it injects an - * irq only in the recipient OS. The KM driver doesn't handle the GPU irq line */ - psDevInfo->pfnRGXAckIrq = NULL; -#endif + eError = RGXSetAckIrq(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetAckIrq", ErrorExit); eError = SysInstallDeviceLISR(psDevConfig->hSysData, psDevConfig->ui32IRQ, @@ -1602,42 +1828,16 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, #endif /* !defined(NO_HARDWARE) */ #if defined(PDUMP) -/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside - * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the - * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its - * last parameter which will not exist on architectures which do not have this - * feature. - * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for - * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this - * means we can build the kernel driver without having to worry about the BVNC - * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given - * architecture, whereas the FEATURE is only defined for those BVNCs that - * support it). 
- */ -#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) -#endif - { - if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && - !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) - { - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "System has NO cache snooping"); - } - else - { - if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) - { - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "System has CPU cache snooping"); - } - if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) - { - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "System has DEVICE cache snooping"); - } - } - } + if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "System has CPU cache snooping"); + } + else + { + PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "System has NO cache snooping"); + } #endif #if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK) @@ -1649,7 +1849,7 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, } #if defined(SUPPORT_SECURE_ALLOC_KM) - eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES, "SharedSecMem", &psDevInfo->psGenHeapSecMem); + eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE, "SharedSecMem", &psDevInfo->psGenHeapSecMem); PVR_LOG_GOTO_IF_ERROR(eError, "OSAllocateSecBuf", ErrorExit); #endif @@ -1664,9 +1864,14 @@ PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, } #define VZ_RGX_FW_FILENAME_SUFFIX ".vz" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) #define RGX_64K_FW_FILENAME_SUFFIX ".64k" #define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX) + sizeof(RGX_64K_FW_FILENAME_SUFFIX))) +#else +#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX))) +#endif static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR *pszFWFilenameStr, @@ -1674,31 +1879,32 @@ static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; const IMG_CHAR * const pszFWFilenameSuffix = - PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; + PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) const IMG_CHAR * const pszFWFilenameSuffix2 = ((OSGetPageSize() == RGX_MMU_PAGE_SIZE_64KB) && RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? RGX_64K_FW_FILENAME_SUFFIX : ""; +#else + const IMG_CHAR * const pszFWFilenameSuffix2 = ""; +#endif OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, - "%s." RGX_BVNC_STR_FMTSPEC "%s%s", - RGX_FW_FILENAME, + RGX_FW_FILENAME "." RGX_BVNC_STR_FMTSPEC "%s%s", psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, pszFWFilenameSuffix, pszFWFilenameSuffix2); OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, - "%s." RGX_BVNC_STRP_FMTSPEC "%s%s", - RGX_FW_FILENAME, + RGX_FW_FILENAME "." 
RGX_BVNC_STRP_FMTSPEC "%s%s", psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, pszFWFilenameSuffix, pszFWFilenameSuffix2); } PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, - OS_FW_IMAGE **ppsRGXFW, - const IMG_BYTE **ppbFWData) + OS_FW_IMAGE **ppsRGXFW) { IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; @@ -1730,15 +1936,9 @@ PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, if (eErr == PVRSRV_OK) { PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); - *ppbFWData = (const IMG_BYTE*)OSFirmwareData(*ppsRGXFW); - } - else - { - *ppbFWData = NULL; } return eErr; - } #if defined(PDUMP) @@ -1751,20 +1951,48 @@ PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) { /* set up fw memory contexts */ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + __maybe_unused PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_ERROR eError; -#if defined(SUPPORT_AUTOVZ) +#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + IMG_BOOL bNativeFwUMAHeap = PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode) && + (PhysHeapGetType(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]) == PHYS_HEAP_TYPE_UMA); +#endif + +#if defined(RGX_PREMAP_FW_HEAPS) PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; - if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) + if ((!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) && (!psDeviceNode->bAutoVzFwIsUp) && (!bNativeFwUMAHeap)) { + PHYS_HEAP *psFwPageTableHeap = + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; + + PVR_LOG_GOTO_IF_INVALID_PARAM((psFwPageTableHeap != NULL), + eError, failed_to_create_ctx); + /* Temporarily swap the MMU and default GPU physheap to allow the page * tables of all memory mapped by the FwKernel context to be placed * in a dedicated memory carveout. This should allow the firmware mappings to * persist after a Host kernel crash or driver reset. */ + psDeviceNode->psMMUPhysHeap = psFwPageTableHeap; + } +#endif - psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap; +#if defined(RGX_FEATURE_AXI_ACE_BIT_MASK) + /* Set the device fabric coherency before FW context creation */ + eError = RGXSystemGetFabricCoherency(psDevConfig, + psDevConfig->sRegsCpuPBase, + psDevConfig->ui32RegsSize, + &psDeviceNode->eDevFabricType, + &psDevConfig->eCacheSnoopingMode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed RGXSystemGetFabricCoherency (%u)", + __func__, + eError)); + goto failed_to_create_ctx; } #endif @@ -1772,6 +2000,8 @@ PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + RGXFwSharedMemCheckSnoopMode(psDevConfig); + /* Create the memory context for the firmware. 
*/ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_FORFW, &psDevInfo->psKernelDevmemCtx); @@ -1806,63 +2036,81 @@ PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode goto failed_to_find_heap; } -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if (defined(RGX_PREMAP_FW_HEAPS)) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - IMG_UINT32 ui32OSID; - for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + IMG_UINT32 ui32DriverID; + + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH]; - OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, - &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]); + &psDevInfo->psPremappedFwRawHeap[ui32DriverID]); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); } } #endif -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && !bNativeFwUMAHeap) { IMG_DEV_PHYADDR sPhysHeapBase; - IMG_UINT32 ui32OSID; - - eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase); + IMG_UINT32 ui32DriverID; + void *pvAppHintState = NULL; + IMG_UINT64 ui64DefaultHeapStride; + IMG_UINT64 ui64GuestHeapDevBaseStride; + + OSCreateAppHintState(&pvAppHintState); + ui64DefaultHeapStride = PVRSRV_APPHINT_GUESTFWHEAPSTRIDE; + OSGetAppHintUINT64(APPHINT_NO_DEVICE, + pvAppHintState, + GuestFWHeapStride, + &ui64DefaultHeapStride, + &ui64GuestHeapDevBaseStride); + OSFreeAppHintState(pvAppHintState); + pvAppHintState = NULL; + + eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM], &sPhysHeapBase); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap); - for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { - IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; + IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32DriverID * ui64GuestHeapDevBaseStride)}; eError = RGXFwRawHeapAllocMap(psDeviceNode, - ui32OSID, + ui32DriverID, sRawFwHeapBase, RGX_FIRMWARE_RAW_HEAP_SIZE); if (eError != PVRSRV_OK) { - for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--) + for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--) { - RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); } PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); } } -#if defined(SUPPORT_AUTOVZ) +#if defined(RGX_PREMAP_FW_HEAPS) /* restore default Px setup */ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; #endif } -#else - if (PVRSRV_VZ_MODE_IS(GUEST)) +#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + +#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* On setups with dynamically mapped Guest heaps, the Guest makes + * a PVZ call to the Host to request the mapping during init. 
*/ + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); + eError = PvzClientMapDevPhysHeap(psDevConfig); PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); } -#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ +#endif /* !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); @@ -1887,31 +2135,32 @@ void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; +#if defined(RGX_PREMAP_FW_HEAPS) + PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; +#endif -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { -#if defined(SUPPORT_AUTOVZ) - PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; - - psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap; +#if defined(RGX_PREMAP_FW_HEAPS) + psDeviceNode->psMMUPhysHeap = + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; if (!psDeviceNode->bAutoVzFwIsUp) #endif { - IMG_UINT32 ui32OSID; + IMG_UINT32 ui32DriverID; - for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { - RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); } } -#if defined(SUPPORT_AUTOVZ) - psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; -#endif } -#else - if (PVRSRV_VZ_MODE_IS(GUEST)) +#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + +#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); @@ -1938,6 +2187,10 @@ void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); PVR_ASSERT(eError == PVRSRV_OK); } + +#if defined(RGX_PREMAP_FW_HEAPS) + psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; +#endif } static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, @@ -1952,7 +2205,7 @@ static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, /* Skip the alignment check if the driver is guest since there is no firmware to check against */ - PVRSRV_VZ_RET_IF_MODE(GUEST, eError); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevNode, eError); if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) { @@ -1974,6 +2227,8 @@ static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, } paui32FWAlignChecks += ui32UMChecksOffset; + /* Invalidate the size value, check the next region size (UM) and invalidate */ + RGXFwSharedMemCacheOpPtr(paui32FWAlignChecks, INVALIDATE); if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM) { PVR_DPF((PVR_DBG_ERROR, @@ -1986,6 +2241,10 @@ static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, goto return_; } + RGXFwSharedMemCacheOpExec(paui32FWAlignChecks, + ui32AlignChecksSizeUM * sizeof(IMG_UINT32), + PVRSRV_CACHE_OP_INVALIDATE); + for (i = 0; i < ui32AlignChecksSizeUM; i++) { if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i]) @@ -2019,15 +2278,15 @@ PVRSRV_ERROR 
RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_ERROR eError = PVRSRV_OK; IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift(); -#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -#endif -#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K; } +#else + PVR_UNREFERENCED_PARAMETER(uiLog2Align); #endif uiMemAllocFlags = (uiMemAllocFlags | @@ -2066,22 +2325,57 @@ PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, This check is left for clarity in error messages if any incompatibility occurs. - @Input psFwOsInit - FW init data + @Input psDevInfo - device info @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch; + RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; + RGXFWIF_OSINIT *psFwOsInit = NULL; + IMG_UINT8 ui8FwOsCount; - if (psFwOsInit == NULL) + if (psDevInfo == NULL) return PVRSRV_ERROR_INVALID_PARAMS; ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW); - ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + psFwOsInit = psDevInfo->psRGXFWIfOsInit; + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + if (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + __func__, RGX_NUM_DRIVERS_SUPPORTED, ui8FwOsCount)); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + } + else + { + psFWInfoHeader = &psDevInfo->sFWInfoHeader; + ui32BuildOptionsFWKMPart = psFWInfoHeader->ui32Flags & RGX_BUILD_OPTIONS_MASK_FW; + + if (PVRSRV_VZ_MODE_IS(HOST, DEVINFO, psDevInfo) && BITMASK_HAS(psFWInfoHeader->ui32Flags, OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN)) + { + ui8FwOsCount = (psFWInfoHeader->ui32Flags & OPTIONS_NUM_DRIVERS_SUPPORTED_MASK) >> OPTIONS_NUM_DRIVERS_SUPPORTED_SHIFT; + ui8FwOsCount++; + if (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + __func__, RGX_NUM_DRIVERS_SUPPORTED, ui8FwOsCount)); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + } + } /* Check if the FW is missing support for any features required by the driver */ if (~ui32BuildOptionsFWKMPart & ui32BuildOptions) @@ -2091,19 +2385,19 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF /*Mask non-critical options out as we do support combining them in UM & KM */ ui32BuildOptionsMismatch &= FW_OPTIONS_STRICT; #endif - if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware 
and KM driver build options; " - "extra options present in the KM driver: (0x%x). Please check rgx_options.h", - ui32BuildOptions & ui32BuildOptionsMismatch )); + "extra options present in the KM driver: (0x%x). Please check rgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch)); return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; } - if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) + if ((ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " - "extra options present in Firmware: (0x%x). Please check rgx_options.h", - ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; } PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); @@ -2112,7 +2406,6 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF { PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]")); } -#endif return PVRSRV_OK; } @@ -2127,48 +2420,44 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF Validate FW DDK version against driver DDK version @Input psDevInfo - device info - @Input psFwOsInit - FW init data @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - IMG_UINT32 ui32DDKVersion; + IMG_UINT32 ui32KMDDKVersion; + IMG_UINT32 ui32FWDDKVersion; PVRSRV_ERROR eError; + RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; + RGXFWIF_OSINIT *psFwOsInit = NULL; - ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); -#endif + if (psDevInfo == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; -#if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW DDK version"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion), - ui32DDKVersion, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) + ui32KMDDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; + psFwOsInit = psDevInfo->psRGXFWIfOsInit; + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32FWDDKVersion = psFwOsInit->sRGXCompChecks.ui32DDKVersion; } -#endif + else + { + psFWInfoHeader = &psDevInfo->sFWInfoHeader; -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; + ui32FWDDKVersion = PVRVERSION_PACK(psFWInfoHeader->ui16PVRVersionMajor, psFWInfoHeader->ui16PVRVersionMinor); + } - if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion) + if (ui32FWDDKVersion != ui32KMDDKVersion) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK version (%u.%u).", - PVRVERSION_MAJ, PVRVERSION_MIN, - 
PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion), - PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion))); + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(ui32FWDDKVersion), + PVRVERSION_UNPACK_MIN(ui32FWDDKVersion))); eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; PVR_DBG_BREAK; return eError; @@ -2176,10 +2465,9 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXD else { PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. [ OK ]", - PVRVERSION_MAJ, PVRVERSION_MIN, - PVRVERSION_MAJ, PVRVERSION_MIN)); + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_MAJ, PVRVERSION_MIN)); } -#endif return PVRSRV_OK; } @@ -2194,46 +2482,41 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXD Validate FW DDK build against driver DDK build @Input psDevInfo - device info - @Input psFwOsInit - FW init data @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { - PVRSRV_ERROR eError=PVRSRV_OK; -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - IMG_UINT32 ui32DDKBuild; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32KMDDKBuild; + IMG_UINT32 ui32FWDDKBuild; + RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; + RGXFWIF_OSINIT *psFwOsInit = NULL; - ui32DDKBuild = PVRVERSION_BUILD; -#endif + ui32KMDDKBuild = PVRVERSION_BUILD; -#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW DDK build"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild), - ui32DDKBuild, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) + if (psDevInfo == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } -#endif + psFwOsInit = psDevInfo->psRGXFWIfOsInit; + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; + ui32FWDDKBuild = psFwOsInit->sRGXCompChecks.ui32DDKBuild; + } + else + { + psFWInfoHeader = &psDevInfo->sFWInfoHeader; + ui32FWDDKBuild = psFWInfoHeader->ui32PVRVersionBuild; + } - if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild) + if (ui32FWDDKBuild != ui32KMDDKBuild) { PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", - ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + ui32KMDDKBuild, ui32FWDDKBuild)); #if defined(PVRSRV_STRICT_COMPAT_CHECK) eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; PVR_DBG_BREAK; @@ -2243,9 +2526,8 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV else { PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. 
[ OK ]", - ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + ui32KMDDKBuild, ui32FWDDKBuild)); } -#endif return eError; } @@ -2259,439 +2541,57 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV Validate FW BVNC against driver BVNC @Input psDevInfo - device info - @Input psFwOsInit - FW init data - - @Return PVRSRV_ERROR - depending on mismatch found - - ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) -{ -#if !defined(NO_HARDWARE) - IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; -#endif -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC); - PVRSRV_ERROR eError; - - sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, - psDevInfo->sDevFeatureCfg.ui32V, - psDevInfo->sDevFeatureCfg.ui32N, - psDevInfo->sDevFeatureCfg.ui32C); -#endif - -#if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW BVNC (struct version)"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), - sBVNC.ui32LayoutVersion, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - } - - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW BVNC (BVNC part - Lower 32 bits)"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), - (IMG_UINT32)sBVNC.ui64BVNC, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - } - - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW BVNC (BVNC part - Higher 32 bits)"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + - sizeof(IMG_UINT32), - (IMG_UINT32)(sBVNC.ui64BVNC >> 32), - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - } -#endif - -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - - RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); - - if (!bCompatibleAll) - { - if (!bCompatibleVersion) - { - PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).", - __func__, - sBVNC.ui32LayoutVersion, - psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion)); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } - - if (!bCompatibleBVNC) - { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", - RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC), - 
RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC))); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BNVC match. [ OK ]")); - } -#endif - return PVRSRV_OK; -} - -/*! - ******************************************************************************* - - @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver - - @Description - - Validate HW BVNC against driver BVNC - - @Input psDevInfo - device info - @Input psFwOsInit - FW init data @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if defined(PDUMP) || !defined(NO_HARDWARE) - IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B | - RGX_BVNC_PACK_MASK_V | - RGX_BVNC_PACK_MASK_N | - RGX_BVNC_PACK_MASK_C; - - PVRSRV_ERROR eError; - RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC); -#endif - -#if defined(PDUMP) - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -#endif - -#if !defined(NO_HARDWARE) - RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC); - IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; -#endif - - if (psDevInfo->bIgnoreHWReportedBVNC) - { - PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)")); - return PVRSRV_OK; - } - -#if defined(PDUMP) || !defined(NO_HARDWARE) -#if defined(COMPAT_BVNC_MASK_V) - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V; -#endif -#if defined(COMPAT_BVNC_MASK_N) - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N; -#endif -#if defined(COMPAT_BVNC_MASK_C) - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; -#endif - - sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, - psDevInfo->sDevFeatureCfg.ui32V, - psDevInfo->sDevFeatureCfg.ui32N, - psDevInfo->sDevFeatureCfg.ui32C); - -#if defined(FIX_HW_BRN_38344_BIT_MASK) - if (RGX_IS_BRN_SUPPORTED(psDevInfo, 38344) && (psDevInfo->sDevFeatureCfg.ui32C >= 10)) - { - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; - } -#endif - if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) - { - PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.", - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")), - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")), - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")), - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):("")))); - } -#endif - -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: Layout version of compchecks struct"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), - sSWBVNC.ui32LayoutVersion, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", 
eError)); - return eError; - } - - PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check started"); - if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) - { - PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); - PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: HW BNC and FW BNC (Lower 32 bits)"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), - (IMG_UINT32)sSWBVNC.ui64BVNC , - (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V), - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } + PVRSRV_ERROR eError = PVRSRV_OK; - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: HW BNC and FW BNC (Higher 32 bits)"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + - sizeof(IMG_UINT32), - (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32), - (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32), - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } + RGX_FW_INFO_HEADER *psFWInfoHeader; + IMG_UINT64 ui64KMBVNC; - PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); - } - if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V) - { - PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); - PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); - - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: HW V and FW V"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + - ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0), - (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)), - RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 
32 : 0), - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } - PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); - } - PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check finished"); -#endif - -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - { + if (psDevInfo == NULL) return PVRSRV_ERROR_INVALID_PARAMS; - } - - sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC; - sHWBVNC.ui64BVNC &= ui64MaskBVNC; - sSWBVNC.ui64BVNC &= ui64MaskBVNC; + psFWInfoHeader = &psDevInfo->sFWInfoHeader; - RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + ui64KMBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); - if (!bCompatibleAll) + if (ui64KMBVNC != psFWInfoHeader->ui64BVNC) { - if (!bCompatibleVersion) - { - PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).", - __func__, - sHWBVNC.ui32LayoutVersion, - sSWBVNC.ui32LayoutVersion)); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", + RGX_BVNC_PACKED_EXTR_B(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_V(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_N(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_C(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_B(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFWInfoHeader->ui64BVNC))); - if (!bCompatibleBVNC) - { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).", - RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } + eError = PVRSRV_ERROR_BVNC_MISMATCH; } else { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]", - RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); - } -#endif + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: KM driver BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]", + RGX_BVNC_PACKED_EXTR_B(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_V(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_N(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_C(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_B(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFWInfoHeader->ui64BVNC))); - return PVRSRV_OK; -} - -/*! 
- ******************************************************************************* - - @Function RGXDevInitCompatCheck_METACoreVersion_AgainstDriver - - @Description - - Validate HW META version against driver META version - - @Input psDevInfo - device info - @Input psFwOsInit - FW init data - - @Return PVRSRV_ERROR - depending on mismatch found - - ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) -{ -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - PVRSRV_ERROR eError; -#endif -#if defined(PDUMP) - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -#endif - IMG_UINT32 ui32FWCoreIDValue = 0; - IMG_CHAR *pcRGXFW_PROCESSOR = NULL; - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE; - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; - } - else -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - switch (RGX_GET_FEATURE_VALUE(psDevInfo, META)) - { - case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break; - case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break; - case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break; - case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break; - default: - PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); - PVR_ASSERT(0); - } - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; - } - else -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE; - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; - } - else -#endif - { - PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); - PVR_ASSERT(0); + eError = PVRSRV_OK; } -#if defined(PDUMP) - PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags); - PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: KM driver and HW FW Processor version"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion), - ui32FWCoreIDValue, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } - PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags); -#endif - -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - - if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue) - { - PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).", - pcRGXFW_PROCESSOR, - ui32FWCoreIDValue, - pcRGXFW_PROCESSOR, - psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); - eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH; - PVR_DBG_BREAK; - return eError; - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].", - pcRGXFW_PROCESSOR, - ui32FWCoreIDValue, - pcRGXFW_PROCESSOR, - psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); - } -#endif - return PVRSRV_OK; + return eError; } /*! 
@@ -2714,105 +2614,63 @@ static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; #if !defined(NO_HARDWARE) - IMG_UINT32 ui32RegValue; - IMG_UINT8 ui8FwOsCount; - IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; - - LOOP_UNTIL_TIMEOUT(ui32FwTimeout) - { - if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) - { - /* No need to wait if the FW has already updated the values */ - break; - } - OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - ui32RegValue = 0; - -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - if ((!PVRSRV_VZ_MODE_IS(GUEST)) && - RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue); + IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; - if (eError != PVRSRV_OK) + LOOP_UNTIL_TIMEOUT_US(ui32FwTimeout) { - PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)", - __func__, eError)); - goto chk_exit; - } + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, + INVALIDATE); + if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); - if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT)) + /* Flush covers this instance and the reads in the functions below */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, + INVALIDATE); + if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) { - eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED; - PVR_DPF((PVR_DBG_ERROR, - "%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)", - __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError)); - goto chk_exit; - } - } -#endif + eError = PVRSRV_ERROR_TIMEOUT; + PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", + __func__, eError)); - if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) - { - eError = PVRSRV_ERROR_TIMEOUT; - PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", - __func__, eError)); - if (PVRSRV_VZ_MODE_IS(GUEST)) - { PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's " "OsConfig initialisation data was not accepted by the firmware", __func__)); + goto chk_exit; } - goto chk_exit; - } - - ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; - if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || - (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", - __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? 
(1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount)); } #endif /* defined(NO_HARDWARE) */ - eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - - eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); - if (eError != PVRSRV_OK) - { - goto chk_exit; - } - } - - eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); - if (eError != PVRSRV_OK) - { - goto chk_exit; } eError = PVRSRV_OK; @@ -2821,6 +2679,16 @@ static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) return eError; } +static void _RGXSoftResetToggle(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 ui64ResetValue1, + IMG_UINT64 ui64ResetValue2) +{ + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); +} + /**************************************************************************/ /*! @Function RGXSoftReset @Description Resets some modules of the RGX device @@ -2832,10 +2700,10 @@ static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) to a module to reset (via the SOFT_RESET2 register). 
@Return PVRSRV_ERROR - */ /***************************************************************************/ +*/ /***************************************************************************/ static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT64 ui64ResetValue1, - IMG_UINT64 ui64ResetValue2) + IMG_UINT64 ui64ResetValue1, + IMG_UINT64 ui64ResetValue2) { PVRSRV_RGXDEV_INFO *psDevInfo; IMG_BOOL bSoftReset = IMG_FALSE; @@ -2843,73 +2711,36 @@ static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_ASSERT(psDeviceNode != NULL); PVR_ASSERT(psDeviceNode->pvDevice != NULL); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); - /* the device info */ psDevInfo = psDeviceNode->pvDevice; + #if defined(RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) { ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL; - }else + } + else #endif { ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL; } -#if defined(RGX_CR_SOFT_RESET2_MASKFULL) - if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) && - ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2)) - { - bSoftReset = IMG_TRUE; - } -#endif - if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset) { return PVRSRV_ERROR_INVALID_PARAMS; } /* Set in soft-reset */ - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1); - -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2); - } -#endif - - /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ - (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2); - } -#endif + _RGXSoftResetToggle(psDevInfo, ui64ResetValue1, ui64ResetValue2); /* Take the modules out of reset... 
*/ - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0); -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0); - } -#endif - - /* ...and fence again */ - (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2); - } -#endif + _RGXSoftResetToggle(psDevInfo, 0, 0); return PVRSRV_OK; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline; static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) @@ -2963,6 +2794,7 @@ static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) "TrampolineRegion", &pasTrampoline[i]->hPdumpPages, #endif + PVR_SYS_ALLOC_PID, &pasTrampoline[i]->sPages, &pasTrampoline[i]->sPhysAddr); if (PVRSRV_OK != eError) @@ -3019,7 +2851,7 @@ static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) } #undef RANGES_OVERLAP - +#endif PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_DEVMEM_SIZE_T uiFWCodeLen, @@ -3030,6 +2862,7 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) IMG_DEVMEM_SIZE_T uiDummyLen; DEVMEM_MEMDESC *psDummyMemDesc = NULL; @@ -3045,25 +2878,18 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, goto failTrampolineMemDescAlloc; } } +#endif /* * Set up Allocation for FW code section */ - uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + uiMemAllocFlags = RGX_FWCODEDATA_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE); - eError = RGXAllocateFWMemoryRegion(psDeviceNode, - uiFWCodeLen, - uiMemAllocFlags, - "FwExCodeRegion", - &psDevInfo->psRGXFWCodeMemDesc); + uiFWCodeLen, + uiMemAllocFlags, + "FwExCodeRegion", + &psDevInfo->psRGXFWCodeMemDesc); if (eError != PVRSRV_OK) { @@ -3083,7 +2909,9 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, goto failFWCodeMemDescAqDevVirt; } - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST)))) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)))) +#endif { /* * The FW code must be the first allocation in the firmware heap, otherwise @@ -3092,6 +2920,7 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { /* @@ -3120,25 +2949,17 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, } } } +#endif /* * Set up Allocation for FW data section */ - uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - 
PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA); - eError = RGXAllocateFWMemoryRegion(psDeviceNode, - uiFWDataLen, - uiMemAllocFlags, - "FwExDataRegion", - &psDevInfo->psRGXFWDataMemDesc); + uiFWDataLen, + RGX_FWCODEDATA_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA), + "FwExDataRegion", + &psDevInfo->psRGXFWDataMemDesc); if (eError != PVRSRV_OK) { @@ -3163,20 +2984,14 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Set up Allocation for FW coremem code section */ - uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE); - + uiMemAllocFlags = (RGX_FWCODEDATA_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)) & + ~PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE; eError = RGXAllocateFWMemoryRegion(psDeviceNode, - uiFWCorememCodeLen, - uiMemAllocFlags, - "FwExCorememCodeRegion", - &psDevInfo->psRGXFWCorememCodeMemDesc); + uiFWCorememCodeLen, + uiMemAllocFlags, + "FwExCorememCodeRegion", + &psDevInfo->psRGXFWCorememCodeMemDesc); if (eError != PVRSRV_OK) { @@ -3212,17 +3027,8 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Set up Allocation for FW coremem data section */ - uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) - & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); - + uiMemAllocFlags = RGX_FWCODEDATA_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA); eError = RGXAllocateFWMemoryRegion(psDeviceNode, uiFWCorememDataLen, uiMemAllocFlags, @@ -3261,11 +3067,13 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /* Free Dummy Pages */ if (psDummyMemDesc) { DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); } +#endif return PVRSRV_OK; @@ -3289,21 +3097,25 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); psDevInfo->psRGXFWDataMemDesc = NULL; failFWDataMemDescAlloc: +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (psDummyMemDesc) { DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); } failDummyMemDescAlloc: +#endif failFWCodeMemDescAqDevVirt: DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); psDevInfo->psRGXFWCodeMemDesc = NULL; failFWCodeMemDescAlloc: +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) { RGXFreeTrampoline(psDeviceNode); } failTrampolineMemDescAlloc: +#endif return eError; } @@ -3434,22 +3246,10 @@ PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, */ PVRSRV_ERROR RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL 
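Each firmware section above (code, data, coremem code, coremem data) previously built its allocation flags from a long inline OR-list; those lists are now replaced by a shared RGX_FWCODEDATA_ALLOCFLAGS macro plus a per-section physical-heap hint, with the coremem code section additionally stripping GPU write access. The macro's definition is not in this patch; inferred from the flag lists deleted above, a plausible (hypothetical) expansion and the per-section derivations are:

/* Hypothetical expansion; the real definition lives in an RGX header not touched here */
#define RGX_FWCODEDATA_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)  | \
                                   PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | \
                                   PVRSRV_MEMALLOCFLAG_GPU_READABLE                 | \
                                   PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE                | \
                                   PVRSRV_MEMALLOCFLAG_CPU_READABLE                 | \
                                   PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE                | \
                                   PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT         | \
                                   PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)

/* FW code:       RGX_FWCODEDATA_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)      */
/* FW data:       RGX_FWCODEDATA_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA) */
/* Coremem code: (RGX_FWCODEDATA_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE))
 *                & ~PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE  -- the GPU never writes its own code   */
/* Coremem data:  RGX_FWCODEDATA_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA) */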
bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, - IMG_UINT32 ui32ConfigFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 *pui32TPUTrilinearFracMask, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2, - IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32FwOsCfgFlags) + RGX_INIT_APPHINTS *psApphints, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; @@ -3460,22 +3260,10 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, #endif eError = RGXSetupFirmware(psDeviceNode, - bEnableSignatureChecks, - ui32SignatureChecksBufSize, - ui32HWPerfFWBufSizeKB, - ui64HWPerfFilter, - ui32ConfigFlags, - ui32ConfigFlagsExt, - ui32FwOsCfgFlags, - ui32LogType, - ui32FilterFlags, - ui32JonesDisableMask, - ui32HWRDebugDumpLimit, - ui32HWPerfCountersDataSize, - pui32TPUTrilinearFracMask, - eRGXRDPowerIslandingConf, - eFirmwarePerf, - ui32KCCBSizeLog2); + psApphints, + ui32ConfigFlags, + ui32ConfigFlagsExt, + ui32FwOsCfgFlags); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -3484,7 +3272,7 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, goto failed_init_firmware; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, RGXFWTraceQueryFilter, @@ -3499,16 +3287,16 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, } #if defined(DEBUG) - OSCreateKMAppHintState(&pvAppHintState); + OSCreateAppHintState(&pvAppHintState); bAppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; - OSGetKMAppHintBOOL(psDeviceNode, + OSGetAppHintBOOL(psDeviceNode, pvAppHintState, EnableFWPoisonOnFree, &bAppHintDefault, &bEnableFWPoisonOnFree); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, RGXQueryFWPoisonOnFree, @@ -3523,8 +3311,8 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->uiFWPoisonOnFreeFlag = 0ULL; #endif - psDevInfo->ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; - psDevInfo->ui32LastClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; + psDevInfo->ui32ClockSource = PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE; + psDevInfo->ui32LastClockSource = PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE; return PVRSRV_OK; @@ -3535,6 +3323,7 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, /* See device.h for function declaration */ static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RequestedSize, DEVMEM_MEMDESC **psMemDesc, IMG_UINT32 *puiSyncPrimVAddr, IMG_UINT32 *puiSyncPrimBlockSize) @@ -3542,34 +3331,24 @@ static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_ERROR eError; RGXFWIF_DEV_VIRTADDR pFirmwareAddr; - IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); - IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); - IMG_UINT32 ui32CoherencyFlag = 0; + IMG_DEVMEM_ALIGN_T uiUFOBlockAlign = MAX(sizeof(IMG_UINT32), sizeof(SYNC_CHECKPOINT_FW_OBJ)); + IMG_DEVMEM_SIZE_T uiUFOBlockSize = PVR_ALIGN(ui32RequestedSize, uiUFOBlockAlign); psDevInfo = psDeviceNode->pvDevice; /* Size and align are 'expanded' because we request an 
Exportalign allocation */ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), - &uiUFOBlockSize, - &ui32UFOBlockAlign); + &uiUFOBlockSize, + &uiUFOBlockAlign); + if (eError != PVRSRV_OK) { goto e0; } - if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && - PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) - { - ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; - } - else - { - ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED; - } - eError = DevmemFwAllocateExportable(psDeviceNode, uiUFOBlockSize, - ui32UFOBlockAlign, + uiUFOBlockAlign, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | @@ -3578,7 +3357,7 @@ static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - ui32CoherencyFlag, + PVRSRV_MEMALLOCFLAG_UNCACHED, "FwExUFOBlock", psMemDesc); if (eError != PVRSRV_OK) @@ -3606,62 +3385,6 @@ static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - /* - If the system has snooping of the device cache then the UFO block - might be in the cache so we need to flush it out before freeing - the memory - - When the device is being shutdown/destroyed we don't care anymore. - Several necessary data structures to issue a flush were destroyed - already. - */ - if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && - psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT) - { - RGXFWIF_KCCB_CMD sFlushInvalCmd; - PVRSRV_ERROR eError; - IMG_UINT32 ui32kCCBCommandSlot; - - /* Schedule the SLC flush command ... 
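RGXAllocUFOBlock now takes the caller's requested size and rounds it up to the larger of a 32-bit UFO word and a firmware sync-checkpoint object, instead of hard-coding a single-word block, and the allocation is forced uncached rather than selecting a coherency flag from the system's snooping capabilities. A standalone illustration of the rounding arithmetic; align_up below is a stand-in for PVR_ALIGN (whose definition is not part of this hunk) and the 8-byte alignment is only an example value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Round uiSize up to the next multiple of uiAlign (uiAlign must be a power of two) */
static uint64_t align_up(uint64_t uiSize, uint64_t uiAlign)
{
    return (uiSize + uiAlign - 1) & ~(uiAlign - 1);
}

int main(void)
{
    uint64_t uiRequested = 100; /* bytes asked for by the caller */
    uint64_t uiAlign     = 8;   /* e.g. MAX(sizeof(uint32_t), sizeof(fw checkpoint object)) */

    printf("requested %" PRIu64 " bytes -> UFO block of %" PRIu64 " bytes\n",
           uiRequested, align_up(uiRequested, uiAlign)); /* prints 100 -> 104 */
    return 0;
}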
*/ -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Submit SLC flush and invalidate"); -#endif - sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; - - eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, - &sFlushInvalCmd, - PDUMP_FLAGS_CONTINUOUS, - &ui32kCCBCommandSlot); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to schedule SLC flush command with error (%u)", - __func__, - eError)); - } - else - { - /* Wait for the SLC flush to complete */ - eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: SLC flush and invalidate aborted with error (%u)", - __func__, - eError)); - } - else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & - RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); - } - } - } - RGXUnsetFirmwareAddress(psMemDesc); DevmemFwUnmapAndFree(psDevInfo, psMemDesc); } @@ -3724,11 +3447,13 @@ static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) OSLockDestroy(psDevInfo->hGPUUtilLock); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (psDevInfo->hNMILock != NULL)) { OSLockDestroy(psDevInfo->hNMILock); } +#endif if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && (psDevInfo->hMMUCtxUnregLock != NULL)) @@ -3753,10 +3478,13 @@ static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* De-init work estimation lock */ - if (psDevInfo->hWorkEstLock != NULL) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - OSLockDestroy(psDevInfo->hWorkEstLock); + /* De-init work estimation lock */ + if (psDevInfo->hWorkEstLock != NULL) + { + OSLockDestroy(psDevInfo->hWorkEstLock); + } } #endif @@ -3776,7 +3504,6 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; PVRSRV_ERROR eError; DEVICE_MEMORY_INFO *psDevMemoryInfo; - IMG_UINT32 ui32Temp=0; if (!psDevInfo) { @@ -3788,75 +3515,15 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) if (psDevInfo->psRGXFWIfOsInit) { KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } - DeviceDepBridgeDeInit(psDevInfo); - -#if defined(PDUMP) - DevmemIntFreeDefBackingPage(psDeviceNode, - &psDeviceNode->sDummyPage, - DUMMY_PAGE); - DevmemIntFreeDefBackingPage(psDeviceNode, - &psDeviceNode->sDevZeroPage, - DEV_ZERO_PAGE); -#endif - -#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) - if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) - { - OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0); - PVR_UNREFERENCED_PARAMETER(ui32Temp); - } - else -#else - { - /*Delete the Dummy page related info */ - ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter); - if (0 != ui32Temp) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Dummy page reference counter is non zero (%u)", - __func__, - ui32Temp)); - PVR_ASSERT(0); - } - } -#endif - - /*Delete the Dummy page related info */ - ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter); - if (0 != ui32Temp) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Zero page 
reference counter is non zero (%u)", - __func__, - ui32Temp)); - } - -#if defined(PDUMP) - if (NULL != psDeviceNode->sDummyPage.hPdumpPg) - { - PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active"); - } - - if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg) - { - PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active"); - } -#endif - - /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */ - OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); - - /* Destroy the zero page lock */ - OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + RGXUnregisterBridges(psDevInfo); #if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) OSLockDestroy(psDevInfo->hCounterDumpingLock); #endif - RGXDeInitMultiCoreInfo(psDeviceNode); - /* Unregister debug request notifiers first as they could depend on anything. */ RGXDebugDeinit(psDevInfo); @@ -3876,6 +3543,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) eError)); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { /* Unregister MMU related stuff */ @@ -3887,6 +3555,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) eError)); } } +#endif /* UnMap Regs */ if (psDevInfo->pvRegsBaseKM != NULL) @@ -3898,6 +3567,22 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) psDevInfo->pvRegsBaseKM = NULL; } +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (psDevInfo->pvSecureRegsBaseKM != NULL) + { +#if !defined(NO_HARDWARE) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && + (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) + { + /* undo the VA offset performed in RGXRegisterDevice() to allow the allocation to be unmapped */ + psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); + OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, RGX_HOST_SECURE_REGBANK_SIZE); + } +#endif /* !NO_HARDWARE */ + psDevInfo->pvSecureRegsBaseKM = NULL; + } +#endif + #if 0 /* not required at this time */ if (psDevInfo->hTimer) { @@ -3914,7 +3599,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; - RGXDeInitHeaps(psDevMemoryInfo); + RGXDeInitHeaps(psDevMemoryInfo, psDeviceNode); if (psDevInfo->psRGXFWCodeMemDesc) { @@ -3924,11 +3609,14 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); psDevInfo->psRGXFWCodeMemDesc = NULL; } - else if (!PVRSRV_VZ_MODE_IS(GUEST)) +#if !defined(NO_HARDWARE) + else if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { PVR_DPF((PVR_DBG_WARNING, "No firmware code memory to free")); } +#endif /* !defined(NO_HARDWARE) */ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) { @@ -3939,6 +3627,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) RGXFreeTrampoline(psDeviceNode); } } +#endif if (psDevInfo->psRGXFWDataMemDesc) { @@ -3948,10 +3637,12 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); psDevInfo->psRGXFWDataMemDesc = NULL; } - else if (!PVRSRV_VZ_MODE_IS(GUEST)) +#if !defined(NO_HARDWARE) + else if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { PVR_DPF((PVR_DBG_WARNING, "No firmware data memory to free")); } +#endif /* !defined(NO_HARDWARE) */ if 
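The new tear-down path for the host-secure register bank undoes the virtual-address bias applied at registration time: the pointer published to the driver sits RGX_HOST_SECURE_REGBANK_OFFSET bytes below the start of the linear mapping, so the offset is added back before OSUnMapPhysToLin() is given the address. The arithmetic in isolation, with placeholder values rather than the real register bank layout:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define REGBANK_OFFSET UINT64_C(0x1000) /* placeholder, not the real RGX_HOST_SECURE_REGBANK_OFFSET */

int main(void)
{
    uint64_t mapping_start = UINT64_C(0xffff000080000000);   /* what the mapping call returned */
    uint64_t biased_base   = mapping_start - REGBANK_OFFSET; /* pointer stored at register time */

    /* Tear-down: add the offset back so the unmap call sees the real mapping start */
    uint64_t unmap_addr = biased_base + REGBANK_OFFSET;

    printf("unmap at 0x%" PRIx64 ", matches mapping start: %s\n",
           unmap_addr, unmap_addr == mapping_start ? "yes" : "no");
    return 0;
}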
(psDevInfo->psRGXFWCorememCodeMemDesc) { @@ -3976,20 +3667,34 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) */ RGXFreeFirmware(psDevInfo); + + RGXDeInitMultiCoreInfo(psDeviceNode); + /* De-initialise non-device specific (TL) users of RGX device memory */ - RGXHWPerfDeinit(psDevInfo); + { + IMG_UINT32 i; + for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) + { + RGXHWPerfDeinitL2Stream(psDevInfo, i); + } + + RGXHWPerfDeinit(psDevInfo); + } + + RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); + RGXHWPerfHostDeInit(psDevInfo); eError = HTBDeInit(); PVR_LOG_IF_ERROR(eError, "HTBDeInit"); - RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); + OSLockDestroy(psDevInfo->hGpuUtilStatsLock); /* destroy the stalled CCB locks */ OSLockDestroy(psDevInfo->hCCBRecoveryLock); OSLockDestroy(psDevInfo->hCCBStallCheckLock); /* destroy the context list locks */ - OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->sRegConfig.hLock); OSLockDestroy(psDevInfo->hBPLock); OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); OSWRLockDestroy(psDevInfo->hRenderCtxListLock); @@ -4007,6 +3712,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); } + /* DeAllocate devinfo */ OSFreeMem(psDevInfo); @@ -4029,9 +3735,10 @@ PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) /* Takes a log2 page size parameter and calculates a suitable page size * for the RGX heaps. Returns 0 if parameter is wrong.*/ -static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) +IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) { IMG_BOOL bFound = IMG_FALSE; + IMG_UINT32 ui32PageSizeMask = RGXGetValidHeapPageSizeMask(); /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ @@ -4049,24 +3756,18 @@ static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) do { - switch (uiLog2PageSize) + if ((IMG_PAGE2BYTES32(uiLog2PageSize) & ui32PageSizeMask) == 0) { - case RGX_HEAP_4KB_PAGE_SHIFT: - case RGX_HEAP_16KB_PAGE_SHIFT: - case RGX_HEAP_64KB_PAGE_SHIFT: - case RGX_HEAP_256KB_PAGE_SHIFT: - case RGX_HEAP_1MB_PAGE_SHIFT: - case RGX_HEAP_2MB_PAGE_SHIFT: - /* All good, RGX page size equals given page size - * => use it as default for heaps */ - bFound = IMG_TRUE; - break; - default: - /* We have to fall back to a smaller device - * page size than given page size because there - * is no exact match for any supported size. */ - uiLog2PageSize -= 1U; - break; + /* We have to fall back to a smaller device + * page size than given page size because there + * is no exact match for any supported size. */ + uiLog2PageSize -= 1U; + } + else + { + /* All good, RGX page size equals given page size + * => use it as default for heaps */ + bFound = IMG_TRUE; } } while (!bFound); @@ -4089,6 +3790,7 @@ static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*); +typedef void (*PFN_HEAP_DYNAMIC)(PVRSRV_DEVICE_NODE*, RGX_HEAP_INFO*); struct RGX_HEAP_INFO_TAG { @@ -4098,52 +3800,56 @@ struct RGX_HEAP_INFO_TAG IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength; IMG_UINT32 ui32Log2ImportAlignment; PFN_IS_PRESENT pfnIsHeapPresent; + PFN_HEAP_DYNAMIC pfnDynamicBaseSize; /* May modify the psHeapInfo's base & length. May be NULL. + Only called once if the heap is present otherwise never. 
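RGXHeapDerivePageSize no longer switches over a fixed list of shifts; it queries a bitmask of valid heap page sizes (RGXGetValidHeapPageSizeMask) and walks the requested log2 size downwards until the corresponding byte size hits the mask. A standalone model of that loop, using a hypothetical mask allowing 4K/16K/64K/256K/1M/2M pages (the mask value and names are illustrative only, and the input is assumed to be range-checked first, as the real function does):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical valid-size mask: a set bit means that page size is supported by the heaps */
#define VALID_PAGE_SIZE_MASK ((1u << 12) | (1u << 14) | (1u << 16) | \
                              (1u << 18) | (1u << 20) | (1u << 21))

static uint32_t derive_heap_page_shift(uint32_t log2_page_size)
{
    /* Fall back to the next smaller size until it is in the supported set */
    while (((1u << log2_page_size) & VALID_PAGE_SIZE_MASK) == 0)
    {
        log2_page_size -= 1u;
    }
    return log2_page_size;
}

int main(void)
{
    printf("16KB OS pages -> heap shift %u\n", (unsigned)derive_heap_page_shift(14)); /* exact match: 14 */
    printf("8KB  OS pages -> heap shift %u\n", (unsigned)derive_heap_page_shift(13)); /* falls back to 12 (4KB) */
    return 0;
}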
*/ PFN_HEAP_INIT pfnInit; PFN_HEAP_DEINIT pfnDeInit; IMG_UINT32 ui32HeapInstanceFlags; }; +/* RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES is the total amount of reserved space, to be specified in gasRGXHeapLayoutApp[] */ +#define RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE) -#if defined(SUPPORT_SECURE_ALLOC_KM) -/* Private data struct for general heap. */ -typedef struct RGX_GENERAL_HEAP_DATA_TAG +/* Private heap data struct. */ +typedef struct RGX_HEAP_DATA_TAG { - DEVMEMINT_RESERVATION *psSecMemReservation; - DEVMEMINT_MAPPING *psSecMemMapping; -} RGX_GENERAL_HEAP_DATA; - -/* Init callback function for general heap. */ -static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEMINT_HEAP *psDevmemHeap, - IMG_HANDLE *phPrivData) + DEVMEMINT_RESERVATION *psMemReservation; +} RGX_HEAP_DATA; + +static PVRSRV_ERROR HeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64Offset, + IMG_BOOL bWriteAble, + IMG_HANDLE *phPrivData) { - PVRSRV_RGXDEV_INFO *psDevInfo; - RGX_GENERAL_HEAP_DATA *psHeapData; + RGX_HEAP_DATA *psHeapData; IMG_DEV_VIRTADDR sCarveOutAddr; PVRSRV_ERROR eError; PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemHeap, "psDevmemHeap"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR, "psPMR"); PVR_LOG_RETURN_IF_INVALID_PARAM(phPrivData, "phPrivData"); - psDevInfo = psDeviceNode->pvDevice; - psHeapData = OSAllocMem(sizeof(*psHeapData)); PVR_LOG_RETURN_IF_NOMEM(psHeapData, "psHeapData"); /* Map the per device secure mem PMR allocation to the general devmem heap carveout. */ sCarveOutAddr = DevmemIntHeapGetBaseAddr(psDevmemHeap); - sCarveOutAddr.uiAddr += RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET; - - eError = DevmemIntReserveRange(psDevmemHeap, - sCarveOutAddr, - RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES, - &psHeapData->psSecMemReservation); + sCarveOutAddr.uiAddr += ui64Offset; + + eError = DevmemIntReserveRange(NULL, + NULL, + psDevmemHeap, + sCarveOutAddr, + uiSize, + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + (bWriteAble ? PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE : 0), + &psHeapData->psMemReservation); PVR_GOTO_IF_ERROR(eError, ErrorFreeHeapData); - eError = DevmemIntMapPMR(psDevmemHeap, psHeapData->psSecMemReservation, psDevInfo->psGenHeapSecMem, - PVRSRV_MEMALLOCFLAG_GPU_READABLE - | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE, - &psHeapData->psSecMemMapping); + eError = DevmemIntMapPMR(psHeapData->psMemReservation, psPMR); PVR_GOTO_IF_ERROR(eError, ErrorUnreserve); *phPrivData = (IMG_HANDLE)psHeapData; @@ -4151,7 +3857,7 @@ static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_OK; ErrorUnreserve: - DevmemIntUnreserveRange(psHeapData->psSecMemReservation); + DevmemIntUnreserveRange(psHeapData->psMemReservation); ErrorFreeHeapData: OSFreeMem(psHeapData); @@ -4159,28 +3865,121 @@ static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, } /* Deinit callback function for general heap. 
*/ -static void GeneralHeapDeInit(IMG_HANDLE hPrivData) +static void HeapDeInit(IMG_HANDLE hPrivData) { - RGX_GENERAL_HEAP_DATA *psHeapData = (RGX_GENERAL_HEAP_DATA*)hPrivData; + RGX_HEAP_DATA *psHeapData = (RGX_HEAP_DATA*)hPrivData; PVR_ASSERT(hPrivData); - DevmemIntUnmapPMR(psHeapData->psSecMemMapping); - DevmemIntUnreserveRange(psHeapData->psSecMemReservation); + DevmemIntUnmapPMR(psHeapData->psMemReservation); + DevmemIntUnreserveRange(psHeapData->psMemReservation); OSFreeMem(psHeapData); } + +/* Init callback function for general heap. */ +static PVRSRV_ERROR USCHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_HANDLE *phPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_BOOL bWriteAble = IMG_FALSE; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); + + psDevInfo = psDeviceNode->pvDevice; + + uiSize = PMR_PhysicalSize(psDevInfo->hTQUSCSharedMem); + + eError = HeapInit(psDeviceNode, + psDevmemHeap, + psDevInfo->hTQUSCSharedMem, + uiSize, + RGX_HEAP_KM_USC_RESERVED_REGION_OFFSET, + bWriteAble, + phPrivData); + + return eError; +} + +#if defined(SUPPORT_SECURE_ALLOC_KM) +/* Init callback function for general heap. */ +static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_HANDLE *phPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_BOOL bWriteAble = IMG_TRUE; + PVRSRV_ERROR eError; + + psDevInfo = psDeviceNode->pvDevice; + + eError = HeapInit(psDeviceNode, + psDevmemHeap, + psDevInfo->psGenHeapSecMem, + RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE, + RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET, + bWriteAble, + phPrivData); + + return eError; +} + +#define GeneralHeapDeInit HeapDeInit #else /* Callbacks not used */ #define GeneralHeapInit NULL #define GeneralHeapDeInit NULL #endif +static void SVMHeapDynamic(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HEAP_INFO *psHeapInfo) +{ + IMG_UINT64 ui64OSPageSize = OSGetPageSize(); +#if defined(FIX_HW_BRN_65273_BIT_MASK) + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; +#endif /* defined(FIX_HW_BRN_65273_BIT_MASK) */ + + /* Ensures the SVM heap has the correct alignment & size for any OS page size. + * + * The SVM heap's base must be the smallest possible address mappable by UM. + * This is 32KB unless the page size is larger than 32KB. [1] + * If the page size > 32KB, raise the SVM heap base to the next page boundary. + * Also reduce the length to ensure it's still page aligned and doesn't go + * into another heap. + * + * [1]: https://chromium.googlesource.com/chromium/src/+/fe24932ee14aa93e1fe4d3e7003b9362591a54d4/docs/security/faq.md#why-aren_t-null-pointer-dereferences-considered-security-bugs + */ + IMG_UINT64 ui64Base = PVR_ALIGN(psHeapInfo->ui64HeapBase, ui64OSPageSize); + IMG_UINT64 ui64BaseDiff = ui64Base - psHeapInfo->ui64HeapBase; + psHeapInfo->ui64HeapBase = ui64Base; + if (psHeapInfo->uiHeapLength >= ui64BaseDiff) + psHeapInfo->uiHeapLength -= ui64BaseDiff; + if (psHeapInfo->uiHeapReservedRegionLength >= ui64BaseDiff) + psHeapInfo->uiHeapReservedRegionLength -= ui64BaseDiff; + + /* The device shared-virtual-memory heap address-space size is stored on the device for + faster look-up without having to walk the device heap configuration structures during + client device connection (i.e. 
this size is relative to a zero-based offset) */ +#if defined(FIX_HW_BRN_65273_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + psDeviceNode->ui64GeneralSVMHeapTopVA = 0; + } + else +#endif /* defined(FIX_HW_BRN_65273_BIT_MASK) */ + { + psDeviceNode->ui64GeneralSVMHeapTopVA = psHeapInfo->ui64HeapBase + psHeapInfo->uiHeapLength; + } +} + /* Feature Present function prototypes */ +#if defined(FIX_HW_BRN_65273_BIT_MASK) static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) { -#if defined(FIX_HW_BRN_65273_BIT_MASK) if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) { return (((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_ALT_VALUE) || @@ -4188,19 +3987,17 @@ static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_ IMG_TRUE : IMG_FALSE; } else -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); -#endif { return ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_DEFAULT_VALUE) ? IMG_TRUE : IMG_FALSE; } } +#endif +#if defined(FIX_HW_BRN_63142_BIT_MASK) static IMG_BOOL BRN63142IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) { PVR_UNREFERENCED_PARAMETER(pksHeapInfo); -#if defined(FIX_HW_BRN_63142_BIT_MASK) if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142)) { PVR_ASSERT((pksHeapInfo->ui64HeapBase & IMG_UINT64_C(0x3FFFFFFFF)) + @@ -4208,12 +4005,10 @@ static IMG_BOOL BRN63142IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_ return IMG_TRUE; } -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); -#endif return IMG_FALSE; } +#endif static IMG_BOOL FBCDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) { @@ -4253,22 +4048,9 @@ static IMG_BOOL TextureStateIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_H return IMG_FALSE; } -static IMG_BOOL SignalSnoopingIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -{ - PVR_UNREFERENCED_PARAMETER(pksHeapInfo); - -#if defined(RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING)) - { - return IMG_TRUE; - } -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); -#endif - - return IMG_FALSE; -} +/* FW Feature Present function prototypes */ +#if defined(FIX_HW_BRN_65101_BIT_MASK) static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) { /* Used to determine the correct table row to instantiate as a heap by checking @@ -4277,15 +4059,13 @@ static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEA IMG_UINT64 ui64MainSubHeapSize; /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST))) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo))) { -#if defined(FIX_HW_BRN_65101_BIT_MASK) if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) { ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101; } else -#endif { ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL; } @@ -4300,6 +4080,7 @@ static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEA pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ? IMG_TRUE : IMG_FALSE; } +#endif static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo) { @@ -4313,43 +4094,108 @@ static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_ /* Blueprint array. 
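SVMHeapDynamic is a new pfnDynamicBaseSize hook: when the OS page size exceeds the SVM heap's nominal 32KB base, it raises the base to the next page boundary and shrinks the heap (and its reserved region) by the same amount so the end address is unchanged, then records the resulting top VA on the device node. A worked example of that adjustment for a hypothetical 64KB-page system (the heap length below is an example value, not the real RGX_GENERAL_SVM_HEAP_SIZE):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t align_up(uint64_t v, uint64_t a) { return (v + a - 1) & ~(a - 1); }

int main(void)
{
    uint64_t os_page   = 64 * 1024;                           /* example: 64KB kernel pages */
    uint64_t heap_base = 32 * 1024;                           /* nominal SVM base: smallest UM-mappable VA */
    uint64_t heap_len  = UINT64_C(0x10000000000) - heap_base; /* example length only */

    uint64_t new_base  = align_up(heap_base, os_page); /* 0x8000 -> 0x10000 */
    uint64_t base_diff = new_base - heap_base;         /* 0x8000 */
    uint64_t new_len   = heap_len - base_diff;         /* end address stays the same */

    printf("base 0x%" PRIx64 " -> 0x%" PRIx64 ", length shrinks by 0x%" PRIx64 "\n",
           heap_base, new_base, base_diff);
    printf("heap top unchanged: %s\n",
           (heap_base + heap_len) == (new_base + new_len) ? "yes" : "no");
    return 0;
}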
note: not all heaps are available to clients*/ -static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] = +static RGX_HEAP_INFO gasRGXHeapLayoutApp[] = { - /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent pfnInit pfnDeInit HeapInstanceFlags */ - {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE }, - {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_BRN_65273_HEAP_BASE, RGX_GENERAL_BRN_65273_HEAP_SIZE, RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, - {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG }, - {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE, RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG }, - {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_BRN_65273_HEAP_BASE, RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, - {RGX_RGNHDR_BRN_63142_HEAP_IDENT, RGX_RGNHDR_BRN_63142_HEAP_BASE, RGX_RGNHDR_BRN_63142_HEAP_SIZE, 0, 0, BRN63142IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, - {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_BRN_65273_HEAP_BASE, RGX_USCCODE_BRN_65273_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, - {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_HEAP_BASE, RGX_TQ3DPARAMETERS_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, - {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, SignalSnoopingIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, - {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, FBCDescriptorIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, - {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, FBCLargeDescriptorIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, - {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, TextureStateIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, - {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, 
BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, - {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, - {RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE, RGX_MMU_INIA_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, - {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE, RGX_MMU_INIB_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE } + /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnDynamicBaseSize pfnInit pfnDeInit HeapInstanceFlags */ + {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, SVMHeapDynamic, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES, 0, BRN65273IsPresent, NULL, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE }, + {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_BRN_65273_HEAP_BASE, RGX_GENERAL_BRN_65273_HEAP_SIZE, RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, + {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG }, + {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE, RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG }, + {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_BRN_65273_HEAP_BASE, RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, + {RGX_RGNHDR_BRN_63142_HEAP_IDENT, RGX_RGNHDR_BRN_63142_HEAP_BASE, RGX_RGNHDR_BRN_63142_HEAP_SIZE, 0, 0, BRN63142IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, + {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, USCHeapInit, HeapDeInit, HEAP_INST_DEFAULT_VALUE }, + {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_BRN_65273_HEAP_BASE, RGX_USCCODE_BRN_65273_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, USCHeapInit, HeapDeInit, HEAP_INST_BRN_ALT_VALUE }, + {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_HEAP_BASE, RGX_TQ3DPARAMETERS_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, + {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, FBCDescriptorIsPresent, NULL, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, + {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, FBCLargeDescriptorIsPresent, NULL, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, + {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, 
NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, TextureStateIsPresent, NULL, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, + {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, + {RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE, RGX_MMU_INIA_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, + {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE, RGX_MMU_INIB_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, }; -static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] = +static RGX_HEAP_INFO gasRGXHeapLayoutFW[] = { - /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnInit pfnDeInit HeapInstanceFlags*/ - {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE}, - {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, + /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnDynamicBaseSize pfnInit pfnDeInit HeapInstanceFlags*/ + {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWBRN65101IsPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL, 0, 0, FWBRN65101IsPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0, 0, FWBRN65101IsPresent, NULL, NULL, NULL, HEAP_INST_BRN_ALT_VALUE}, + {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, }; +static INLINE IMG_BOOL IsFwHeapLayout(const RGX_HEAP_INFO *psHeapInfo) +{ + return psHeapInfo->pszName[0] == 'F' && + psHeapInfo->pszName[1] == 'w' ? 
IMG_TRUE : IMG_FALSE; +} + +static INLINE void CheckHeapAlignment(const RGX_HEAP_INFO *psHeapInfo, + const PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT64 uiAlignment = RGX_HEAP_BASE_SIZE_ALIGN - 1; + + if (IsFwHeapLayout(psHeapInfo) || + RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + /* + * 1) Main heap starts at 2MB offset 0xEC10000000UL + * 2) Config Sub heap is created at the end of the main heap making the entire unit start and end at 2MB offset aligned, + * these 2 heaps will always have the same page size + * There are no other heaps in between these two heaps, there is no risk for another devmem heap be created between them. + */ + return; + } + + /* General SVM Heap must be placed below 2MiB boundary so we need to adjust + * the validity condition. This is because an OS might return virtual + * addresses below 2MiB threshold. By default (based on testing on Linux) + * this is around 32KiB. */ + if (OSStringNCompare(psHeapInfo->pszName, RGX_GENERAL_SVM_HEAP_IDENT, + sizeof(RGX_GENERAL_SVM_HEAP_IDENT)) == 0) + { + uiAlignment = RGX_GENERAL_SVM_BASE_SIZE_ALIGNMENT - 1; + } + + /* All UM accessible heap bases should be aligned to 2MB */ + if (psHeapInfo->ui64HeapBase & uiAlignment) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Heap Base not aligned to RGX_HEAP_BASE_SIZE_ALIGN. " + "Invalid Heap \"%s\" Base: " + "%"IMG_UINT64_FMTSPEC")\n" + "Heap Base (0x%"IMG_UINT64_FMTSPECX") should always be aligned to " + "RGX_HEAP_BASE_ALIGN (0x%" IMG_UINT64_FMTSPECX ")", + __func__, + psHeapInfo->pszName, + psHeapInfo->ui64HeapBase, + psHeapInfo->ui64HeapBase, + uiAlignment + 1)); + } + + /* All UM accessible heaps should also be size aligned to 2MB */ + if (psHeapInfo->uiHeapLength & uiAlignment) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Heap Size not aligned to RGX_HEAP_BASE_SIZE_ALIGN. " + "Invalid Heap \"%s\" Size: " + "%"IMG_UINT64_FMTSPEC")\n" + "Heap Size (0x%"IMG_UINT64_FMTSPECX") should always be aligned to " + "RGX_HEAP_BASE_SIZE_ALIGN (0x%" IMG_UINT64_FMTSPECX ")", + __func__, + psHeapInfo->pszName, + psHeapInfo->uiHeapLength, + psHeapInfo->uiHeapLength, + uiAlignment + 1)); + } +} + /* Generic counting method. 
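CheckHeapAlignment validates that every UM-visible heap starts and ends on a 2MB boundary (the SVM heap gets a relaxed alignment, and the firmware and BRN65273 layouts are exempt); the test is a mask of the low address bits. A minimal standalone version of that check, with the 2MB alignment hard-coded as a stand-in for RGX_HEAP_BASE_SIZE_ALIGN and example heap values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HEAP_ALIGN (UINT64_C(2) * 1024 * 1024) /* stand-in for RGX_HEAP_BASE_SIZE_ALIGN */

static void check_heap(const char *name, uint64_t base, uint64_t length)
{
    uint64_t mask = HEAP_ALIGN - 1;

    if (base & mask)
        printf("%s: base 0x%" PRIx64 " is NOT 2MB aligned\n", name, base);
    if (length & mask)
        printf("%s: length 0x%" PRIx64 " is NOT 2MB aligned\n", name, length);
    if (!(base & mask) && !(length & mask))
        printf("%s: OK\n", name);
}

int main(void)
{
    check_heap("example-good", UINT64_C(0x8800000000), UINT64_C(0x100000000)); /* both multiples of 2MB */
    check_heap("example-bad",  UINT64_C(0x8800010000), UINT64_C(0x0FFF0000)); /* neither is */
    return 0;
}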
*/ static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO pksHeapInfo[], @@ -4379,19 +4225,60 @@ static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, } /* Generic heap instantiator */ static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, - const RGX_HEAP_INFO pksHeapInfo[], + RGX_HEAP_INFO psHeapInfos[], IMG_UINT32 ui32HeapListSize, const IMG_UINT32 ui32Log2RgxDefaultPageShift, DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor) { IMG_UINT32 i; + +#if defined DEBUG + IMG_UINT32 ui32heapListCnt; + bool bHeapPageSizeMisMatch = false; + + /* + * To ensure all heaps within a 2MB region have the same page sizes + */ + for (ui32heapListCnt = 0; ui32heapListCnt < (ui32HeapListSize-1); ui32heapListCnt++) + { + const RGX_HEAP_INFO *psHeapInfo1 = &psHeapInfos[ui32heapListCnt]; + const RGX_HEAP_INFO *psHeapInfo2 = &psHeapInfos[ui32heapListCnt+1]; + + if (((psHeapInfo1->uiHeapLength) & (RGX_HEAP_BASE_SIZE_ALIGN - 1)) && + ((psHeapInfo1->ui64HeapBase + psHeapInfo1->uiHeapLength) & (RGX_HEAP_BASE_SIZE_ALIGN - 1)) == + ((psHeapInfo2->ui64HeapBase) & (RGX_HEAP_BASE_SIZE_ALIGN - 1))) + { + if (psHeapInfo1->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) + { + if (!(psHeapInfo2->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG)) + { + bHeapPageSizeMisMatch = true; + } + } + else if (psHeapInfo2->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) + { + bHeapPageSizeMisMatch = true; + } + } + if (bHeapPageSizeMisMatch) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Two Heap with Different Page Size allocated in the same PD space(2MB)\n" + "Invalid Heaps 1) \"%s\" and 2) \"%s\"", + __func__, + psHeapInfo1->pszName, + psHeapInfo2->pszName)); + } + } +#endif + /* We now have a list of the heaps to include and so we should loop over this * list and instantiate. 
*/ for (i = 0; i < ui32HeapListSize; i++) { IMG_UINT32 ui32Log2DataPageSize = 0; - const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; + RGX_HEAP_INFO *psHeapInfo = &psHeapInfos[i]; if (psHeapInfo->pfnIsHeapPresent) { @@ -4402,15 +4289,22 @@ static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, } } + if (psHeapInfo->pfnDynamicBaseSize != NULL) + { + psHeapInfo->pfnDynamicBaseSize(psDevInfo->psDeviceNode, psHeapInfo); + } + if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) { - ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize; + ui32Log2DataPageSize = psDevInfo->psDeviceNode->ui32Non4KPageSizeLog2; } else { ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift; } + CheckHeapAlignment(psHeapInfo, psDevInfo); + HeapCfgBlueprintInit(psHeapInfo->pszName, psHeapInfo->ui64HeapBase, psHeapInfo->uiHeapLength, @@ -4473,7 +4367,7 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, ui32FWHeapListSize, &ui32FWHeapCount); - ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED); + ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_DRIVERS_SUPPORTED); psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize); PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0); @@ -4501,7 +4395,7 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, /* Check we have allocated the correct # of heaps, minus any VZ heaps as these * have not been created at this point */ - PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED)); + PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_DRIVERS_SUPPORTED)); /* In the new heap setup, we initialise 2 configurations: @@ -4524,19 +4418,58 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; -#if (RGX_NUM_OS_SUPPORTED > 1) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) + if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) + { + IMG_UINT32 i; + const IMG_UINT32 ui32GeneralNon4KHeapPageSize = (1 << psDevInfo->psDeviceNode->ui32Non4KPageSizeLog2); + const IMG_UINT32 ui32RgxDefaultPageSize = (1 << RGXHeapDerivePageSize(OSGetPageShift())); + + /* + * Initialise all MMU Page Size Range Config register to the default page size + * used by the OS, leaving the address range 0; + */ + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i) + { + psDevInfo->aui64MMUPageSizeRangeValue[i] = + RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, + 0, + (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)); + } + + + /* set the last MMU config range covering the entire virtual memory to the OS's page size */ + psDevInfo->aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES - 1] = + RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, 0, (1ULL << 40)); + + /* + * If the Non4K heap has a different page size than the OS's page size + * (used as default for all other heaps), configure one MMU config range + * for the Non4K heap + */ + if (ui32GeneralNon4KHeapPageSize != ui32RgxDefaultPageSize) + { + psDevInfo->aui64MMUPageSizeRangeValue[0] = + RGXMMUInit_GetConfigRangeValue(ui32GeneralNon4KHeapPageSize, + RGX_GENERAL_NON4K_HEAP_BASE, + RGX_GENERAL_NON4K_HEAP_SIZE); + } + } 
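The DEBUG-only loop earlier in this hunk flags two adjacent heaps whose boundaries meet inside the same 2MB page-directory granule while only one of them is a Non4K heap; this matches the MMU_VERSION >= 4 range registers programmed just above, which assign one page size per virtual-address range, so two page sizes cannot coexist inside a single 2MB-covered region. A simplified standalone model of that condition (granule-index comparison rather than the bit-mask form used in the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PD_GRANULE (UINT64_C(2) * 1024 * 1024) /* one page-directory entry covers 2MB of VA */

/* Returns 1 if the tail of heap A and the head of heap B land in the same 2MB granule */
static int heaps_share_granule(uint64_t base_a, uint64_t len_a, uint64_t base_b)
{
    uint64_t last_granule_a  = (base_a + len_a - 1) / PD_GRANULE;
    uint64_t first_granule_b = base_b / PD_GRANULE;
    return last_granule_a == first_granule_b;
}

int main(void)
{
    /* Example: a 4KB-page heap ending 1.5MB into a granule, followed immediately by a Non4K heap */
    uint64_t base_a = UINT64_C(0x100000000);
    uint64_t len_a  = UINT64_C(0x180000); /* not a multiple of 2MB */
    uint64_t base_b = base_a + len_a;     /* starts where A ends   */

    if (heaps_share_granule(base_a, len_a, base_b))
        printf("heaps share a 2MB granule -> their page sizes must match\n");
    return 0;
}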
+#endif + +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; /* Create additional raw firmware heaps */ - for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVINFO, psDevInfo) { - eError = RGXInitFwRawHeap(psDevInfo, psDeviceMemoryHeapCursor, ui32OSid); + eError = RGXInitFwRawHeap(psDevInfo, psDeviceMemoryHeapCursor, ui32DriverID); if (eError != PVRSRV_OK) { /* if any allocation fails, free previously allocated heaps and abandon initialisation */ - for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--) + for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--) { RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); psDeviceMemoryHeapCursor--; @@ -4551,7 +4484,7 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, psDeviceMemoryHeapCursor++; } } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ return PVRSRV_OK; e1: @@ -4560,203 +4493,476 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, return eError; } -static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo, PVRSRV_DEVICE_NODE *psDeviceNode) { -#if (RGX_NUM_OS_SUPPORTED > 1) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; /* Delete all guest firmware heaps */ - for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); psDeviceMemoryHeapCursor++; } } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); } -static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +static PVRSRV_ERROR RGXInitSharedFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, - PHYS_HEAP_USAGE_FW_MAIN); + PHYS_HEAP_CONFIG *psSysHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_SHARED); -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) /* VZ heap validation */ - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL, + PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg != NULL, "FW Main heap is required for VZ Guest.", PVRSRV_ERROR_PHYSHEAP_CONFIG); } #endif - if (psFwMainConfig != NULL) + if (psSysHeapCfg != NULL) { - /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided + /* Check FW_SHARED for multiple usage flags. Because FW_SHARED is divided into subheaps, shared usage with other heaps is not allowed. 
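The per-OSID raw firmware heap loops are rewritten in terms of driver IDs: RGX_FIRST_RAW_HEAP_OSID and RGX_NUM_OS_SUPPORTED become RGX_FIRST_RAW_HEAP_DRIVER_ID and RGX_NUM_DRIVERS_SUPPORTED, and the iteration itself is hidden behind FOREACH_DRIVER_RAW_HEAP. The macro's definition is not in this patch; based on the plain for-loops it replaces, a plausible (hypothetical) shape is the following, where the DEVNODE/DEVINFO argument presumably selects how per-device driver state is reached and may also skip inactive drivers (omitted here):

/* Hypothetical sketch only; the real macro may consult per-device driver state */
#define FOREACH_DRIVER_RAW_HEAP(ui32DriverID, TAG, psObj)      \
    for ((ui32DriverID) = RGX_FIRST_RAW_HEAP_DRIVER_ID;        \
         (ui32DriverID) < RGX_NUM_DRIVERS_SUPPORTED;           \
         (ui32DriverID)++)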
*/ - PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN, - "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.", + PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_SHARED, + "FW_SHARED phys heap config not specified with more than one usage." + "FW_SHARED heap must be exclusively used as FW_SHARED.", PVRSRV_ERROR_PHYSHEAP_CONFIG); } - if (psFwMainConfig == NULL) + if (psSysHeapCfg == NULL) { PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__)); + /* Nothing to do. Default to the physheap fallback option */ } - else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA) + else if (psSysHeapCfg->eType == PHYS_HEAP_TYPE_UMA) { PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psSysHeapCfg, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG] = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; } else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */ { - IMG_UINT64 uFwMainSubHeapSize; - PHYS_HEAP_CONFIG sFwHeapConfig; - - /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST))) - { -#if defined(FIX_HW_BRN_65101_BIT_MASK) - if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) - { - uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101; - } - else -#endif - { - uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL; - } - } - else - { - uFwMainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE; - } + PHYS_HEAP_CONFIG sFwMainHeapCfg, sFwCfgHeapCfg; PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); - PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE, - "Invalid firmware physical heap size.", ErrorDeinit); - /* Now we construct RAs to manage the FW heaps */ + /* Subheap layout: Main + (optional MIPS reserved range) + Config */ + sFwMainHeapCfg = *psSysHeapCfg; + PVR_ASSERT(sFwMainHeapCfg.eType == PHYS_HEAP_TYPE_LMA || + sFwMainHeapCfg.eType == PHYS_HEAP_TYPE_DMA); + + /* Reserve space for the Config heap */ + sFwMainHeapCfg.uConfig.sLMA.uiSize -= RGX_FIRMWARE_CONFIG_HEAP_SIZE; -#if defined(SUPPORT_AUTOVZ) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) { - sFwHeapConfig = *psFwMainConfig; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap. - * If a different base address is specified for this reserved range, use the overriding define instead. 
*/ -#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) - sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; - sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; -#else - sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED; - sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED; + /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode))) + { +#if defined(FIX_HW_BRN_65101_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) + { + sFwMainHeapCfg.uConfig.sLMA.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101; + } + else #endif - - sFwHeapConfig.uiSize = RGX_FIRMWARE_MAX_PAGETABLE_SIZE; - sFwHeapConfig.ui32UsageFlags = 0; - - eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, - "Fw MMU subheap", - &psDeviceNode->psFwMMUReservedPhysHeap); - PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit); + { + sFwMainHeapCfg.uConfig.sLMA.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL; + } + } } #endif - /* Subheap layout: Main + (optional MIPS reserved range) + Config */ - sFwHeapConfig = *psFwMainConfig; - sFwHeapConfig.uiSize = uFwMainSubHeapSize; - sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN; - eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, + RGXPhysHeapGetLMAPolicy(sFwMainHeapCfg.ui32UsageFlags, psDeviceNode), + &sFwMainHeapCfg, "Fw Main subheap", - &psDeviceNode->psFWMainPhysHeap); + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit); - sFwHeapConfig = *psFwMainConfig; - sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG; + sFwCfgHeapCfg = *psSysHeapCfg; + PVR_ASSERT(sFwCfgHeapCfg.eType == PHYS_HEAP_TYPE_LMA || + sFwCfgHeapCfg.eType == PHYS_HEAP_TYPE_DMA); + + sFwCfgHeapCfg.uConfig.sLMA.sStartAddr.uiAddr += psSysHeapCfg->uConfig.sLMA.uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE; + sFwCfgHeapCfg.uConfig.sLMA.sCardBase.uiAddr += psSysHeapCfg->uConfig.sLMA.uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE; + + sFwCfgHeapCfg.uConfig.sLMA.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, + RGXPhysHeapGetLMAPolicy(sFwCfgHeapCfg.ui32UsageFlags, psDeviceNode), + &sFwCfgHeapCfg, "Fw Cfg subheap", - &psDeviceNode->psFWCfgPhysHeap); + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit); } /* Acquire FW heaps */ - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit); - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, + 
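When the shared firmware heap is LMA or DMA managed, the single FW_SHARED config is carved into a Main subheap and a Config subheap: the Main subheap gives up RGX_FIRMWARE_CONFIG_HEAP_SIZE bytes (plus a MIPS GPU-map reserve on MIPS firmware, larger again under BRN65101), and the Config subheap occupies the last RGX_FIRMWARE_CONFIG_HEAP_SIZE bytes of the carveout. A worked example with placeholder sizes (the real constants come from the RGX headers, not this patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Placeholder numbers for illustration only */
    uint64_t shared_base  = UINT64_C(0x80000000);
    uint64_t shared_size  = UINT64_C(32) * 1024 * 1024; /* FW_SHARED carveout            */
    uint64_t config_size  = UINT64_C(64) * 1024;        /* stand-in for CONFIG_HEAP_SIZE */
    uint64_t mips_reserve = UINT64_C(1) * 1024 * 1024;  /* only on MIPS-based firmware   */

    uint64_t main_size   = shared_size - config_size - mips_reserve;
    uint64_t config_base = shared_base + shared_size - config_size;

    printf("Fw Main subheap:   base 0x%" PRIx64 ", size 0x%" PRIx64 "\n", shared_base, main_size);
    printf("Fw Config subheap: base 0x%" PRIx64 ", size 0x%" PRIx64 "\n", config_base, config_size);
    return 0;
}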
&psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit); - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); + return eError; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + + return eError; +} + +static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PHYS_HEAP_CONFIG *psFwCodeHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_CODE); + PHYS_HEAP_CONFIG *psFwDataHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_PRIV_DATA); + PHYS_HEAP_CONFIG *psFwPrivateHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_PRIVATE); + PHYS_HEAP_CONFIG sFwPrivateTempCfg; + + if (psFwPrivateHeapCfg != NULL) + { + PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg == NULL) && (psFwDataHeapCfg == NULL), + "FW_PRIVATE and the FW_CODE & FW_PRIV_DATA usage flags " + "achieve the same goal and are mutually exclusive.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + /* Fw code and data are both allocated from this unified heap */ + sFwPrivateTempCfg = *psFwPrivateHeapCfg; + sFwPrivateTempCfg.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; + + psFwCodeHeapCfg = &sFwPrivateTempCfg; + psFwDataHeapCfg = &sFwPrivateTempCfg; + } + + if ((psFwCodeHeapCfg == NULL) || (psFwDataHeapCfg == NULL)) + { + if (psFwCodeHeapCfg != psFwDataHeapCfg) + { + /* Private Firmware code and data heaps must be either both defined + * or both undefined. There is no point in isolating one but not + * the other.*/ + eError = PVRSRV_ERROR_PHYSHEAP_CONFIG; + PVR_LOG_GOTO_IF_ERROR(eError, "PrivateFwPhysHeap check", ErrorDeinit); + } + else + { + /* No dedicated heaps, default to the physheap fallback option */ + } + } + else if (psFwCodeHeapCfg == psFwDataHeapCfg) + { + if (psFwCodeHeapCfg->ui32UsageFlags == + (PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA)) + { + /* Fw code and private data allocations come from the same system heap + * Instantiate one physheap and share it between them. */ + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwCodeHeapCfg, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); + } + else + { + /* Not an exclusive heap, can be used for other purposes (e.g. secure buffers). + * Expect the PVR layer to have already created a heap for the other uses. */ + } + } + else + { + /* + * Separating private Firmware code and data is allowed for backwards compatibility + * purposes. New platforms should use the unified FW_PRIVATE heap instead. + * + * Early security implementations on Rogue cores required separate FW_PRIV_DATA + * and FW_CODE heaps, as access permissions to Firmware were granted differently + * based on the transaction types (code or data). 
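Editor's note on the heap selection logic in this hunk: the rules reduce to a small decision over which heap configs the system layer provides. The sketch below only illustrates that decision; it uses a hypothetical config struct and helper name rather than the driver's PHYS_HEAP_CONFIG machinery, and it omits the heap instantiation the real code performs afterwards.

#include <stdio.h>

typedef struct { const char *name; } heap_cfg_t;

/* Mirrors the selection rules only: FW_PRIVATE replaces the pair, and the
 * dedicated code/data heaps must be defined together or not at all.
 * Returns 0 on success, -1 on an invalid combination. */
static int pick_private_fw_heaps(heap_cfg_t *code, heap_cfg_t *data,
                                 heap_cfg_t *priv,
                                 heap_cfg_t **out_code, heap_cfg_t **out_data)
{
    if (priv != NULL)
    {
        if (code != NULL || data != NULL)
            return -1;           /* FW_PRIVATE excludes FW_CODE/FW_PRIV_DATA */
        *out_code = priv;        /* both allocations share the unified heap */
        *out_data = priv;
        return 0;
    }

    if ((code == NULL) != (data == NULL))
        return -1;               /* isolate both or neither */

    *out_code = code;            /* NULL means: fall back to default heaps */
    *out_data = data;
    return 0;
}

int main(void)
{
    heap_cfg_t priv = { "fw_private" };
    heap_cfg_t *code, *data;

    if (pick_private_fw_heaps(NULL, NULL, &priv, &code, &data) == 0)
        printf("code heap: %s, data heap: %s\n", code->name, data->name);
    return 0;
}

In words: a unified FW_PRIVATE heap wins and serves both code and private data; otherwise the dedicated code/data heaps must be configured as a pair or not at all, with the absence of both falling back to the default physheaps.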
+ */ + PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_CODE) && + (psFwDataHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PRIV_DATA), + "Dedicated private heaps for Fw code and " + "data must have one usage flag exclusively.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + /* Dedicated Fw code heap */ + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwCodeHeapCfg, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + + /* Dedicated Fw private data heap */ + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwDataHeapCfg, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + } + +#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE) + /* When premapping distinct private and shared Firmware phys heaps + * inside the same virtual devmem heap, their sizes must add up to + * the fixed RGX_FIRMWARE_RAW_HEAP_SIZE for the premapping to work */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PHYS_HEAP_CONFIG *psFwSharedHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_SHARED); + IMG_UINT64 ui64FwCodeHeapSize = PhysHeapConfigGetSize(psFwCodeHeapCfg); + IMG_UINT64 ui64FwDataHeapSize = PhysHeapConfigGetSize(psFwDataHeapCfg); + IMG_UINT64 ui64FwSharedHeapSize = PhysHeapConfigGetSize(psFwSharedHeapCfg); + IMG_UINT64 ui64FwPrivateHeapSize; + + PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL), + "Security support requires Fw code and data memory be" + " separate from the heap shared with the kernel driver.", FailDeinit); + + if (psFwCodeHeapCfg != psFwDataHeapCfg) + { + /* Private Firmware allocations come from 2 different heaps */ + ui64FwPrivateHeapSize = ui64FwCodeHeapSize + ui64FwDataHeapSize; + } + else + { + /* Private Firmware allocations come from a single heap */ + ui64FwPrivateHeapSize = ui64FwCodeHeapSize; + } + + PVR_LOG_GOTO_IF_FALSE((ui64FwSharedHeapSize + ui64FwPrivateHeapSize) == RGX_FIRMWARE_RAW_HEAP_SIZE, + "Invalid firmware physical heap size.", FailDeinit); + } +#endif + + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit); - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit); return eError; +#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE) +FailDeinit: + eError = PVRSRV_ERROR_INVALID_PARAMS; +#endif ErrorDeinit: PVR_ASSERT(IMG_FALSE); return eError; } -static void _ReadNon4KHeapPageSize(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 *pui32Log2Non4KPgSize) +static PVRSRV_ERROR RGXInitFwPageTableHeap(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(RGX_PREMAP_FW_HEAPS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PHYS_HEAP_CONFIG *psFwPageTableHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_PREMAP_PT); + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg != NULL), + "The Firmware Page Table phys heap config not found.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PREMAP_PT), + "The Firmware Page Table heap must be used 
exclusively for this purpose", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_LMA) || + (psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_DMA), + "The Firmware Page Table heap must be LMA or DMA memory.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->uConfig.sLMA.uiSize >= RGX_FIRMWARE_MAX_PAGETABLE_SIZE), + "The Firmware Page Table heap must be able to hold the maximum " + "number of pagetables needed to cover the Firmware's VA space.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwPageTableHeapCfg, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig:FwPageTableHeap"); + + eError = PhysHeapAcquire(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire:FwPageTableHeap"); + } +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +#endif /* defined(RGX_PREMAP_FW_HEAPS) */ + + return eError; +} + +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + eError = RGXInitFwPageTableHeap(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitFwPageTableHeap", ErrorDeinit); + eError = RGXInitSharedFwPhysHeaps(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSharedFwPhysHeaps", ErrorDeinit); + eError = RGXInitPrivateFwPhysHeaps(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitPrivateFwPhysHeaps", ErrorDeinit); + +ErrorDeinit: + return eError; +} + +/*************************************************************************/ /*! +@Function RGXDeviceFWMainHeapMemCheck +@Description Checks the free memory in FW Main PhysHeap of a device to ensure + there is enough for a connection to be made. + +@Input psDeviceNode The device of the FW Main PhysHeap to be checked. + +@Return On success PVRSRV_OK, else a PVRSRV_ERROR code. +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXDeviceFWMainHeapMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PHYS_HEAP *psFWMainPhysHeap; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); + + psFWMainPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; + if (psFWMainPhysHeap == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to get device's FW Main PhysHeap")); + return PVRSRV_ERROR_INVALID_HEAP; + } + + if (PhysHeapGetType(psFWMainPhysHeap) == PHYS_HEAP_TYPE_LMA) + { + const IMG_UINT32 ui32MinMemInKBs = RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION; + IMG_UINT64 ui64FreePhysHeapMem; + + eError = PhysHeapFreeMemCheck(psFWMainPhysHeap, + KB2B(ui32MinMemInKBs), + &ui64FreePhysHeapMem); + + if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) + { + PVR_DPF((PVR_DBG_ERROR, "FW_MAIN PhysHeap contains less than the " + "minimum free space required to acquire a connection. " + "Free space: %"IMG_UINT64_FMTSPEC"KB " + "Minimum required: %uKB", + B2KB(ui64FreePhysHeapMem), + ui32MinMemInKBs)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXGetNon4KHeapPageShift(const void *hPrivate, IMG_UINT32 *pui32Log2Non4KPgShift) { - void *pvAppHintState = NULL; - IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; IMG_UINT32 ui32GeneralNon4KHeapPageSize; + IMG_UINT32 uiLog2OSPageShift = OSGetPageShift(); + + /* We support Non4K pages only on platforms with 4KB pages. 
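Editor's note on the RGXDeviceFWMainHeapMemCheck hunk above: it gates new connections on the free space remaining in the FW Main heap (LMA heaps only). Below is a standalone sketch of that gate, with a hypothetical minimum in place of RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION and plain integers in place of the PhysHeap API.

#include <stdint.h>
#include <stdio.h>

#define KB2B(kb) ((uint64_t)(kb) << 10)
#define B2KB(b)  ((uint64_t)(b) >> 10)

/* Returns 0 if the heap has enough headroom for a new connection, -1 otherwise. */
static int fw_main_heap_mem_check(uint64_t free_bytes, uint32_t min_kb)
{
    if (free_bytes < KB2B(min_kb))
    {
        fprintf(stderr,
                "FW_MAIN heap below minimum: free %llu KB, required %u KB\n",
                (unsigned long long)B2KB(free_bytes), min_kb);
        return -1;          /* reject the new connection */
    }
    return 0;
}

int main(void)
{
    /* e.g. 512 KB free measured against a hypothetical 1024 KB minimum */
    return fw_main_heap_mem_check(512u * 1024u, 1024u) ? 1 : 0;
}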
On all platforms + * where OS pages are larger than 4KB we must ensure the non4K device memory + * heap matches the page size used in all other device memory heaps, which + * is the OS page size, see RGXHeapDerivePageSize. */ + if (uiLog2OSPageShift > RGX_HEAP_4KB_PAGE_SHIFT) + { + *pui32Log2Non4KPgShift = RGXHeapDerivePageSize(uiLog2OSPageShift); + } + else + { + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32Log2Non4KPgShift; + + /* Get the page size for the dummy page from the NON4K heap apphint */ + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, + GeneralNon4KHeapPageSize, &ui32AppHintDefault, + &ui32GeneralNon4KHeapPageSize); + OSFreeAppHintState(pvAppHintState); + + /* Validate the specified parameter to be one of the supported values */ + ui32Log2Non4KPgShift = RGXHeapDerivePageSize(ExactLog2(ui32GeneralNon4KHeapPageSize)); + if (ui32AppHintDefault != ui32GeneralNon4KHeapPageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid Non4K Page-size, default=%u, requested=%u," + " actual=%u, pageshift 0x%x", + __func__, ui32AppHintDefault, ui32GeneralNon4KHeapPageSize, + (1U << ui32Log2Non4KPgShift), ui32Log2Non4KPgShift)); + } + + if (ui32Log2Non4KPgShift == 0U) + { + return PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE; + } + + *pui32Log2Non4KPgShift = ui32Log2Non4KPgShift; + } - /* Get the page size for the dummy page from the NON4K heap apphint */ - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, - GeneralNon4KHeapPageSize, &ui32AppHintDefault, - &ui32GeneralNon4KHeapPageSize); - *pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize); #if defined(FIX_HW_BRN_71317_BIT_MASK) - if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317)) + if (RGX_DEVICE_HAS_BRN(hPrivate, 71317)) { - if (*pui32Log2Non4KPgSize == RGX_HEAP_2MB_PAGE_SHIFT - || *pui32Log2Non4KPgSize == RGX_HEAP_1MB_PAGE_SHIFT) + if (*pui32Log2Non4KPgShift == RGX_HEAP_2MB_PAGE_SHIFT + || *pui32Log2Non4KPgShift == RGX_HEAP_1MB_PAGE_SHIFT) { - PVR_DPF((PVR_DBG_WARNING, - "Page sizes of 2MB or 1MB cause page faults. " - "Setting Non4K heap size to default apphint value (16KB).")); - *pui32Log2Non4KPgSize = ExactLog2(ui32AppHintDefault); + PVR_DPF((PVR_DBG_ERROR, + "Page sizes of 2MB or 1MB cause page faults.")); + return PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE; } } +#else + PVR_UNREFERENCED_PARAMETER(hPrivate); #endif - OSFreeKMAppHintState(pvAppHintState); + /* Check the Non4k page size is at least the size of the OS page size + * or larger. The Non4k page size also has to be a multiple of the OS page + * size but since we have the log2 value from the apphint we know powers of 2 + * will always be multiples. If the Non4k page size is less than OS page size + * we notify and upgrade the size. + */ + if (*pui32Log2Non4KPgShift < uiLog2OSPageShift) + { + PVR_DPF((PVR_DBG_MESSAGE, "Non4K page size smaller than OS page size, upgrading to " + "match OS page size.")); + *pui32Log2Non4KPgShift = uiLog2OSPageShift; + } + + return PVRSRV_OK; } /* RGXRegisterDevice * - * NOTE: No PDUMP statements are allowed in until Part 2 of the device initialisation - * is reached. + * WARNING! + * + * No PDUMP statements are allowed until device initialisation starts. 
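Editor's note on the RGXGetNon4KHeapPageShift rework above: it boils down to taking the apphint page size, requiring it to be a power of two, converting it to a shift, and never letting it drop below the OS page shift. A minimal sketch of that derivation follows, assuming a hypothetical exact_log2() helper and ignoring the BRN 71317 restriction and the hardware-supported shift set handled by RGXHeapDerivePageSize().

#include <stdint.h>
#include <stdio.h>

/* Returns log2(v) for powers of two greater than 1, otherwise 0 (invalid). */
static uint32_t exact_log2(uint32_t v)
{
    uint32_t shift = 0;

    if (v == 0 || (v & (v - 1)) != 0)
        return 0;
    while ((v >>= 1) != 0)
        shift++;
    return shift;
}

static int derive_non4k_shift(uint32_t requested_bytes,
                              uint32_t os_page_shift,
                              uint32_t *out_shift)
{
    uint32_t shift = exact_log2(requested_bytes);

    if (shift == 0)
        return -1;                 /* not a power of two: reject */

    /* A Non4K page may never be smaller than the OS page. */
    if (shift < os_page_shift)
        shift = os_page_shift;

    *out_shift = shift;
    return 0;
}

int main(void)
{
    uint32_t shift;

    /* a 16 KB request on a 4 KB-page host yields shift 14 */
    if (derive_non4k_shift(16 * 1024, 12, &shift) == 0)
        printf("Non4K page shift: %u\n", shift);
    return 0;
}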
*/ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) { @@ -4766,11 +4972,10 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) void *pvAppHintState = NULL; IMG_UINT32 ui32AppHintDefault = HWPERF_HOST_TL_STREAM_SIZE_DEFAULT, ui32HWPerfHostBufSizeKB; - ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, &ui32AppHintDefault, &ui32HWPerfHostBufSizeKB); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); pvAppHintState = NULL; /********************* @@ -4789,14 +4994,22 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) /* Configure MMU specific stuff */ RGXMMUInit_Register(psDeviceNode); - psDeviceNode->pfnDevSLCFlushRange = NULL; psDeviceNode->pfnInvalFBSCTable = NULL; psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; + /* Callback for getting the MMU device attributes */ + psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; + psDeviceNode->pfnGetDeviceSnoopMode = RGXDevSnoopMode; psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; - psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; +#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) + psDeviceNode->pfnMMUTopLevelPxWorkarounds = RGXMapBRN71422TargetPhysicalAddress; +#else + psDeviceNode->pfnMMUTopLevelPxWorkarounds = NULL; +#endif + /* pfnMMUTweakProtFlags is set later on once BVNC features are setup */ + psDeviceNode->pfnMMUTweakProtFlags = NULL; psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; @@ -4809,7 +5022,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; /* Register callback for checking the device's health */ - psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXUpdateHealthStatus; + psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) ? 
NULL : RGXUpdateHealthStatus; #if defined(SUPPORT_AUTOVZ) /* Register callback for updating the virtualization watchdog */ @@ -4845,54 +5058,17 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) /* Callback for checking if system layer supports FBC 3.1 */ psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; - /* Callback for getting the MMU device attributes */ - psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; - /* Callback for getting TFBC configuration */ psDeviceNode->pfnGetTFBCLossyGroup = RGXGetTFBCLossyGroup; /* Register callback for initialising device-specific physical memory heaps */ psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; - /* Set up required support for dummy page */ - OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0); - OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0); - - /* Set the order to 0 */ - psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0; - psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0; - - /* Set the size of the Dummy page to zero */ - psDeviceNode->sDummyPage.ui32Log2PgSize = 0; - - /* Set the size of the Zero page to zero */ - psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0; - - /* Set the Dummy page phys addr */ - psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; - - /* Set the Zero page phys addr */ - psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; - - /* The lock can be acquired from MISR (Z-buffer) path */ - eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__)); - return eError; - } + /* Register callback for checking a device's FW Main physical heap for sufficient free memory */ + psDeviceNode->pfnCheckForSufficientFWPhysMem = RGXDeviceFWMainHeapMemCheck; - /* Create the lock for zero page */ - eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__)); - goto free_dummy_page; - } -#if defined(PDUMP) - psDeviceNode->sDummyPage.hPdumpPg = NULL; - psDeviceNode->sDevZeroPage.hPdumpPg = NULL; -#endif + /* Register callback for determining the appropriate LMA allocation policy for a phys heap */ + psDeviceNode->pfnPhysHeapGetLMAPolicy = RGXPhysHeapGetLMAPolicy; /********************* * Device info setup * @@ -4905,8 +5081,15 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) "DevInitRGXPart1 : Failed to alloc memory for DevInfo")); return PVRSRV_ERROR_OUT_OF_MEMORY; } + + /* initialise the layer parameters needed for early hw feature checks */ + psDevInfo->sLayerParams.psDevInfo = psDevInfo; + psDevInfo->sLayerParams.psDevConfig = psDeviceNode->psDevConfig; + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /* Default psTrampoline to point to null struct */ psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; +#endif /* create locks for the context lists stored in the DevInfo structure. 
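Editor's note on the hunks that follow: they insert a new lock (hGpuUtilStatsLock) into the middle of RGXRegisterDevice's init sequence, which is why every later error label is renumbered (e13 becomes e14, and so on). The pattern itself is the usual LIFO unwind; the sketch below is a generic illustration with hypothetical resources, not the driver's code.

#include <stdio.h>
#include <stdlib.h>

static int create_resource(const char *name, void **out)
{
    *out = malloc(1);
    if (*out == NULL)
    {
        fprintf(stderr, "failed to create %s\n", name);
        return -1;
    }
    return 0;
}

static int register_device(void)
{
    void *lock_a, *lock_b, *lock_c;

    if (create_resource("lock A", &lock_a) != 0)
        goto e0;
    if (create_resource("lock B", &lock_b) != 0)
        goto e1;
    if (create_resource("lock C", &lock_c) != 0)   /* newly inserted step */
        goto e2;

    /* ... rest of initialisation; a real driver keeps these until tear-down,
     * they are released here only to keep the sketch leak-free ... */
    free(lock_c);
    free(lock_b);
    free(lock_a);
    return 0;

    /* Unwind strictly in reverse order of creation. */
e2: free(lock_b);
e1: free(lock_a);
e0: return -1;
}

int main(void)
{
    return register_device() == 0 ? 0 : 1;
}

The numbered labels make the insertion point explicit: adding lock C only required one new label and a shift of the later ones, while the release order stayed the mirror image of the acquisition order.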
* these lists are modified on context create/destroy and read by the @@ -4981,7 +5164,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) goto e7; } - eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); + eError = OSLockCreate(&psDevInfo->sRegConfig.hLock); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); @@ -5015,6 +5198,13 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) goto e12; } + eError = OSLockCreate(&psDevInfo->hGpuUtilStatsLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create GPU stats lock", __func__)); + goto e13; + } + dllist_init(&psDevInfo->sMemoryContextList); /* initialise ui32SLRHoldoffCounter */ @@ -5050,9 +5240,9 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) "%s: Failed to create RGX register mapping", __func__)); eError = PVRSRV_ERROR_BAD_MAPPING; - goto e13; + goto e14; } -#endif +#endif /* !NO_HARDWARE */ psDeviceNode->pvDevice = psDevInfo; @@ -5062,74 +5252,82 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) PVR_DPF((PVR_DBG_ERROR, "%s: Unsupported HW device detected by driver", __func__)); - goto e14; + goto e15; + } + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + /* + * We must now setup the SECURITY mappings if supported. We cannot + * check on the features until we have reached here as the BVNC is + * not setup before now. + */ +#if !defined(NO_HARDWARE) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && + (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) + { + IMG_CPU_PHYADDR sHostSecureRegBankBase = {psDeviceNode->psDevConfig->sRegsCpuPBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET}; + + psDevInfo->pvSecureRegsBaseKM = (void __iomem *) OSMapPhysToLin(sHostSecureRegBankBase, + RGX_HOST_SECURE_REGBANK_SIZE, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + if (psDevInfo->pvSecureRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitDevPart2KM: Failed to create RGX secure register mapping")); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto e15; + } + + /* + * The secure register bank is mapped into the CPU VA space starting from + * the base of the normal register bank + an offset of RGX_HOST_SECURE_REGBAK_OFFSET. + * The hardware register addresses are all indexed from the base of the regular register bank. + * For the RegBankBase+RegOffset computation to still be accurate for host-secure registers, + * we need to compensate for offsets of registers in the secure bank + */ + psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM - RGX_HOST_SECURE_REGBANK_OFFSET); + } + else + { + psDevInfo->pvSecureRegsBaseKM = psDevInfo->pvRegsBaseKM; } +#endif /* !NO_HARDWARE */ +#endif /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ - _ReadNon4KHeapPageSize(psDevInfo, &psDevInfo->ui32Log2Non4KPgSize); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + psDeviceNode->pfnMMUTweakProtFlags = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? 
+ RGXMMUTweakProtFlags : NULL; +#endif - /*Set the zero & dummy page sizes as needed for the heap with largest page size */ - psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; - psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; + eError = RGXGetNon4KHeapPageShift(&psDevInfo->sLayerParams, + &psDeviceNode->ui32Non4KPageSizeLog2); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXGetNon4KHeapPageSize", e16); eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo); if (eError != PVRSRV_OK) { - goto e14; + goto e16; } eError = RGXHWPerfInit(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e16); eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB); PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw); -#if defined(SUPPORT_VALIDATION) - /* This completion will be signaled by the ISR when processing - * the answer CCB command carrying an RGX Register read value */ - init_completion(&psDevInfo->sFwRegs.sRegComp); - psDevInfo->sFwRegs.ui64RegVal = 0; - -#if defined(SUPPORT_SOC_TIMER) - { - IMG_BOOL bAppHintDefault = IMG_FALSE; - IMG_BOOL bInitSocTimer; - void *pvAppHintState = NULL; - - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bAppHintDefault, &bInitSocTimer); - OSFreeKMAppHintState(pvAppHintState); - - if (bInitSocTimer) - { - eError = RGXInitSOCUSCTimer(psDeviceNode); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSOCUSCTimer", ErrorDeInitHWPerfHost); - } - } -#endif -#endif /* Register callback for dumping debug info */ eError = RGXDebugInit(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", ErrorDeInitHWPerfHost); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e17); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /* Register callback for fw mmu init */ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { psDeviceNode->pfnFwMMUInit = RGXMipsMMUInit_Register; } - - /* The device shared-virtual-memory heap address-space size is stored here for faster - look-up without having to walk the device heap configuration structures during - client device connection (i.e. 
this size is relative to a zero-based offset) */ -#if defined(FIX_HW_BRN_65273_BIT_MASK) - if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) - { - psDeviceNode->ui64GeneralSVMHeapTopVA = 0; - }else #endif - { - psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; - } if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit) { @@ -5137,11 +5335,9 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDevInfo->sDevFeatureCfg.ui64Features); } - psDeviceNode->bHasSystemDMA = psDeviceNode->psDevConfig->bHasDma; - /* Initialise the device dependent bridges */ - eError = DeviceDepBridgeInit(psDevInfo); - PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); + eError = RGXRegisterBridges(psDevInfo); + PVR_LOG_IF_ERROR(eError, "RGXRegisterBridges"); #if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) eError = OSLockCreate(&psDevInfo->hCounterDumpingLock); @@ -5159,22 +5355,41 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) #if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) ErrorDeInitDeviceDepBridge: - DeviceDepBridgeDeInit(psDevInfo); + RGXUnregisterBridges(psDevInfo); #endif -ErrorDeInitHWPerfHost: +e17: RGXHWPerfHostDeInit(psDevInfo); - ErrorDeInitHWPerfFw: RGXHWPerfDeinit(psDevInfo); - -e14: +e16: #if !defined(NO_HARDWARE) - OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, - psDevInfo->ui32RegSize); - -e13: +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (psDevInfo->pvSecureRegsBaseKM != NULL) + { + /* Adjust pvSecureRegsBaseKM if device has SECURITY_VERSION > 1 */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && + (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) + { + /* Undo the VA offset adjustment to unmap correct VAddr */ + psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); + OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, + RGX_HOST_SECURE_REGBANK_SIZE); + } + } +#endif +#endif /* !NO_HARDWARE */ +e15: +#if !defined(NO_HARDWARE) + if (psDevInfo->pvRegsBaseKM != NULL) + { + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize); + } +e14: #endif /* !NO_HARDWARE */ + OSLockDestroy(psDevInfo->hGpuUtilStatsLock); +e13: OSLockDestroy(psDevInfo->hCCBRecoveryLock); e12: OSLockDestroy(psDevInfo->hCCBStallCheckLock); @@ -5183,7 +5398,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) e10: OSLockDestroy(psDevInfo->hBPLock); e9: - OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->sRegConfig.hLock); e8: OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); e7: @@ -5203,13 +5418,6 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) e0: OSFreeMem(psDevInfo); - /* Destroy the zero page lock created above */ - OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); - -free_dummy_page: - /* Destroy the dummy page lock created above */ - OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); - PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -5257,9 +5465,9 @@ IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) version string @Output ppszVersionString Contains the version string upon return @Return PVRSRV_ERROR - */ /**************************************************************************/ +*/ /**************************************************************************/ static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_CHAR **ppszVersionString) + IMG_CHAR 
**ppszVersionString) { #if defined(NO_HARDWARE) || defined(EMULATOR) const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)"; @@ -5304,9 +5512,9 @@ static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, @Input psDeviceNode Device node @Output pui32RGXClockSpeed Variable for storing the clock speed @Return PVRSRV_ERROR - */ /***************************************************************************/ +*/ /***************************************************************************/ static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_PUINT32 pui32RGXClockSpeed) + IMG_PUINT32 pui32RGXClockSpeed) { RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; @@ -5316,7 +5524,7 @@ static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_OK; } -#if (RGX_NUM_OS_SUPPORTED > 1) +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) /*! ******************************************************************************* @@ -5324,7 +5532,7 @@ static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Called to perform additional initialisation ******************************************************************************/ -static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid) +static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID) { IMG_UINT32 uiStringLength; IMG_UINT32 uiStringLengthMax = 32; @@ -5349,19 +5557,19 @@ static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_ uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); - /* Start by allocating memory for this OSID heap identification string */ + /* Start by allocating memory for this DriverID heap identification string */ psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); if (psDevMemHeap->pszName == NULL) { return PVRSRV_ERROR_OUT_OF_MEMORY; } - /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ - OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid); + /* Append the DriverID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ + OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); /* Use the common blueprint template support function to initialise the heap */ HeapCfgBlueprintInit(psDevMemHeap->pszName, - RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE), + RGX_FIRMWARE_RAW_HEAP_BASE + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE), RGX_FIRMWARE_RAW_HEAP_SIZE, 0, ui32Log2RgxDefaultPageShift, @@ -5383,7 +5591,7 @@ static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_ static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) { IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; - IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); + IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_DRIVERS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); /* Safe to do as the guest firmware heaps are last in the list */ if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && @@ -5393,7 +5601,7 @@ static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) OSFreeMem(pszName); } } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#endif /* defined(RGX_PREMAP_FW_HEAPS) || 
(RGX_NUM_DRIVERS_SUPPORTED > 1) */ /****************************************************************************** End of file (rgxinit.c) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.h deleted file mode 100644 index 6cc8c8b1c256..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxinit.h +++ /dev/null @@ -1,281 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title RGX initialisation header file -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Header for the RGX initialisation -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#if !defined(RGXINIT_H) -#define RGXINIT_H - -#include "connection_server.h" -#include "pvrsrv_error.h" -#include "img_types.h" -#include "device.h" -#include "rgxdevice.h" -#include "rgx_bridge.h" -#include "fwload.h" - -#if defined(__linux__) -#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware -#else -#define OS_FW_VERIFY_FUNCTION NULL -#endif - -/*! 
-******************************************************************************* - - @Function RGXInitDevPart2 - - @Description - - Second part of server-side RGX initialisation - - @Input psDeviceNode - device node - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32DeviceFlags, - IMG_UINT32 ui32HWPerfHostFilter, - RGX_ACTIVEPM_CONF eActivePMConf); - -PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEVMEM_SIZE_T ui32FWCodeLen, - IMG_DEVMEM_SIZE_T ui32FWDataLen, - IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, - IMG_DEVMEM_SIZE_T uiFWCorememDataLen); - - -/*! -******************************************************************************* - - @Function RGXInitFirmware - - @Description - - Server-side RGX firmware initialisation - - @Input psDeviceNode - device node - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR -RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, - IMG_UINT32 ui32ConfigFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 *pui32TPUTrilinearFracMask, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2, - IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32FwOsCfgFlags); - - -/*! -******************************************************************************* - - @Function RGXLoadAndGetFWData - - @Description - - Load FW and return pointer to FW data. - - @Input psDeviceNode - device node - - @Input ppsRGXFW - fw pointer - - @Output ppbFWData - pointer to FW data (NULL if an error occurred) - - @Return PVRSRV_ERROR - PVRSRV_OK on success - PVRSRV_ERROR_NOT_READY if filesystem is not ready - PVRSRV_ERROR_NOT_FOUND if no suitable FW image found - PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc memory for FW image - PVRSRV_ERROR_NOT_AUTHENTICATED if FW image failed verification - -******************************************************************************/ -PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, - OS_FW_IMAGE **ppsRGXFW, - const IMG_BYTE **ppbFWData); - -#if defined(PDUMP) -/*! -******************************************************************************* - - @Function RGXInitHWPerfCounters - - @Description - - Initialisation of the performance counters - - @Input psDeviceNode - device node - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); -#endif - -/*! -******************************************************************************* - - @Function RGXRegisterDevice - - @Description - - Registers the device with the system - - @Input: psDeviceNode - device node - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); - -/*! -******************************************************************************* - - @Function RGXDevBVNCString - - @Description - - Returns the Device BVNC string. It will allocate and fill it first, if necessary. 
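Editor's note: although rgxinit.h is deleted here, its description of RGXLoadAndGetFWData's return codes is a useful reminder of how a caller is expected to react — NOT_READY is a transient condition (filesystem not mounted yet) worth retrying, while the others are fatal. The sketch below is a hypothetical caller with stand-in error values and a stub loader, not the PVRSRV_ERROR enum or the driver API.

#include <stdio.h>

typedef enum {
    ERR_OK = 0,
    ERR_NOT_READY,        /* filesystem not ready yet          */
    ERR_NOT_FOUND,        /* no suitable firmware image found  */
    ERR_OUT_OF_MEMORY,    /* could not allocate image buffer   */
    ERR_NOT_AUTHENTICATED /* image failed verification         */
} fw_err_t;

/* Stub loader standing in for the real firmware fetch. */
static fw_err_t load_firmware(const char *name, const unsigned char **data)
{
    (void)name;
    *data = (const unsigned char *)"\x7f" "FW";   /* pretend image */
    return ERR_OK;
}

int main(void)
{
    const unsigned char *fw = NULL;
    int retries = 3;
    fw_err_t err;

    /* Retry only the transient NOT_READY case; everything else is fatal. */
    do {
        err = load_firmware("rgx.fw", &fw);
    } while (err == ERR_NOT_READY && retries-- > 0);

    if (err != ERR_OK)
    {
        fprintf(stderr, "firmware load failed: %d\n", (int)err);
        return 1;
    }
    printf("firmware image %s\n", fw != NULL ? "loaded" : "missing");
    return 0;
}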
- - @Input: psDevInfo - device info (must not be null) - - @Return IMG_PCHAR - pointer to BVNC string - -******************************************************************************/ -IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* - - @Function DevDeInitRGX - - @Description - - Reset and deinitialise Chip - - @Input psDeviceNode - device info. structure - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); - - -#if !defined(NO_HARDWARE) - -void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* - - @Function SORgxGpuUtilStatsRegister - - @Description SO Interface function called from the OS layer implementation. - Initialise data used to compute GPU utilisation statistics - for a particular user (identified by the handle passed as - argument). This function must be called only once for each - different user/handle. - - @Input phGpuUtilUser - Pointer to handle used to identify a user of - RGXGetGpuUtilStats - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); - - -/*! -******************************************************************************* - - @Function SORgxGpuUtilStatsUnregister - - @Description SO Interface function called from the OS layer implementation. - Free data previously used to compute GPU utilisation statistics - for a particular user (identified by the handle passed as - argument). - - @Input hGpuUtilUser - Handle used to identify a user of - RGXGetGpuUtilStats - - @Return PVRSRV_ERROR - -******************************************************************************/ -PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); -#endif /* !defined(NO_HARDWARE) */ - -/*! - ******************************************************************************* - - @Function RGXInitCreateFWKernelMemoryContext - - @Description Called to perform initialisation during firmware kernel context - creation. - - @Input psDeviceNode device node - ******************************************************************************/ -PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); - -/*! - ******************************************************************************* - - @Function RGXDeInitDestroyFWKernelMemoryContext - - @Description Called to perform deinitialisation during firmware kernel - context destruction. - - @Input psDeviceNode device node - ******************************************************************************/ -void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); - -#endif /* RGXINIT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.c index 34f202741ee0..535326d7e786 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxlayer_impl.c @@ -46,9 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pdump_km.h" #include "rgxfwutils.h" #include "rgxfwimageutils.h" -#include "devicemem.h" #include "cache_km.h" -#include "pmr.h" #if defined(PDUMP) #if defined(__linux__) @@ -64,670 +62,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* __linux__ */ #endif -void RGXMemCopy(const void *hPrivate, - void *pvDst, - void *pvSrc, - size_t uiSize) -{ - PVR_UNREFERENCED_PARAMETER(hPrivate); - OSDeviceMemCopy(pvDst, pvSrc, uiSize); -} - -void RGXMemSet(const void *hPrivate, - void *pvDst, - IMG_UINT8 ui8Value, - size_t uiSize) -{ - PVR_UNREFERENCED_PARAMETER(hPrivate); - OSDeviceMemSet(pvDst, ui8Value, uiSize); -} - -void RGXCommentLog(const void *hPrivate, - const IMG_CHAR *pszString, - ...) -{ -#if defined(PDUMP) - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - va_list argList; - va_start(argList, pszString); - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList); - va_end(argList); -#else - PVR_UNREFERENCED_PARAMETER(hPrivate); - PVR_UNREFERENCED_PARAMETER(pszString); -#endif -} - -void RGXErrorLog(const void *hPrivate, - const IMG_CHAR *pszString, - ...) -{ - IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; - va_list argList; - - PVR_UNREFERENCED_PARAMETER(hPrivate); - - va_start(argList, pszString); - vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); - va_end(argList); - - PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); -} - -IMG_UINT32 RGXGetOSPageSize(const void *hPrivate) -{ - PVR_UNREFERENCED_PARAMETER(hPrivate); - return OSGetPageSize(); -} - -IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) -{ -#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX) - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - IMG_UINT32 ui32CorememSize = 0; - - PVR_ASSERT(hPrivate != NULL); - - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) - { - ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); - } - - return ui32CorememSize; -#else - PVR_UNREFERENCED_PARAMETER(hPrivate); - - return 0U; -#endif -} - -void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue); - } - - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); -} - -void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); - } - - PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); -} - -IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) -{ - 
RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - IMG_UINT32 ui32RegValue; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - -#if defined(PDUMP) - if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) - { - ui32RegValue = IMG_UINT32_MAX; - } - else -#endif - { - ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr); - } - - PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, psParams->ui32PdumpFlags); - - return ui32RegValue; -} - -IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - IMG_UINT64 ui64RegValue; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - -#if defined(PDUMP) - if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) - { - ui64RegValue = IMG_UINT64_MAX; - } - else -#endif - { - ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); - } - - PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); - - return ui64RegValue; -} - -IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 uiRegValueNew, - IMG_UINT64 uiRegKeepMask) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; -#if defined(PDUMP) - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -#endif - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - - /* only use the new values for bits we update according to the keep mask */ - uiRegValueNew &= ~uiRegKeepMask; - -#if defined(PDUMP) - - PDUMP_BLKSTART(ui32PDumpFlags); - - /* Store register offset to temp PDump variable */ - PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags); - - /* Keep the bits set in the mask */ - PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - uiRegKeepMask, ui32PDumpFlags); - - /* OR the new values */ - PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - uiRegValueNew, ui32PDumpFlags); - - /* Do the actual register write */ - PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); - - PDUMP_BLKEND(ui32PDumpFlags); - - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - - { - IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); - uiRegValue &= uiRegKeepMask; - OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); - } - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXPollReg32(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32RegValue, - IMG_UINT32 ui32RegMask) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), - ui32RegValue, - ui32RegMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr)); - 
return PVRSRV_ERROR_TIMEOUT; - } - } - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32RegAddr, - ui32RegValue, - ui32RegMask, - psParams->ui32PdumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXPollReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64RegValue, - IMG_UINT64 ui64RegMask) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - /* Split lower and upper words */ - IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); - IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); - IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); - IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = psDevInfo->pvRegsBaseKM; - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), - ui32UpperValue, - ui32UpperMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); - return PVRSRV_ERROR_TIMEOUT; - } - - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), - ui32LowerValue, - ui32LowerMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr)); - return PVRSRV_ERROR_TIMEOUT; - } - } - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32RegAddr + 4, - ui32UpperValue, - ui32UpperMask, - psParams->ui32PdumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32RegAddr, - ui32LowerValue, - ui32LowerMask, - psParams->ui32PdumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -} - -void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - OSWaitus(ui32TimeUs); - PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS); -} - -void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; -} - -#if defined(PDUMP) -void RGXWriteKernelMMUPC64(const void *hPrivate, - IMG_UINT32 ui32PCReg, - IMG_UINT32 ui32PCRegAlignShift, - IMG_UINT32 ui32PCRegShift, - IMG_UINT64 ui64PCVal) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* Write the cat-base address */ - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal); - - /* Pdump catbase address */ - MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, - RGX_PDUMPREG_NAME, - ui32PCReg, - 8, - ui32PCRegAlignShift, - ui32PCRegShift, - PDUMP_FLAGS_CONTINUOUS); -} - -void RGXWriteKernelMMUPC32(const void *hPrivate, - IMG_UINT32 ui32PCReg, - IMG_UINT32 ui32PCRegAlignShift, - IMG_UINT32 ui32PCRegShift, - IMG_UINT32 ui32PCVal) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* Write the cat-base address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal); - - /* Pdump catbase address */ - 
MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, - RGX_PDUMPREG_NAME, - ui32PCReg, - 4, - ui32PCRegAlignShift, - ui32PCRegShift, - PDUMP_FLAGS_CONTINUOUS); -} -#endif /* defined(PDUMP) */ - -void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr; -} - -#if defined(PDUMP) -void RGXMIPSWrapperConfig(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64GPURegsAddr, - IMG_UINT32 ui32GPURegsAlign, - IMG_UINT32 ui32BootMode) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, - ui32RegAddr, - (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode); - - PDUMP_BLKSTART(ui32PDumpFlags); - - /* Store register offset to temp PDump variable */ - PDumpRegLabelToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); - - /* Align register transactions identifier */ - PDumpWriteVarSHRValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - ui32GPURegsAlign, ui32PDumpFlags); - - /* Enable micromips instruction encoding */ - PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - ui32BootMode, ui32PDumpFlags); - - /* Do the actual register write */ - PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); - - PDUMP_BLKEND(ui32PDumpFlags); -} -#endif - -void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr; -} - -void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr; -} - -void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr; -} - -void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr; -} - -#if defined(PDUMP) -static inline -void RGXWriteRemapConfig2Reg(void __iomem *pvRegs, - PMR *psPMR, - IMG_DEVMEM_OFFSET_T uiLogicalOffset, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64PhyAddr, - IMG_UINT64 ui64PhyMask, - IMG_UINT64 ui64Settings) -{ - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; - PVRSRV_DEVICE_NODE *psDevNode; - - PVR_ASSERT(psPMR != NULL); - psDevNode = PMR_DeviceNode(psPMR); - - OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings); - - PDUMP_BLKSTART(ui32PDumpFlags); - - /* Store memory offset to temp PDump variable */ - PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, - uiLogicalOffset, ui32PDumpFlags); - - /* Keep only the relevant bits of the output physical address */ - PDumpWriteVarANDValueOp(psDevNode, ":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags); - - /* Extra settings for this remapped region */ - PDumpWriteVarORValueOp(psDevNode, ":SYSMEM:$1", ui64Settings, ui32PDumpFlags); - - /* Do the actual register write */ - PDumpInternalVarToReg64(psDevNode, RGX_PDUMPREG_NAME, ui32RegAddr, - ":SYSMEM:$1", ui32PDumpFlags); - - PDUMP_BLKEND(ui32PDumpFlags); -} - -void RGXBootRemapConfig(const void *hPrivate, - 
IMG_UINT32 ui32Config1RegAddr, - IMG_UINT64 ui64Config1RegValue, - IMG_UINT32 ui32Config2RegAddr, - IMG_UINT64 ui64Config2PhyAddr, - IMG_UINT64 ui64Config2PhyMask, - IMG_UINT64 ui64Config2Settings) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE); - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* Write remap config1 register */ - RGXWriteReg64(hPrivate, - ui32Config1RegAddr, - ui64Config1RegValue); - - /* Write remap config2 register */ - RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, - psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, - psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset, - ui32Config2RegAddr, - ui64Config2PhyAddr, - ui64Config2PhyMask, - ui64Config2Settings); -} - -void RGXCodeRemapConfig(const void *hPrivate, - IMG_UINT32 ui32Config1RegAddr, - IMG_UINT64 ui64Config1RegValue, - IMG_UINT32 ui32Config2RegAddr, - IMG_UINT64 ui64Config2PhyAddr, - IMG_UINT64 ui64Config2PhyMask, - IMG_UINT64 ui64Config2Settings) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE); - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* Write remap config1 register */ - RGXWriteReg64(hPrivate, - ui32Config1RegAddr, - ui64Config1RegValue); - - /* Write remap config2 register */ - RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, - psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, - psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset, - ui32Config2RegAddr, - ui64Config2PhyAddr, - ui64Config2PhyMask, - ui64Config2Settings); -} - -void RGXDataRemapConfig(const void *hPrivate, - IMG_UINT32 ui32Config1RegAddr, - IMG_UINT64 ui64Config1RegValue, - IMG_UINT32 ui32Config2RegAddr, - IMG_UINT64 ui64Config2PhyAddr, - IMG_UINT64 ui64Config2PhyMask, - IMG_UINT64 ui64Config2Settings) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* Write remap config1 register */ - RGXWriteReg64(hPrivate, - ui32Config1RegAddr, - ui64Config1RegValue); - - /* Write remap config2 register */ - RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, - psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, - psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset, - ui32Config2RegAddr, - ui64Config2PhyAddr, - ui64Config2PhyMask, - ui64Config2Settings); -} - -void RGXTrampolineRemapConfig(const void *hPrivate, - IMG_UINT32 ui32Config1RegAddr, - IMG_UINT64 ui64Config1RegValue, - IMG_UINT32 ui32Config2RegAddr, - IMG_UINT64 ui64Config2PhyAddr, - IMG_UINT64 ui64Config2PhyMask, - IMG_UINT64 ui64Config2Settings) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* write the register for real, without PDump */ - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, - ui32Config1RegAddr, - ui64Config1RegValue); - - PDUMP_BLKSTART(ui32PDumpFlags); - - /* Store the memory address in a PDump variable */ - PDumpPhysHandleToInternalVar64(psDevInfo->psDeviceNode, ":SYSMEM:$1", - psDevInfo->psTrampoline->hPdumpPages, - ui32PDumpFlags); - - /* Keep only the relevant bits of the input physical address */ - PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - 
~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK, - ui32PDumpFlags); - - /* Enable bit */ - PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, - ui32PDumpFlags); - - /* Do the PDump register write */ - PDumpInternalVarToReg64(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32Config1RegAddr, - ":SYSMEM:$1", - ui32PDumpFlags); - - PDUMP_BLKEND(ui32PDumpFlags); - - /* this can be written directly */ - RGXWriteReg64(hPrivate, - ui32Config2RegAddr, - (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings); -} -#endif #define MAX_NUM_COHERENCY_TESTS (10) IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) { PVRSRV_RGXDEV_INFO *psDevInfo; - PVRSRV_DEVICE_CONFIG *psDevConfig; + PVRSRV_DEVICE_NODE *psDeviceNode; PVR_ASSERT(hPrivate != NULL); psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; @@ -737,30 +77,10 @@ IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) return IMG_FALSE; } - psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig; - - return PVRSRVSystemSnoopingOfCPUCache(psDevConfig); -} - -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) -static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - - /* Wait for Slave Port to be Ready */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; - - /* Issue a Write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); + psDeviceNode = psDevInfo->psDeviceNode; - return eError; + return PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig); } -#endif /* * The fabric coherency test is performed when platform supports fabric coherency @@ -773,7 +93,6 @@ static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui */ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) { -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) PVRSRV_RGXDEV_INFO *psDevInfo; IMG_UINT32 *pui32FabricCohTestBufferCpuVA; DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc; @@ -782,10 +101,6 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64); IMG_UINT32 ui32SLCCTRL = 0; IMG_UINT32 ui32OddEven; -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - IMG_BOOL bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE); -#endif - IMG_UINT32 ui32TestType; IMG_UINT32 ui32OddEvenSeed = 1; PVRSRV_ERROR eError = PVRSRV_OK; IMG_BOOL bFullTestPassed = IMG_TRUE; @@ -793,23 +108,16 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) #if defined(DEBUG) IMG_BOOL bSubTestPassed = IMG_FALSE; #endif + enum TEST_TYPE { + CPU_WRITE_GPU_READ_SM=0, GPU_WRITE_CPU_READ_SM, + CPU_WRITE_GPU_READ_SH, GPU_WRITE_CPU_READ_SH + } eTestType; PVR_ASSERT(hPrivate != NULL); psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; PVR_LOG(("Starting fabric coherency test .....")); -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (bFeatureS7) - { - IMG_UINT64 ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF); - - /* Configure META to use SLC force-linefill for the bootloader segment */ - RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), - (ui64SegOutAddrTopUncached | 
RGXFW_BOOTLDR_DEVV_ADDR) >> 32); - } - else -#endif { /* Bypass the SLC when IO coherency is enabled */ ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS); @@ -820,8 +128,8 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) /* Size and align are 'expanded' because we request an export align allocation */ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), - &uiFabricCohTestBlockSize, - &uiFabricCohTestBlockAlign); + &uiFabricCohTestBlockSize, + &uiFabricCohTestBlockAlign); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -832,20 +140,20 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) /* Allocate, acquire cpu address and set firmware address */ eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, - uiFabricCohTestBlockSize, - uiFabricCohTestBlockAlign, - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | - PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwExFabricCoherencyTestBuffer", - &psFabricCohTestBufferMemDesc); + uiFabricCohTestBlockSize, + uiFabricCohTestBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwExFabricCoherencyTestBuffer", + &psFabricCohTestBufferMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -863,15 +171,13 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) goto e1; } - /* Create a FW address which is uncached in the Meta DCache and in the SLC - * using the Meta bootloader segment. - * This segment is the only one configured correctly out of reset - * (when this test is meant to be executed). - */ + /* Create a FW address which is uncached in the Meta DCache and in the SLC using the Meta bootloader segment. 
+ This segment is the only one configured correctly out of reset (when this test is meant to be executed) */ + eError = RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA, - psFabricCohTestBufferMemDesc, - 0, - RFW_FWADDR_FLAG_NONE); + psFabricCohTestBufferMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e2); /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ @@ -883,7 +189,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; - for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++) + for (eTestType = CPU_WRITE_GPU_READ_SH; eTestType <= GPU_WRITE_CPU_READ_SH && bExit == IMG_FALSE; eTestType++) { IMG_CPU_PHYADDR sCpuPhyAddr; IMG_BOOL bValid; @@ -891,7 +197,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); - eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); + eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid, CPU_USE); if (eError != PVRSRV_OK || bValid == IMG_FALSE) { PVR_DPF((PVR_DBG_ERROR, @@ -901,25 +207,25 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) continue; } - /* Here we do two passes [runs] mostly to account for the effects of using - the different seed (i.e. ui32OddEvenSeed) value to read and write */ + /* Here we do two passes mostly to account for the effects of using a different + seed (i.e. ui32OddEvenSeed) value to read and write */ for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++) { IMG_UINT32 i; #if defined(DEBUG) - switch (ui32TestType) + switch (eTestType) { - case 0: + case CPU_WRITE_GPU_READ_SM: PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); break; - case 1: + case GPU_WRITE_CPU_READ_SM: PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); break; - case 2: + case CPU_WRITE_GPU_READ_SH: PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); break; - case 3: + case GPU_WRITE_CPU_READ_SH: PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); break; default: @@ -930,6 +236,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) } #endif + /* Do multiple sub-dword cache line tests */ for (i = 0; i < 2 && bExit == IMG_FALSE; i++) { IMG_UINT32 ui32FWAddr; @@ -946,35 +253,30 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr; ui32OddEvenSeed += 1; - if (ui32TestType & 0x1) + if (eTestType == GPU_WRITE_CPU_READ_SM || eTestType == GPU_WRITE_CPU_READ_SH) { - ui32FWValue = i + ui32OddEvenSeed; + /* Clean dcache to ensure there is no stale data in dcache that might over-write + what we are about to write via slave-port here because if it drains from the CPU + dcache before we read it, it would corrupt what we are going to read back via + the CPU */ + sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); + CacheOpExec(psDevInfo->psDeviceNode, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), + sCpuPhyAddrStart, + sCpuPhyAddrEnd, + PVRSRV_CACHE_OP_FLUSH); - switch (ui32TestType) - { - case 1: - case 3: - /* Clean dcache to ensure there is no stale 
data in dcache that might over-write - what we are about to write via slave-port here because if it drains from the CPU - dcache before we read it, it would corrupt what we are going to read back via - the CPU */ - sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); - CacheOpExec(psDevInfo->psDeviceNode, - (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, - (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), - sCpuPhyAddrStart, - sCpuPhyAddrEnd, - PVRSRV_CACHE_OP_CLEAN); - break; - } + /* Calculate a new value to write */ + ui32FWValue = i + ui32OddEvenSeed; /* Write the value using the RGX slave-port interface */ eError = RGXWriteFWModuleAddr(psDevInfo, ui32FWAddr, ui32FWValue); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "RGXWriteFWModuleAddr error: %s, exiting", - PVRSRVGetErrorString(eError))); + "RGXWriteFWModuleAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); bExit = IMG_TRUE; continue; } @@ -985,8 +287,8 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "RGXReadFWModuleAddr error: %s, exiting", - PVRSRVGetErrorString(eError))); + "RGXReadFWModuleAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); bExit = IMG_TRUE; continue; } @@ -1003,21 +305,18 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) continue; } - if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) - { - /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory - region is discarded before we read (i.e. next read must trigger a cache miss). - If there is snooping of device cache, then any prefetching done by the CPU - will reflect the most up to date datum writing by GPU into said location, - that is to say prefetching must be coherent so CPU d-flush is not needed */ - sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); - CacheOpExec(psDevInfo->psDeviceNode, - (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, - (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), - sCpuPhyAddrStart, - sCpuPhyAddrEnd, - PVRSRV_CACHE_OP_INVALIDATE); - } + /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory + region is discarded before we read (i.e. next read must trigger a cache miss). 
+ Previously there was snooping of device cache, where prefetching done by the CPU + would reflect the most up to date datum writing by GPU into said location, + that is to say prefetching was coherent so CPU d-flush was not needed */ + sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); + CacheOpExec(psDevInfo->psDeviceNode, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, + (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), + sCpuPhyAddrStart, + sCpuPhyAddrEnd, + PVRSRV_CACHE_OP_INVALIDATE); } else { @@ -1033,9 +332,8 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) /* Flush possible cpu store-buffer(ing) on LMA */ OSWriteMemoryBarrier(&pui32FabricCohTestBufferCpuVA[i]); - switch (ui32TestType) + if (eTestType == CPU_WRITE_GPU_READ_SM) { - case 0: /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so memory is coherent before the SlavePort reads */ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); @@ -1045,7 +343,6 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) sCpuPhyAddrStart, sCpuPhyAddrEnd, PVRSRV_CACHE_OP_FLUSH); - break; } /* Read back value using RGX slave-port interface */ @@ -1059,7 +356,8 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) continue; } - /* We are being mostly paranoid here, just to account for CPU RAW operations */ + /* Being mostly paranoid here, verify that CPU RAW operation is valid + after the above slave port read */ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); CacheOpExec(psDevInfo->psDeviceNode, (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, @@ -1099,9 +397,9 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) { #if defined(DEBUG) PVR_LOG(("At Offset: %d, Expected: %x, Got: %x", - i, - (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], - (ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); + i, + (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], + (eTestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); #endif } else @@ -1114,7 +412,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) } } - ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; + ui32LastFWValue = (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; } #if defined(DEBUG) @@ -1123,22 +421,27 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) continue; } - switch (ui32TestType) + switch (eTestType) { - case 0: - PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + case CPU_WRITE_GPU_READ_SM: + PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); break; - case 1: - PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + case GPU_WRITE_CPU_READ_SM: + PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); break; - case 2: - PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + case CPU_WRITE_GPU_READ_SH: + PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); break; - case 3: - PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? 
"PASSED" : "FAILED")); + case GPU_WRITE_CPU_READ_SH: + PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); break; default: PVR_LOG(("Internal error, exiting test")); + eError = PVRSRV_ERROR_INIT_FAILURE; bExit = IMG_TRUE; continue; } @@ -1153,16 +456,6 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) DevmemFwUnmapAndFree(psDevInfo, psFabricCohTestBufferMemDesc); e0: -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (bFeatureS7) - { - /* Restore bootloader segment settings */ - IMG_UINT64 ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF); - RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), - (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); - } - else -#endif { /* Restore SLC bypass settings */ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL); @@ -1181,148 +474,5 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) } return eError; -#else - PVR_UNREFERENCED_PARAMETER(hPrivate); - - return PVRSRV_OK; -#endif -} - -IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) -{ - IMG_INT32 i32Ret = -1; - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - PVRSRV_DEVICE_NODE *psDeviceNode; - - PVR_ASSERT(hPrivate != NULL); - - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - psDeviceNode = psDevInfo->psDeviceNode; - - if ((psDeviceNode->pfnGetDeviceFeatureValue)) - { - i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); - } - - return i32Ret; -} - -IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; -} - -IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; -} - -IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) - { - return 0; - } - return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); -} - -IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) - { - return 0; - } - return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); -} - -IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) - { - return 0; - } - return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH); -} - -IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate) -{ - RGX_LAYER_PARAMS 
*psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return psDevInfo->sLayerParams.bDevicePA0IsValid; -} - -void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; -} - -void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; } -IMG_BOOL RGXDeviceAckIrq(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return (psDevInfo->pfnRGXAckIrq != NULL) ? - psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmipsmmuinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmipsmmuinit.c index 0e6c0ab05a46..e0b17bf3dedd 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmipsmmuinit.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmipsmmuinit.c @@ -186,9 +186,11 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently */ + sRGXMMUPCEConfig.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPCEConfig.pszPxLevelStr = "UnD"; sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* 32 bit entries */ - sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */ + sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */ sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */ sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */ @@ -211,7 +213,7 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; - sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; /* * @@ -222,6 +224,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently */ + sRGXMMUPDEConfig_4KBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPDEConfig_4KBDP.pszPxLevelStr = "UnD"; sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0; /* No PD used for MIPS */ @@ -241,6 +245,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_4KBDP. 
*/ + sRGXMMUPTEConfig_4KBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_4KBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; @@ -279,7 +285,7 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK; sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); @@ -305,6 +311,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPDEConfig_16KBDP */ + sRGXMMUPDEConfig_16KBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPDEConfig_16KBDP.pszPxLevelStr = "UnD"; sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0; sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0; @@ -323,6 +331,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet */ + sRGXMMUPTEConfig_16KBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPTEConfig_16KBDP.pszPxLevelStr = "UnD"; sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0; sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0; @@ -373,6 +383,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPDEConfig_64KBDP */ + sRGXMMUPDEConfig_64KBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPDEConfig_64KBDP.pszPxLevelStr = "UnD"; sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0; sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0; @@ -392,6 +404,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * Setup sRGXMMUPTEConfig_64KBDP. * */ + sRGXMMUPTEConfig_64KBDP.ePxLevel = MMU_LEVEL_1; + sRGXMMUPTEConfig_64KBDP.pszPxLevelStr = "PT"; sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; if (bPhysBusAbove32Bit) @@ -429,7 +443,7 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000); sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K; - sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; @@ -454,6 +468,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPDEConfig_256KBDP */ + sRGXMMUPDEConfig_256KBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPDEConfig_256KBDP.pszPxLevelStr = "UnD"; sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0; sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0; @@ -472,6 +488,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP */ + sRGXMMUPTEConfig_256KBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPTEConfig_256KBDP.pszPxLevelStr = "UnD"; sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0; sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0; @@ -515,6 +533,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPDEConfig_1MBDP. 
Not supported yet */ + sRGXMMUPDEConfig_1MBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPDEConfig_1MBDP.pszPxLevelStr = "UnD"; sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0; sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0; @@ -533,7 +553,9 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_1MBDP */ - sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; + sRGXMMUPTEConfig_1MBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPTEConfig_1MBDP.pszPxLevelStr = "UnD"; + sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 0; sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0; sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0; @@ -576,6 +598,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet */ + sRGXMMUPDEConfig_2MBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPDEConfig_2MBDP.pszPxLevelStr = "UnD"; sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0; sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0; @@ -594,6 +618,8 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) /* * Setup sRGXMMUPTEConfig_2MBDP */ + sRGXMMUPTEConfig_2MBDP.ePxLevel = MMU_LEVEL_LAST; + sRGXMMUPTEConfig_2MBDP.pszPxLevelStr = "UnD"; sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0; sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0; @@ -638,7 +664,6 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * Setup sRGXMMUDeviceAttributes */ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV; - sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1; /* * The page table fits in one or more big physically adjacent pages, @@ -648,7 +673,7 @@ PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) * log2 page size (12, 14, 16 for a 4K, 16K, 64K page size). */ sRGXMMUDeviceAttributes.ui32BaseAlign = - (CeilLog2(RGX_NUM_OS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U; + (CeilLog2(RGX_NUM_DRIVERS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U; /* 256K alignment might be too hard to achieve, fall back to 64K */ sRGXMMUDeviceAttributes.ui32BaseAlign = @@ -1041,5 +1066,3 @@ void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx, RGXMIPSFW_TLB_VALID) ? ("valid") : ("not valid"); } - - diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.c deleted file mode 100644 index 629e7abdc370..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmmuinit.c +++ /dev/null @@ -1,1079 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title Device specific initialisation routines -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Device specific MMU initialisation -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ /**************************************************************************/ -#include "rgxmmuinit.h" -#include "rgxmmudefs_km.h" - -#include "device.h" -#include "img_types.h" -#include "img_defs.h" -#include "mmu_common.h" -#include "pdump_mmu.h" - -#include "pvr_debug.h" -#include "pvrsrv_error.h" -#include "rgx_memallocflags.h" -#include "rgx_heaps.h" -#include "pdump_km.h" - - -/* useful macros */ -/* units represented in a bitfield */ -#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) - - -/* - * Bits of PT, PD and PC not involving addresses - */ - -#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ - RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ - RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ - RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \ - RGX_MMUCTRL_PT_DATA_CC_EN | \ - RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ - RGX_MMUCTRL_PT_DATA_VALID_EN) - -#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ - ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ - RGX_MMUCTRL_PD_DATA_VALID_EN) - -#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \ - RGX_MMUCTRL_PC_DATA_VALID_EN) - - - -static MMU_PxE_CONFIG sRGXMMUPCEConfig; -static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; - - -/* - * - * Configuration for heaps with 4kB Data-Page size - * - */ - -static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; -static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; -static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; -static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; - - -/* - * - * Configuration for heaps with 16kB Data-Page size - * - */ - -static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; -static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; -static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; -static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; - - -/* - * - * Configuration for heaps with 64kB Data-Page size - * - */ - -static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; -static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; -static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; -static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; - - -/* - * - * Configuration for heaps with 256kB Data-Page size - * - */ - -static MMU_PxE_CONFIG 
sRGXMMUPDEConfig_256KBDP; -static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; -static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; -static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; - - -/* - * - * Configuration for heaps with 1MB Data-Page size - * - */ - -static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; -static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; -static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; -static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; - - -/* - * - * Configuration for heaps with 2MB Data-Page size - * - */ - -static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; -static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; -static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; -static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; - - -/* Forward declaration of protection bits derivation functions, for - the following structure */ -static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); -static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); -static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); - -static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, - const MMU_PxE_CONFIG **ppsMMUPDEConfig, - const MMU_PxE_CONFIG **ppsMMUPTEConfig, - const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, - IMG_HANDLE *phPriv); - -static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); - -static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); -static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); - -static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; - -PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - /* Setup of Px Entries: - * - * - * PAGE TABLE (8 Byte): - * - * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 | - * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid | - * - * - * PAGE DIRECTORY (8 Byte): - * - * | 40 | 39...5 (varies) | 4 | 3...1 | 0 | - * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | - * - * - * PAGE CATALOGUE (4 Byte): - * - * | 31...4 | 3...2 | 1 | 0 | - * | Page Directory base address | (reserved) | Entry Pending | Valid | - * - */ - - - /* Example how to get the PD address from a PC entry. - * The procedure is the same for PD and PT entries to retrieve PT and Page addresses: - * - * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&': - * | 31...4 | 3...2 | 1 | 0 | - * | PD Addr | 0 | 0 | 0 | - * - * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>': - * | 27...0 | - * | PD Addr | - * - * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<': - * | 39...0 | - * | PD Addr | - * - */ - - - sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = - PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); - - /* - * Setup sRGXMMUPCEConfig - */ - sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ - sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ - - sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ - sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. 
*/ - - sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/ - sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ - - sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ - sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */ - - /* - * Setup sRGXMMUTopLevelDevVAddrConfig - */ - sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */ - sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */ - sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask, - sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift)); - - sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */ - sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */ - sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask, - sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift)); - - /* - * - * Configuration for heaps with 4kB Data-Page size - * - */ - - /* - * Setup sRGXMMUPDEConfig_4KBDP - */ - sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; - - sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); - sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12; - sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12; - - sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); - sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; - - sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; - sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; - - sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; - sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUPTEConfig_4KBDP - */ - sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; - - sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); - sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; - sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ - - sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; - sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; - - sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; - sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUDevVAddrConfig_4KBDP - */ - sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask, - sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift)); - - sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask, - sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift)); - - sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = 
~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask, - sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift)); - - sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); - sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; - sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0; - - /* - * Setup gsPageSizeConfig4KB - */ - gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; - gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; - gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; - gsPageSizeConfig4KB.uiRefCount = 0; - gsPageSizeConfig4KB.uiMaxRefCount = 0; - - - /* - * - * Configuration for heaps with 16kB Data-Page size - * - */ - - /* - * Setup sRGXMMUPDEConfig_16KBDP - */ - sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; - - sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); - sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; - sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; - - sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); - sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; - - sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; - sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; - - sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; - sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUPTEConfig_16KBDP - */ - sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; - - sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); - sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; - sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; - - sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; - sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; - - sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; - sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUDevVAddrConfig_16KBDP - */ - sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask, - sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift)); - - - sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask, - sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift)); - - - sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000); - sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14; - sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask, - sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift)); - - sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff); - sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; - sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; - - /* - * Setup gsPageSizeConfig16KB - */ - gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; - gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; - gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; - gsPageSizeConfig16KB.uiRefCount = 0; 
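/*
 * Illustrative sketch, not part of the original source: the uiNumEntriesPC/PD/PT
 * values in this file all come from UNITS_IN_BITFIELD(Mask, Shift), i.e.
 * ((Mask >> Shift) + 1). A minimal standalone example of that arithmetic for
 * the 16kB data-page heap, assuming the uiPTIndexMask/uiPTIndexShift values
 * used above (0x00001fc000 and 14):
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t units_in_bitfield(uint64_t mask, uint32_t shift)
{
	/* Number of distinct index values encodable in the masked bitfield */
	return (uint32_t)((mask >> shift) + 1);
}

static void example_16kb_heap_pt_entries(void)
{
	const uint64_t pt_index_mask  = UINT64_C(0x00001fc000); /* VA bits 14..20 */
	const uint32_t pt_index_shift = 14;                     /* 16kB page offset */

	/* (0x1fc000 >> 14) + 1 == 127 + 1 == 128 PT entries, so one page table
	 * spans 128 * 16kB = 2MB of device virtual address space. */
	assert(units_in_bitfield(pt_index_mask, pt_index_shift) == 128);
}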
- gsPageSizeConfig16KB.uiMaxRefCount = 0; - - - /* - * - * Configuration for heaps with 64kB Data-Page size - * - */ - - /* - * Setup sRGXMMUPDEConfig_64KBDP - */ - sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; - - sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); - sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8; - sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8; - - sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); - sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; - - sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; - sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; - - sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; - sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUPTEConfig_64KBDP - */ - sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; - - sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); - sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; - sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; - - sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; - sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; - - sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; - sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUDevVAddrConfig_64KBDP - */ - sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask, - sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift)); - - - sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask, - sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift)); - - - sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000); - sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16; - sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask, - sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift)); - - - sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); - sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; - sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0; - - /* - * Setup gsPageSizeConfig64KB - */ - gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; - gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; - gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; - gsPageSizeConfig64KB.uiRefCount = 0; - gsPageSizeConfig64KB.uiMaxRefCount = 0; - - - /* - * - * Configuration for heaps with 256kB Data-Page size - * - */ - - /* - * Setup sRGXMMUPDEConfig_256KBDP - */ - sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; - - sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); - sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6; - sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6; - - sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); - sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; - - sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; - sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; - - sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; - sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 
RGX_MMUCTRL_PD_DATA_VALID_SHIFT; - - /* - * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP - */ - sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; - - sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); - sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; - sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; - - sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; - sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; - - sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; - sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUDevVAddrConfig_256KBDP - */ - sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask, - sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift)); - - - sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask, - sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift)); - - - sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000); - sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18; - sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask, - sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift)); - - - sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff); - sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; - sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; - - /* - * Setup gsPageSizeConfig256KB - */ - gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; - gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; - gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; - gsPageSizeConfig256KB.uiRefCount = 0; - gsPageSizeConfig256KB.uiMaxRefCount = 0; - - /* - * Setup sRGXMMUPDEConfig_1MBDP - */ - sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; - - sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); - /* - * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even - * if they contain fewer entries. 
- */ - sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6; - sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6; - - sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); - sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; - - sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; - sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; - - sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; - sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUPTEConfig_1MBDP - */ - sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; - - sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); - sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; - sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; - - sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; - sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; - - sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; - sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUDevVAddrConfig_1MBDP - */ - sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask, - sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift)); - - - sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask, - sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift)); - - - sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000); - sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20; - sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask, - sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift)); - - - sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff); - sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; - sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; - - /* - * Setup gsPageSizeConfig1MB - */ - gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; - gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; - gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; - gsPageSizeConfig1MB.uiRefCount = 0; - gsPageSizeConfig1MB.uiMaxRefCount = 0; - - /* - * Setup sRGXMMUPDEConfig_2MBDP - */ - sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; - - sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); - /* - * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even - * if they contain fewer entries. 
- */ - sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6; - sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6; - - sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); - sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; - - sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; - sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; - - sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; - sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUPTEConfig_2MBDP - */ - sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; - - sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); - sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; - sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; - - sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; - sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; - - sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; - sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; - - /* - * Setup sRGXMMUDevVAddrConfig_2MBDP - */ - sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask, - sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift)); - - - sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; - sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; - sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask, - sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift)); - - - sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000); - sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21; - sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask, - sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift)); - - - sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff); - sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; - sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; - - /* - * Setup gsPageSizeConfig2MB - */ - gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; - gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; - gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; - gsPageSizeConfig2MB.uiRefCount = 0; - gsPageSizeConfig2MB.uiMaxRefCount = 0; - - /* - * Setup sRGXMMUDeviceAttributes - */ - sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; - sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3; - sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; - sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; - sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; - - /* Functions for deriving page table/dir/cat protection bits */ - sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; - sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; - sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; - sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; - sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; - sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; - - /* Functions for establishing configurations for PDE/PTE/DEVVADDR - on per-heap basis */ - sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; - 
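/*
 * Illustrative sketch, not part of the original source: the three-step
 * procedure described at the top of this file (apply uiAddrMask, then
 * uiAddrShift, then uiAddrLog2Align) written out for the 4-byte PC entry
 * format configured in sRGXMMUPCEConfig above (mask 0xfffffff0, shift 4,
 * log2 align 12). The entry value in the closing comment is a made-up example.
 */
#include <stdint.h>

static uint64_t pc_entry_to_pd_base(uint32_t pc_entry)
{
	/* 1) keep the significant address bits of the PC entry */
	uint64_t pd = pc_entry & UINT64_C(0xfffffff0);

	/* 2) shift the field down to a bare page-frame-style number */
	pd >>= 4;

	/* 3) restore the 4kB alignment to obtain the PD physical address */
	return pd << 12;
}

/* e.g. pc_entry_to_pd_base(0x00abcde1) == 0xabcde000 (status bits dropped) */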
sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; - - sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; - sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; - sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL; - - psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes; - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - PVRSRV_ERROR eError; - - eError = PVRSRV_OK; - -#if defined(PDUMP) - psDeviceNode->pfnMMUGetContextID = NULL; -#endif - - psDeviceNode->psMMUDevAttrs = NULL; - -#if defined(DEBUG) - PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); - PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", - gsPageSizeConfig4KB.uiMaxRefCount)); - PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", - gsPageSizeConfig4KB.uiRefCount)); - PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", - gsPageSizeConfig16KB.uiMaxRefCount)); - PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", - gsPageSizeConfig16KB.uiRefCount)); - PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", - gsPageSizeConfig64KB.uiMaxRefCount)); - PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", - gsPageSizeConfig64KB.uiRefCount)); - PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", - gsPageSizeConfig256KB.uiMaxRefCount)); - PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", - gsPageSizeConfig256KB.uiRefCount)); - PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", - gsPageSizeConfig1MB.uiMaxRefCount)); - PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", - gsPageSizeConfig1MB.uiRefCount)); - PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d", - gsPageSizeConfig2MB.uiMaxRefCount)); - PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", - gsPageSizeConfig2MB.uiRefCount)); -#endif - if (gsPageSizeConfig4KB.uiRefCount > 0 || - gsPageSizeConfig16KB.uiRefCount > 0 || - gsPageSizeConfig64KB.uiRefCount > 0 || - gsPageSizeConfig256KB.uiRefCount > 0 || - gsPageSizeConfig1MB.uiRefCount > 0 || - gsPageSizeConfig2MB.uiRefCount > 0 - ) - { - PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); - } - - return eError; -} - -/*************************************************************************/ /*! -@Function RGXDerivePCEProt4 -@Description calculate the PCE protection flags based on a 4 byte entry -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) -{ - return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN; -} - - -/*************************************************************************/ /*! -@Function RGXDerivePCEProt8 -@Description calculate the PCE protection flags based on an 8 byte entry -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -{ - PVR_UNREFERENCED_PARAMETER(uiProtFlags); - PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); - - PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device")); - return 0; -} - - -/*************************************************************************/ /*! 
-@Function RGXDerivePDEProt4 -@Description derive the PDE protection flags based on a 4 byte entry -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags) -{ - PVR_UNREFERENCED_PARAMETER(uiProtFlags); - PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); - return 0; -} - - -/*************************************************************************/ /*! -@Function RGXDerivePDEProt8 -@Description derive the PDE protection flags based on an 8 byte entry - -@Input uiLog2DataPageSize The log2 of the required page size. - E.g, for 4KiB pages, this parameter must be 12. - For 2MiB pages, it must be set to 21. - -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -{ - IMG_UINT64 ret_value = 0; /* 0 means invalid */ - - if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ - { - switch (uiLog2DataPageSize) - { - case RGX_HEAP_4KB_PAGE_SHIFT: - ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB; - break; - case RGX_HEAP_16KB_PAGE_SHIFT: - ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB; - break; - case RGX_HEAP_64KB_PAGE_SHIFT: - ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB; - break; - case RGX_HEAP_256KB_PAGE_SHIFT: - ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB; - break; - case RGX_HEAP_1MB_PAGE_SHIFT: - ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB; - break; - case RGX_HEAP_2MB_PAGE_SHIFT: - ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB; - break; - default: - PVR_DPF((PVR_DBG_ERROR, - "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]", - __FILE__, __LINE__, __func__, uiLog2DataPageSize)); - } - } - return ret_value; -} - - -/*************************************************************************/ /*! -@Function RGXDerivePTEProt4 -@Description calculate the PTE protection flags based on a 4 byte entry -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) -{ - PVR_UNREFERENCED_PARAMETER(uiProtFlags); - PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device")); - - return 0; -} - -/*************************************************************************/ /*! 
-@Function RGXDerivePTEProt8 -@Description calculate the PTE protection flags based on an 8 byte entry -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -{ - IMG_UINT64 ui64MMUFlags=0; - - PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); - - if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) - { - /* read/write */ - } - else if (MMU_PROTFLAGS_READABLE & uiProtFlags) - { - /* read only */ - ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN; - } - else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) - { - /* write only */ - PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt8: write-only is not possible on this device")); - } - else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified...")); - } - - /* cache coherency */ - if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) - { - ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN; - } - - /* cache setup */ - if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) - { - ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN; - } - - if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) - { - ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN; - } - - if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) - { - ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN; - } - - return ui64MMUFlags; -} - - -/*************************************************************************/ /*! -@Function RGXGetPageSizeConfig -@Description Set up configuration for variable sized data pages. - RGXPutPageSizeConfigCB has to be called to ensure correct - refcounting. -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, - const MMU_PxE_CONFIG **ppsMMUPDEConfig, - const MMU_PxE_CONFIG **ppsMMUPTEConfig, - const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, - IMG_HANDLE *phPriv) -{ - MMU_PAGESIZECONFIG *psPageSizeConfig; - - switch (uiLog2DataPageSize) - { - case RGX_HEAP_4KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig4KB; - break; - case RGX_HEAP_16KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig16KB; - break; - case RGX_HEAP_64KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig64KB; - break; - case RGX_HEAP_256KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig256KB; - break; - case RGX_HEAP_1MB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig1MB; - break; - case RGX_HEAP_2MB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig2MB; - break; - default: - PVR_DPF((PVR_DBG_ERROR, - "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", - uiLog2DataPageSize)); - *phPriv = NULL; - return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; - } - - /* Refer caller's pointers to the data */ - *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; - *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; - *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; - -#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) - /* Increment ref-count - not that we're allocating anything here - (I'm using static structs), but one day we might, so we want - the Get/Put code to be balanced properly */ - psPageSizeConfig->uiRefCount++; - - /* This is purely for debug statistics */ - psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, - psPageSizeConfig->uiRefCount); -#endif - - *phPriv = 
(IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; - PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); - - return PVRSRV_OK; -} - -/*************************************************************************/ /*! -@Function RGXPutPageSizeConfig -@Description Tells this code that the mmu module is done with the - configurations set in RGXGetPageSizeConfig. This can - be a no-op. - Called after RGXGetPageSizeConfigCB. -@Return PVRSRV_ERROR - */ /**************************************************************************/ -static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) -{ -#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) - MMU_PAGESIZECONFIG *psPageSizeConfig; - IMG_UINT32 uiLog2DataPageSize; - - uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; - - switch (uiLog2DataPageSize) - { - case RGX_HEAP_4KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig4KB; - break; - case RGX_HEAP_16KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig16KB; - break; - case RGX_HEAP_64KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig64KB; - break; - case RGX_HEAP_256KB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig256KB; - break; - case RGX_HEAP_1MB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig1MB; - break; - case RGX_HEAP_2MB_PAGE_SHIFT: - psPageSizeConfig = &gsPageSizeConfig2MB; - break; - default: - PVR_DPF((PVR_DBG_ERROR, - "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", - uiLog2DataPageSize)); - return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; - } - - /* Ref-count here is not especially useful, but it's an extra - check that the API is being used correctly */ - psPageSizeConfig->uiRefCount--; -#else - PVR_UNREFERENCED_PARAMETER(hPriv); -#endif - return PVRSRV_OK; -} - -static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) -{ - PVR_UNREFERENCED_PARAMETER(ui32PDE); - PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); - PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); - return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -} - -static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) -{ - switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK)) - { - case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB: - *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT; - break; - case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB: - *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT; - break; - case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB: - *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT; - break; - case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB: - *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT; - break; - case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB: - *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT; - break; - case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB: - *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT; - break; - default: - return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; - } - return PVRSRV_OK; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmulticore.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmulticore.c index a888e70015db..447c50a78e00 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmulticore.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxmulticore.c @@ -49,6 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "multicore_defs.h" #include "allocmem.h" #include "pvr_debug.h" +#include "rgxfwmemctx.h" /* * check that register defines match our hardcoded definitions. 
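The PDE helpers in rgxmmuinit.c above form an encode/decode pair: RGXDerivePDEProt8 maps a log2 data-page size onto the PD_DATA_PAGE_SIZE field, and RGXGetPageSizeFromPDE8 recovers the log2 size from a PDE. The following is a table-driven sketch of the same round trip; the 0..5 field encodings are placeholders for illustration, the driver's real values being the RGX_MMUCTRL_PD_DATA_PAGE_SIZE_* definitions.

#include <stdint.h>
#include <stdbool.h>

/* Log2 data-page sizes supported by the variable-page-size heaps, matching
 * the error message in RGXDerivePDEProt8: {12, 14, 16, 18, 20, 21}. */
static const uint32_t log2_sizes[] = { 12, 14, 16, 18, 20, 21 };

/* Encoding 0..5 is assumed purely for the sketch. */
static bool encode_page_size(uint32_t log2_size, uint64_t *field)
{
    for (uint32_t i = 0; i < sizeof(log2_sizes) / sizeof(log2_sizes[0]); i++) {
        if (log2_sizes[i] == log2_size) {
            *field = i;
            return true;
        }
    }
    return false; /* mirrors the "Invalid parameter log2_page_size" path */
}

static bool decode_page_size(uint64_t field, uint32_t *log2_size)
{
    if (field >= sizeof(log2_sizes) / sizeof(log2_sizes[0]))
        return false; /* mirrors PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE */
    *log2_size = log2_sizes[field];
    return true;
}

int main(void)
{
    uint64_t field = 0;
    uint32_t log2_size = 0;

    if (!encode_page_size(21, &field) || !decode_page_size(field, &log2_size))
        return 1;

    return (log2_size == 21) ? 0 : 1; /* the 2MB case survives the round trip */
}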
@@ -72,26 +73,27 @@ static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, /* * RGXInitMultiCoreInfo: * Return multicore information to clients. - * Return not_supported on cores without multicore. + * Return not supported on cores without multicore. */ static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32CapsSize, IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps) { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError = PVRSRV_OK; - if (psDeviceNode->ui32MultiCoreNumCores == 0) + if (psDevInfo->ui32MultiCoreNumCores == 0) { /* MULTICORE not supported on this device */ eError = PVRSRV_ERROR_NOT_SUPPORTED; } else { - *pui32NumCores = psDeviceNode->ui32MultiCoreNumCores; + *pui32NumCores = psDevInfo->ui32MultiCoreNumCores; if (ui32CapsSize > 0) { - if (ui32CapsSize < psDeviceNode->ui32MultiCoreNumCores) + if (ui32CapsSize < psDevInfo->ui32MultiCoreNumCores) { PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small")); eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; @@ -100,9 +102,9 @@ static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, { IMG_UINT32 i; - for (i = 0; i < psDeviceNode->ui32MultiCoreNumCores; ++i) + for (i = 0; i < psDevInfo->ui32MultiCoreNumCores; ++i) { - pui64Caps[i] = psDeviceNode->pui64MultiCoreCapabilities[i]; + pui64Caps[i] = psDevInfo->pui64MultiCoreCapabilities[i]; } } } @@ -130,19 +132,75 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) } /* defaults for non-multicore devices */ - psDeviceNode->ui32MultiCoreNumCores = 0; - psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); - psDeviceNode->pui64MultiCoreCapabilities = NULL; + psDevInfo->ui32MultiCoreNumCores = 0; + psDevInfo->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); + psDevInfo->pui64MultiCoreCapabilities = NULL; psDeviceNode->pfnGetMultiCoreInfo = NULL; if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) { + IMG_BOOL bPowerWasDown; IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH)); IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU; IMG_UINT32 ui32NumCores; IMG_UINT32 i; - ui32NumCores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM); +#if defined(RGX_HOST_SECURE_REGBANK_OFFSET) && defined(XPU_MAX_REGBANKS_ADDR_WIDTH) + /* Ensure the HOST_SECURITY reg bank definitions are correct */ + if ((RGX_HOST_SECURE_REGBANK_OFFSET + RGX_HOST_SECURE_REGBANK_SIZE) != ui32MulticoreRegBankOffset) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Register bank definitions for HOST_SECURITY don't match core's configuration.", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } +#endif + bPowerWasDown = ! 
PVRSRVIsSystemPowered(psDeviceNode); + + /* Power-up the device as required to read the registers */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && bPowerWasDown) + { + PVRSRVPowerLock(psDeviceNode); + eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVSetSystemPowerState ON failed (%u)", __func__, eError)); + PVRSRVPowerUnlock(psDeviceNode); + return eError; + } + } + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; + + LOOP_UNTIL_TIMEOUT_US(ui32FwTimeout) + { + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores, + INVALIDATE); + if (*((volatile IMG_UINT32*)&psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if (*((volatile IMG_UINT32*)&psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "Multicore info not available for guest")); + return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; + } + + ui32NumCores = psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores; + + PVR_LOG(("RGX Guest Device initialised with %u %s", + ui32NumCores, (ui32NumCores == 1U) ? "core" : "cores")); + } + else +#endif + { + ui32NumCores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM); + } #if !defined(NO_HARDWARE) /* check that the number of cores reported is in-bounds */ if (ui32NumCores > (RGX_CR_MULTICORE_SYSTEM_MASKFULL >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT)) @@ -158,35 +216,67 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) PDUMPCOMMENT(psDeviceNode, "RGX Multicore has %d cores\n", ui32NumCores); /* allocate storage for capabilities */ - psDeviceNode->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDeviceNode->pui64MultiCoreCapabilities[0])); - if (psDeviceNode->pui64MultiCoreCapabilities == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__)); - return PVRSRV_ERROR_OUT_OF_MEMORY; - } + psDevInfo->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDevInfo->pui64MultiCoreCapabilities[0])); + PVR_LOG_GOTO_IF_NOMEM(psDevInfo->pui64MultiCoreCapabilities, eError, err); - psDeviceNode->ui32MultiCoreNumCores = ui32NumCores; + psDevInfo->ui32MultiCoreNumCores = ui32NumCores; for (i = 0; i < ui32NumCores; ++i) { #if !defined(NO_HARDWARE) - psDeviceNode->pui64MultiCoreCapabilities[i] = - OSReadHWReg64(psDevInfo->pvRegsBaseKM, ui32MulticoreGPUReg) & RGX_CR_MULTICORE_GPU_MASKFULL; +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + psDevInfo->pui64MultiCoreCapabilities[i] = psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.aui64MultiCoreCapabilities[i]; + } + else +#endif + { + IMG_BOOL bMulticoreRegsMapped = (psDeviceNode->psDevConfig->ui32RegsSize > ui32MulticoreGPUReg); + void __iomem *pvCoreRegBase; + IMG_INT32 ui32MulticoreRegOffset; + + if (bMulticoreRegsMapped) + { + pvCoreRegBase = psDevInfo->pvRegsBaseKM; + ui32MulticoreRegOffset = ui32MulticoreGPUReg; + } + else + { + /* the register bank of this core is not mapped */ + IMG_CPU_PHYADDR sMultiCoreRegsBase = psDeviceNode->psDevConfig->sRegsCpuPBase; + + sMultiCoreRegsBase.uiAddr += 
i*ui32MulticoreRegBankOffset; + pvCoreRegBase = (void __iomem *) OSMapPhysToLin(sMultiCoreRegsBase, psDeviceNode->psDevConfig->ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + PVR_LOG_GOTO_IF_NOMEM(pvCoreRegBase, eError, err); + + /* adjust the register offset to point inside the newly mapped range */ + ui32MulticoreRegOffset = RGX_CR_MULTICORE_GPU; + } + + psDevInfo->pui64MultiCoreCapabilities[i] = + OSReadHWReg64(pvCoreRegBase, ui32MulticoreRegOffset) & RGX_CR_MULTICORE_GPU_MASKFULL; + + if (!bMulticoreRegsMapped) + { + OSUnMapPhysToLin((void __force *) pvCoreRegBase, psDeviceNode->psDevConfig->ui32RegsSize); + } + } #else /* emulation for what we think caps are */ - psDeviceNode->pui64MultiCoreCapabilities[i] = + psDevInfo->pui64MultiCoreCapabilities[i] = i | ((i == 0) ? (RGX_MULTICORE_CAPABILITY_PRIMARY_EN | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN) : 0) | RGX_MULTICORE_CAPABILITY_COMPUTE_EN | RGX_MULTICORE_CAPABILITY_FRAGMENT_EN; #endif - PVR_DPF((PVR_DBG_MESSAGE, "Core %d has capabilities value 0x%x", i, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i] )); + PVR_DPF((PVR_DBG_MESSAGE, "Core %d has capabilities value 0x%x", i, (IMG_UINT32)psDevInfo->pui64MultiCoreCapabilities[i] )); PDUMPCOMMENT(psDeviceNode, "\tCore %d has caps 0x%08x\n", i, - (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]); + (IMG_UINT32)psDevInfo->pui64MultiCoreCapabilities[i]); - if (psDeviceNode->pui64MultiCoreCapabilities[i] & RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN) + if (psDevInfo->pui64MultiCoreCapabilities[i] & RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN) { - psDeviceNode->ui32MultiCorePrimaryId = (psDeviceNode->pui64MultiCoreCapabilities[i] + psDevInfo->ui32MultiCorePrimaryId = (psDevInfo->pui64MultiCoreCapabilities[i] & ~RGX_CR_MULTICORE_GPU_ID_CLRMSK) >> RGX_CR_MULTICORE_GPU_ID_SHIFT; } @@ -194,6 +284,14 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) ui32MulticoreGPUReg += ui32MulticoreRegBankOffset; } + /* revert power state to what it was on entry to this function */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && bPowerWasDown) + { + eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); + PVRSRVPowerUnlock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); + } + /* Register callback to return info about multicore setup to client bridge */ psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo; } @@ -204,6 +302,10 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) } return eError; + +err: + RGXDeInitMultiCoreInfo(psDeviceNode); + return eError; } @@ -213,12 +315,14 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) */ void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) { - if (psDeviceNode->pui64MultiCoreCapabilities != NULL) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDevInfo->pui64MultiCoreCapabilities != NULL) { - OSFreeMem(psDeviceNode->pui64MultiCoreCapabilities); - psDeviceNode->pui64MultiCoreCapabilities = NULL; - psDeviceNode->ui32MultiCoreNumCores = 0; - psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); + OSFreeMem(psDevInfo->pui64MultiCoreCapabilities); + psDevInfo->pui64MultiCoreCapabilities = NULL; + psDevInfo->ui32MultiCoreNumCores = 0; + psDevInfo->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); } psDeviceNode->pfnGetMultiCoreInfo = NULL; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpdump.c 
b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpdump.c index 750281d82ebe..409d901ff3a1 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpdump.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxpdump.c @@ -43,8 +43,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #if defined(PDUMP) #include "pvrsrv.h" +#include "devicemem_utils.h" #include "devicemem_pdump.h" +#include "devicemem_server.h" #include "rgxpdump.h" +#include "rgxpdump_common.h" #include "rgx_bvnc_defs_km.h" #include "pdumpdesc.h" @@ -58,68 +61,144 @@ static PVRSRV_ERROR _FWDumpSignatureBufferKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32PDumpFlags) { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SUPPORT_FIRMWARE_GCOV) + DECLARE_DLLIST(sFirmwareGcovBufferValidRegions); +#endif + DECLARE_DLLIST(sSigTDMChecksValidRegions); + DECLARE_DLLIST(sSigTAChecksValidRegions); + DECLARE_DLLIST(sSig3DChecksValidRegions); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + DECLARE_DLLIST(sSigRDMChecksValidRegions); +#endif PVR_UNREFERENCED_PARAMETER(psConnection); +#if defined(SUPPORT_FIRMWARE_GCOV) + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psFirmwareGcovBufferMemDesc, + psDevInfo->ui32FirmwareGcovSize, + &sFirmwareGcovBufferValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrReturnError); +#endif + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigTDMChecksMemDesc, + psDevInfo->ui32SigTDMChecksSize, + &sSigTDMChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeGcovBufferRegions); + } + + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigTAChecksMemDesc, + psDevInfo->ui32SigTAChecksSize, + &sSigTAChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSigTDMChecksRegions); + + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSig3DChecksMemDesc, + psDevInfo->ui32Sig3DChecksSize, + &sSig3DChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSigTAChecksRegions); + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && + RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) + { + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigRDMChecksMemDesc, + psDevInfo->ui32SigRDMChecksSize, + &sSigRDMChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSig3DChecksRegions); + } +#endif + PDUMPIF(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); PDUMPELSE(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); #if defined(SUPPORT_FIRMWARE_GCOV) /* Gcov */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Gcov Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psFirmwareGcovBufferMemDesc, - 0, - psDevInfo->ui32FirmwareGcovSize, - "firmware_gcov.img", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psFirmwareGcovBufferMemDesc, + &sFirmwareGcovBufferValidRegions, + "firmware_gcov.img", + ui32PDumpFlags); #endif + + /* TDM signatures */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); + 
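The reworked _FWDumpSignatureBufferKM gathers all valid-region lists up front with PDumpGetValidRegion and, if any call fails, unwinds the already-acquired lists in reverse order through the ErrFree* labels. Here is a stripped-down sketch of that acquire-in-order / release-in-reverse goto pattern; the resource names and helpers are placeholders, not the driver's types.

#include <stdio.h>

/* Placeholder acquire/release pair standing in for PDumpGetValidRegion /
 * DevmemIntPDumpFreeValidRegions in this sketch. */
static int  acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

static int dump_signature_buffers(void)
{
    int err;

    if ((err = acquire("tdm")) != 0) goto err_return;
    if ((err = acquire("ta"))  != 0) goto err_free_tdm;
    if ((err = acquire("3d"))  != 0) goto err_free_ta;

    /* success path: hand the lists to the dump calls and return */
    return 0;

err_free_ta:
    release("ta");
err_free_tdm:
    release("tdm");
err_return:
    return err;
}

int main(void)
{
    return dump_signature_buffers();
}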
PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigTDMChecksMemDesc, + &sSigTDMChecksValidRegions, + "out.tdmsig", + ui32PDumpFlags); + } + /* TA signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TA signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc, - 0, - psDevInfo->ui32SigTAChecksSize, - "out.tasig", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigTAChecksMemDesc, + &sSigTAChecksValidRegions, + "out.tasig", + ui32PDumpFlags); /* 3D signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc, - 0, - psDevInfo->ui32Sig3DChecksSize, - "out.3dsig", - 0, - ui32PDumpFlags); - -#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSig3DChecksMemDesc, + &sSig3DChecksValidRegions, + "out.3dsig", + ui32PDumpFlags); + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && + RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) { - /* TDM signatures */ - PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTDM2DChecksMemDesc, - 0, - psDevInfo->ui32SigTDM2DChecksSize, - "out.tdmsig", - 0, - ui32PDumpFlags); + /* RDM signatures */ + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump RDM signatures and checksums Buffer"); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigRDMChecksMemDesc, + &sSigRDMChecksValidRegions, + "out.rdmsig", + ui32PDumpFlags); } #endif PDUMPFI(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); return PVRSRV_OK; + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) +ErrFreeSig3DChecksRegions: + DevmemIntPDumpFreeValidRegions(&sSig3DChecksValidRegions); +#endif +ErrFreeSigTAChecksRegions: + DevmemIntPDumpFreeValidRegions(&sSigTAChecksValidRegions); +ErrFreeSigTDMChecksRegions: + DevmemIntPDumpFreeValidRegions(&sSigTDMChecksValidRegions); +ErrFreeGcovBufferRegions: +#if defined(SUPPORT_FIRMWARE_GCOV) + DevmemIntPDumpFreeValidRegions(&sFirmwareGcovBufferValidRegions); +ErrReturnError: +#endif + return eError; } -static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32PDumpFlags) + +static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); /* Dump trace buffers */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump trace buffers"); @@ -130,8 +209,8 @@ static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is * "expression must have a constant value". 
*/ - const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff - = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); + const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff = + (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); /* ui32TracePointer tracepointer */ ui32Size = sizeof(IMG_UINT32); @@ -154,7 +233,7 @@ static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, ui32OutFileOffset += ui32Size; /* trace buffer */ - ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + ui32Size = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], 0, /* 0 offset in the trace buffer mem desc */ @@ -192,10 +271,10 @@ static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, ui32PDumpFlags); return PVRSRV_OK; - } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32PDumpFlags) @@ -221,18 +300,16 @@ static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection, "out.3dsig", 0); -#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) { /* TDM signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); - DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTDM2DChecksMemDesc, + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTDMChecksMemDesc, 0, - psDevInfo->ui32SigTDM2DChecksSize, + psDevInfo->ui32SigTDMChecksSize, "out.tdmsig", 0); } -#endif return PVRSRV_OK; } @@ -244,7 +321,7 @@ static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); /* Dump trace buffers */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump trace buffers"); @@ -281,7 +358,7 @@ static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection, ui32OutFileOffset += ui32Size; /* trace buffer */ - ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + ui32Size = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], 0, /* 0 offset in the trace buffer mem desc */ @@ -311,8 +388,8 @@ static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection, 0); return PVRSRV_OK; - } +#endif /* @@ -322,73 +399,36 @@ PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32PDumpFlags) { - if ((psDeviceNode->pfnCheckDeviceFeature) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) - { - return _MipsDumpSignatureBufferKM(psConnection, - psDeviceNode, - ui32PDumpFlags); - } - else - { - return _FWDumpSignatureBufferKM(psConnection, - psDeviceNode, - ui32PDumpFlags); - } -} - - -#if defined(SUPPORT_VALIDATION) -PVRSRV_ERROR PVRSRVPDumpComputeCRCSignatureCheckKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT32 ui32PDumpFlags) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = 
(PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))) - { - return PVRSRV_ERROR_NOT_SUPPORTED; - } - - /* - * Add a PDUMP POLL on the KZ signature check status. - */ - if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_NOERR_EN) - { - PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: match required"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_SCRATCH11, - 1U, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - } - else if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_ERR_EN) + if (psDeviceNode->pfnCheckDeviceFeature) { - PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: mismatch required"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_SCRATCH11, - 2U, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + return _MipsDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } + else +#endif + { + return _FWDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } } - PVR_UNREFERENCED_PARAMETER(psConnection); return PVRSRV_OK; } -#endif + + PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32PDumpFlags) { - PVR_UNREFERENCED_PARAMETER(psConnection); PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + PVR_UNREFERENCED_PARAMETER(psConnection); return PVRSRV_OK; } @@ -427,15 +467,21 @@ PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32PDumpFlags) { - if ((psDeviceNode->pfnCheckDeviceFeature) && - PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + if (psDeviceNode->pfnCheckDeviceFeature) { - return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); - } - else - { - return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } + else +#endif + { + return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } } + + return PVRSRV_OK; } PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode, @@ -563,6 +609,8 @@ PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_75; } + pui32Word[9] |= (eFBCSwizzle << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) & IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK; + break; default: PVR_DPF((PVR_DBG_ERROR, "Unsupported algorithm - %d", @@ -596,12 +644,12 @@ PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic pui32Word[12] = paui32FBCClearColour[2]; pui32Word[13] = paui32FBCClearColour[3]; +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) #if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT)) { /* Should match current value of RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP */ - IMG_UINT32 ui32TFBCGroup = (ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) >> - RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT; + IMG_UINT32 ui32TFBCGroup = ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK; switch (ui32TFBCGroup) { case RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0: @@ -622,8 +670,7 @@ 
PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) { /* Should match current value of RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME */ - IMG_UINT32 ui32TFBCScheme = (ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) >> - RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT; + IMG_UINT32 ui32TFBCScheme = ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK; switch (ui32TFBCScheme) { case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT: @@ -659,6 +706,27 @@ PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic } #endif + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) + { + RGX_LAYER_PARAMS sParams = {.psDevInfo = psDevInfo}; + + if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, TFBC_VERSION) >= 20U) + { + IMG_UINT32 ui32TFBCOverrideLossyMinChannel = (ui32TFBCControl & RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN); + + if (ui32TFBCOverrideLossyMinChannel) + { + pui32Word[14] |= IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_EN; + } + } + else + { + /* Should be set on TFBC version < 2.0 cores */ + pui32Word[14] |= IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_EN; + } + } +#endif /* defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) */ + return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxsrvinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxsrvinit.c index 72bbe24d40d8..77d63732e4e3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxsrvinit.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxsrvinit.c @@ -47,7 +47,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "osfunc.h" #include "km_apphint_defs.h" #include "htbuffer_types.h" -#include "htbuffer_init.h" #include "devicemem.h" #include "devicemem_pdump.h" @@ -55,6 +54,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_fwif_km.h" #include "pdump_km.h" +#include "rgxinit_apphints.h" #include "rgxinit.h" #include "rgxmulticore.h" @@ -88,10 +88,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv_device.h" #endif -#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ - #define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ #define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) +#define AVAIL_POW_UNITS_MASK_DEFAULT (PVRSRV_APPHINT_HWVALAVAILABLESPUMASK) +#define AVAIL_RAC_MASK_DEFAULT (PVRSRV_APPHINT_HWVALAVAILABLERACMASK) +#endif /* Kernel CCB size */ @@ -108,197 +110,206 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high. #endif -#if defined(SUPPORT_VALIDATION) -#include "pvrsrv_apphint.h" -#endif -#include "os_srvinit_param.h" -#if !defined(__linux__) +#include "os_apphint.h" + +/* + * _ParseHTBAppHints: + * + * Generate necessary references to the globally visible AppHints which are + * declared in the above #include "km_apphint_defs.h" + * Without these local references some compiler tool-chains will treat + * unreferenced declarations as fatal errors. This function duplicates the + * HTB_specific apphint references which are made in htbserver.c:HTBInit() + * However, it makes absolutely *NO* use of these hints. 
+ */ +static void +_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvAppHintState) +{ + IMG_UINT32 ui32AppHintDefault; + IMG_UINT32 ui32LogType; + IMG_UINT32 ui32OpMode; + IMG_UINT32 ui32BufferSize; + + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEHTBLOGGROUP; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableHTBLogGroup, + &ui32AppHintDefault, &ui32LogType); + ui32AppHintDefault = PVRSRV_APPHINT_HTBOPERATIONMODE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBOperationMode, + &ui32AppHintDefault, &ui32OpMode); + ui32AppHintDefault = PVRSRV_APPHINT_HTBUFFERSIZE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, + &ui32AppHintDefault, &ui32BufferSize); +} + /*! ******************************************************************************* - * AppHint mnemonic data type helper tables + + @Function GetFilterFlags + + @Description Initialise and return filter flags + + @Input bFilteringMode : Enable new TPU filtering mode + @Input ui32TruncateMode : TPU Truncate mode + + @Return IMG_UINT32 : Filter flags + ******************************************************************************/ -/* apphint map of name vs. enable flag */ -static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = { -#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, - HTB_LOG_SFGROUPLIST -#undef X -}; -/* apphint map of arg vs. OpMode */ -static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = { - { "droplatest", HTB_OPMODE_DROPLATEST}, - { "dropoldest", HTB_OPMODE_DROPOLDEST}, - /* HTB should never be started in HTB_OPMODE_BLOCK - * as this can lead to deadlocks - */ -}; +static INLINE IMG_UINT32 GetFilterFlags(IMG_BOOL bFilteringMode, IMG_UINT32 ui32TruncateMode) +{ + IMG_UINT32 ui32FilterFlags = 0; -static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = { - { "trace", 0}, - { "none", 0} -#if defined(SUPPORT_TBI_INTERFACE) - , { "tbi", 1} -#endif -}; + ui32FilterFlags |= bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; + if (ui32TruncateMode == 2) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; + } + else if (ui32TruncateMode == 3) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; + } -static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = { - { "mono", 0 }, - { "mono_raw", 1 }, - { "sched", 2 } -}; + return ui32FilterFlags; +} -static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP }; -/* - * Services AppHints initialisation - */ -#define X(a, b, c, d, e, f) SrvInitParamInit ## b(a, d, e) -APPHINT_LIST_ALL -#undef X -#endif /* !defined(__linux__) */ +/*! 
+******************************************************************************* -/* - * Container for all the apphints used by this module - */ -typedef struct _RGX_SRVINIT_APPHINTS_ + @Function InitDeviceFlags + + @Description Initialise and return device flags + + @Input psDeviceNode : Pointer to device node + @Input pvAppHintState : Pointer to apphint state + @Input psHints : Apphints container + + @Return void + +******************************************************************************/ +static INLINE void InitDeviceFlags(PVRSRV_DEVICE_NODE *psDeviceNode, + void *pvAppHintState, + RGX_INIT_APPHINTS *psHints) { - IMG_UINT32 ui32DriverMode; - IMG_BOOL bGPUUnitsPowerChange; - IMG_BOOL bEnableSignatureChecks; - IMG_UINT32 ui32SignatureChecksBufSize; - - IMG_BOOL bAssertOnOutOfMem; -#if defined(SUPPORT_VALIDATION) - IMG_BOOL bValidateIrq; - IMG_BOOL bValidateSOCUSCTimer; -#endif - IMG_BOOL bAssertOnHWRTrigger; -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; - IMG_UINT32 ui32FBCDCVersionOverride; - IMG_UINT32 ui32TFBCCompressionControlGroup; - IMG_UINT32 ui32TFBCCompressionControlScheme; - IMG_BOOL bTFBCCompressionControlYUVFormat; -#endif - IMG_BOOL bCheckMlist; - IMG_BOOL bDisableClockGating; - IMG_BOOL bDisableDMOverlap; - IMG_BOOL bDisableFEDLogging; - IMG_BOOL bDisablePDP; - IMG_BOOL bEnableCDMKillRand; - IMG_BOOL bEnableRandomCsw; - IMG_BOOL bEnableSoftResetCsw; - IMG_BOOL bFilteringMode; - IMG_BOOL bHWPerfDisableCustomCounterFilter; - IMG_BOOL bZeroFreelist; - IMG_UINT32 ui32EnableFWContextSwitch; - IMG_UINT32 ui32FWContextSwitchProfile; - - IMG_UINT32 ui32HWPerfFWBufSize; - IMG_UINT32 ui32HWPerfHostBufSize; - IMG_UINT32 ui32HWPerfFilter0; - IMG_UINT32 ui32HWPerfFilter1; - IMG_UINT32 ui32HWPerfHostFilter; - IMG_UINT32 ui32TimeCorrClock; - IMG_UINT32 ui32HWRDebugDumpLimit; - IMG_UINT32 ui32JonesDisableMask; - IMG_UINT32 ui32LogType; - IMG_UINT32 ui32TruncateMode; - IMG_UINT32 ui32KCCBSizeLog2; - FW_PERF_CONF eFirmwarePerf; - RGX_ACTIVEPM_CONF eRGXActivePMConf; - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; - - IMG_BOOL bEnableTrustedDeviceAceConfig; - IMG_UINT32 ui32FWContextSwitchCrossDM; -#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) - IMG_UINT32 ui32PhysMemTestPasses; + IMG_UINT32 ui32DeviceFlags = 0; + IMG_BOOL bAppHintDefault; + IMG_BOOL bZeroFreelist; + IMG_BOOL bDisableFEDLogging; + + + bAppHintDefault = PVRSRV_APPHINT_ZEROFREELIST; + OSGetAppHintBOOL(psDeviceNode, pvAppHintState, ZeroFreelist, + &bAppHintDefault, &bZeroFreelist); + ui32DeviceFlags |= bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; + + bAppHintDefault = PVRSRV_APPHINT_DISABLEFEDLOGGING; + OSGetAppHintBOOL(psDeviceNode, pvAppHintState, DisableFEDLogging, + &bAppHintDefault, &bDisableFEDLogging); + ui32DeviceFlags |= bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; + + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); #endif -} RGX_SRVINIT_APPHINTS; + + psHints->ui32DeviceFlags = ui32DeviceFlags; +} /*! 
******************************************************************************* @Function GetApphints - @Description Read init time apphints and initialise internal variables + @Description Read init time apphints and initialise apphints structure @Input psHints : Pointer to apphints container @Return void ******************************************************************************/ -static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) +static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_INIT_APPHINTS *psHints) { - void *pvParamState = SrvInitParamOpen(); + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + IMG_BOOL bAppHintDefault; IMG_UINT32 ui32ParamTemp; - IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE; - IMG_BOOL bE42606 = IMG_FALSE; -#if defined(EMULATOR) - IMG_BOOL bAXIACELite = IMG_FALSE; +#if defined(__linux__) + IMG_UINT64 ui64AppHintDefault; #endif -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) - { - bS7TopInfra = IMG_TRUE; - } -#endif -#if defined(RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL)) - { - bTPUFiltermodeCtrl = IMG_TRUE; - } -#endif -#if defined(HW_ERN_42290_BIT_MASK) - if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290)) - { - bE42290 = IMG_TRUE; - } -#endif -#if defined(HW_ERN_42606_BIT_MASK) - if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606)) - { - bE42606 = IMG_TRUE; - } -#endif -#if defined(HW_FEATURE_AXI_ACELITE_BIT_MASK) && defined(EMULATOR) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) - { - bAXIACELite = IMG_TRUE; - } + OSCreateAppHintState(&pvAppHintState); + + bAppHintDefault = PVRSRV_APPHINT_ENABLESIGNATURECHECKS; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSignatureChecks, + &bAppHintDefault, &psHints->bEnableSignatureChecks); + ui32AppHintDefault = PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SignatureChecksBufSize, + &ui32AppHintDefault, &psHints->ui32SignatureChecksBufSize); + + bAppHintDefault = PVRSRV_APPHINT_ASSERTOUTOFMEMORY; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, AssertOutOfMemory, + &bAppHintDefault, &psHints->bAssertOnOutOfMem); + bAppHintDefault = PVRSRV_APPHINT_ASSERTONHWRTRIGGER; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, AssertOnHWRTrigger, + &bAppHintDefault, &psHints->bAssertOnHWRTrigger); + bAppHintDefault = PVRSRV_APPHINT_CHECKMLIST; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, CheckMList, + &bAppHintDefault, &psHints->bCheckMlist); + bAppHintDefault = PVRSRV_APPHINT_DISABLECLOCKGATING; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, DisableClockGating, + &bAppHintDefault, &psHints->bDisableClockGating); + bAppHintDefault = PVRSRV_APPHINT_DISABLEDMOVERLAP; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, DisableDMOverlap, + &bAppHintDefault, &psHints->bDisableDMOverlap); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEAPM; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableAPM, + &ui32AppHintDefault, &ui32ParamTemp); + psHints->eRGXActivePMConf = ui32ParamTemp; + bAppHintDefault = PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableCDMKillingRandMode, + &bAppHintDefault, &psHints->bEnableDMKillRand); + bAppHintDefault = PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, 
pvAppHintState, EnableRandomContextSwitch, + &bAppHintDefault, &psHints->bEnableRandomCsw); + bAppHintDefault = PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSoftResetContextSwitch, + &bAppHintDefault, &psHints->bEnableSoftResetCsw); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableFWContextSwitch, + &ui32AppHintDefault, &psHints->ui32EnableFWContextSwitch); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLERDPOWERISLAND; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableRDPowerIsland, + &ui32AppHintDefault, &ui32ParamTemp); + psHints->eRGXRDPowerIslandConf = ui32ParamTemp; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + bAppHintDefault = PVRSRV_APPHINT_ENABLESPUCLOCKGATING; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSPUClockGating, + &bAppHintDefault, &psHints->bSPUClockGating); #endif + ui32AppHintDefault = PVRSRV_APPHINT_FIRMWAREPERF; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FirmwarePerf, + &ui32AppHintDefault, &ui32ParamTemp); + psHints->eFirmwarePerf = ui32ParamTemp; + ui32AppHintDefault = PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWContextSwitchProfile, + &ui32AppHintDefault, &psHints->ui32FWContextSwitchProfile); /* - * NB AppHints initialised to a default value via SrvInitParamInit* macros above + * HWPerf apphints * */ - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, DriverMode, psHints->ui32DriverMode); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, GPUUnitsPowerChange, psHints->bGPUUnitsPowerChange); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize); - - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, CheckMList, psHints->bCheckMlist); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, DisableClockGating, psHints->bDisableClockGating); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging); - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, EnableAPM, ui32ParamTemp); - psHints->eRGXActivePMConf = ui32ParamTemp; - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableCDMKillingRandMode, psHints->bEnableCDMKillRand); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableRandomContextSwitch, psHints->bEnableRandomCsw); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSoftResetContextSwitch, psHints->bEnableSoftResetCsw); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableRDPowerIsland, ui32ParamTemp); - psHints->eRGXRDPowerIslandConf = ui32ParamTemp; - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FirmwarePerf, ui32ParamTemp); - psHints->eFirmwarePerf = ui32ParamTemp; - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchProfile, 
psHints->ui32FWContextSwitchProfile); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, - HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize); - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, KernelCCBSizeLog2, psHints->ui32KCCBSizeLog2); + bAppHintDefault = PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, HWPerfDisableCustomCounterFilter, + &bAppHintDefault, &psHints->bHWPerfDisableCounterFilter); + ui32AppHintDefault = PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, + &ui32AppHintDefault, &psHints->ui32HWPerfHostBufSize); + ui32AppHintDefault = PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfFWBufSizeInKB, + &ui32AppHintDefault, &psHints->ui32HWPerfFWBufSize); + + ui32AppHintDefault = PVRSRV_APPHINT_KCCB_SIZE_LOG2; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, KernelCCBSizeLog2, + &ui32AppHintDefault, &psHints->ui32KCCBSizeLog2); if (psHints->ui32KCCBSizeLog2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE) { @@ -313,91 +324,119 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE; } -#if defined(SUPPORT_VALIDATION) - if (psHints->ui32KCCBSizeLog2 != PVRSRV_APPHINT_KCCB_SIZE_LOG2) - { - PVR_LOG(("KernelCCBSizeLog2 set to %u", psHints->ui32KCCBSizeLog2)); - } + +#if defined(PVR_ARCH_VOLCANIC) + ui32AppHintDefault = PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ISPSchedulingLatencyMode, + &ui32AppHintDefault, &psHints->ui32ISPSchedulingLatencyMode); #endif #if defined(__linux__) /* name changes */ { IMG_UINT64 ui64Tmp; - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, DisablePDumpPanic, psHints->bDisablePDP); - SrvInitParamGetUINT64(psDevInfo->psDeviceNode, pvParamState, HWPerfFWFilter, ui64Tmp); + bAppHintDefault = PVRSRV_APPHINT_DISABLEPDUMPPANIC; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, DisablePDumpPanic, + &bAppHintDefault, &psHints->bDisablePDP); + ui64AppHintDefault = PVRSRV_APPHINT_HWPERFFWFILTER; + OSGetAppHintUINT64(psDevInfo->psDeviceNode, pvAppHintState, HWPerfFWFilter, + &ui64AppHintDefault, &ui64Tmp); psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu); psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); } -#else - SrvInitParamUnreferenced(DisablePDumpPanic); - SrvInitParamUnreferenced(HWPerfFWFilter); - SrvInitParamUnreferenced(RGXBVNC); #endif - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter); - SrvInitParamGetUINT32List(psDevInfo->psDeviceNode, pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock); - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, HWRDebugDumpLimit, ui32ParamTemp); + + ui32AppHintDefault = PVRSRV_APPHINT_HWPERFHOSTFILTER; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, HWPerfHostFilter, + &ui32AppHintDefault, &psHints->ui32HWPerfHostFilter); + ui32AppHintDefault = PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, SecondaryOSClockSource, + &ui32AppHintDefault, 
&psHints->ui32SecondaryOSClockSource); + ui32AppHintDefault = PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, HWRDebugDumpLimit, + &ui32AppHintDefault, &ui32ParamTemp); psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); - if (bS7TopInfra) { - #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU) - #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN (0X00000020U) - #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN (0X00000010U) + IMG_BOOL bFilteringMode = IMG_FALSE; + IMG_UINT32 ui32TruncateMode = 0U; - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, JonesDisableMask, ui32ParamTemp); - if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) || - ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN)) +#if defined(HW_ERN_42290_BIT_MASK) && defined(RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290) && RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL)) +#endif { - ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN | - RGX_CR_JONES_FIX_MT_ORDER_ISP_EN); - PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d", - ui32ParamTemp)); + bAppHintDefault = PVRSRV_APPHINT_NEWFILTERINGMODE; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, NewFilteringMode, + &bAppHintDefault, &bFilteringMode); } - psHints->ui32JonesDisableMask = ui32ParamTemp; - } - if ((bE42290) && (bTPUFiltermodeCtrl)) - { - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, NewFilteringMode, psHints->bFilteringMode); - } +#if defined(HW_ERN_42606_BIT_MASK) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606)) +#endif + { + ui32AppHintDefault = PVRSRV_APPHINT_TRUNCATEMODE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TruncateMode, + &ui32AppHintDefault, &ui32TruncateMode); + } - if (bE42606) - { - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TruncateMode, psHints->ui32TruncateMode); + psHints->ui32FilterFlags = GetFilterFlags(bFilteringMode, ui32TruncateMode); } + #if defined(EMULATOR) - if (bAXIACELite) +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) +#endif { - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig); + bAppHintDefault = PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableTrustedDeviceAceConfig, + &bAppHintDefault, &psHints->bEnableTrustedDeviceAceConfig); } #endif - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, ZeroFreelist, psHints->bZeroFreelist); - #if defined(__linux__) - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM); -#else - SrvInitParamUnreferenced(FWContextSwitchCrossDM); + ui32AppHintDefault = 0; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWContextSwitchCrossDM, + &ui32AppHintDefault, &psHints->ui32FWContextSwitchCrossDM); #endif #if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses); + ui32AppHintDefault = PVRSRV_APPHINT_PHYSMEMTESTPASSES; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, + &ui32AppHintDefault, &psHints->ui32PhysMemTestPasses); #endif -#if defined(SUPPORT_VALIDATION) 
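As read in GetApphints above, HWPerfFWFilter arrives as a single 64-bit apphint and is split into two 32-bit filter words (HWPerfFilter0/1) before use. A quick self-contained check of that split and the corresponding recombination follows; the variable names are local to the sketch.

#include <stdint.h>
#include <assert.h>

int main(void)
{
    const uint64_t fw_filter = 0x1122334455667788ull; /* example apphint value */

    /* Split exactly as in GetApphints(): low word, then high word. */
    uint32_t filter0 = (uint32_t)(fw_filter & 0xffffffffull);
    uint32_t filter1 = (uint32_t)((fw_filter >> 32) & 0xffffffffull);

    /* Recombining the two words must give back the original 64-bit filter. */
    uint64_t recombined = ((uint64_t)filter1 << 32) | filter0;
    assert(recombined == fw_filter);
    assert(filter0 == 0x55667788u && filter1 == 0x11223344u);
    return 0;
}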
- /* Apphints for TPU trilinear frac masking */ - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ValidateIrq, psHints->bValidateIrq); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ValidateSOCUSCTimer, psHints->bValidateSOCUSCTimer); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlGroup, psHints->ui32TFBCCompressionControlGroup); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlScheme, psHints->ui32TFBCCompressionControlScheme); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlYUVFormat, psHints->bTFBCCompressionControlYUVFormat); -#endif +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + psHints->ui32AvailablePowUnitsMask = AVAIL_POW_UNITS_MASK_DEFAULT; + psHints->ui32AvailableRACMask = AVAIL_RAC_MASK_DEFAULT; +#endif /* defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) */ + + +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + ui32AppHintDefault = PVRSRV_APPHINT_TFBCVERSION; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCVersionDowngrade, + &ui32AppHintDefault, &psHints->ui32TFBCVersion); + + if (ui32AppHintDefault != psHints->ui32TFBCVersion) + { + PVR_LOG(("TFBCVersionDowngrade set to %u", psHints->ui32TFBCVersion)); + } + + psHints->bTFBCCompressionControlLossyMinChannel = false; + psHints->bTFBCCompressionControlYUVFormat = false; + psHints->ui32TFBCCompressionControlScheme = + PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME; + psHints->ui32TFBCCompressionControlGroup = + PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; +#endif /* defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) */ + + ui32AppHintDefault = PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DebugDumpFWTLogType, + &ui32AppHintDefault, &psHints->eDebugDumpFWTLogType); + if ((IMG_UINT32)psHints->eDebugDumpFWTLogType > RGX_FWT_LOGTYPE_PARTIAL) + { + psHints->eDebugDumpFWTLogType = RGX_FWT_LOGTYPE_NONE; + PVR_DPF((PVR_DBG_WARNING, "Invalid value for DebugDumpFWTLogType. 
Setting to 0 (disabled).")); + } /* * FW logs apphints @@ -405,8 +444,10 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN { IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; - SrvInitParamGetUINT32BitField(psDevInfo->psDeviceNode, pvParamState, EnableLogGroup, ui32LogGroup); - SrvInitParamGetUINT32List(psDevInfo->psDeviceNode, pvParamState, FirmwareLogType, ui32TraceOrTBI); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLELOGGROUP; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableLogGroup, &ui32AppHintDefault, &ui32LogGroup); + ui32AppHintDefault = PVRSRV_APPHINT_FIRMWARELOGTYPE; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, FirmwareLogType, &ui32AppHintDefault, &ui32TraceOrTBI); /* Defaulting to TRACE */ BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); @@ -425,7 +466,11 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN psHints->ui32LogType = ui32LogGroup; } - SrvInitParamClose(pvParamState); + _ParseHTBAppHints(psDevInfo->psDeviceNode, pvAppHintState); + + InitDeviceFlags(psDevInfo->psDeviceNode, pvAppHintState, psHints); + + OSFreeAppHintState(pvAppHintState); } @@ -443,152 +488,198 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN ******************************************************************************/ static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_SRVINIT_APPHINTS *psHints, + RGX_INIT_APPHINTS *psHints, IMG_UINT32 *pui32FWConfigFlags, IMG_UINT32 *pui32FWConfigFlagsExt, IMG_UINT32 *pui32FwOsCfgFlags) { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; IMG_UINT32 ui32FWConfigFlags = 0; IMG_UINT32 ui32FWConfigFlagsExt = 0; + IMG_UINT32 ui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | + (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); + + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + IMG_UINT32 ui32FWConfigFlagsSupValExt = 0; + IMG_UINT32 ui32TFBCVersion = 0U; +#endif + + ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; + ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; + ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; + ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; - if (PVRSRV_VZ_MODE_IS(GUEST)) +#if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PIPELINED_DATAMASTERS_VERSION) && + (RGX_GET_FEATURE_VALUE(psDevInfo, PIPELINED_DATAMASTERS_VERSION) > 0) && + !RGX_IS_FEATURE_SUPPORTED(psDevInfo, ERYX_TOP_INFRASTRUCTURE)) { - ui32FWConfigFlags = 0; - ui32FWConfigFlagsExt = 0; + /* Pipeline DM roadblocks are currently enabled pre-Eryx. */ + ui32FWConfigFlags |= RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN; } - else +#endif + + ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; + + if ((RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) <= 2)) { - ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; - ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; - ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; - ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; - ui32FWConfigFlags |= psHints->bDisableDMOverlap ? 
RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; - ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; - ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; - ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; - ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; - ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; - ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; - ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; - -#if defined(SUPPORT_VALIDATION) -#if defined(NO_HARDWARE) && defined(PDUMP) - ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; + ui32FWConfigFlags |= RGXFWIF_INICFG_DISABLE_DM_OVERLAP; + } + + ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; + ui32FWConfigFlags |= psHints->bEnableDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; + ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; + ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; + ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; + ui32FWConfigFlags |= psHints->bHWPerfDisableCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; + ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; +#if defined(PVR_ARCH_VOLCANIC) + ui32FWConfigFlags |= (psHints->ui32ISPSchedulingLatencyMode << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) & RGXFWIF_INICFG_ISPSCHEDMODE_MASK; #endif - if (psHints->ui32FBCDCVersionOverride > 0) - { - ui32FWConfigFlags |= (psHints->ui32FBCDCVersionOverride == 2) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; - } - else -#endif /* defined(SUPPORT_VALIDATION) */ - { - ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; - } + { + ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; + } -#if defined(SUPPORT_VALIDATION) - ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0; - if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) && - ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0))) - { - psHints->eRGXActivePMConf = 0; - psHints->eRGXRDPowerIslandConf = 0; - PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n" - "Overriding current value for both with new value 0.")); - } +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + + /* Determine if we need to present a TFBC v1.0, v1.1 or native + * behaviour. 
For V1.0 we need to set the following features: + * TFBCCompressionControlLossyMinChannel = 0x1 + * TFBCCompressionControlYUVFormat = 0x1 + * TFBCCompressionControlScheme = 0x2 + * TFBCCompressionControlGroup = 0x0 + * For V1.1 we need to set the following: + * TFBCCompressionControlLossyMinChannel = 0x1 + * TFBCCompressionControlYUVFormat = 0x0 + * TFBCCompressionControlScheme = 0x1 + * TFBCCompressionControlGroup = 0 / 1 (depends on LOSSY_37_PERCENT) + * The gating for these values depends on whether the GPU supports + * RGX_FEATURE_TFBC_VERSION = 20U + */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || - RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION) || - RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_NATIVE_YUV10)) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) + { + ui32TFBCVersion = RGX_GET_FEATURE_VALUE(psDevInfo, TFBC_VERSION); + + if (ui32TFBCVersion >= 20U) { + switch (psHints->ui32TFBCVersion) { + case 10: /* TFBC Version 1.0 */ + psHints->bTFBCCompressionControlLossyMinChannel = true; + psHints->bTFBCCompressionControlYUVFormat = true; + psHints->ui32TFBCCompressionControlScheme = 2U; + psHints->ui32TFBCCompressionControlGroup = 0U; + +#if defined(DEBUG) + PVR_LOG(("%s: Setting TFBC Version 1.0, Native v%u", + __func__, ui32TFBCVersion)); +#endif + break; + + case 11: /* TFBC Version 1.1 */ + psHints->bTFBCCompressionControlLossyMinChannel = true; + psHints->bTFBCCompressionControlYUVFormat = false; + psHints->ui32TFBCCompressionControlScheme = 1U; + psHints->ui32TFBCCompressionControlGroup = + PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; + +#if defined(DEBUG) + PVR_LOG(("%s: Setting TFBC Version 1.1, Native v%u", + __func__, ui32TFBCVersion)); +#endif + break; + + case 0: /* Leave with whatever the ui32TFBCVersion is */ + break; + default: /* Unexpected / unsupported value */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Unexpected TFBC Version %u" + " Ignoring. Using value %u instead", + __func__, psHints->ui32TFBCVersion, + ui32TFBCVersion)); + break; + } + ui32FWConfigFlagsExt |= ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | - ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0)) + ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | + ((psHints->bTFBCCompressionControlLossyMinChannel) ? 
RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; + /* Save the CompressionControlGroup for later use by + * ->pfnGetTFBCLossyGroup() */ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; +#endif } -#else - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT)) + else if (ui32TFBCVersion == 11U) { + switch (psHints->ui32TFBCVersion) { + case 10: /* TFBC Version 1.0 */ + psHints->bTFBCCompressionControlLossyMinChannel = true; + psHints->bTFBCCompressionControlYUVFormat = true; + psHints->ui32TFBCCompressionControlScheme = 2U; + psHints->ui32TFBCCompressionControlGroup = 0U; + +#if defined(DEBUG) + PVR_LOG(("%s: Setting TFBC Version 1.0, Native v%u", + __func__, ui32TFBCVersion)); +#endif + break; + + case 0: /* Leave with whatever the ui32TFBCVersion is */ + break; + + default: /* Unexpected / unsupported value */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Unexpected TFBC Version %u" + " Ignoring. Using value %u instead", + __func__, psHints->ui32TFBCVersion, + ui32TFBCVersion)); + break; + } ui32FWConfigFlagsExt |= - ((((PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & - ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | - ((PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & - ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK)) + ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & + ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | + ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & + ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | + ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | + ((psHints->bTFBCCompressionControlLossyMinChannel) ? RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; + /* Save the CompressionControlGroup for later use by + * ->pfnGetTFBCLossyGroup() */ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; +#endif } + else /* TFBC v1.0 */ + { + PVR_UNREFERENCED_PARAMETER(ui32FWConfigFlagsSupValExt); +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + psDevInfo->ui32TFBCLossyGroup = 0; #endif + if ((psHints->ui32TFBCVersion != 0U) && + (psHints->ui32TFBCVersion != ui32TFBCVersion)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Cannot specify TFBC version %u" + " on a version %u GPU core", __func__, + psHints->ui32TFBCVersion, ui32TFBCVersion)); + } + } } +#endif /* defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) */ *pui32FWConfigFlags = ui32FWConfigFlags; *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt; - *pui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | - (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); + *pui32FwOsCfgFlags = ui32FwOsCfgFlags; } -/*! 
-******************************************************************************* - - @Function GetFilterFlags - - @Description Initialise and return filter flags - - @Input psHints : Apphints container - - @Return IMG_UINT32 : Filter flags - -******************************************************************************/ -static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) -{ - IMG_UINT32 ui32FilterFlags = 0; - - ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; - if (psHints->ui32TruncateMode == 2) - { - ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; - } - else if (psHints->ui32TruncateMode == 3) - { - ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; - } - - return ui32FilterFlags; -} - - -/*! -******************************************************************************* - - @Function InittDeviceFlags - - @Description Initialise and return device flags - - @Input psHints : Apphints container - @Input pui32DeviceFlags : Pointer to device flags - - @Return void - -******************************************************************************/ -static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, - IMG_UINT32 *pui32DeviceFlags) -{ - IMG_UINT32 ui32DeviceFlags = 0; - - ui32DeviceFlags |= psHints->bGPUUnitsPowerChange ? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; - ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; - ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; -#if defined(PVRSRV_ENABLE_CCCB_GROW) - BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); -#endif - - *pui32DeviceFlags = ui32DeviceFlags; -} - #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) /*! 
******************************************************************************* @@ -599,19 +690,17 @@ static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, the FW image setup @Input psDeviceNode : Device node - @Input psRGXFW : Firmware blob - @Input puFWParams : Parameters used by the FW at boot time + @Input psFWParams : Firmware and parameters used by the FW @Return PVRSRV_ERROR ******************************************************************************/ static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, - OS_FW_IMAGE *psRGXFW, - PVRSRV_FW_BOOT_PARAMS *puFWParams) + PVRSRV_FW_PARAMS *psFWParams) { PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_TD_FW_PARAMS sTDFWParams; - RGX_LAYER_PARAMS sLayerParams; +#endif PVRSRV_ERROR eError; if (psDevConfig->pfnTDSendFWImage == NULL) @@ -620,42 +709,27 @@ static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_ERROR_NOT_IMPLEMENTED; } - sLayerParams.psDevInfo = psDevInfo; - - sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); - sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); - -#if defined(RGX_FEATURE_META_IDX) - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sTDFWParams.uFWP.sMeta = puFWParams->sMeta; - } - else -#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { - sTDFWParams.uFWP.sMips = puFWParams->sMips; - - if (sTDFWParams.uFWP.sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES) + if (psFWParams->uFWP.sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES) { PVR_DPF((PVR_DBG_ERROR, "%s: Number of page table pages %u greater " "than what is allowed by the TD interface (%u), FW might " "not work properly!", __func__, - puFWParams->sMips.ui32FWPageTableNumPages, + psFWParams->uFWP.sMips.ui32FWPageTableNumPages, TD_MAX_NUM_MIPS_PAGETABLE_PAGES)); } } - else - { - sTDFWParams.uFWP.sRISCV = puFWParams->sRISCV; - } +#endif - eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); + eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, psFWParams); return eError; } #endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! ******************************************************************************* @@ -736,6 +810,7 @@ static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, return eError; } +#endif /*! 
******************************************************************************* @@ -745,16 +820,14 @@ static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Allocate, initialise and pdump Firmware code and data memory @Input psDeviceNode : Device Node - @Input psHints : Apphints @Return PVRSRV_ERROR ******************************************************************************/ -static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_SRVINIT_APPHINTS *psHints) +static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode) { OS_FW_IMAGE *psRGXFW = NULL; - const IMG_BYTE *pbRGXFirmware = NULL; + PVRSRV_FW_PARAMS sFWParams = {0}; /* FW code memory */ IMG_DEVMEM_SIZE_T uiFWCodeAllocSize; @@ -773,26 +846,23 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvFWCorememDataHostAddr = NULL; PVRSRV_FW_BOOT_PARAMS uFWParams; - RGX_LAYER_PARAMS sLayerParams; PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) IMG_BOOL bUseSecureFWData = -#if defined(RGX_FEATURE_META_IDX) RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || -#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) || -#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && - RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32); + RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32) || +#endif + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR); #endif /* * Get pointer to Firmware image */ - eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware); + eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW); if (eError != PVRSRV_OK) { @@ -800,19 +870,35 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, goto fw_load_fail; } - sLayerParams.psDevInfo = psDevInfo; + /* + * Get pointer and size + */ + sFWParams.pvFirmware = OSFirmwareData(psRGXFW); + sFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); + + /* + * Allow it to be pre-processed by the platform hook + */ + if (psDeviceNode->psDevConfig->pfnPrepareFWImage != NULL) + { + eError = psDeviceNode->psDevConfig->pfnPrepareFWImage( + psDeviceNode->psDevConfig->hSysData, &sFWParams); + if (eError != PVRSRV_OK) + goto cleanup_initfw; + } /* * Allocate Firmware memory */ - eError = RGXGetFWImageAllocSize(&sLayerParams, - pbRGXFirmware, - OSFirmwareSize(psRGXFW), + eError = RGXGetFWImageAllocSize(&psDevInfo->sLayerParams, + sFWParams.pvFirmware, + sFWParams.ui32FirmwareSize, &uiFWCodeAllocSize, &uiFWDataAllocSize, &uiFWCorememCodeAllocSize, - &uiFWCorememDataAllocSize); + &uiFWCorememDataAllocSize, + &psDevInfo->sFWInfoHeader); if (eError != PVRSRV_OK) { @@ -822,15 +908,30 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, goto cleanup_initfw; } - psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; - -#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META_DMA_BIT_MASK) - /* Disable META core memory allocation unless the META DMA is available */ - if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, META_DMA)) + /* + * Initiate FW compatibility check for Native and Host. + * Guest compatibility check must be done after FW boot. 
+ */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - uiFWCorememCodeAllocSize = 0; - uiFWCorememDataAllocSize = 0; + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed compatibility check for device %p (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + goto cleanup_initfw; + } } + + psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; + +#if defined(SUPPORT_TRUSTED_DEVICE) + PVR_DPF((PVR_DBG_WARNING, + "%s: META DMA not available, disabling core memory code/data", + __func__)); + uiFWCorememCodeAllocSize = 0; + uiFWCorememDataAllocSize = 0; #endif psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize; @@ -853,14 +954,18 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, * Acquire pointers to Firmware allocations */ -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); - PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); - -#else - /* We can't get a pointer to a secure FW allocation from within the DDK */ - pvFWCodeHostAddr = NULL; +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCodeHostAddr = NULL; + } + else #endif + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); + } #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) if (bUseSecureFWData) @@ -875,16 +980,19 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); } -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - if (uiFWCorememCodeAllocSize) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCorememCodeHostAddr = NULL; + } + else +#endif + if (uiFWCorememCodeAllocSize != 0) { eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data); } -#else - /* We can't get a pointer to a secure FW allocation from within the DDK */ - pvFWCorememCodeHostAddr = NULL; -#endif #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) if (bUseSecureFWData) @@ -893,7 +1001,7 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, } else #endif - if (uiFWCorememDataAllocSize) + if (uiFWCorememDataAllocSize != 0) { eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode); @@ -902,21 +1010,8 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Prepare FW boot parameters */ + OSCachedMemSet(&uFWParams, 0, sizeof(PVRSRV_FW_BOOT_PARAMS)); - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) - { - eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: 
RGXAcquireMipsBootldrData failed (%d)", - __func__, eError)); - goto release_fw_allocations; - } - } - else -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; @@ -932,9 +1027,25 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, uFWParams.sMeta.ui32NumThreads = 1; #endif } - else +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXAcquireMipsBootldrData failed (%d)", + __func__, eError)); + goto release_fw_allocations; + } + } #endif + else { + uFWParams.sRISCV.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; + uFWParams.sRISCV.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; + uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; @@ -955,8 +1066,8 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, */ if (!psDeviceNode->bAutoVzFwIsUp) { - eError = RGXProcessFWImage(&sLayerParams, - pbRGXFirmware, + eError = RGXProcessFWImage(&psDevInfo->sLayerParams, + sFWParams.pvFirmware, pvFWCodeHostAddr, pvFWDataHostAddr, pvFWCorememCodeHostAddr, @@ -969,10 +1080,32 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, __func__, eError)); goto release_fw_allocations; } +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) + RGXFwSharedMemCacheOpExec(pvFWCodeHostAddr, + sizeof(psDevInfo->psRGXFWCodeMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + if (uiFWCorememCodeAllocSize) + { + RGXFwSharedMemCacheOpExec(pvFWCorememCodeHostAddr, + sizeof(psDevInfo->psRGXFWCorememCodeMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + } + + RGXFwSharedMemCacheOpExec(pvFWDataHostAddr, + sizeof(psDevInfo->psRGXFWDataMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + if (uiFWCorememDataAllocSize) + { + RGXFwSharedMemCacheOpExec(pvFWCorememDataHostAddr, + sizeof(psDevInfo->psRGXFWIfCorememDataStoreMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + } +#endif } #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) - RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams); + sFWParams.uFWP = uFWParams; + RGXTDProcessFWImage(psDeviceNode, &sFWParams); #endif @@ -1002,7 +1135,7 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, } #if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - if (uiFWCorememCodeAllocSize) + if (uiFWCorememCodeAllocSize != 0) { PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem code image"); @@ -1016,7 +1149,7 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) if (!bUseSecureFWData && uiFWCorememDataAllocSize) #else - if (uiFWCorememDataAllocSize) + if (uiFWCorememDataAllocSize != 0) #endif { PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, @@ -1030,27 +1163,27 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Release Firmware allocations and clean up */ - release_fw_allocations: #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && 
!defined(SUPPORT_SECURITY_VALIDATION) if (!bUseSecureFWData && uiFWCorememDataAllocSize) #else - if (uiFWCorememDataAllocSize) + if (uiFWCorememDataAllocSize != 0) #endif { DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); } + release_corememcode: -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - if (uiFWCorememCodeAllocSize) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData && (uiFWCorememCodeAllocSize != 0)) +#else + if (uiFWCorememCodeAllocSize != 0) +#endif { DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); } -#endif -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) release_data: -#endif #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) if (!bUseSecureFWData) #endif @@ -1059,9 +1192,12 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, } release_code: -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData) #endif + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + } cleanup_initfw: OSUnloadFirmware(psRGXFW); fw_load_fail: @@ -1094,7 +1230,9 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt) { RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData; +#if defined(HWPERF_UNIFIED) RGXFWIF_HWPERF_DA_BLK *psHWPerfInitDABlkData; +#endif IMG_UINT32 ui32CntBlkModelLen; const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc; @@ -1108,8 +1246,8 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) { - IMG_UINT32 uiUnit; - IMG_BOOL bDirect; + __maybe_unused IMG_UINT32 uiUnit; + __maybe_unused IMG_BOOL bDirect; /* Exit early if this core does not have any of these counter blocks * due to core type/BVNC features.... */ @@ -1120,7 +1258,7 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, } /* Program all counters in one block so those already on may - * be configured off and vice-a-versa. */ + * be configured off and vice-versa. */ for (ui32BlockID = psBlkTypeDesc->ui32CntBlkIdBase; ui32BlockID < psBlkTypeDesc->ui32CntBlkIdBase+sCntBlkRtInfo.ui32NumUnits; ui32BlockID++) @@ -1133,13 +1271,14 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, /* Get the block configure store to update from the global store of * block configuration. This is used to remember the configuration * between configurations and core power on in APM. - * For RGX_FEATURE_HWPERF_OCEANIC layout we have a different + * For HWPERF_UNIFIED layout we will have a different * structure type to decode the HWPerf block. This is indicated by * the RGX_CNTBLK_ID_DA_MASK bit being set in the block-ID value. 
*/ bDirect = (psBlkTypeDesc->ui32IndirectReg == 0U); uiUnit = ui32BlockID - psBlkTypeDesc->ui32CntBlkIdBase; +#if defined(HWPERF_UNIFIED) if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) { psHWPerfInitDABlkData = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfInitDataInt); @@ -1197,25 +1336,26 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, } } else +#endif /* defined(HWPERF_UNIFIED) */ { psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt); /* Assert to check for HWPerf block mis-configuration */ PVR_ASSERT(psHWPerfInitBlkData); - psHWPerfInitBlkData->bValid = IMG_TRUE; + psHWPerfInitBlkData->ui32Valid = IMG_TRUE; PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "bValid: This specifies if the layout block is valid for the given BVNC."); DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, - (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt), - psHWPerfInitBlkData->bValid, + (size_t)&(psHWPerfInitBlkData->ui32Valid) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitBlkData->ui32Valid, PDUMP_FLAGS_CONTINUOUS); - psHWPerfInitBlkData->bEnabled = IMG_FALSE; + psHWPerfInitBlkData->ui32Enabled = IMG_FALSE; PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "bEnabled: Set to 0x1 if the block needs to be enabled during playback."); DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, - (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt), - psHWPerfInitBlkData->bEnabled, + (size_t)&(psHWPerfInitBlkData->ui32Enabled) - (size_t)(psHWPerfInitDataInt), + psHWPerfInitBlkData->ui32Enabled, PDUMP_FLAGS_CONTINUOUS); psHWPerfInitBlkData->eBlockID = ui32BlockID; @@ -1331,6 +1471,7 @@ static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) InitialiseHWPerfCounters(psDeviceNode, psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); InitialiseCustomCounters(psDeviceNode, psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + RGXFwSharedMemCacheOpPtr(psHWPerfInitData, FLUSH); failHWPerfCountersMemDescAqCpuVirt: DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); @@ -1339,38 +1480,6 @@ static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) } #endif /* PDUMP */ -/* - * _ParseHTBAppHints: - * - * Generate necessary references to the globally visible AppHints which are - * declared in the above #include "km_apphint_defs.h" - * Without these local references some compiler tool-chains will treat - * unreferenced declarations as fatal errors. This function duplicates the - * HTB_specific apphint references which are made in htbserver.c:HTBInit() - * However, it makes absolutely *NO* use of these hints. - */ -static void -_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - void *pvParamState = NULL; - IMG_UINT32 ui32LogType; - IMG_BOOL bAnyLogGroupConfigured; - IMG_UINT32 ui32BufferSize; - IMG_UINT32 ui32OpMode; - - /* Services initialisation parameters */ - pvParamState = SrvInitParamOpen(); - if (pvParamState == NULL) - return; - - SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE, pvParamState, EnableHTBLogGroup, ui32LogType); - bAnyLogGroupConfigured = ui32LogType ? 
IMG_TRUE : IMG_FALSE; - SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE, pvParamState, HTBOperationMode, ui32OpMode); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HTBufferSizeInKB, ui32BufferSize); - - SrvInitParamClose(pvParamState); -} - #if defined(SUPPORT_TRUSTED_DEVICE) static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_PHYS_HEAP ePhysHeap, @@ -1379,7 +1488,7 @@ static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap]; PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap); PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE - | PHYS_HEAP_USAGE_GPU_SECURE); + | PHYS_HEAP_USAGE_GPU_SECURE | PHYS_HEAP_USAGE_FW_PRIVATE); PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0, PVRSRV_ERROR_NOT_SUPPORTED, @@ -1400,11 +1509,14 @@ static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; - eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); - eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); + } eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE); PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE"); @@ -1430,12 +1542,10 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_ERROR eError; /* Services initialisation parameters */ - RGX_SRVINIT_APPHINTS sApphints = {0}; + RGX_INIT_APPHINTS sApphints = {0}; IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags; - IMG_UINT32 ui32DeviceFlags; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - RGX_LAYER_PARAMS sLayerParams; PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 1"); @@ -1443,7 +1553,7 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) psDeviceNode->psDevConfig->pszName); PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)", psDeviceNode->sDevId.ui32InternalID, - psDeviceNode->sDevId.i32OsDeviceID); + psDeviceNode->sDevId.i32KernelDeviceID); if (psDeviceNode->psDevConfig->pszVersion) { @@ -1459,52 +1569,28 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); - RGXInitMultiCoreInfo(psDeviceNode); - -#if defined(PDUMP) - eError = DevmemIntAllocDefBackingPage(psDeviceNode, - &psDeviceNode->sDummyPage, - PVR_DUMMY_PAGE_INIT_VALUE, - DUMMY_PAGE, - IMG_TRUE); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__)); - goto cleanup; - } - - eError = DevmemIntAllocDefBackingPage(psDeviceNode, - &psDeviceNode->sDevZeroPage, - PVR_ZERO_PAGE_INIT_VALUE, - DEV_ZERO_PAGE, - IMG_TRUE); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__)); - goto cleanup; - } -#endif /* defined(PDUMP) */ - - sLayerParams.psDevInfo = psDevInfo; #if 
defined(SUPPORT_TRUSTED_DEVICE) eError = RGXValidateTDHeaps(psDeviceNode); PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps"); #endif #if defined(SUPPORT_AUTOVZ) - if (PVRSRV_VZ_MODE_IS(HOST)) + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode)) { - /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation + /* The RGX_CR_MTS_DM4_INTERRUPT_ENABLE register is always set by the firmware during initialisation * and it provides a good method of determining if the firmware has been booted previously */ - psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0); + psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM4_INTERRUPT_ENABLE) != 0); PVR_LOG(("AutoVz startup check: firmware is %s;", (psDeviceNode->bAutoVzFwIsUp) ? "already running" : "powered down")); + PVR_LOG(("AutoVz allow GPU powerdown is %s:", + (psDeviceNode->bAutoVzAllowGPUPowerdown) ? "enabled" : "disabled")); } - else if (PVRSRV_VZ_MODE_IS(GUEST)) + else if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* Guest assumes the firmware is always available */ psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + psDeviceNode->bAutoVzAllowGPUPowerdown = IMG_FALSE; } else #endif @@ -1513,7 +1599,7 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; } - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) || (psDeviceNode->bAutoVzFwIsUp)) { /* set the device power state here as the regular power * callbacks will not be executed on this driver */ @@ -1521,27 +1607,26 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) } /* Set which HW Safety Events will be handled by the driver */ -#if defined(RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK) psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ? RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN : 0; -#endif -#if defined(RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX) - psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS) - && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ? + psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&psDevInfo->sLayerParams, ECC_RAMS) + && (RGX_DEVICE_GET_FEATURE_VALUE(&psDevInfo->sLayerParams, ECC_RAMS) > 0)) ? 
RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN : 0; + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Register defs revision: %d", RGX_CR_DEFS_KM_REVISION); #endif /* Services initialisation parameters */ - _ParseHTBAppHints(psDeviceNode); GetApphints(psDevInfo, &sApphints); - InitDeviceFlags(&sApphints, &ui32DeviceFlags); #if defined(SUPPORT_GPUVIRT_VALIDATION) #if defined(EMULATOR) if ((sApphints.bEnableTrustedDeviceAceConfig) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))) { - SetTrustedDeviceAceEnabled(); + SetTrustedDeviceAceEnabled(psDeviceNode->psDevConfig->hSysData); } #endif #endif @@ -1555,10 +1640,9 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) goto cleanup; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = InitFirmware(psDeviceNode, &sApphints); - + eError = InitFirmware(psDeviceNode); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1575,25 +1659,8 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags); eError = RGXInitFirmware(psDeviceNode, - sApphints.bEnableSignatureChecks, - sApphints.ui32SignatureChecksBufSize, - sApphints.ui32HWPerfFWBufSize, - (IMG_UINT64)sApphints.ui32HWPerfFilter0 | - ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32), + &sApphints, ui32FWConfigFlags, - sApphints.ui32LogType, - GetFilterFlags(&sApphints), - sApphints.ui32JonesDisableMask, - sApphints.ui32HWRDebugDumpLimit, - sizeof(RGXFWIF_HWPERF_CTL), -#if defined(SUPPORT_VALIDATION) - &sApphints.aui32TPUTrilinearFracMask[0], -#else - NULL, -#endif - sApphints.eRGXRDPowerIslandConf, - sApphints.eFirmwarePerf, - sApphints.ui32KCCBSizeLog2, ui32FWConfigFlagsExt, ui32FwOsCfgFlags); @@ -1601,13 +1668,12 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) { PVR_DPF((PVR_DBG_ERROR, "%s: RGXInitFirmware failed (%d)", - __func__, - eError)); + __func__, eError)); goto cleanup; } #if defined(PDUMP) - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { eError = InitialiseAllCounters(psDeviceNode); if (eError != PVRSRV_OK) @@ -1624,10 +1690,7 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) * Perform second stage of RGX initialisation */ eError = RGXInitDevPart2(psDeviceNode, - ui32DeviceFlags, - sApphints.ui32HWPerfHostFilter, - sApphints.eRGXActivePMConf); - + &sApphints); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1636,9 +1699,6 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) goto cleanup; } -#if defined(SUPPORT_VALIDATION) - PVRSRVAppHintDumpState(psDeviceNode); -#endif eError = PVRSRV_OK; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxstartstop.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxstartstop.c index ee17f3ffb9da..2b93b46ff3ef 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxstartstop.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxstartstop.c @@ -49,81 +49,136 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * which should be extended when necessary. */ #include "rgxstartstop.h" -#if defined(SUPPORT_SHARED_SLC) -#include "rgxapi_km.h" -#endif - -#include "rgxdevice.h" -#include "km/rgxdefs_km.h" - #define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING - -/*! 
-******************************************************************************* - - @Function RGXEnableClocks - - @Description Enable RGX Clocks - - @Input hPrivate : Implementation specific data - - @Return void - -******************************************************************************/ -static void RGXEnableClocks(const void *hPrivate) -{ - RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)"); -} - -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) -static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +/* + RGXWriteMetaRegThroughSP +*/ +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue) { PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32StateReg, ui32StateReadyFlag; + IMG_UINT32 ui32CtrlReg, ui32DataReg; - /* Wait for Slave Port to be Ready */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_REGISTER_UNPACKED_ACCESSES)) + { + /* ensure the meta_registers_unpacked_accesses auto-increment feature is not used */ + BITMASK_UNSET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN); - /* Issue a Write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); - (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); - (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA; + } + else + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + } + } + else +#endif + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1_READY_EN | + RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0; + ui32DataReg = RGX_CR_META_SP_MSLVDATAT; + } + + eError = RGXPollReg32(hPrivate, ui32StateReg, ui32StateReadyFlag, ui32StateReadyFlag); + + if (eError == PVRSRV_OK) + { + /* Issue a Write */ + RGXWriteReg32(hPrivate, ui32CtrlReg, ui32RegAddr); + (void) RGXReadReg32(hPrivate, ui32CtrlReg); /* Fence write */ + RGXWriteReg32(hPrivate, ui32DataReg, ui32RegValue); + (void) RGXReadReg32(hPrivate, ui32DataReg); /* Fence write */ + } return eError; } -static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT32* ui32RegValue) +/* + RGXReadMetaRegThroughSP +*/ +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32* ui32RegValue) { PVRSRV_ERROR 
eError = PVRSRV_OK; + IMG_UINT32 ui32StateReg, ui32StateReadyFlag; + IMG_UINT32 ui32CtrlReg, ui32DataReg; - /* Wait for Slave Port to be Ready */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_REGISTER_UNPACKED_ACCESSES)) + { + /* ensure the meta_registers_unpacked_accesses auto-increment feature is not used */ + BITMASK_UNSET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN); - /* Issue a Read */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); - (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA; + BITMASK_SET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN); + } + else + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA; + BITMASK_SET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN); + } + } + else +#endif + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1_READY_EN | + RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0; + ui32DataReg = RGX_CR_META_SP_MSLVDATAX; + BITMASK_SET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0_RD_EN); + } /* Wait for Slave Port to be Ready */ eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; + ui32StateReg, + ui32StateReadyFlag, + ui32StateReadyFlag); + if (eError == PVRSRV_OK) + { + /* Issue a Read */ + RGXWriteReg32(hPrivate, ui32CtrlReg, ui32RegAddr); + (void) RGXReadReg32(hPrivate, ui32CtrlReg); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + ui32StateReg, + ui32StateReadyFlag, + ui32StateReadyFlag); + if (eError != PVRSRV_OK) return eError; + } #if !defined(NO_HARDWARE) - *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); + *ui32RegValue = RGXReadReg32(hPrivate, ui32DataReg); #else + PVR_UNREFERENCED_PARAMETER(ui32DataReg); *ui32RegValue = 0xFFFFFFFF; #endif @@ -196,22 +251,11 @@ static void RGXInitMetaProcWrapper(const void *hPrivate) { IMG_UINT64 ui64GartenConfig; - /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */ - /* Garten IDLE bit controlled by META */ ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; - /* The fence addr is set at the fw init sequence */ - - if 
(RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) - { - /* Set PC = 0 for fences */ - ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK; - ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV - << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT; - - } - else +#if defined(RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_SHIFT) + /* Set the Garten Wrapper BIF Fence address */ { /* Set PC = 0 for fences */ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK; @@ -221,12 +265,13 @@ static void RGXInitMetaProcWrapper(const void *hPrivate) /* Set SLC DM=META */ ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_BIFDM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; } +#endif RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); } -#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! ******************************************************************************* @@ -282,9 +327,11 @@ static void RGXInitMipsProcWrapper(const void *hPrivate) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; #endif -#if defined(MIPS_FW_CODE_OSID) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; - ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#else + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; #endif RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers"); @@ -335,9 +382,8 @@ static void RGXInitMipsProcWrapper(const void *hPrivate) } #endif -#if defined(MIPS_FW_CODE_OSID) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; -#endif + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; RGXCommentLog(hPrivate, "RGXStart: Write data remap registers"); RGXDataRemapConfig(hPrivate, @@ -359,9 +405,11 @@ static void RGXInitMipsProcWrapper(const void *hPrivate) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; #endif -#if defined(MIPS_FW_CODE_OSID) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; - ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#else + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; #endif RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers"); @@ -387,9 +435,8 @@ static void RGXInitMipsProcWrapper(const void *hPrivate) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; #endif -#if defined(MIPS_FW_CODE_OSID) ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; -#endif + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers"); RGXTrampolineRemapConfig(hPrivate, @@ -408,9 +455,9 @@ static void RGXInitMipsProcWrapper(const void *hPrivate) /* Turn on the EJTAG probe (only useful driver live) */ RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0); } +#endif -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) /*! 
******************************************************************************* @@ -425,38 +472,44 @@ static void RGXInitMipsProcWrapper(const void *hPrivate) ******************************************************************************/ static void RGXInitRiscvProcWrapper(const void *hPrivate) { + IMG_UINT32 ui32BootCodeRemap = RGXRISCVFW_BOOTLDR_CODE_REMAP; + IMG_UINT32 ui32BootDataRemap = RGXRISCVFW_BOOTLDR_DATA_REMAP; IMG_DEV_VIRTADDR sTmp; RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); - RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); - RGXAcquireBootCodeAddr(hPrivate, &sTmp); - RGXWriteReg64(hPrivate, - RGXRISCVFW_BOOTLDR_CODE_REMAP, - sTmp.uiAddr | - (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) - << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | - (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | - RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); - - RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); - RGXAcquireBootDataAddr(hPrivate, &sTmp); - RGXWriteReg64(hPrivate, - RGXRISCVFW_BOOTLDR_DATA_REMAP, - sTmp.uiAddr | - (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) - << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | - (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | -#if defined(SUPPORT_TRUSTED_DEVICE) - RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN | +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) < 4) #endif - RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); + { + RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); + RGXAcquireBootCodeAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + ui32BootCodeRemap, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); + + RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); + RGXAcquireBootDataAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + ui32BootDataRemap, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN) + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN | +#endif + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); + } /* Garten IDLE bit controlled by RISCV */ RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); } -#endif /*! 
@@ -473,76 +526,6 @@ static void RGXInitRiscvProcWrapper(const void *hPrivate) ******************************************************************************/ static void __RGXInitSLC(const void *hPrivate) { -#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY)) - { - IMG_UINT32 ui32Reg; - IMG_UINT32 ui32RegVal; - - /* - * SLC control - */ - ui32Reg = RGX_CR_SLC3_CTRL_MISC; - ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH | - RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN; - RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); - - /* - * SLC scramble bits - */ - { - IMG_UINT32 i; - IMG_UINT32 ui32Count=0; - IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate); - IMG_UINT64 aui64ScrambleValues[4]; - IMG_UINT32 aui32ScrambleRegs[] = { - RGX_CR_SLC3_SCRAMBLE, - RGX_CR_SLC3_SCRAMBLE2, - RGX_CR_SLC3_SCRAMBLE3, - RGX_CR_SLC3_SCRAMBLE4 - }; - - if (2 == ui32SLCBanks) - { - aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a); - aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a); - aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566); - aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a); - ui32Count = 4; - } - else if (4 == ui32SLCBanks) - { - aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4); - aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372); - aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1); - aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478); - ui32Count = 4; - - } - else if (8 == ui32SLCBanks) - { - aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688); - aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33); - aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447); - ui32Count = 3; - } - - for (i = 0; i < ui32Count; i++) - { - IMG_UINT32 ui32Reg = aui32ScrambleRegs[i]; - IMG_UINT64 ui64Value = aui64ScrambleValues[i]; - RGXWriteReg64(hPrivate, ui32Reg, ui64Value); - } - } - - { - /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */ - RGXCommentLog(hPrivate, "Disable forced SLC coherency"); - RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0); - } - } - else -#endif { IMG_UINT32 ui32Reg; IMG_UINT32 ui32RegVal; @@ -577,10 +560,7 @@ static void __RGXInitSLC(const void *hPrivate) */ ui32Reg = RGX_CR_SLC_CTRL_MISC; ui32RegVal = RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1; - -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ui32RegVal |= RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; -#endif #if defined(FIX_HW_BRN_60084_BIT_MASK) if (RGX_DEVICE_HAS_BRN(hPrivate, 60084)) @@ -596,19 +576,58 @@ static void __RGXInitSLC(const void *hPrivate) } #endif -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) /* Bypass burst combiner if SLC line size is smaller than 1024 bits */ if (RGXGetDeviceCacheLineSize(hPrivate) < 1024) { ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN; } -#endif + + if (RGX_DEVICE_HAS_BRN(hPrivate, 71242) && !RGX_DEVICE_HAS_FEATURE(hPrivate, GPU_MULTICORE_SUPPORT)) + { + ui32RegVal |= RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN; + } RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); } } +static void RGXWriteKernelCatBase(const void *hPrivate, IMG_DEV_PHYADDR sPCAddr) +{ + { + /* Write the cat-base address */ + RGXWriteKernelMMUPC64(hPrivate, + BIF_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), + RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT, + RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT, + ((sPCAddr.uiAddr + >> 
RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) + << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) + & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + /* Keep catbase registers in sync */ + RGXWriteKernelMMUPC64(hPrivate, + FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), + RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, + RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, + ((sPCAddr.uiAddr + >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) + << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) + & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); + } + + /* + * Trusted Firmware boot + */ +#if defined(SUPPORT_TRUSTED_DEVICE) + RGXCommentLog(hPrivate, "RGXWriteKernelCatBase: Trusted Device enabled"); + RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); +#endif + } +} + /*! ******************************************************************************* @@ -623,7 +642,19 @@ static void __RGXInitSLC(const void *hPrivate) ******************************************************************************/ static void RGXInitBIF(const void *hPrivate) { - if (!RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + /* + * Trusted Firmware boot + */ +#if defined(SUPPORT_TRUSTED_DEVICE) + RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); + RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); +#endif + } + else +#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ { IMG_DEV_PHYADDR sPCAddr; @@ -637,108 +668,51 @@ static void RGXInitBIF(const void *hPrivate) */ RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); -#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) - if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) - { - /* Write the cat-base address */ - RGXWriteKernelMMUPC64(hPrivate, - BIF_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), - RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT, - RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT, - ((sPCAddr.uiAddr - >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) - << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) - & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK); + RGXWriteKernelCatBase(hPrivate, sPCAddr); + } +} -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) - { - /* Keep catbase registers in sync */ - RGXWriteKernelMMUPC64(hPrivate, - FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), - RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, - RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, - ((sPCAddr.uiAddr - >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) - << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) - & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); - } -#endif - /* - * Trusted Firmware boot - */ -#if defined(SUPPORT_TRUSTED_DEVICE) - RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); - RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); -#endif - } - else -#endif /* defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) */ - { - /* Oceanic does not have this define, but has a MIPS processor so the codepath - * will not run */ -#if defined(RGX_CR_MMU_CBASE_MAPPING) - IMG_UINT32 uiPCAddr; - uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) - << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) - & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); - - /* Set the mapping context */ - RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); - (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ - - /* Write the cat-base address */ - RGXWriteKernelMMUPC32(hPrivate, - 
RGX_CR_MMU_CBASE_MAPPING, - RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, - RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, - uiPCAddr); - -#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) - /* Set-up different MMU ID mapping to the same PC used above */ - RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); - (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ - - RGXWriteKernelMMUPC32(hPrivate, - RGX_CR_MMU_CBASE_MAPPING, - RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, - RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, - uiPCAddr); -#endif -#endif - } - } - else +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) +/**************************************************************************/ /*! +@Function RGXInitMMURangeRegisters +@Description Initialises MMU range registers for Non4K pages. +@Input hPrivate Implementation specific data +@Return void + */ /**************************************************************************/ +static void RGXInitMMURangeRegisters(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; + PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + IMG_UINT32 ui32RegAddr = RGX_CR_MMU_PAGE_SIZE_RANGE_ONE; + IMG_UINT32 i; + + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i, ui32RegAddr += sizeof(IMG_UINT64)) { - /* - * Trusted Firmware boot - */ -#if defined(SUPPORT_TRUSTED_DEVICE) - RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); - RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); -#endif + RGXWriteReg64(hPrivate, ui32RegAddr, psDevInfo->aui64MMUPageSizeRangeValue[i]); } } +#endif /*! ******************************************************************************* - @Function RGXAXIACELiteInit + @Function RGXInitAXIACE - @Description Initialise AXI-ACE Lite interface + @Description Initialise AXI-ACE interface @Input hPrivate : Implementation specific data @Return void ******************************************************************************/ -static void RGXAXIACELiteInit(const void *hPrivate) +static void RGXInitAXIACE(const void *hPrivate) { - IMG_UINT32 ui32RegAddr; IMG_UINT64 ui64RegVal; + IMG_UINT32 ui32RegAddr; +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION; /* Setup AXI-ACE config. Set everything to outer cache */ @@ -761,136 +735,150 @@ static void RGXAXIACELiteInit(const void *hPrivate) #if defined(FIX_HW_BRN_68186_BIT_MASK) if (RGX_DEVICE_HAS_BRN(hPrivate, 68186)) { - /* default value for reg_enable_fence_out is zero. Force to 1 to allow core_clk < mem_clk */ + /* default value for reg_enable_fence_out is zero. Force to 1 to allow core_clk < mem_clk */ ui64RegVal |= (IMG_UINT64)1 << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT; } #endif +#else /* defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) */ + ui32RegAddr = RGX_CR_ACE_CTRL; -#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) - { - RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted"); - ui64RegVal |= IMG_UINT64_C(0xFC) - << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT; - } -#endif + /** + * The below configuration is only applicable for RGX cores supporting + * ACE/ACE-lite protocol and connected to ACE coherent interconnect. + */ + + /** + * Configure AxDomain and AxCache for MMU transactions. + * AxDomain set to non sharable (0x0). 
+ */ + ui64RegVal = RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE | + RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE; + + /** + * Configure AxCache for PM/MMU transactions. + * Set to same value (i.e WBRWALLOC caching, rgxmmunit.c:RGXDerivePTEProt8) + * as non-coherent PTEs + */ + ui64RegVal |= (IMG_UINT64_C(0xF)) << RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT; + + /** + * Configure AxDomain for non MMU transactions. + */ + ui64RegVal |= (IMG_UINT64)(RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE | + RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE); +#endif /* defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) */ RGXCommentLog(hPrivate, "Init AXI-ACE interface"); RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal); } -PVRSRV_ERROR RGXStart(const void *hPrivate) +static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCESSOR) { - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_CHAR *pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; - IMG_BOOL bMetaFW = IMG_FALSE; -#endif + IMG_UINT64 ui64SoftResetMask; -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) +#if defined(RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, PBE2_IN_XE)) { - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL; } else #endif -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - if (RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META)) { - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; - bMetaFW = IMG_TRUE; - bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); + ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL; } -#endif - if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET)) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) { - /* Disable the default sys_bus_secure protection to perform minimal setup */ - RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure"); - RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); - (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ + RGXCommentLog(hPrivate, "RGXStart: soft reset cpu core"); + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 0); } -#if defined(SUPPORT_SHARED_SLC) - /* When the SLC is shared, the SLC reset is performed by the System layer when calling - * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid - * soft_resetting it here. 
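For cores without the AXI ACE-Lite block, the RGXInitAXIACE() path above builds the ACE control value by ORing cache and shareability attributes for the MMU, PM and non-MMU transaction classes, then issues a single 64-bit write. The sketch below shows that compose-then-write-once structure only; every EXAMPLE_* encoding, the register index and the fake register bank are assumptions for illustration, not the RGX_CR_ACE_CTRL layout.

#include <stdint.h>

/* Placeholder attribute encodings -- illustrative only, not RGX_CR_ACE_CTRL bits. */
#define EXAMPLE_MMU_AWCACHE_WBWA      (0x7ull << 0)  /* write-back write-allocate (assumed) */
#define EXAMPLE_MMU_ARCACHE_WBRA      (0xBull << 4)  /* write-back read-allocate (assumed)  */
#define EXAMPLE_PM_MMU_AXCACHE_SHIFT  8u             /* PM/MMU AxCache field (assumed)      */
#define EXAMPLE_COH_DOMAIN_OUTER      (0x2ull << 12) /* coherent: outer shareable (assumed) */
#define EXAMPLE_NONCOH_DOMAIN_NONE    (0x0ull << 14) /* non-coherent: non-shareable (assumed) */

static uint64_t g_fake_regs[64];                     /* stand-in for the register bank */

static void write_reg64(uint32_t reg, uint64_t val)
{
    g_fake_regs[reg / sizeof(uint64_t)] = val;       /* real code goes through RGXWriteReg64 */
}

/* Compose every attribute first, then perform one register write, mirroring the
 * structure of the ACE control programming above. */
static void init_axi_ace_example(uint32_t ace_ctrl_reg)
{
    uint64_t val = EXAMPLE_MMU_AWCACHE_WBWA | EXAMPLE_MMU_ARCACHE_WBRA;

    val |= 0xFull << EXAMPLE_PM_MMU_AXCACHE_SHIFT;   /* PM/MMU AxCache: same WBRWALLOC value */
    val |= EXAMPLE_COH_DOMAIN_OUTER | EXAMPLE_NONCOH_DOMAIN_NONE;

    write_reg64(ace_ctrl_reg, val);
}

int main(void)
{
    init_axi_ace_example(0x10);                      /* hypothetical register offset */
    return 0;
}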
- */ -#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN) - RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)"); -#else -#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL) -#endif - -#if defined(RGX_S7_SOFT_RESET_DUSTS) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) { /* Set RGX in soft-reset */ - RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS); + RGXCommentLog(hPrivate, "RGXStart: soft reset everything"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, ui64SoftResetMask); /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); - RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2"); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2); + /* Take Rascal and Dust out of reset */ + RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, ui64SoftResetMask ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN); (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); /* Take everything out of reset but the FW processor */ - RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0); - - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); + RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR); - RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR); RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); } - else -#endif - { - /* Set RGX in soft-reset */ - RGXCommentLog(hPrivate, "RGXStart: soft reset everything"); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL); +} - /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); +static void DeassertMetaReset(const void *hPrivate) +{ + /* Need to wait for at least 32 cycles before taking the FW processor out of reset ... */ + RGXWaitCycles(hPrivate, 32, 3); - /* Take Rascal and Dust out of reset */ - RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset"); - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + /* ... 
and afterwards */ + RGXWaitCycles(hPrivate, 32, 3); +} - /* Take everything out of reset but the FW processor */ - RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR); +PVRSRV_ERROR RGXStart(const void *hPrivate) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_CHAR *pcRGXFW_PROCESSOR; + IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; + IMG_BOOL bMetaFW = IMG_FALSE; -#if defined(RGX_FEATURE_XE_ARCHITECTURE) && (RGX_FEATURE_XE_ARCHITECTURE > 1) - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_CPU_EN); -#else - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; + } #endif + else + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + bMetaFW = IMG_TRUE; + bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); + } - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET)) + { + /* Disable the default sys_bus_secure protection to perform minimal setup */ + RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ } - /* Enable clocks */ - RGXEnableClocks(hPrivate); +#if defined(RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK) + /* Only bypass HMMU if the module is present */ + if (RGX_DEVICE_HAS_FEATURE(hPrivate, HYPERVISOR_MMU)) + { + /* Always set HMMU in bypass mode */ + RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); + (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); + } +#endif + + /*! + * Start FW init sequence + */ + RGXResetSequence(hPrivate, pcRGXFW_PROCESSOR); /* * Initialise SLC. 
*/ -#if !defined(SUPPORT_SHARED_SLC) __RGXInitSLC(hPrivate); -#endif if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0) { @@ -899,7 +887,6 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL); } -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (bMetaFW) { if (bDoFWSlaveBoot) @@ -907,7 +894,6 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) /* Configure META to Slave boot */ RGXCommentLog(hPrivate, "RGXStart: META Slave boot"); RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); - } else { @@ -916,33 +902,39 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN); } } -#endif /* * Initialise Firmware wrapper */ -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) { RGXInitRiscvProcWrapper(hPrivate); } - else -#endif -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) - if (bMetaFW) + else if (bMetaFW) { RGXInitMetaProcWrapper(hPrivate); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) else -#endif { RGXInitMipsProcWrapper(hPrivate); } +#endif + +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, MMU_VERSION) >= 4) + { + /* initialise the MMU range based config registers for Non4K pages */ + RGXInitMMURangeRegisters(hPrivate); + } +#endif +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE)) +#endif { /* We must init the AXI-ACE interface before 1st BIF transaction */ - RGXAXIACELiteInit(hPrivate); + RGXInitAXIACE(hPrivate); } /* @@ -950,18 +942,11 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) */ RGXInitBIF(hPrivate); - RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); + RGXSetPoweredState(hPrivate, IMG_TRUE); - /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ - RGXWaitCycles(hPrivate, 32, 3); - - RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); - (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); - - /* ... 
and afterwards */ - RGXWaitCycles(hPrivate, 32, 3); + RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); + DeassertMetaReset(hPrivate); -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (bMetaFW && bDoFWSlaveBoot) { eError = RGXFabricCoherencyTest(hPrivate); @@ -972,21 +957,27 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) if (eError != PVRSRV_OK) return eError; } else -#endif { RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); -#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) { /* Bring Debug Module out of reset */ - RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) >= 4) + { + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4, RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__DMACTIVE_EN); + } + else +#endif + { + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); + } /* Boot the FW */ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1); RGXWaitCycles(hPrivate, 32, 3); } -#endif } #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) @@ -1000,36 +991,19 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) PVRSRV_ERROR RGXStop(const void *hPrivate) { -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) || defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) - IMG_BOOL bMipsFW = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); - IMG_BOOL bRiscvFW = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); - IMG_BOOL bMetaFW = !bMipsFW && !bRiscvFW; -#endif PVRSRV_ERROR eError = PVRSRV_OK; - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; + IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META); RGXDeviceAckIrq(hPrivate); + /* Set FW power state OFF to disable LISR handler */ + RGXSetPoweredState(hPrivate, IMG_FALSE); + /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW */ -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) - if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) + if (!(RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, LAYOUT_MARS) > 0)) { -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) - { - eError = RGXPollReg32(hPrivate, - RGX_CR_JONES_IDLE, - RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), - RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); - } - else -#endif { eError = RGXPollReg32(hPrivate, RGX_CR_SIDEKICK_IDLE, @@ -1039,32 +1013,19 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) if (eError != PVRSRV_OK) return eError; } -#endif - if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) + if (!(RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, LAYOUT_MARS) > 0)) { -#if !defined(SUPPORT_SHARED_SLC) /* * Wait for SLC to signal IDLE * For LAYOUT_MARS = 1, SLC would have been powered down by FW */ -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) - { - eError = RGXPollReg32(hPrivate, - RGX_CR_SLC3_IDLE, - 
RGX_CR_SLC3_IDLE_MASKFULL, - RGX_CR_SLC3_IDLE_MASKFULL); - } - else -#endif { eError = RGXPollReg32(hPrivate, RGX_CR_SLC_IDLE, RGX_CR_SLC_IDLE_MASKFULL, RGX_CR_SLC_IDLE_MASKFULL); } -#endif /* SUPPORT_SHARED_SLC */ if (eError != PVRSRV_OK) return eError; } @@ -1078,19 +1039,19 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); - /* Oceanic does not have defines for the second thread */ -#if defined(RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC) - RGXWriteReg32(hPrivate, - RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, - RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK - & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); - RGXWriteReg32(hPrivate, - RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, - RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK - & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); -#endif + if (bMetaFW) + { + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); + } -#if defined(PDUMP) && defined(RGX_FEATURE_META_MAX_VALUE_IDX) +#if defined(PDUMP) if (bMetaFW) { /* Disabling threads is only required for pdumps to stop the fw gracefully */ @@ -1111,19 +1072,51 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) * threads to avoid a race condition). * This is only really needed for PDumps but we do it anyway driver-live. */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); - (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ - +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) /* Wait for the Slave Port to finish all the transactions */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_REGISTER_UNPACKED_ACCESSES)) + { + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) + { + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA); /* Fence write */ + + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN); + } + else + { + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA); /* Fence write */ + + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN); + } + } + else +#endif + { + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ + + 
eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + } + if (eError != PVRSRV_OK) return eError; } #endif -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) /* Extra Idle checks */ eError = RGXPollReg32(hPrivate, RGX_CR_BIF_STATUS_MMU, @@ -1136,12 +1129,8 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) 0, RGX_CR_BIFPM_STATUS_MMU_MASKFULL); if (eError != PVRSRV_OK) return eError; -#endif -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) && - !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE)) -#endif + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE)) { eError = RGXPollReg32(hPrivate, RGX_CR_BIF_READS_EXT_STATUS, @@ -1150,24 +1139,18 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) if (eError != PVRSRV_OK) return eError; } -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) eError = RGXPollReg32(hPrivate, RGX_CR_BIFPM_READS_EXT_STATUS, 0, RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL); if (eError != PVRSRV_OK) return eError; -#endif - { - IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL; - eError = RGXPollReg64(hPrivate, - RGX_CR_SLC_STATUS1, - 0, - ui64SLCMask); - if (eError != PVRSRV_OK) return eError; - } + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS1, + 0, + RGX_CR_SLC_STATUS1_MASKFULL); + if (eError != PVRSRV_OK) return eError; -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) if (4 == RGXGetDeviceSLCBanks(hPrivate)) { eError = RGXPollReg64(hPrivate, @@ -1176,56 +1159,27 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_SLC_STATUS2_MASKFULL); if (eError != PVRSRV_OK) return eError; } -#endif - if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) + if (!(RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, LAYOUT_MARS) > 0)) { -#if !defined(SUPPORT_SHARED_SLC) /* * Wait for SLC to signal IDLE * For LAYOUT_MARS = 1, SLC would have been powered down by FW */ -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) - { - eError = RGXPollReg32(hPrivate, - RGX_CR_SLC3_IDLE, - RGX_CR_SLC3_IDLE_MASKFULL, - RGX_CR_SLC3_IDLE_MASKFULL); - } - else -#endif { eError = RGXPollReg32(hPrivate, RGX_CR_SLC_IDLE, RGX_CR_SLC_IDLE_MASKFULL, RGX_CR_SLC_IDLE_MASKFULL); } -#endif /* SUPPORT_SHARED_SLC */ if (eError != PVRSRV_OK) return eError; } /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW */ -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) - if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) + if (!(RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, LAYOUT_MARS) > 0)) { -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) - if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) - { -#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) - if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM)) - { - eError = RGXPollReg32(hPrivate, - RGX_CR_JONES_IDLE, - RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), - RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); - } -#endif - } - else -#endif { eError = RGXPollReg32(hPrivate, 
RGX_CR_SIDEKICK_IDLE, @@ -1235,9 +1189,7 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) if (eError != PVRSRV_OK) return eError; } -#endif -#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) if (bMetaFW) { IMG_UINT32 ui32RegValue; @@ -1252,7 +1204,6 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) /* Wait for Sidekick/Jones to signal IDLE including * the Garten Wrapper if there is no debugger attached * (TxVECINT_BHALT = 0x0) */ - if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) { eError = RGXPollReg32(hPrivate, RGX_CR_SIDEKICK_IDLE, @@ -1260,20 +1211,11 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_SIDEKICK_IDLE_GARTEN_EN); if (eError != PVRSRV_OK) return eError; } - else - { - eError = RGXPollReg32(hPrivate, - RGX_CR_JONES_IDLE, - RGX_CR_JONES_IDLE_GARTEN_EN, - RGX_CR_JONES_IDLE_GARTEN_EN); - if (eError != PVRSRV_OK) return eError; - } } } else -#endif { - if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, LAYOUT_MARS) > 0) { /* As FW core has been moved from SIDEKICK to the new MARS domain, checking * idle bits for CPU & System Arbiter excluding SOCIF which will never be Idle @@ -1285,7 +1227,6 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN); if (eError != PVRSRV_OK) return eError; } -#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) else { eError = RGXPollReg32(hPrivate, @@ -1294,42 +1235,11 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_SIDEKICK_IDLE_GARTEN_EN); if (eError != PVRSRV_OK) return eError; } -#endif } return eError; } - -/* - * RGXInitSLC - */ -#if defined(SUPPORT_SHARED_SLC) -PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle) -{ - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo; - void *pvPowerParams; - - if (psDeviceNode == NULL) - { - return PVRSRV_ERROR_INVALID_PARAMS; - } - psDevInfo = psDeviceNode->pvDevice; - pvPowerParams = &psDevInfo->sLayerParams; - - /* reset the SLC */ - RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC"); - RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN); - - /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ - (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET); - - /* Take everything out of reset */ - RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0); - - __RGXInitSLC(pvPowerParams); - - return PVRSRV_OK; -} -#endif +/****************************************************************************** + End of file (rgxstartstop.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.c index bf78894babc0..515b988e938f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.c @@ -51,6 +51,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_debug.h" #include "rgxutils.h" #include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "rgxta3d.h" #include "rgxmem.h" #include "allocmem.h" @@ -64,7 +65,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxhwperf.h" #include "ospvr_gputrace.h" #include "rgxsyncutils.h" -#include "htbuffer.h" +#include "htbserver.h" #include "rgxdefs_km.h" #include "rgx_fwif_km.h" @@ -74,6 +75,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "sync.h" #include "process_stats.h" +#if !defined(PM_INTERACTIVE_MODE) +#include "rgxpmdefs.h" +#endif + #include "rgxtimerquery.h" #if defined(SUPPORT_BUFFER_SYNC) @@ -89,10 +94,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #if defined(SUPPORT_WORKLOAD_ESTIMATION) #include "rgxworkest.h" - #define HASH_CLEAN_LIMIT 6 #endif +#include "rgxmmudefs_km.h" + /* Enable this to dump the compiled list of UFOs prior to kick call */ #define ENABLE_TA3D_UFO_DUMP 0 @@ -102,8 +108,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define CHKPT_DBG(X) PVR_DPF(X) static INLINE void _DebugSyncValues(const IMG_CHAR *pszFunction, - const IMG_UINT32 *pui32UpdateValues, - const IMG_UINT32 ui32Count) + const IMG_UINT32 *pui32UpdateValues, + const IMG_UINT32 ui32Count) { IMG_UINT32 i; IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; @@ -117,9 +123,9 @@ void _DebugSyncValues(const IMG_CHAR *pszFunction, static INLINE void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction, - const IMG_CHAR *pszDMName, - const PSYNC_CHECKPOINT *apsSyncCheckpoints, - const IMG_UINT32 ui32Count) + const IMG_CHAR *pszDMName, + const PSYNC_CHECKPOINT *apsSyncCheckpoints, + const IMG_UINT32 ui32Count) { IMG_UINT32 i; @@ -194,103 +200,50 @@ struct _RGX_SERVER_RENDER_CONTEXT_ { static PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, - PVRSRV_DEVICE_NODE *psDeviceNode) + PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; /* Check if the FW has finished with this resource ... */ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, - psTAData->psServerCommonContext, - RGXFWIF_DM_GEOM, - PDUMP_FLAGS_CONTINUOUS); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + psTAData->psServerCommonContext, + RGXFWIF_DM_GEOM, + PDUMP_FLAGS_CONTINUOUS); - /* ... it has so we can free its resources */ -#if defined(DEBUG) - /* Log the number of TA context stores which occurred */ - { - RGXFWIF_TACTX_STATE *psFWTAState; + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); - eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc, - (void**)&psFWTAState); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware render context state (%s)", - __func__, PVRSRVGetErrorString(eError))); - } - else - { - /* Release the CPU virt addr */ - DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); - } - } -#endif + /* ... it has so we can free its resources */ FWCommonContextFree(psTAData->psServerCommonContext); DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); psTAData->psServerCommonContext = NULL; - return PVRSRV_OK; + + return eError; } static PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, - PVRSRV_DEVICE_NODE *psDeviceNode) + PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; /* Check if the FW has finished with this resource ... 
*/ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, - ps3DData->psServerCommonContext, - RGXFWIF_DM_3D, - PDUMP_FLAGS_CONTINUOUS); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + ps3DData->psServerCommonContext, + RGXFWIF_DM_3D, + PDUMP_FLAGS_CONTINUOUS); - /* ... it has so we can free its resources */ -#if defined(DEBUG) - /* Log the number of 3D context stores which occurred */ - { - RGXFWIF_3DCTX_STATE *psFW3DState; - - eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc, - (void**)&psFW3DState); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware render context state (%s)", - __func__, PVRSRVGetErrorString(eError))); - } - else - { - /* Release the CPU virt addr */ - DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc); - } - } -#endif + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); + /* ... it has so we can free its resources */ FWCommonContextFree(ps3DData->psServerCommonContext); DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); ps3DData->psServerCommonContext = NULL; - return PVRSRV_OK; + + return eError; } static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) @@ -299,7 +252,7 @@ static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) PVRSRV_ERROR eError; eError = PMRDumpPageList(psPMRNode->psPMR, - RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -314,9 +267,9 @@ IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) DLLIST_NODE *psNode, *psNext; PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx, - psFreeList->sFreeListFWDevVAddr.ui32Addr, - psFreeList->ui32FreelistID, - psFreeList->ui64FreelistChecksum)); + psFreeList->sFreeListFWDevVAddr.ui32Addr, + psFreeList->ui32FreelistID, + psFreeList->ui64FreelistChecksum)); /* Dump Init FreeList page list */ PVR_LOG((" Initial Memory block")); @@ -336,9 +289,9 @@ IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) } static void _CheckFreelist(RGX_FREELIST *psFreeList, - IMG_UINT32 ui32NumOfPagesToCheck, - IMG_UINT64 ui64ExpectedCheckSum, - IMG_UINT64 *pui64CalculatedCheckSum) + IMG_UINT32 ui32NumOfPagesToCheck, + IMG_UINT64 ui64ExpectedCheckSum, + IMG_UINT64 *pui64CalculatedCheckSum) { #if defined(NO_HARDWARE) /* No checksum needed as we have all information in the pdumps */ @@ -373,13 +326,12 @@ static void _CheckFreelist(RGX_FREELIST *psFreeList, /* Copy freelist content into Buffer */ eError = PMR_ReadBytes(psFreeList->psFreeListPMR, - psFreeList->uiFreeListPMROffset + - (((psFreeList->ui32MaxFLPages - - psFreeList->ui32CurrentFLPages - - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) & - ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)), - pui8Buffer, - ui32NumOfPagesToCheck * sizeof(IMG_UINT32), + psFreeList->uiFreeListPMROffset + + (((psFreeList->ui32MaxFLPages - + psFreeList->ui32CurrentFLPages - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)), + pui8Buffer, + ui32NumOfPagesToCheck * sizeof(IMG_UINT32), &uiNumBytes); if (eError != PVRSRV_OK) { @@ -452,10 +404,10 @@ static void _CheckFreelist(RGX_FREELIST *psFreeList, * is not used. 
*/ static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, - IMG_UINT32 ui32FLPages) + IMG_UINT32 ui32FLPages) { IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) & - ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); + ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages) { @@ -467,8 +419,8 @@ static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, - IMG_UINT32 ui32NumPages, - PDLLIST_NODE pListHeader) + IMG_UINT32 ui32NumPages, + PDLLIST_NODE pListHeader) { RGX_PMR_NODE *psPMRNode; IMG_DEVMEM_SIZE_T uiSize; @@ -540,6 +492,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, { uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; } + eError = PhysmemNewRamBackedPMR(psFreeList->psConnection, psFreeList->psDevInfo->psDeviceNode, uiSize, @@ -547,7 +500,10 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, 1, &ui32MappingTable, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, - PVRSRV_MEMALLOCFLAG_GPU_READABLE, + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_OS_LINUX_DENY_MOVE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_PRIVATE) | + PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP, sizeof(szAllocName), szAllocName, psFreeList->ownerPid, @@ -557,9 +513,9 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, - __func__, - (IMG_UINT64)uiSize)); + "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, + __func__, + (IMG_UINT64)uiSize)); goto ErrorBlockAlloc; } @@ -586,7 +542,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR, - psFreeList->ownerPid); + psFreeList->ownerPid); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -595,35 +551,36 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, PVRSRVGetErrorString(eError))); } - /* Attach RI information */ - eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR, - OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), - szAllocName, - 0, - uiSize, - IMG_FALSE, - IMG_FALSE, - &psPMRNode->hRIHandle); + /* Attach RI information */ + eError = RIWriteMEMDESCEntryKM(psFreeList->psConnection, + psFreeList->psDevInfo->psDeviceNode, + psPMRNode->psPMR, + OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), + szAllocName, + 0, + uiSize, + 0, + &psPMRNode->hRIHandle); PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM"); #endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ /* write Freelist with Memory Block physical addresses */ eError = PMRWritePMPageList( - /* Target PMR, offset, and length */ - psFreeList->psFreeListPMR, - (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), - (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), - /* Referenced PMR, and "page" granularity */ - psPMRNode->psPMR, - RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, - &psPMRNode->psPageList); + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? 
uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to write pages of Node %p", - __func__, - psPMRNode)); + "%s: Failed to write pages of Node %p", + __func__, + psPMRNode)); goto ErrorPopulateFreelist; } @@ -638,15 +595,22 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, PVRSRV_ERROR res; IMG_HANDLE hMapHandle; + /* Check for overflow. Validate size and offset. */ + PVR_GOTO_IF_INVALID_PARAM(psFreeList->uiFreeListPMROffset + ui32MapSize > psFreeList->uiFreeListPMROffset, + eError, ErrorPopulateFreelist); + PVR_GOTO_IF_INVALID_PARAM(psFreeList->uiFreeListPMROffset + ui32MapSize <= PMR_LogicalSize(psFreeList->psFreeListPMR), + eError, ErrorPopulateFreelist); + /* Map both the FL and the shadow FL */ res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize, - (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); + (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); if (res != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map freelist (ID=%d)", - __func__, - psFreeList->ui32FreelistID)); + "%s: Failed to map freelist (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto ErrorPopulateFreelist; } @@ -664,11 +628,11 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32)) { PMRPDumpCopyMem32(psFreeList->psFreeListPMR, - uiCurrOffset + ui32FLMaxSize, - psFreeList->psFreeListPMR, - uiCurrOffset, - ":SYSMEM:$1", - PDUMP_FLAGS_CONTINUOUS); + uiCurrOffset + ui32FLMaxSize, + psFreeList->psFreeListPMR, + uiCurrOffset, + ":SYSMEM:$1", + PDUMP_FLAGS_CONTINUOUS); } } #endif @@ -679,9 +643,10 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, if (res != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to release freelist mapping (ID=%d)", - __func__, - psFreeList->ui32FreelistID)); + "%s: Failed to release freelist mapping (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto ErrorPopulateFreelist; } } @@ -721,10 +686,9 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, } OSLockRelease(psFreeList->psDevInfo->hLockFreeList); - PVR_DPF((PVR_DBG_MESSAGE, - "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", psFreeList, - ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"), + ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? 
"Create initial" : "Grow by"), ui32NumPages, psFreeList->ui32CurrentFLPages, psFreeList->ui32ReadyFLPages, @@ -736,7 +700,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, /* Error handling */ ErrorPopulateFreelist: - PMRUnrefPMR(psPMRNode->psPMR); + (void) PMRUnrefPMR(psPMRNode->psPMR); ErrorBlockAlloc: OSFreeMem(psPMRNode); @@ -749,7 +713,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, } static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, - RGX_FREELIST *psFreeList) + RGX_FREELIST *psFreeList) { DLLIST_NODE *psNode; RGX_PMR_NODE *psPMRNode; @@ -786,9 +750,9 @@ static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to unwrite pages of Node %p", - __func__, - psPMRNode)); + "%s: Failed to unwrite pages of Node %p", + __func__, + psPMRNode)); PVR_ASSERT(IMG_FALSE); } @@ -802,17 +766,17 @@ static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); } -#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ /* Free PMR (We should be the only one that holds a ref on the PMR) */ eError = PMRUnrefPMR(psPMRNode->psPMR); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to free PB block %p (%s)", - __func__, - psPMRNode->psPMR, - PVRSRVGetErrorString(eError))); + "%s: Failed to free PB block %p (%s)", + __func__, + psPMRNode->psPMR, + PVRSRVGetErrorString(eError))); PVR_ASSERT(IMG_FALSE); } @@ -837,10 +801,10 @@ static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)", - psFreeList, - psPMRNode->ui32NumPages, - psFreeList->ui32CurrentFLPages, - psFreeList->ui32MaxFLPages)); + psFreeList, + psPMRNode->ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32MaxFLPages)); OSFreeMem(psPMRNode); } @@ -848,8 +812,8 @@ static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, { PVR_DPF((PVR_DBG_WARNING, "Freelist [0x%p]: shrink denied. 
PB already at initial PB size (%u pages)", - psFreeList, - psFreeList->ui32InitFLPages)); + psFreeList, + psFreeList->ui32InitFLPages)); eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN; } @@ -891,79 +855,83 @@ void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_ASSERT(psDevInfo); psFreeList = FindFreeList(psDevInfo, ui32FreelistID); - - if (psFreeList) + if (psFreeList == NULL) { - /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ - psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; - psFreeList->ui32ReadyFLPages = 0; + /* Should never happen */ + PVR_DPF((PVR_DBG_ERROR, + "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", + ui32FreelistID)); + PVR_ASSERT(IMG_FALSE); + return; + } - /* Try to grow the freelist */ - eError = RGXGrowFreeList(psFreeList, - psFreeList->ui32GrowFLPages, - &psFreeList->sMemoryBlockHead); + /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ + psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; - if (eError == PVRSRV_OK) - { - /* Grow successful, return size of grow size */ - ui32GrowValue = psFreeList->ui32GrowFLPages; + /* Try to grow the freelist */ + eError = RGXGrowFreeList(psFreeList, + psFreeList->ui32GrowFLPages, + &psFreeList->sMemoryBlockHead); + + if (eError == PVRSRV_OK) + { + /* Grow successful, return size of grow size */ + ui32GrowValue = psFreeList->ui32GrowFLPages; - psFreeList->ui32NumGrowReqByFW++; + psFreeList->ui32NumGrowReqByFW++; #if defined(PVRSRV_ENABLE_PROCESS_STATS) - /* Update Stats */ - PVRSRVStatsUpdateFreelistStats(psDevInfo->psDeviceNode, - 0, - 1, /* Add 1 to the appropriate counter (Requests by FW) */ - psFreeList->ui32InitFLPages, - psFreeList->ui32NumHighPages, - psFreeList->ownerPid); + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(psDevInfo->psDeviceNode, + 0, + 1, /* Add 1 to the appropriate counter (Requests by FW) */ + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, + psFreeList->ownerPid); #endif - } - else - { - /* Grow failed */ - ui32GrowValue = 0; - PVR_DPF((PVR_DBG_ERROR, - "Grow for FreeList %p failed (%s)", - psFreeList, - PVRSRVGetErrorString(eError))); - } - - /* send feedback */ - s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; - s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; - s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; - s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; - s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; - - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_3D, - &s3DCCBCmd, - PDUMP_FLAGS_NONE); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - /* Kernel CCB should never fill up, as the FW is processing them right away */ - - PVR_ASSERT(eError == PVRSRV_OK); } else { - /* Should never happen */ + /* Grow failed */ + ui32GrowValue = 0; PVR_DPF((PVR_DBG_ERROR, - "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", - ui32FreelistID)); - PVR_ASSERT(IMG_FALSE); + "Grow for FreeList %p failed (%s)", + psFreeList, + PVRSRVGetErrorString(eError))); } + + /* send feedback */ + s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; + 
s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: Grow pages=%u, new pages=%u, ready pages=%u, counter=%d", + psFreeList, + ui32GrowValue, + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages, + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages, + psFreeList->ui32NumGrowReqByFW)); + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_3D, + &s3DCCBCmd, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + /* Kernel CCB should never fill up, as the FW is processing them right away */ + + PVR_ASSERT(eError == PVRSRV_OK); } static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) @@ -1024,7 +992,6 @@ static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) } } - psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages; } @@ -1062,19 +1029,73 @@ static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; } - /* Reset the firmware freelist structure */ + /* Update firmware freelist structure */ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); if (eError != PVRSRV_OK) { return eError; } + psFWFreeList->ui32MaxPages = psFreeList->ui32MaxFLPages; + psFWFreeList->ui32GrowPages = psFreeList->ui32GrowFLPages; + psFWFreeList->ui32CurrentPages = psFreeList->ui32CurrentFLPages; + psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; + psFWFreeList->bGrowPending = IMG_FALSE; + psFWFreeList->ui32ReadyPages = psFreeList->ui32ReadyFLPages; + +#if defined(PM_INTERACTIVE_MODE) psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui64CurrentDevVAddr = (psFWFreeList->psFreeListDevVAddr.uiAddr + + ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); psFWFreeList->ui32AllocatedPageCount = 0; psFWFreeList->ui32AllocatedMMUPageCount = 0; +#else + psFWFreeList->bUpdatePending = IMG_FALSE; + psFWFreeList->ui32UpdateNewPages = 0; + psFWFreeList->ui32UpdateNewReadyPages = 0; + psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; +#endif + RGXFwSharedMemCacheOpPtr(psFWFreeList, FLUSH); DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); +#if !defined(PM_INTERACTIVE_MODE) + /* Reset freelist state buffer */ + { + RGX_PM_FREELISTSTATE_BUFFER sFLState; + size_t uiNbBytes; + IMG_DEV_VIRTADDR sFLBaseAddr; + + eError = PMR_ReadBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + + if (eError != PVRSRV_OK) + { + return eError; + } + + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + + sFLBaseAddr.uiAddr = (psFreeList->sFreeListBaseDevVAddr.uiAddr + + ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + /* Note: Freelist base address is packed shifted down. 
*/ + RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(sFLState, sFLBaseAddr.uiAddr >> RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT); + RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(sFLState, psFreeList->ui32CurrentFLPages - 1); + RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(sFLState, 0); + RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(sFLState, 0); + + eError = PMR_WriteBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + + if (eError != PVRSRV_OK) + { + return eError; + } + + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + } +#endif + /* Check the Freelist checksum if required (as the list is fully populated) */ if (psFreeList->bCheckFreelist) { @@ -1100,8 +1121,8 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; RGXFWIF_HWRTDATA *psHWRTData; #endif - IMG_UINT32 ui32FinalFreelistsCount = 0; - IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ + IMG_UINT32 ui32PIDCount = 0; + IMG_UINT32 aui32PIDList[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; PVR_ASSERT(psDevInfo != NULL); PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); @@ -1123,69 +1144,53 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, { sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; - aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; } - ui32FinalFreelistsCount = ui32FreelistsCount; - /* - * The list of freelists we have been given for reconstruction will - * consist of local and global freelists (maybe MMU as well). Any - * local freelists should have their global list specified as well. - * There may be cases where the global freelist is not given (in - * cases of partial setups before a poll failure for example). To - * handle that we must first ensure every local freelist has a global - * freelist specified, otherwise we add that to the 'final' list. - * This final list of freelists is created in a first pass. - * - * Even with the global freelists listed, there may be other local - * freelists not listed, which are going to have their global freelist - * reconstructed. Therefore we have to find those freelists as well - * meaning we will have to iterate the entire list of freelists to - * find which must be reconstructed. This is the second pass. + * All freelists belonging to the same PID will be reconstructed. + * This simplifies tracking for AGP, since there is no longer a + * single global freelist per local freelist. The list of unique + * PIDs is calculated from the list of freelists in this first pass. */ OSLockAcquire(psDevInfo->hLockFreeList); dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) { RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); - IMG_BOOL bInList = IMG_FALSE; - IMG_BOOL bGlobalInList = IMG_FALSE; - /* Check if this local freelist is in the list and ensure its global is too. 
*/ - if (psFreeList->ui32FreelistGlobalID != 0) + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) { - for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + IMG_UINT32 ui32PIDLoop; + + if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) { - if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) + for (ui32PIDLoop = 0; ui32PIDLoop < ui32PIDCount; ui32PIDLoop++) { - bInList = IMG_TRUE; + if (aui32PIDList[ui32PIDLoop] == psFreeList->ownerPid) + { + break; + } } - if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + + if (ui32PIDLoop == ui32PIDCount) { - bGlobalInList = IMG_TRUE; + aui32PIDList[ui32PIDCount++] = psFreeList->ownerPid; } } - - if (bInList && !bGlobalInList) - { - aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; - ui32FinalFreelistsCount++; - } } } dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) { RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); IMG_BOOL bReconstruct = IMG_FALSE; + IMG_UINT32 ui32PIDLoop; /* - * Check if this freelist needs to be reconstructed (was it requested - * or is its global freelist going to be reconstructed)... + * Check if this freelist needs to be reconstructed (is it in the list + * of PIDs which will have every single one of their freelists reconstructed) */ - for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + for (ui32PIDLoop = 0; ui32PIDLoop < ui32PIDCount; ui32PIDLoop++) { - if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || - aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + if (aui32PIDList[ui32PIDLoop] == psFreeList->ownerPid) { bReconstruct = IMG_TRUE; break; @@ -1214,6 +1219,8 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; + RGXFwSharedMemCacheOpValue(psHWRTData->eState, FLUSH); + RGXFwSharedMemCacheOpValue(psHWRTData->ui32HWRTDataFlags, FLUSH); DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); } @@ -1249,37 +1256,36 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, } /* send feedback */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GEOM, - &sTACCBCmd, - PDUMP_FLAGS_NONE); + RGXFWIF_DM_GEOM, + &sTACCBCmd, + PDUMP_FLAGS_NONE); if (eError != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); - /* Kernel CCB should never fill up, as the FW is processing them right away */ + /* Kernel CCB should never fill up, as the FW is processing them right away */ PVR_ASSERT(eError == PVRSRV_OK); } /* Create a single HWRTData instance */ -static PVRSRV_ERROR RGXCreateHWRTData_aux( - CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEV_VIRTADDR psVHeapTableDevVAddr, - IMG_DEV_VIRTADDR psPMMListDevVAddr, /* per-HWRTData */ - RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], - IMG_DEV_VIRTADDR sTailPtrsDevVAddr, - IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr, /* per-HWRTData */ - IMG_DEV_VIRTADDR sRgnHeaderDevVAddr, /* per-HWRTData */ - IMG_DEV_VIRTADDR sRTCDevVAddr, - IMG_UINT16 ui16MaxRTs, - RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, - RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ +static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE 
*psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr, /* per-HWRTData */ + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr, /* per-HWRTData */ + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr, /* per-HWRTData */ + IMG_DEV_VIRTADDR sRTCDevVAddr, + IMG_UINT16 ui16MaxRTs, + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo; @@ -1296,6 +1302,14 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux( /* local pointer for CPU-mapped [FW]HWRTData */ RGXFWIF_HWRTDATA *psHWRTData = NULL; + /* + * Fill in a local copy of RGXFWIF_HWRTDATA first to reduce reads and writes to + * device memory. Using a byte array buffer avoids the limitation that some OSs + * cannot align local variables to 64 bytes, usually needed by RGXFWIF_HWRTDATA + */ + IMG_BYTE aui8HWRTDataLocal[sizeof(RGXFWIF_HWRTDATA)] = {0}; + RGXFWIF_HWRTDATA *psHWRTDataLocal = (RGXFWIF_HWRTDATA *)aui8HWRTDataLocal; + PVR_UNREFERENCED_PARAMETER(psConnection); /* Prepare the HW RT DataSet struct */ @@ -1316,14 +1330,14 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux( /* * This FW RT-Data is only mapped into kernel for initialisation. * Otherwise this allocation is only used by the FW. - * Therefore the GPU cache doesn't need coherency, and write-combine will - * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + * Therefore the GPU cache doesn't need coherency and write-combine will + * suffice on the CPU side. (WC buffer will be flushed at the first TA-kick) */ eError = DevmemFwAllocate(psDevInfo, - sizeof(RGXFWIF_HWRTDATA), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwHWRTData", - &psHWRTDataFwMemDesc); + sizeof(RGXFWIF_HWRTDATA), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwHwRTData", + &psHWRTDataFwMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1343,35 +1357,25 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux( (void **)&psHWRTData); PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); - psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; - - psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; - - psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr; +#if defined(PM_INTERACTIVE_MODE) + psHWRTDataLocal->psVHeapTableDevVAddr = psVHeapTableDevVAddr; +#endif - psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; - psHWRTData->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr; - psHWRTData->sRgnHeaderDevVAddr = sRgnHeaderDevVAddr; - psHWRTData->sRTCDevVAddr = sRTCDevVAddr; + psHWRTDataLocal->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; - OSLockAcquire(psDevInfo->hLockFreeList); - for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) - { - psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; - psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; - psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; - /* invalid initial snapshot value, the snapshot is always taken during first kick - * and hence the value get replaced during the first kick anyway. So it's safe to set it 0.
- */ - psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; - } -#if !defined(SUPPORT_SHADOW_FREELISTS) - dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); +#if defined(PM_INTERACTIVE_MODE) + psHWRTDataLocal->psPMMListDevVAddr = sPMDataDevVAddr; +#else + psHWRTDataLocal->sPMRenderStateDevVAddr = sPMDataDevVAddr; #endif - OSLockRelease(psDevInfo->hLockFreeList); + + psHWRTDataLocal->sTailPtrsDevVAddr = sTailPtrsDevVAddr; + psHWRTDataLocal->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr; + psHWRTDataLocal->sRgnHeaderDevVAddr = sRgnHeaderDevVAddr; + psHWRTDataLocal->sRTCDevVAddr = sRTCDevVAddr; { - RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; + RGXFWIF_RTA_CTL *psRTACtl = &psHWRTDataLocal->sRTACtl; psRTACtl->ui32RenderTargetIndex = 0; psRTACtl->ui32ActiveRenderTargets = 0; @@ -1382,17 +1386,17 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux( if (ui16MaxRTs > 1) { PDUMPCOMMENT(psDeviceNode, "Allocate memory for shadow render target cache"); - eError = DevmemFwAllocate(psDevInfo, - ui16MaxRTs * sizeof(IMG_UINT32), - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwShadowRTCache", - &psRTArrayFwMemDesc); + eError = DevmemFwAllocate( psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwShadowRTCache", + &psRTArrayFwMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1403,24 +1407,25 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux( } psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc; - eError = RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets, - psRTArrayFwMemDesc, - 0, - RFW_FWADDR_FLAG_NONE); + + eError = RGXSetFirmwareAddress( &psRTACtl->sValidRenderTargets, + psRTArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE ); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); PDUMPCOMMENT(psDeviceNode, "Allocate memory for tracking renders accumulation"); eError = DevmemFwAllocate(psDevInfo, - ui16MaxRTs * sizeof(IMG_UINT32), - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwRendersAccumulation", - &psRendersAccArrayFwMemDesc); + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwRendersAccumulation", + &psRendersAccArrayFwMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1429,37 +1434,51 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux( ui16MaxRTs, PVRSRVGetErrorString(eError))); goto FWAllocateRTAccArryError; } + psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc; - eError = 
RGXSetFirmwareAddress(&psRTACtl->sRTANumPartialRenders, - psRendersAccArrayFwMemDesc, - 0, - RFW_FWADDR_FLAG_NONE); + + eError = RGXSetFirmwareAddress( &psRTACtl->sRTANumPartialRenders, + psRendersAccArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE ); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError); } } + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; + psHWRTDataLocal->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; + /* Invalid initial snapshot value. The snapshot is always taken during the first + * kick and hence this value gets replaced, so it's safe to set it to zero. + */ + psHWRTDataLocal->aui32FreeListHWRSnapshot[ui32Loop] = 0; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + OSCachedMemCopy(psHWRTData, psHWRTDataLocal, sizeof(*psHWRTDataLocal)); + #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); #endif + RGXFwSharedMemCacheOpPtr(psHWRTData, FLUSH); DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); return PVRSRV_OK; FWAllocRTAccArryFwAddrError: DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc); FWAllocateRTAccArryError: - RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); + RGXUnsetFirmwareAddress(psRTArrayFwMemDesc); FWAllocateRTArryFwAddrError: - DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psRTArrayFwMemDesc); FWAllocateRTArryError: - OSLockAcquire(psDevInfo->hLockFreeList); - for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) - { - PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); - psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; - } - OSLockRelease(psDevInfo->hLockFreeList); DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); FWRTDataCpuMapError: RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); @@ -1518,8 +1537,9 @@ static void RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) } /* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ -PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, +PVRSRV_ERROR RGXCreateHWRTDataSet( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], IMG_DEV_VIRTADDR asPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], @@ -1546,23 +1566,112 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, IMG_UINT32 uiRgnHeaderSize, IMG_UINT32 ui32ISPMtileSize, IMG_UINT16 ui16MaxRTs, - RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(asVHeapTableDevVAddr); + PVR_UNREFERENCED_PARAMETER(asPMMListDevVAddr); + PVR_UNREFERENCED_PARAMETER(apsFreeLists); + PVR_UNREFERENCED_PARAMETER(ui32ScreenPixelMax); + 
PVR_UNREFERENCED_PARAMETER(ui64MultiSampleCtl); + PVR_UNREFERENCED_PARAMETER(ui64FlippedMultiSampleCtl); + PVR_UNREFERENCED_PARAMETER(ui32TPCStride); + PVR_UNREFERENCED_PARAMETER(asTailPtrsDevVAddr); + PVR_UNREFERENCED_PARAMETER(ui32TPCSize); + PVR_UNREFERENCED_PARAMETER(ui32TEScreen); + PVR_UNREFERENCED_PARAMETER(ui32TEAA); + PVR_UNREFERENCED_PARAMETER(ui32TEMTILE1); + PVR_UNREFERENCED_PARAMETER(ui32TEMTILE2); + PVR_UNREFERENCED_PARAMETER(ui32MTileStride); + PVR_UNREFERENCED_PARAMETER(ui32ISPMergeLowerX); + PVR_UNREFERENCED_PARAMETER(ui32ISPMergeLowerY); + PVR_UNREFERENCED_PARAMETER(ui32ISPMergeUpperX); + PVR_UNREFERENCED_PARAMETER(ui32ISPMergeUpperY); + PVR_UNREFERENCED_PARAMETER(ui32ISPMergeScaleX); + PVR_UNREFERENCED_PARAMETER(ui32ISPMergeScaleY); + PVR_UNREFERENCED_PARAMETER(asMacrotileArrayDevVAddr); + PVR_UNREFERENCED_PARAMETER(asRgnHeaderDevVAddr); + PVR_UNREFERENCED_PARAMETER(asRTCDevVAddr); + PVR_UNREFERENCED_PARAMETER(uiRgnHeaderSize); + PVR_UNREFERENCED_PARAMETER(ui32ISPMtileSize); + PVR_UNREFERENCED_PARAMETER(ui16MaxRTs); + PVR_UNREFERENCED_PARAMETER(pasKMHWRTDataSet); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +} + +/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ +PVRSRV_ERROR RGXCreateHWRTDataSet2( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], + DEVMEMINT_RESERVATION *psPMMListsReservation, + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64MultiSampleCtl, + IMG_UINT64 ui64FlippedMultiSampleCtl, + IMG_UINT32 ui32TPCStride, + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32MTileStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_DEV_VIRTADDR asMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR asRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT32 uiRgnHeaderSize, + IMG_UINT32 ui32ISPMtileSize, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) { PVRSRV_ERROR eError; IMG_UINT32 ui32RTDataID; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32GlobalFLMaxPages, ui32LocalFLMaxPages; + IMG_DEVMEM_SIZE_T ui64MListSize; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; + RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; + DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + + PMR* psMListsPMR = NULL; + IMG_DEV_VIRTADDR sMListsDevVAddr; + + PVR_LOG_RETURN_IF_INVALID_PARAM(0 < ui16MaxRTs && ui16MaxRTs <= RGX_MAX_TA_RENDER_TARGETS, "Number of TA Render targets outside the range (0, RGX_MAX_TA_RENDER_TARGETS) is unsupported"); - RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; - RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; - DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; - RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + /* Check if freelists have correct sizes */ + eError = ValidateFreeListSizes(apsFreeLists, + &ui32LocalFLMaxPages, + &ui32GlobalFLMaxPages); + PVR_LOG_RETURN_IF_ERROR(eError, "Invalid freelist sizes"); + + ui64MListSize = RGXCalcMListSize(psDeviceNode, + 
ui32LocalFLMaxPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + ui32GlobalFLMaxPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE); + + eError = AcquireValidateRefCriticalBuffer(psDeviceNode, + psPMMListsReservation, + ui64MListSize * RGXMKIF_NUM_RTDATAS, + &psMListsPMR, + &sMListsDevVAddr); + PVR_LOG_RETURN_IF_ERROR(eError, "Failed to obtain or validate MLIST buffer"); /* Prepare KM cleanup object for HWRTDataCommon FW object */ psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); if (psHWRTDataCommonCookie == NULL) { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_HWRTDataCommonCookieAlloc; + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_HWRTDataCommonCookieAlloc; } /* @@ -1572,15 +1681,15 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) */ eError = DevmemFwAllocate(psDevInfo, - sizeof(RGXFWIF_HWRTDATA_COMMON), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwHWRTDataCommon", - &psHWRTDataCommonFwMemDesc); + sizeof(RGXFWIF_HWRTDATA_COMMON), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwHWRTDataCommon", + &psHWRTDataCommonFwMemDesc); if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); - goto err_HWRTDataCommonAlloc; + PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); + goto err_HWRTDataCommonAlloc; } eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr); @@ -1599,51 +1708,57 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, psHWRTDataCommon->ui32TEMTILE1 = ui32TEMTILE1; psHWRTDataCommon->ui32TEMTILE2 = ui32TEMTILE2; psHWRTDataCommon->ui32MTileStride = ui32MTileStride; - psHWRTDataCommon->ui32ISPMergeLowerX = ui32ISPMergeLowerX; - psHWRTDataCommon->ui32ISPMergeLowerY = ui32ISPMergeLowerY; - psHWRTDataCommon->ui32ISPMergeUpperX = ui32ISPMergeUpperX; - psHWRTDataCommon->ui32ISPMergeUpperY = ui32ISPMergeUpperY; - psHWRTDataCommon->ui32ISPMergeScaleX = ui32ISPMergeScaleX; - psHWRTDataCommon->ui32ISPMergeScaleY = ui32ISPMergeScaleY; - psHWRTDataCommon->uiRgnHeaderSize = uiRgnHeaderSize; - psHWRTDataCommon->ui32ISPMtileSize = ui32ISPMtileSize; + psHWRTDataCommon->ui32ISPMergeLowerX = ui32ISPMergeLowerX; + psHWRTDataCommon->ui32ISPMergeLowerY = ui32ISPMergeLowerY; + psHWRTDataCommon->ui32ISPMergeUpperX = ui32ISPMergeUpperX; + psHWRTDataCommon->ui32ISPMergeUpperY = ui32ISPMergeUpperY; + psHWRTDataCommon->ui32ISPMergeScaleX = ui32ISPMergeScaleX; + psHWRTDataCommon->ui32ISPMergeScaleY = ui32ISPMergeScaleY; + psHWRTDataCommon->uiRgnHeaderSize = uiRgnHeaderSize; + psHWRTDataCommon->ui32ISPMtileSize = ui32ISPMtileSize; #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump HWRTDataCommon"); DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS); #endif + RGXFwSharedMemCacheOpPtr(psHWRTDataCommon, FLUSH); DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc); psHWRTDataCommonCookie->ui32RefCount = 0; psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc; psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr; + psHWRTDataCommonCookie->psPMMListsReservation = psPMMListsReservation; + /* Here we are creating a set of HWRTData(s) the number of elements in the set equals RGXMKIF_NUM_RTDATAS. 
*/ for (ui32RTDataID = 0; ui32RTDataID < RGXMKIF_NUM_RTDATAS; ui32RTDataID++) { + IMG_DEV_VIRTADDR sMListDevVAddr; + sMListDevVAddr.uiAddr = sMListsDevVAddr.uiAddr + ui32RTDataID * ui64MListSize; + eError = RGXCreateHWRTData_aux( - psConnection, - psDeviceNode, - asVHeapTableDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], - asPMMListDevVAddr[ui32RTDataID], - &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS], - asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], - asMacrotileArrayDevVAddr[ui32RTDataID], - asRgnHeaderDevVAddr[ui32RTDataID], - asRTCDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], - ui16MaxRTs, - psHWRTDataCommonCookie, - &pasKMHWRTDataSet[ui32RTDataID]); + psConnection, + psDeviceNode, + asVHeapTableDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], + sMListDevVAddr, + &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS], + asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], + asMacrotileArrayDevVAddr[ui32RTDataID], + asRgnHeaderDevVAddr[ui32RTDataID], + asRTCDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], + ui16MaxRTs, + psHWRTDataCommonCookie, + &pasKMHWRTDataSet[ui32RTDataID]); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to create HWRTData [slot %u] (%s)", - __func__, - ui32RTDataID, - PVRSRVGetErrorString(eError))); + "%s: Failed to create HWRTData [slot %u] (%s)", + __func__, + ui32RTDataID, + PVRSRVGetErrorString(eError))); goto err_HWRTDataAlloc; } psHWRTDataCommonCookie->ui32RefCount += 1; @@ -1672,7 +1787,7 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, err_HWRTDataCommonAlloc: OSFreeMem(psHWRTDataCommonCookie); err_HWRTDataCommonCookieAlloc: - + UnrefAndReleaseCriticalBuffer(psPMMListsReservation); return eError; } @@ -1682,7 +1797,6 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, */ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) { - PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_DEVICE_NODE *psDevNode; PVRSRV_ERROR eError; PRGXFWIF_HWRTDATA psHWRTData; @@ -1691,7 +1805,6 @@ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) PVR_ASSERT(psKMHWRTDataSet); psDevNode = psKMHWRTDataSet->psDeviceNode; - psDevInfo = psDevNode->pvDevice; eError = RGXSetFirmwareAddress(&psHWRTData, psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, @@ -1700,10 +1813,10 @@ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) /* Cleanup HWRTData */ eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData); - if (eError != PVRSRV_OK) - { - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDevNode, + eError, + RGXFWRequestHWRTDataCleanUp); psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie; @@ -1737,31 +1850,87 @@ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) DevmemFwUnmapAndFree(psDevNode->pvDevice, psCommonCookie->psHWRTDataCommonFwMemDesc); + UnrefAndReleaseCriticalBuffer(psCommonCookie->psPMMListsReservation); OSFreeMem(psCommonCookie); } - return PVRSRV_OK; + return eError; } -PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_HANDLE hMemCtxPrivData, - IMG_UINT32 ui32MaxFLPages, - IMG_UINT32 ui32InitFLPages, - IMG_UINT32 ui32GrowFLPages, - IMG_UINT32 ui32GrowParamThreshold, - RGX_FREELIST *psGlobalFreeList, - IMG_BOOL bCheckFreelist, - IMG_DEV_VIRTADDR sFreeListDevVAddr, - PMR *psFreeListPMR, - IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, - RGX_FREELIST **ppsFreeList) +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA 
*psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + DEVMEMINT_RESERVATION *psFreeListReservation, + RGX_FREELIST **ppsFreeList) { - PVRSRV_ERROR eError; - RGXFWIF_FREELIST *psFWFreeList; - DEVMEM_MEMDESC *psFWFreelistMemDesc; - RGX_FREELIST *psFreeList; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_FREELIST *psFWFreeList; + RGXFWIF_FREELIST sFWFreeList = {{0}}; + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGX_FREELIST *psFreeList; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_DEV_VIRTADDR sFreeListDevVAddr; + PMR* psFreeListPMR = NULL; + + /* Obtain reference to reservation object */ + if (!DevmemIntReservationAcquire(psFreeListReservation)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire reservation for freelist buffer", + __func__)); + eError = PVRSRV_ERROR_REFCOUNT_OVERFLOW; + goto ErrorReservationAcquire; + } + + eError = DevmemIntGetReservationData(psFreeListReservation, &psFreeListPMR, &sFreeListDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error from DevmemIntGetReservationData: %s", + __func__, PVRSRVGetErrorString(eError))); + + goto ErrorAllocHost; + } + + /* Check if client properly allocated PMMETA_PROTECT */ + if ((PMR_Flags(psFreeListPMR) & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)) == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Freelist PMR must have PMMETA_PROTECT set", + __func__)); + eError = PVRSRV_ERROR_INVALID_FLAGS; + goto ErrorAllocHost; + } + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + if (PVRSRV_CHECK_OS_LINUX_MOVABLE(PMR_Flags(psFreeListPMR))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Freelist PMR must not have OS_LINUX_MOVABLE set", + __func__)); + eError = PVRSRV_ERROR_INVALID_FLAGS; + goto ErrorAllocHost; + } +#endif + + if (PMR_IsSparse(psFreeListPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Free list PMR cannot be sparse!", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto ErrorAllocHost; + } + + /* Ref the PMR to prevent resource being destroyed before use */ + eError = PMRRefPMR(psFreeListPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", ErrorAllocHost); if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) { @@ -1781,8 +1950,8 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; - PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", - __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", + __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); ui32InitFLPages = ui32NewInitFLPages; ui32GrowFLPages = ui32NewGrowFLPages; @@ -1794,10 +1963,10 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, if (psFreeList == NULL) { PVR_DPF((PVR_DBG_ERROR, - "%s: failed to allocate host data structure", - __func__)); + "%s: failed to allocate host data structure", + __func__)); eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto ErrorAllocHost; + goto ErrorRefPMR; } /* @@ -1808,33 +1977,37 @@ PVRSRV_ERROR 
RGXCreateFreeList(CONNECTION_DATA *psConnection, * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) */ eError = DevmemFwAllocate(psDevInfo, - sizeof(*psFWFreeList), - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwFreeList", - &psFWFreelistMemDesc); + sizeof(*psFWFreeList), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_OS_LINUX_DENY_MOVE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwFreeList", + &psFWFreelistMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: DevmemAllocate for RGXFWIF_FREELIST failed", - __func__)); + "%s: DevmemAllocate for RGXFWIF_FREELIST failed", + __func__)); goto FWFreeListAlloc; } /* Initialise host data structures */ psFreeList->psDevInfo = psDevInfo; psFreeList->psConnection = psConnection; + psFreeList->psFreeListPMR = psFreeListPMR; - psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; + psFreeList->psFreeListReservation = psFreeListReservation; + + psFreeList->uiFreeListPMROffset = 0U; + psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); @@ -1872,28 +2045,30 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, { const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); - psFWFreeList->ui32MaxPages = ui32MaxFLPages; - psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; - psFWFreeList->ui32GrowPages = ui32GrowFLPages; - psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; - psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr; - psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr + - ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & - ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); - psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; - psFWFreeList->bGrowPending = IMG_FALSE; - psFWFreeList->ui32ReadyPages = ui32ReadyPages; + sFWFreeList.ui32MaxPages = ui32MaxFLPages; + sFWFreeList.ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; + sFWFreeList.ui32GrowPages = ui32GrowFLPages; + sFWFreeList.psFreeListDevVAddr = sFreeListDevVAddr; +#if defined(PM_INTERACTIVE_MODE) + sFWFreeList.ui32CurrentStackTop = sFWFreeList.ui32CurrentPages - 1; + sFWFreeList.ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr + + ((ui32MaxFLPages - sFWFreeList.ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); +#endif + sFWFreeList.ui32FreeListID = psFreeList->ui32FreelistID; + sFWFreeList.bGrowPending = IMG_FALSE; + sFWFreeList.ui32ReadyPages = ui32ReadyPages; #if defined(SUPPORT_SHADOW_FREELISTS) /* Get 
the FW Memory Context address... */ - eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext, + eError = RGXSetFirmwareAddress(&sFWFreeList.psFWMemContext, RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData), 0, RFW_FWADDR_NOREF_FLAG); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", - __func__)); + "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", + __func__)); DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); goto FWFreeListCpuMap; } @@ -1902,60 +2077,63 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, #endif } + OSCachedMemCopy(psFWFreeList, &sFWFreeList, sizeof(sFWFreeList)); + RGXFwSharedMemCacheOpPtr(psFWFreeList, FLUSH); + PVR_DPF((PVR_DBG_MESSAGE, - "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, " - "Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", " - "Init FL base address 0x%016" IMG_UINT64_FMTSPECx, - psFreeList, - ui32MaxFLPages, - ui32InitFLPages, - sFreeListDevVAddr.uiAddr, - psFWFreeList->ui64CurrentDevVAddr)); + "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, " + "Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", " + "Init FL base address 0x%016" IMG_UINT64_FMTSPECx, + psFreeList, + ui32MaxFLPages, + ui32InitFLPages, + sFreeListDevVAddr.uiAddr, + sFWFreeList.ui64CurrentDevVAddr)); #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump FW FreeList"); DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS); +#if defined(PM_INTERACTIVE_MODE) /* * Separate dump of the Freelist's number of Pages and stack pointer. * This allows to easily modify the PB size in the out2.txt files. */ PDUMPCOMMENT(psDeviceNode, "FreeList TotalPages"); DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, - offsetof(RGXFWIF_FREELIST, ui32CurrentPages), - psFWFreeList->ui32CurrentPages, - PDUMP_FLAGS_CONTINUOUS); + offsetof(RGXFWIF_FREELIST, ui32CurrentPages), + sFWFreeList.ui32CurrentPages, + PDUMP_FLAGS_CONTINUOUS); PDUMPCOMMENT(psDeviceNode, "FreeList StackPointer"); DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, - offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), - psFWFreeList->ui32CurrentStackTop, - PDUMP_FLAGS_CONTINUOUS); + offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), + sFWFreeList.ui32CurrentStackTop, + PDUMP_FLAGS_CONTINUOUS); +#endif #endif - DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); - /* Add initial PB block */ eError = RGXGrowFreeList(psFreeList, - ui32InitFLPages, - &psFreeList->sMemoryBlockInitHead); + ui32InitFLPages, + &psFreeList->sMemoryBlockInitHead); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%d)", - __func__, - sFreeListDevVAddr.uiAddr, - eError)); + "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)", + __func__, + sFreeListDevVAddr.uiAddr, + PVRSRVGetErrorString(eError))); goto FWFreeListCpuMap; } #if defined(PVRSRV_ENABLE_PROCESS_STATS) /* Update Stats */ PVRSRVStatsUpdateFreelistStats(psDeviceNode, - 1, /* Add 1 to the appropriate counter (Requests by App)*/ - 0, - psFreeList->ui32InitFLPages, - psFreeList->ui32NumHighPages, - psFreeList->ownerPid); + 1, /* Add 1 to the appropriate counter (Requests by App)*/ + 0, + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, + psFreeList->ownerPid); #endif @@ -1967,7 +2145,7 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, /* Error handling */ 
FWFreeListCpuMap: - /* Remove freelists from list */ + /* Remove freelists from list */ OSLockAcquire(psDevInfo->hLockFreeList); dllist_remove_node(&psFreeList->sNode); OSLockRelease(psDevInfo->hLockFreeList); @@ -1979,7 +2157,13 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, FWFreeListAlloc: OSFreeMem(psFreeList); +ErrorRefPMR: + (void) PMRUnrefPMR(psFreeListPMR); + ErrorAllocHost: + DevmemIntReservationRelease(psFreeListReservation); + +ErrorReservationAcquire: PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -1987,7 +2171,7 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, /* RGXDestroyFreeList - */ +*/ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) { PVRSRV_ERROR eError; @@ -2007,13 +2191,15 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) /* Freelist is not in use => start firmware cleanup */ eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo, - psFreeList->sFreeListFWDevVAddr); - if (eError != PVRSRV_OK) - { - /* Can happen if the firmware took too long to handle the cleanup request, - * or if SLC-flushes didn't went through (due to some GPU lockup) */ - return eError; - } + psFreeList->sFreeListFWDevVAddr); + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psFreeList->psDevInfo->psDeviceNode, + eError, + RGXFWRequestFreeListCleanUp); + + /* Device becomes unrecoverable if the firmware took too long to + * handle the cleanup request, or if SLC-flushes didn't go through + * (due to some GPU lockup) */ /* Remove FreeList from linked list before we destroy it... */ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); @@ -2024,18 +2210,26 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) #endif OSLockRelease(psFreeList->psDevInfo->hLockFreeList); +#if defined(PM_INTERACTIVE_MODE) if (psFreeList->bCheckFreelist) { RGXFWIF_FREELIST *psFWFreeList; - IMG_UINT64 ui32CurrentStackTop; + IMG_UINT32 ui32CurrentStackTop; IMG_UINT64 ui64CheckSum; /* Get the current stack pointer for this free list */ DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + RGXFwSharedMemCacheOpValue(psFWFreeList->ui32CurrentStackTop, INVALIDATE); ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop; DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); - if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) + if (ui32CurrentStackTop > psFreeList->ui32MaxFLPages) + { + PVR_LOG(("%s: FW freelist corrupted (%d)", + __func__, + ui32CurrentStackTop)); + } + else if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) { /* Do consistency tests (as the list is fully populated) */ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); @@ -2046,6 +2240,7 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum); } } +#endif /* Destroy FW structures */ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc); @@ -2066,6 +2261,12 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead)); PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0); + /* Remove reference from the PMR and reservation resources */ + eError = PMRUnrefPMR(psFreeList->psFreeListPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + DevmemIntReservationRelease(psFreeList->psFreeListReservation); + /* free Freelist */ OSFreeMem(psFreeList); @@ -2075,17 +2276,18 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) /* 
RGXCreateZSBuffer - */ +*/ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEMINT_RESERVATION *psReservation, - PMR *psPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - RGX_ZSBUFFER_DATA **ppsZSBuffer) + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + RGX_ZSBUFFER_DATA **ppsZSBuffer) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGXFWIF_PRBUFFER *psFWZSBuffer; + RGXFWIF_PRBUFFER sFWZSBuffer = {0}; RGX_ZSBUFFER_DATA *psZSBuffer; DEVMEM_MEMDESC *psFWZSBufferMemDesc; IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE; @@ -2104,38 +2306,42 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, /* Populate Host data */ psZSBuffer->psDevInfo = psDevInfo; psZSBuffer->psReservation = psReservation; + + /* Obtain reference to reservation object */ + if (!DevmemIntReservationAcquire(psZSBuffer->psReservation)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire reservation for ZS-Buffer", + __func__)); + eError = PVRSRV_ERROR_REFCOUNT_OVERFLOW; + goto ErrorReservationAcquire; + } + psZSBuffer->psPMR = psPMR; - psZSBuffer->uiMapFlags = uiMapFlags; + /* Obtain reference to PMR */ + eError = PMRRefPMR(psZSBuffer->psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", ErrorRefPMR); + psZSBuffer->ui32RefCount = 0; + psZSBuffer->bIsBacked = IMG_FALSE; psZSBuffer->bOnDemand = bOnDemand; - if (bOnDemand) - { - /* psZSBuffer->ui32ZSBufferID set below with lock... */ - psZSBuffer->psMapping = NULL; - - OSLockAcquire(psDevInfo->hLockZSBuffer); - psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; - dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); - OSLockRelease(psDevInfo->hLockZSBuffer); - } /* Allocate firmware memory for ZS-Buffer. */ PDUMPCOMMENT(psDeviceNode, "Allocate firmware ZS-Buffer data structure"); eError = DevmemFwAllocate(psDevInfo, - sizeof(*psFWZSBuffer), - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwZSBuffer", - &psFWZSBufferMemDesc); + sizeof(*psFWZSBuffer), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwZSBuffer", + &psFWZSBufferMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2148,7 +2354,7 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, /* Temporarily map the firmware render context to the kernel. 
*/ eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc, - (void **)&psFWZSBuffer); + (void **)&psFWZSBuffer); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2158,20 +2364,31 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, goto ErrorAcquireFWZSBuffer; } - /* Populate FW ZS-Buffer data structure */ - psFWZSBuffer->bOnDemand = bOnDemand; - psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; - psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID; - /* Get firmware address of ZS-Buffer. */ eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); + if (bOnDemand) + { + /* psZSBuffer->ui32ZSBufferID set below with lock... */ + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; + dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); + OSLockRelease(psDevInfo->hLockZSBuffer); + } + + /* Populate FW ZS-Buffer data structure */ + sFWZSBuffer.bOnDemand = bOnDemand; + sFWZSBuffer.eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; + sFWZSBuffer.ui32BufferID = psZSBuffer->ui32ZSBufferID; + + OSCachedMemCopy(psFWZSBuffer, &sFWZSBuffer, sizeof(sFWZSBuffer)); /* Dump the ZS-Buffer and the memory content */ #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump firmware ZS-Buffer"); DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS); #endif + RGXFwSharedMemCacheOpPtr(psFWZSBuffer, FLUSH); /* Release address acquired above. */ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); @@ -2181,8 +2398,8 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, *ppsZSBuffer = psZSBuffer; PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)", - psZSBuffer, - (bOnDemand) ? "On-Demand": "Up-front")); + psZSBuffer, + (bOnDemand) ? "On-Demand": "Up-front")); psZSBuffer->owner=OSGetCurrentClientProcessIDKM(); @@ -2196,6 +2413,10 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc); ErrorAllocFWZSBuffer: + (void) PMRUnrefPMR(psZSBuffer->psPMR); +ErrorRefPMR: + DevmemIntReservationRelease(psZSBuffer->psReservation); +ErrorReservationAcquire: OSFreeMem(psZSBuffer); ErrorAllocCleanup: @@ -2206,42 +2427,57 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, /* RGXDestroyZSBuffer - */ +*/ PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer) { POS_LOCK hLockZSBuffer; - PVRSRV_ERROR eError; + PVRSRV_ERROR eError, eError2; PVR_ASSERT(psZSBuffer); hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; /* Request ZS Buffer cleanup */ eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo, - psZSBuffer->sZSBufferFWDevVAddr); - if (eError == PVRSRV_OK) - { - /* Free the firmware render context. 
*/ - RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); - DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); + psZSBuffer->sZSBufferFWDevVAddr); - /* Remove Deferred Allocation from list */ - if (psZSBuffer->bOnDemand) - { - OSLockAcquire(hLockZSBuffer); - PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); - dllist_remove_node(&psZSBuffer->sNode); - OSLockRelease(hLockZSBuffer); - } + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psZSBuffer->psDevInfo->psDeviceNode, + eError, + RGXFWRequestZSBufferCleanUp); - PVR_ASSERT(psZSBuffer->ui32RefCount == 0); + OSLockAcquire(hLockZSBuffer); - PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); + if (psZSBuffer->ui32RefCount != 0) + { + /* ZS-Buffer is still referenced */ + OSLockRelease(hLockZSBuffer); + PVR_DPF((PVR_DBG_WARNING, "Trying to destroy a ZS-Buffer [%p] that's still in use.", psZSBuffer)); + return PVRSRV_ERROR_RETRY; + } - /* Free ZS-Buffer host data structure */ - OSFreeMem(psZSBuffer); + /* Free the firmware render context. */ + RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); + DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); + /* Remove Deferred Allocation from list */ + if (psZSBuffer->bOnDemand) + { + PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); + dllist_remove_node(&psZSBuffer->sNode); } + PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); + + /* Release reference to reservation object and the PMR */ + eError2 = PMRUnrefPMR(psZSBuffer->psPMR); + PVR_LOG_IF_ERROR(eError2, "PMRUnrefPMR"); + + DevmemIntReservationRelease(psZSBuffer->psReservation); + + /* Free ZS-Buffer host data structure */ + OSFreeMem(psZSBuffer); + + OSLockRelease(hLockZSBuffer); + return eError; } @@ -2264,32 +2500,25 @@ RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing requested", - psZSBuffer, - psZSBuffer->ui32ZSBufferID)); + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; OSLockAcquire(hLockZSBuffer); - if (psZSBuffer->ui32RefCount == 0) + if (psZSBuffer->bIsBacked == IMG_FALSE) { IMG_HANDLE hDevmemHeap; - PVR_ASSERT(psZSBuffer->psMapping == NULL); - /* Get Heap */ eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap); - PVR_ASSERT(psZSBuffer->psMapping == NULL); if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL)) { OSLockRelease(hLockZSBuffer); return PVRSRV_ERROR_INVALID_HEAP; } - eError = DevmemIntMapPMR(hDevmemHeap, - psZSBuffer->psReservation, - psZSBuffer->psPMR, - psZSBuffer->uiMapFlags, - &psZSBuffer->psMapping); + eError = DevmemIntMapPMR(psZSBuffer->psReservation, psZSBuffer->psPMR); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2302,12 +2531,11 @@ RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) } PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired", - psZSBuffer, - psZSBuffer->ui32ZSBufferID)); - } + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); - /* Increase refcount*/ - psZSBuffer->ui32RefCount++; + psZSBuffer->bIsBacked = IMG_TRUE; + } OSLockRelease(hLockZSBuffer); @@ -2317,16 +2545,20 @@ RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, - RGX_POPULATION **ppsPopulation) + RGX_POPULATION **ppsPopulation) { RGX_POPULATION *psPopulation; PVRSRV_ERROR eError; + OSLockAcquire(psZSBuffer->psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount++; + 
OSLockRelease(psZSBuffer->psDevInfo->hLockZSBuffer); + psZSBuffer->ui32NumReqByApp++; #if defined(PVRSRV_ENABLE_PROCESS_STATS) PVRSRVStatsUpdateZSBufferStats(psZSBuffer->psDevInfo->psDeviceNode, - 1, 0, psZSBuffer->owner); + 1, 0, psZSBuffer->owner); #endif /* Do the backing */ @@ -2356,6 +2588,11 @@ RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, OnErrorBacking: PVR_ASSERT(eError != PVRSRV_OK); + + OSLockAcquire(psZSBuffer->psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount--; + OSLockRelease(psZSBuffer->psDevInfo->hLockZSBuffer); + return eError; } @@ -2374,39 +2611,33 @@ RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested", - psZSBuffer, - psZSBuffer->ui32ZSBufferID)); + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; OSLockAcquire(hLockZSBuffer); - if (psZSBuffer->bOnDemand) + if (psZSBuffer->bOnDemand && psZSBuffer->bIsBacked == IMG_TRUE) { - if (psZSBuffer->ui32RefCount == 1) + eError = DevmemIntUnmapPMR(psZSBuffer->psReservation); + if (eError != PVRSRV_OK) { - PVR_ASSERT(psZSBuffer->psMapping); - - eError = DevmemIntUnmapPMR(psZSBuffer->psMapping); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", - psZSBuffer, - psZSBuffer->ui32ZSBufferID, - PVRSRVGetErrorString(eError))); - OSLockRelease(hLockZSBuffer); - return eError; - } - - PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", + PVR_DPF((PVR_DBG_ERROR, + "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", psZSBuffer, - psZSBuffer->ui32ZSBufferID)); + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; } - } - /* Decrease refcount*/ - psZSBuffer->ui32RefCount--; + PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + + psZSBuffer->bIsBacked = IMG_FALSE; + } OSLockRelease(hLockZSBuffer); @@ -2417,24 +2648,30 @@ PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation) { PVRSRV_ERROR eError; - + POS_LOCK hLockZSBuffer; if (!psPopulation) { return PVRSRV_ERROR_INVALID_PARAMS; } + hLockZSBuffer = psPopulation->psZSBuffer->psDevInfo->hLockZSBuffer; + eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer); if (eError != PVRSRV_OK) { return eError; } + OSLockAcquire(hLockZSBuffer); + psPopulation->psZSBuffer->ui32RefCount--; + OSLockRelease(hLockZSBuffer); + OSFreeMem(psPopulation); return PVRSRV_OK; } -static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) +static RGX_ZSBUFFER_DATA *FindAndRefZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) { DLLIST_NODE *psNode, *psNext; RGX_ZSBUFFER_DATA *psZSBuffer = NULL; @@ -2448,17 +2685,21 @@ static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID) { psZSBuffer = psThisZSBuffer; + + psZSBuffer->ui32RefCount++; break; } } + OSLockRelease(psDevInfo->hLockZSBuffer); return psZSBuffer; } void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32ZSBufferID) + IMG_UINT32 ui32ZSBufferID) { + IMG_BOOL bBackingDone = IMG_TRUE; RGX_ZSBUFFER_DATA *psZSBuffer; RGXFWIF_KCCB_CMD sTACCBCmd; PVRSRV_ERROR eError; @@ -2466,62 +2707,64 @@ void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_ASSERT(psDevInfo); /* scan all deferred allocations */ - 
psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + psZSBuffer = FindAndRefZSBuffer(psDevInfo, ui32ZSBufferID); - if (psZSBuffer) + if (psZSBuffer == NULL) { - IMG_BOOL bBackingDone = IMG_TRUE; + PVR_DPF((PVR_DBG_ERROR, + "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", + ui32ZSBufferID)); - /* Populate ZLS */ - eError = RGXBackingZSBuffer(psZSBuffer); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "Populating ZS-Buffer (ID = 0x%08x) failed (%s)", - ui32ZSBufferID, - PVRSRVGetErrorString(eError))); - bBackingDone = IMG_FALSE; - } + return; + } + + /* Populate ZLS */ + eError = RGXBackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Populating ZS-Buffer (ID = 0x%08x) failed (%s)", + ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + bBackingDone = IMG_FALSE; + } - /* send confirmation */ - sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE; - sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; - sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; + /* send confirmation */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE; + sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; + sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GEOM, - &sTACCBCmd, - PDUMP_FLAGS_NONE); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); - /* Kernel CCB should never fill up, as the FW is processing them right away */ - PVR_ASSERT(eError == PVRSRV_OK); + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); - psZSBuffer->ui32NumReqByFW++; + psZSBuffer->ui32NumReqByFW++; #if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVStatsUpdateZSBufferStats(psDevInfo->psDeviceNode, - 0, 1, psZSBuffer->owner); + PVRSRVStatsUpdateZSBufferStats(psDevInfo->psDeviceNode, + 0, 1, psZSBuffer->owner); #endif - } - else - { - PVR_DPF((PVR_DBG_ERROR, - "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", - ui32ZSBufferID)); - } + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount--; + OSLockRelease(psDevInfo->hLockZSBuffer); + } void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32ZSBufferID) + IMG_UINT32 ui32ZSBufferID) { RGX_ZSBUFFER_DATA *psZSBuffer; RGXFWIF_KCCB_CMD sTACCBCmd; @@ -2530,83 +2773,87 @@ void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_ASSERT(psDevInfo); /* scan all deferred allocations */ - psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + psZSBuffer = FindAndRefZSBuffer(psDevInfo, ui32ZSBufferID); - if (psZSBuffer) + if (psZSBuffer == NULL) { - /* Unpopulate ZLS */ - eError = RGXUnbackingZSBuffer(psZSBuffer); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)", - ui32ZSBufferID, - PVRSRVGetErrorString(eError))); - PVR_ASSERT(IMG_FALSE); - } - - /* send confirmation */ - sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE; - 
sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; - sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GEOM, - &sTACCBCmd, - PDUMP_FLAGS_NONE); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - /* Kernel CCB should never fill up, as the FW is processing them right away */ - PVR_ASSERT(eError == PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", + ui32ZSBufferID)); + return; } - else + + /* Unpopulate ZLS */ + eError = RGXUnbackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", - ui32ZSBufferID)); + "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)", + ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(IMG_FALSE); } + + /* send confirmation */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE; + sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; + sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); + + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount--; + OSLockRelease(psDevInfo->hLockZSBuffer); } static PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - IMG_DEV_VIRTADDR sVDMCallStackAddr, - IMG_UINT32 ui32CallStackDepth, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_RC_TA_DATA *psTAData, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags) + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_DEV_VIRTADDR sVDMCallStackAddr, + IMG_UINT32 ui32CallStackDepth, + IMG_INT32 i32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_TA_DATA *psTAData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGXFWIF_TACTX_STATE *psContextState; IMG_UINT32 uiCoreIdx; PVRSRV_ERROR eError; + /* Allocate device memory for the firmware GPU context suspend state. Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
- */ + */ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TA context suspend state"); eError = DevmemFwAllocate(psDevInfo, - sizeof(RGXFWIF_TACTX_STATE), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwTAContextState", - &psTAData->psContextStateMemDesc); + sizeof(RGXFWIF_TACTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTAContextState", + &psTAData->psContextStateMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2632,26 +2879,27 @@ PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, psContextState->asGeomCore[uiCoreIdx].uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr + (uiCoreIdx * ui32CallStackDepth * sizeof(IMG_UINT64)); } + RGXFwSharedMemCacheOpPtr(psContextState->asGeomCore, FLUSH); DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); eError = FWCommonContextAllocate(psConnection, - psDeviceNode, - REQ_TYPE_TA, - RGXFWIF_DM_GEOM, - NULL, - psAllocatedMemDesc, - ui32AllocatedOffset, - psFWMemContextMemDesc, - psTAData->psContextStateMemDesc, - ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, - ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, - ui32ContextFlags, - i32Priority, - ui32MaxDeadlineMS, - ui64RobustnessAddress, - psInfo, - &psTAData->psServerCommonContext); + psDeviceNode, + REQ_TYPE_TA, + RGXFWIF_DM_GEOM, + NULL, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + psTAData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + i32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &psTAData->psServerCommonContext); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2667,9 +2915,9 @@ PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump the TA context suspend state buffer"); DevmemPDumpLoadMem(psTAData->psContextStateMemDesc, - 0, - sizeof(RGXFWIF_TACTX_STATE), - PDUMP_FLAGS_CONTINUOUS); + 0, + sizeof(RGXFWIF_TACTX_STATE), + PDUMP_FLAGS_CONTINUOUS); #endif psTAData->i32Priority = i32Priority; @@ -2686,18 +2934,18 @@ PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, static PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_RC_3D_DATA *ps3DData, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags) + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_INT32 i32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_3D_DATA *ps3DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; @@ -2707,9 +2955,22 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, /* Allocate device memory for the firmware GPU context suspend state. Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
- */ + */ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware 3D context suspend state"); +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + uiNumISPStoreRegs = RGXFWIF_IPP_RESUME_REG_COUNT; /* default 1 register for IPP_resume */ + + uiNumISPStoreRegs += (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU) * + RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_PER_SPU) * + RGXFWIF_PIPE_COUNT_PER_ISP); + + + if (uiNumISPStoreRegs > (RGXFWIF_ISP_PIPE_COUNT_MAX + RGXFWIF_IPP_RESUME_REG_COUNT)) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } +#else /* defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) */ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) { uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, @@ -2725,17 +2986,18 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); } +#endif /* defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) */ /* Size of the CS buffer */ /* Calculate the size of the 3DCTX ISP state */ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + - uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]); + (uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0])); eError = DevmemFwAllocate(psDevInfo, - ui3DRegISPStateStoreSize, - RGX_FWCOMCTX_ALLOCFLAGS, - "Fw3DContextState", - &ps3DData->psContextStateMemDesc); + ui3DRegISPStateStoreSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "Fw3DContextState", + &ps3DData->psContextStateMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2746,22 +3008,22 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, } eError = FWCommonContextAllocate(psConnection, - psDeviceNode, - REQ_TYPE_3D, - RGXFWIF_DM_3D, - NULL, - psAllocatedMemDesc, - ui32AllocatedOffset, - psFWMemContextMemDesc, - ps3DData->psContextStateMemDesc, - ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, - ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, - ui32ContextFlags, - i32Priority, - ui32MaxDeadlineMS, - ui64RobustnessAddress, - psInfo, - &ps3DData->psServerCommonContext); + psDeviceNode, + REQ_TYPE_3D, + RGXFWIF_DM_3D, + NULL, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + ps3DData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + i32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &ps3DData->psServerCommonContext); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2776,9 +3038,9 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, */ PDUMPCOMMENT(psDeviceNode, "Dump the 3D context suspend state buffer"); DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc, - 0, - sizeof(RGXFWIF_3DCTX_STATE), - PDUMP_FLAGS_CONTINUOUS); + 0, + sizeof(RGXFWIF_3DCTX_STATE), + PDUMP_FLAGS_CONTINUOUS); ps3DData->i32Priority = i32Priority; return PVRSRV_OK; @@ -2796,21 +3058,21 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, * PVRSRVRGXCreateRenderContextKM */ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_INT32 i32Priority, - IMG_DEV_VIRTADDR sVDMCallStackAddr, - IMG_UINT32 ui32CallStackDepth, - IMG_UINT32 ui32FrameworkRegisterSize, - IMG_PBYTE pabyFrameworkRegisters, - IMG_HANDLE hMemCtxPrivData, - IMG_UINT32 ui32StaticRenderContextStateSize, - IMG_PBYTE pStaticRenderContextState, - IMG_UINT32 ui32PackedCCBSizeU8888, - IMG_UINT32 ui32ContextFlags, - IMG_UINT64 ui64RobustnessAddress, - IMG_UINT32 ui32MaxTADeadlineMS, - IMG_UINT32 ui32Max3DDeadlineMS, - RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_INT32 i32Priority, + IMG_DEV_VIRTADDR sVDMCallStackAddr, + IMG_UINT32 ui32CallStackDepth, + IMG_UINT32 ui32FrameworkRegisterSize, + IMG_PBYTE pabyFrameworkRegisters, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticRenderContextStateSize, + IMG_PBYTE pStaticRenderContextState, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxTADeadlineMS, + IMG_UINT32 ui32Max3DDeadlineMS, + RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; @@ -2844,19 +3106,22 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, /* Create the FW render context, this has the TA and 3D FW common contexts embedded within it - */ + */ eError = DevmemFwAllocate(psDevInfo, - sizeof(RGXFWIF_FWRENDERCONTEXT), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwRenderContext", - &psRenderContext->psFWRenderContextMemDesc); + sizeof(RGXFWIF_FWRENDERCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwRenderContext", + &psRenderContext->psFWRenderContextMemDesc); if (eError != PVRSRV_OK) { goto fail_fwrendercontext; } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + } #endif if (ui32FrameworkRegisterSize) @@ -2870,9 +3135,9 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to allocate firmware GPU framework state (%s)", - __func__, - PVRSRVGetErrorString(eError))); + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); goto fail_frameworkcreate; } @@ -2884,48 +3149,47 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to populate the framework buffer (%s)", - __func__, - PVRSRVGetErrorString(eError))); + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); 
goto fail_frameworkcopy; } - sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc; } eError = _Create3DContext(psConnection, - psDeviceNode, - psRenderContext->psFWRenderContextMemDesc, - offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), - psFWMemContextMemDesc, - i32Priority, - ui32Max3DDeadlineMS, - ui64RobustnessAddress, - &sInfo, - &psRenderContext->s3DData, - U32toU8_Unpack3(ui32PackedCCBSizeU8888), - U32toU8_Unpack4(ui32PackedCCBSizeU8888), - ui32ContextFlags); + psDeviceNode, + psRenderContext->psFWRenderContextMemDesc, + offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), + psFWMemContextMemDesc, + i32Priority, + ui32Max3DDeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psRenderContext->s3DData, + U32toU8_Unpack3(ui32PackedCCBSizeU8888), + U32toU8_Unpack4(ui32PackedCCBSizeU8888), + ui32ContextFlags); if (eError != PVRSRV_OK) { goto fail_3dcontext; } eError = _CreateTAContext(psConnection, - psDeviceNode, - psRenderContext->psFWRenderContextMemDesc, - offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), - psFWMemContextMemDesc, - sVDMCallStackAddr, - ui32CallStackDepth, - i32Priority, - ui32MaxTADeadlineMS, - ui64RobustnessAddress, - &sInfo, - &psRenderContext->sTAData, - U32toU8_Unpack1(ui32PackedCCBSizeU8888), - U32toU8_Unpack2(ui32PackedCCBSizeU8888), - ui32ContextFlags); + psDeviceNode, + psRenderContext->psFWRenderContextMemDesc, + offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), + psFWMemContextMemDesc, + sVDMCallStackAddr, + ui32CallStackDepth, + i32Priority, + ui32MaxTADeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psRenderContext->sTAData, + U32toU8_Unpack1(ui32PackedCCBSizeU8888), + U32toU8_Unpack2(ui32PackedCCBSizeU8888), + ui32ContextFlags); if (eError != PVRSRV_OK) { goto fail_tacontext; @@ -2940,18 +3204,22 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, /* Copy the static render context data */ OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize); +#if defined(SUPPORT_TRP) + psFWRenderContext->eTRPGeomCoreAffinity = RGXFWIF_DM_MAX; +#endif DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS); + RGXFwSharedMemCacheOpPtr(psFWRenderContext, FLUSH); DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); #if defined(SUPPORT_BUFFER_SYNC) psRenderContext->psBufferSyncContext = - pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, - "rogue-ta3d"); + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-ta3d"); if (IS_ERR(psRenderContext->psBufferSyncContext)) { PVR_DPF((PVR_DBG_ERROR, - "%s: failed to create buffer_sync context (err=%ld)", - __func__, PTR_ERR(psRenderContext->psBufferSyncContext))); + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(psRenderContext->psBufferSyncContext))); eError = PVRSRV_ERROR_INVALID_PARAMS; goto fail_buffer_sync_context_create; @@ -2978,11 +3246,29 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, fail_buffer_sync_context_create: #endif fail_acquire_cpu_mapping: - _DestroyTAContext(&psRenderContext->sTAData, - psDeviceNode); + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + PVRSRV_ERROR eError2 = _DestroyTAContext(&psRenderContext->sTAData, + psDeviceNode); + if (!PVRSRVIsRetryError(eError2)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } + END_LOOP_UNTIL_TIMEOUT_US(); fail_tacontext: - _Destroy3DContext(&psRenderContext->s3DData, - 
psRenderContext->psDeviceNode); + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + PVRSRV_ERROR eError2 = _Destroy3DContext(&psRenderContext->s3DData, + psRenderContext->psDeviceNode); + if (!PVRSRVIsRetryError(eError2)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } + END_LOOP_UNTIL_TIMEOUT_US(); fail_3dcontext: fail_frameworkcopy: if (psRenderContext->psFWFrameworkMemDesc) @@ -3007,10 +3293,6 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; - IMG_UINT32 ui32WorkEstCCBSubmitted; -#endif /* remove node from list before calling destroy - as destroy, if successful * will invalidate the node @@ -3035,7 +3317,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0) { eError = _DestroyTAContext(&psRenderContext->sTAData, - psRenderContext->psDeviceNode); + psRenderContext->psDeviceNode); if (eError == PVRSRV_OK) { psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE; @@ -3050,7 +3332,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0) { eError = _Destroy3DContext(&psRenderContext->s3DData, - psRenderContext->psDeviceNode); + psRenderContext->psDeviceNode); if (eError == PVRSRV_OK) { psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE; @@ -3062,39 +3344,46 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, - (void **)&psFWRenderContext); - if (eError != PVRSRV_OK) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware render context (%s)", - __func__, - PVRSRVGetErrorString(eError))); - goto e0; - } + RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; - ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted; + eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, + (void **)&psFWRenderContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto e0; + } + RGXFwSharedMemCacheOpValue(psFWRenderContext->ui32WorkEstCCBSubmitted, INVALIDATE); - DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); + ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted; - /* Check if all of the workload estimation CCB commands for this workload are read */ - if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived) - { + DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); - PVR_DPF((PVR_DBG_WARNING, - "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", - __func__, ui32WorkEstCCBSubmitted, - psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)); + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived) + { + + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + 
psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)); - eError = PVRSRV_ERROR_RETRY; - goto e0; + eError = PVRSRV_ERROR_RETRY; + goto e0; + } } #endif /* Only if both TA and 3D contexts have been cleaned up can we free the shared resources - */ + */ if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE)) { if (psRenderContext->psFWFrameworkMemDesc) @@ -3112,7 +3401,10 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + } #endif OSLockDestroy(psRenderContext->hLock); @@ -3130,7 +3422,6 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender } - #if (ENABLE_TA3D_UFO_DUMP == 1) static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount, IMG_UINT32 ui32ClientTAUpdateCount, @@ -3265,101 +3556,102 @@ static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount, * PVRSRVRGXKickTA3DKM */ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, - IMG_UINT32 ui32ClientTAFenceCount, - SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, - IMG_UINT32 *paui32ClientTAFenceSyncOffset, - IMG_UINT32 *paui32ClientTAFenceValue, - IMG_UINT32 ui32ClientTAUpdateCount, - SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock, - IMG_UINT32 *paui32ClientTAUpdateSyncOffset, - IMG_UINT32 *paui32ClientTAUpdateValue, - IMG_UINT32 ui32Client3DUpdateCount, - SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, - IMG_UINT32 *paui32Client3DUpdateSyncOffset, - IMG_UINT32 *paui32Client3DUpdateValue, - SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock, - IMG_UINT32 ui32PRFenceSyncOffset, - IMG_UINT32 ui32PRFenceValue, - PVRSRV_FENCE iCheckTAFence, - PVRSRV_TIMELINE iUpdateTATimeline, - PVRSRV_FENCE *piUpdateTAFence, - IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], - PVRSRV_FENCE iCheck3DFence, - PVRSRV_TIMELINE iUpdate3DTimeline, - PVRSRV_FENCE *piUpdate3DFence, - IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], - IMG_UINT32 ui32TACmdSize, - IMG_PBYTE pui8TADMCmd, - IMG_UINT32 ui323DPRCmdSize, - IMG_PBYTE pui83DPRDMCmd, - IMG_UINT32 ui323DCmdSize, - IMG_PBYTE pui83DDMCmd, - IMG_UINT32 ui32ExtJobRef, - IMG_BOOL bKickTA, - IMG_BOOL bKickPR, - IMG_BOOL bKick3D, - IMG_BOOL bAbort, - IMG_UINT32 ui32PDumpFlags, - RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, - RGX_ZSBUFFER_DATA *psZSBuffer, - RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, - IMG_UINT32 ui32SyncPMRCount, - IMG_UINT32 *paui32SyncPMRFlags, - PMR **ppsSyncPMRs, - IMG_UINT32 ui32RenderTargetSize, - IMG_UINT32 ui32NumberOfDrawCalls, - IMG_UINT32 ui32NumberOfIndices, - IMG_UINT32 ui32NumberOfMRTs, - IMG_UINT64 ui64DeadlineInus) + IMG_UINT32 ui32ClientTAFenceCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, + IMG_UINT32 *paui32ClientTAFenceSyncOffset, + IMG_UINT32 *paui32ClientTAFenceValue, + IMG_UINT32 ui32ClientTAUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock, + IMG_UINT32 *paui32ClientTAUpdateSyncOffset, + IMG_UINT32 *paui32ClientTAUpdateValue, + IMG_UINT32 ui32Client3DUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, + IMG_UINT32 *paui32Client3DUpdateSyncOffset, + IMG_UINT32 *paui32Client3DUpdateValue, + SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock, + IMG_UINT32 ui32PRFenceSyncOffset, + IMG_UINT32 
ui32PRFenceValue, + PVRSRV_FENCE iCheckTAFence, + PVRSRV_TIMELINE iUpdateTATimeline, + PVRSRV_FENCE *piUpdateTAFence, + IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheck3DFence, + PVRSRV_TIMELINE iUpdate3DTimeline, + PVRSRV_FENCE *piUpdate3DFence, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus) { /* per-context helper structures */ RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData; - IMG_UINT32 ui32TACmdCount=0; - IMG_UINT32 ui323DCmdCount=0; - IMG_UINT32 ui32TACmdOffset=0; - IMG_UINT32 ui323DCmdOffset=0; - RGXFWIF_UFO sPRUFO; - IMG_UINT32 i; - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_ERROR eError2 = PVRSRV_OK; + IMG_UINT32 ui32TACmdCount=0; + IMG_UINT32 ui323DCmdCount=0; + __maybe_unused IMG_UINT32 ui32TACmdOffset=0; + __maybe_unused IMG_UINT32 ui323DCmdOffset=0; + + RGXFWIF_UFO sPRUFO; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2 = PVRSRV_OK; - PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); - IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); - IMG_BOOL bCCBStateOpen = IMG_FALSE; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_BOOL bCCBStateOpen = IMG_FALSE; - IMG_UINT32 ui32ClientPRUpdateCount = 0; - PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; - IMG_UINT32 *paui32ClientPRUpdateValue = NULL; + IMG_UINT32 ui32ClientPRUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; + IMG_UINT32 *paui32ClientPRUpdateValue = NULL; PRGXFWIF_TIMESTAMP_ADDR pPreAddr; PRGXFWIF_TIMESTAMP_ADDR pPostAddr; PRGXFWIF_UFO_ADDR pRMWUFOAddr; - PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; - PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; + PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; - IMG_UINT64 uiCheckTAFenceUID = 0; - IMG_UINT64 uiCheck3DFenceUID = 0; - IMG_UINT64 uiUpdateTAFenceUID = 0; - IMG_UINT64 uiUpdate3DFenceUID = 0; + IMG_UINT64 uiCheckTAFenceUID = 0; + IMG_UINT64 uiCheck3DFenceUID = 0; + IMG_UINT64 uiUpdateTAFenceUID = 0; + IMG_UINT64 uiUpdate3DFenceUID = 0; IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; - RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; - RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; + 
RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; - PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; - PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; @@ -3387,10 +3679,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update syncs for 3D */ IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE; -#if defined(SUPPORT_VALIDATION) - PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; - PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL; -#endif #if defined(SUPPORT_BUFFER_SYNC) PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; @@ -3459,6 +3747,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; + RGXFWIF_DEV_VIRTADDR sNullFWAddr = {0}; if (psKMHWRTDataSet == NULL) { @@ -3470,48 +3759,53 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, */ if (psGeomCmdShared != NULL) { - psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; - - if (psZSBuffer != NULL) - { - psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; - } - if (psMSAAScratchBuffer != NULL) + if (ui32TACmdSize < sizeof(*psGeomCmdShared)) { - psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid TACmd size", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; } + + psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = + psZSBuffer ? psZSBuffer->sZSBufferFWDevVAddr : sNullFWAddr; + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = + psMSAAScratchBuffer ? psMSAAScratchBuffer->sZSBufferFWDevVAddr : sNullFWAddr; } /* Write FW address for 3D CMD */ if (ps3DCmdShared != NULL) { - ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; - - if (psZSBuffer != NULL) - { - ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; - } - if (psMSAAScratchBuffer != NULL) + if (ui323DCmdSize < sizeof(*ps3DCmdShared)) { - ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid 3DCmd size", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; } + + ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = + psZSBuffer ? psZSBuffer->sZSBufferFWDevVAddr : sNullFWAddr; + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = + psMSAAScratchBuffer ? 
psMSAAScratchBuffer->sZSBufferFWDevVAddr : sNullFWAddr; } /* Write FW address for PR3D CMD */ if (psPR3DCmdShared != NULL) { - psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; - - if (psZSBuffer != NULL) - { - psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; - } - if (psMSAAScratchBuffer != NULL) + if (ui323DPRCmdSize < sizeof(*psPR3DCmdShared)) { - psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid 3DPRCmd size", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; } + + psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = + psZSBuffer ? psZSBuffer->sZSBufferFWDevVAddr : sNullFWAddr; + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = + psMSAAScratchBuffer ? psMSAAScratchBuffer->sZSBufferFWDevVAddr : sNullFWAddr; } } @@ -3531,7 +3825,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount)); - RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice, &pPreAddr, &pPostAddr, @@ -3742,50 +4035,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, UPDATE_FENCE_CHECKPOINT_COUNT : 0; } -#if defined(SUPPORT_VALIDATION) - /* Check if TestingSLR is adding an extra sync checkpoint to the - * 3D fence check (which we won't signal) - */ - if ((psDevInfo->ui32TestSLRInterval > 0) && - (--psDevInfo->ui32TestSLRCount == 0)) - { - bTestSLRAdd3DCheck = IMG_TRUE; - psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; - } - - if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) - { - if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " - "to 3D fence but no update 3D timeline provided", __func__)); - } - else - { - SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, - iUpdate3DTimeline, - hTestSLRTmpFence, - "TestSLRCheck", - &psDummySyncCheckpoint); - PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " - "checkpoints (psDummySyncCheckpoint=<%p>)", - __func__, (void*)psDummySyncCheckpoint)); - SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, - 1, - &psDummySyncCheckpoint); - if (!pauiClient3DFenceUFOAddress) - { - pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; - } - - if (ui32Client3DFenceCount == 0) - { - b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; - } - ui323DFenceCount++; - } - } -#endif /* defined(SUPPORT_VALIDATION) */ if (bKickTA) { @@ -3795,7 +4044,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, RGXCmdHelperInitCmdCCB_CommandSize( psDevInfo, - 0, + 0, ui32TAFenceCount, ui32TAUpdateCount, ui32TACmdSize, @@ -3861,7 +4110,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, bKickTA ? 0 : ui323DFenceCount, ui323DUpdateCount, ui323DCmdSize, - (bKickTA ? NULL : &pPreAddr), + (bKickTA ? 
NULL : & pPreAddr), &pPostAddr, &pRMWUFOAddr, &pas3DCmdHelperData[ui323DCmdCount++] @@ -3904,9 +4153,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", __func__, ui32ClientTAFenceCount)); eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence, - ui32ClientTAFenceCount, - apsClientTAFenceSyncPrimBlock, - paui32ClientTAFenceSyncOffset); + ui32ClientTAFenceCount, + apsClientTAFenceSyncPrimBlock, + paui32ClientTAFenceSyncOffset); if (unlikely(eError != PVRSRV_OK)) { goto err_populate_sync_addr_list_ta_fence; @@ -3925,9 +4174,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", __func__, ui32ClientTAUpdateCount)); eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate, - ui32ClientTAUpdateCount, - apsClientTAUpdateSyncPrimBlock, - paui32ClientTAUpdateSyncOffset); + ui32ClientTAUpdateCount, + apsClientTAUpdateSyncPrimBlock, + paui32ClientTAUpdateSyncOffset); if (unlikely(eError != PVRSRV_OK)) { goto err_populate_sync_addr_list_ta_update; @@ -3945,9 +4194,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", __func__, ui32Client3DFenceCount)); eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence, - ui32Client3DFenceCount, - NULL, - NULL); + ui32Client3DFenceCount, + NULL, + NULL); if (unlikely(eError != PVRSRV_OK)) { goto err_populate_sync_addr_list_3d_fence; @@ -3964,9 +4213,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", __func__, ui32Client3DUpdateCount)); eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate, - ui32Client3DUpdateCount, - apsClient3DUpdateSyncPrimBlock, - paui32Client3DUpdateSyncOffset); + ui32Client3DUpdateCount, + apsClient3DUpdateSyncPrimBlock, + paui32Client3DUpdateSyncOffset); if (unlikely(eError != PVRSRV_OK)) { goto err_populate_sync_addr_list_3d_update; @@ -4012,8 +4261,13 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)&psRenderContext->sSyncAddrListTAFence , (void*)pauiClientTAFenceUFOAddress)); SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, - ui32BufferFenceSyncCheckpointCount, - apsBufferFenceSyncCheckpoints); + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + apsBufferFenceSyncCheckpoints = NULL; + } if (!pauiClientTAFenceUFOAddress) { pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; @@ -4025,7 +4279,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount; } else -#endif +#endif /* !defined(SUPPORT_STRIP_RENDERING) */ /* Append buffer sync fences to 3D fences */ if (ui32BufferFenceSyncCheckpointCount > 0) { @@ -4040,6 +4294,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, ui32BufferFenceSyncCheckpointCount, apsBufferFenceSyncCheckpoints); + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + apsBufferFenceSyncCheckpoints = NULL; + } if 
(!pauiClient3DFenceUFOAddress) { pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; @@ -4066,8 +4325,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)pauiClient3DUpdateUFOAddress)); /* Append buffer sync update to 3D updates */ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, - 1, - &psBufferUpdateSyncCheckpoint); + 1, + &psBufferUpdateSyncCheckpoint); if (!pauiClient3DUpdateUFOAddress) { pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; @@ -4086,8 +4345,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)pauiClientPRUpdateUFOAddress)); /* Attach update to the 3D (used for PR) Updates */ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, - 1, - &psBufferUpdateSyncCheckpoint); + 1, + &psBufferUpdateSyncCheckpoint); if (!pauiClientPRUpdateUFOAddress) { pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; @@ -4107,6 +4366,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); + eError = PVRSRV_ERROR_INVALID_PARAMS; goto err_no_buffer_sync_invalid_params; #endif /* defined(SUPPORT_BUFFER_SYNC) */ @@ -4129,9 +4389,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) { - PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; - CHKPT_DBG((PVR_DBG_ERROR, "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", __func__, iCheckTAFence, iUpdateTATimeline)); @@ -4177,11 +4434,17 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue)); - /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ - pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); - CHKPT_DBG((PVR_DBG_ERROR, - "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", - __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); +#if defined(TA3D_CHECKPOINT_DEBUG) + { + PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; + + /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ + pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", + __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); + } +#endif } /* Append the sync prim update for the TA timeline (if required) */ @@ -4193,11 +4456,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue; eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue, - &psRenderContext->sSyncAddrListTAUpdate, - (bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate, - psTAFenceTimelineUpdateSync, - &sTASyncData, - bKick3D); + &psRenderContext->sSyncAddrListTAUpdate, + (bKick3D) ? 
NULL : &psRenderContext->sSyncAddrList3DUpdate, + psTAFenceTimelineUpdateSync, + &sTASyncData, + bKick3D); if (unlikely(eError != PVRSRV_OK)) { goto fail_alloc_update_values_mem_TA; @@ -4246,11 +4509,17 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); - /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ - pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); - CHKPT_DBG((PVR_DBG_ERROR, - "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", - __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); +#if defined(TA3D_CHECKPOINT_DEBUG) + { + PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; + + /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ + pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", + __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); + } +#endif } /* Append the sync prim update for the 3D timeline (if required) */ @@ -4262,11 +4531,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue; eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue, - &psRenderContext->sSyncAddrList3DUpdate, - &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? */ - ps3DFenceTimelineUpdateSync, - &s3DSyncData, - bKick3D); + &psRenderContext->sSyncAddrList3DUpdate, + &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? */ + ps3DFenceTimelineUpdateSync, + &s3DSyncData, + bKick3D); if (unlikely(eError != PVRSRV_OK)) { goto fail_alloc_update_values_mem_3D; @@ -4312,8 +4581,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32FenceTASyncCheckpointCount, (void*)apsFenceTASyncCheckpoints)); SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, - ui32FenceTASyncCheckpointCount, - apsFenceTASyncCheckpoints); + ui32FenceTASyncCheckpointCount, + apsFenceTASyncCheckpoints); if (!pauiClientTAFenceUFOAddress) { pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; @@ -4340,8 +4609,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, __func__, (void*)psUpdateTASyncCheckpoint, SyncCheckpointGetId(psUpdateTASyncCheckpoint))); SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate, - 1, - &psUpdateTASyncCheckpoint); + 1, + &psUpdateTASyncCheckpoint); if (!pauiClientTAUpdateUFOAddress) { pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; @@ -4357,8 +4626,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, __func__, (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, - 1, - &psUpdate3DSyncCheckpoint); + 1, + &psUpdate3DSyncCheckpoint); if (!pauiClientPRUpdateUFOAddress) { pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; @@ -4378,8 +4647,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, "%s: Append %d sync checkpoints to 3D Fence...", __func__, ui32Fence3DSyncCheckpointCount)); SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, - ui32Fence3DSyncCheckpointCount, 
- apsFence3DSyncCheckpoints); + ui32Fence3DSyncCheckpointCount, + apsFence3DSyncCheckpoints); if (!pauiClient3DFenceUFOAddress) { pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; @@ -4406,8 +4675,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, __func__, (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, - 1, - &psUpdate3DSyncCheckpoint); + 1, + &psUpdate3DSyncCheckpoint); if (!pauiClient3DUpdateUFOAddress) { pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; @@ -4532,7 +4801,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, #if (ENABLE_TA3D_UFO_DUMP == 1) DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), - ui32Client3DUpdateCount, + ui32Client3DUpdateCount, pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, pauiClient3DFenceUFOAddress, NULL, @@ -4572,7 +4841,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - if (bKickTA || bKick3D || bAbort) + if ((!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) && (bKickTA || bKick3D || bAbort)) { sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; @@ -4587,49 +4856,56 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Prepare workload estimation */ - WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, - &psRenderContext->sWorkEstData, - &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, - RGXFWIF_CCB_CMD_TYPE_GEOM, - &sWorkloadCharacteristics, - ui64DeadlineInus, - &sWorkloadKickDataTA); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, + RGXFWIF_CCB_CMD_TYPE_GEOM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTA); + } #endif /* Init the TA command helper */ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount)); - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), - ui32ClientTAFenceCount, - pauiClientTAFenceUFOAddress, - paui32ClientTAFenceValue, - ui32ClientTAUpdateCount, - pauiClientTAUpdateUFOAddress, - paui32ClientTAUpdateValue, - ui32TACmdSize, - pui8TADMCmd, - &pPreAddr, - (bKick3D ? NULL : &pPostAddr), - (bKick3D ? NULL : &pRMWUFOAddr), - RGXFWIF_CCB_CMD_TYPE_GEOM, - ui32ExtJobRef, - ui32IntJobRef, - ui32PDumpFlags, + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(psTAData->psServerCommonContext), + ui32ClientTAFenceCount, + pauiClientTAFenceUFOAddress, + paui32ClientTAFenceValue, + ui32ClientTAUpdateCount, + pauiClientTAUpdateUFOAddress, + paui32ClientTAUpdateValue, + ui32TACmdSize, + pui8TADMCmd, + &pPreAddr, + (bKick3D ? NULL : & pPostAddr), + (bKick3D ? 
NULL : & pRMWUFOAddr), + RGXFWIF_CCB_CMD_TYPE_GEOM, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, #if defined(SUPPORT_WORKLOAD_ESTIMATION) - &sWorkloadKickDataTA, + &sWorkloadKickDataTA, #else - NULL, + NULL, #endif - "TA", - bCCBStateOpen, - pasTACmdHelperData); + "TA", + bCCBStateOpen, + pasTACmdHelperData); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following is used to determine the offset of the command header containing - the workload estimation data so that can be accessed when the KCCB is read */ - ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); + } #endif eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); @@ -4662,26 +4938,27 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", __func__, ui32Client3DFenceCount)); - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), - ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), - pauiClient3DFenceUFOAddress, - NULL, - 0, - NULL, - NULL, - sizeof(sPRUFO), - (IMG_UINT8*) &sPRUFO, - NULL, - NULL, - NULL, - RGXFWIF_CCB_CMD_TYPE_FENCE_PR, - ui32ExtJobRef, - ui32IntJobRef, - ui32PDumpFlags, - NULL, - "3D-PR-Fence", - bCCBStateOpen, - &pas3DCmdHelperData[ui323DCmdCount++]); + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + pauiClient3DFenceUFOAddress, + NULL, + 0, + NULL, + NULL, + sizeof(sPRUFO), + (IMG_UINT8*) &sPRUFO, + NULL, + NULL, + NULL, + RGXFWIF_CCB_CMD_TYPE_FENCE_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR-Fence", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); /* Init the 3D PR command helper */ /* @@ -4689,7 +4966,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, if no 3D is present. This is so the timeline update cannot happen out of order with any other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB). This out of order timeline sync prim update could happen if we attach it to the TA update. - */ + */ if (ui32ClientPRUpdateCount) { CHKPT_DBG((PVR_DBG_ERROR, @@ -4708,26 +4985,27 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", __func__, ui32ClientPRUpdateCount)); - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), - 0, - NULL, - NULL, - ui32ClientPRUpdateCount, - pauiClientPRUpdateUFOAddress, - paui32ClientPRUpdateValue, - pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead - pui83DPRDMCmd ? 
pui83DPRDMCmd : pui83DDMCmd, - NULL, - NULL, - NULL, - RGXFWIF_CCB_CMD_TYPE_3D_PR, - ui32ExtJobRef, - ui32IntJobRef, - ui32PDumpFlags, - NULL, - "3D-PR", - bCCBStateOpen, - &pas3DCmdHelperData[ui323DCmdCount++]); + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + 0, + NULL, + NULL, + ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress, + paui32ClientPRUpdateValue, + pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead + pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd, + NULL, + NULL, + NULL, + RGXFWIF_CCB_CMD_TYPE_3D_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); } } @@ -4737,47 +5015,54 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Prepare workload estimation */ - WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, - &psRenderContext->sWorkEstData, - &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, - e3DCmdType, - &sWorkloadCharacteristics, - ui64DeadlineInus, - &sWorkloadKickData3D); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, + e3DCmdType, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickData3D); + } #endif /* Init the 3D command helper */ - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), - bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ - bKickTA ? NULL : pauiClient3DFenceUFOAddress, - NULL, - ui32Client3DUpdateCount, - pauiClient3DUpdateUFOAddress, - paui32Client3DUpdateValue, - ui323DCmdSize, - pui83DDMCmd, - (bKickTA ? NULL : &pPreAddr), - &pPostAddr, - &pRMWUFOAddr, - e3DCmdType, - ui32ExtJobRef, - ui32IntJobRef, - ui32PDumpFlags, + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ + bKickTA ? NULL : pauiClient3DFenceUFOAddress, + NULL, + ui32Client3DUpdateCount, + pauiClient3DUpdateUFOAddress, + paui32Client3DUpdateValue, + ui323DCmdSize, + pui83DDMCmd, + (bKickTA ? 
NULL : & pPreAddr), + &pPostAddr, + &pRMWUFOAddr, + e3DCmdType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, #if defined(SUPPORT_WORKLOAD_ESTIMATION) - &sWorkloadKickData3D, + &sWorkloadKickData3D, #else - NULL, + NULL, #endif - "3D", - bCCBStateOpen, - &pas3DCmdHelperData[ui323DCmdCount++]); + "3D", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following are used to determine the offset of the command header containing the workload estimation - data so that can be accessed when the KCCB is read */ - ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); - ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* The following are used to determine the offset of the command header containing the workload estimation + data so that can be accessed when the KCCB is read */ + ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); + ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); + } #endif } @@ -4804,6 +5089,40 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, } } + if (ui32TACmdCount) + { + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, + &sTACmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_GEOM, + bKickTA, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taattachcleanupctls; + } + } + + if (ui323DCmdCount) + { + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, + &s3DCmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_3D, + bKick3D, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dattachcleanupctls; + } + } + /* We should acquire the space in the kernel CCB here as after this point we release the commands which will take operations on server syncs @@ -4813,26 +5132,44 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, /* Everything is ready to go now, release the commands */ + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + if (ui32TACmdCount) { ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); RGXCmdHelperReleaseCmdCCB(ui32TACmdCount, - pasTACmdHelperData, - "TA", - FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); + pasTACmdHelperData, + "TA", + FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); - - /* This checks if the command would wrap around at the end of 
the CCB and therefore would start at an - offset of 0 rather than the current command offset */ - if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) - { - ui32TACommandOffset = ui32TACmdOffset; - } - else + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - ui32TACommandOffset = 0; + ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and therefore would start at an + offset of 0 rather than the current command offset */ + if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) + { + ui32TACommandOffset = ui32TACmdOffset; + } + else + { + ui32TACommandOffset = 0; + } } #endif } @@ -4841,20 +5178,23 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, - pas3DCmdHelperData, - "3D", - FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); + pas3DCmdHelperData, + "3D", + FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); - - if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) - { - ui323DCommandOffset = ui323DCmdOffset; - } - else + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - ui323DCommandOffset = 0; + ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + + if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) + { + ui323DCommandOffset = ui323DCmdOffset; + } + else + { + ui323DCommandOffset = 0; + } } #endif } @@ -4869,25 +5209,13 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); - /* Add the Workload data into the KCCB kick */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ - sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; -#endif - - eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, - &sTACmdKickData.ui32NumCleanupCtl, - RGXFWIF_DM_GEOM, - bKickTA, - psKMHWRTDataSet, - psZSBuffer, - psMSAAScratchBuffer); - if (unlikely(eError != PVRSRV_OK)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", - __func__, eError)); - goto fail_taattachcleanupctls; + /* Add the Workload data into the KCCB kick */ + sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; } +#endif if (psGeomCmdShared) { @@ -4920,9 +5248,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + eError2 = RGXScheduleCommandWithoutPowerLock(psRenderContext->psDeviceNode->pvDevice, RGXFWIF_DM_GEOM, &sTAKCCBCmd, 
ui32PDumpFlags); @@ -4931,7 +5259,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); } if (eError2 != PVRSRV_OK) @@ -4939,7 +5267,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2)); /* Mark the error and bail out */ eError = eError2; - goto fail_taacquirecmd; + goto fail_tasubmitcmd; } PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode, @@ -4960,23 +5288,12 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, /* Add the Workload data into the KCCB kick */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ - s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; -#endif - - eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, - &s3DCmdKickData.ui32NumCleanupCtl, - RGXFWIF_DM_3D, - bKick3D, - psKMHWRTDataSet, - psZSBuffer, - psMSAAScratchBuffer); - if (unlikely(eError != PVRSRV_OK)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", - __func__, eError)); - goto fail_3dattachcleanupctls; + /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; } +#endif if (ps3DCmdShared) { @@ -5016,18 +5333,18 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; } - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, - RGXFWIF_DM_3D, - &s3DKCCBCmd, - ui32PDumpFlags); + eError2 = RGXScheduleCommandWithoutPowerLock(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_3D, + &s3DKCCBCmd, + ui32PDumpFlags); if (eError2 != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError2 != PVRSRV_OK) { @@ -5036,7 +5353,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { eError = eError2; } - goto fail_3dacquirecmd; + goto fail_3dsubmitcmd; } PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode, @@ -5053,9 +5370,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); - goto fail_3dacquirecmd; + goto fail_3dsubmitcmd; } + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + #if defined(NO_HARDWARE) /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ if (psUpdateTASyncCheckpoint) @@ -5162,16 +5481,14 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, OSFreeMem(s3DSyncData.paui32ClientUpdateValue); } -#if defined(SUPPORT_VALIDATION) - if (bTestSLRAdd3DCheck) - { - SyncCheckpointFree(psDummySyncCheckpoint); - } -#endif OSLockRelease(psRenderContext->hLock); return PVRSRV_OK; +fail_3dsubmitcmd: +fail_tasubmitcmd: + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); +fail_acquirepowerlock: fail_3dattachcleanupctls: 
fail_taattachcleanupctls: fail_3dacquirecmd: @@ -5217,8 +5534,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, * NOTE: 3D fence is always submitted, either via 3D or TA(PR). */ #if defined(SUPPORT_BUFFER_SYNC) - SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount, - apsBufferFenceSyncCheckpoints); + if (apsBufferFenceSyncCheckpoints) + { + SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + } #endif SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); fail_resolve_input_3d_fence: @@ -5244,12 +5564,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { OSFreeMem(s3DSyncData.paui32ClientUpdateValue); } -#if defined(SUPPORT_VALIDATION) - if (bTestSLRAdd3DCheck) - { - SyncCheckpointFree(psDummySyncCheckpoint); - } -#endif #if defined(SUPPORT_BUFFER_SYNC) if (psBufferSyncData) { @@ -5265,10 +5579,59 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, return eError; } +PVRSRV_ERROR PVRSRVRGXSendZSStoreDisableKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_BOOL bDisableDepthStore, + IMG_BOOL bDisableStencilStore, + IMG_UINT32 ui32ExtJobRefToDisableZSStore) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sDisableZSStoreCmd = { 0 }; + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + sDisableZSStoreCmd.eCmdType = RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE; + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.bDisableZStore = bDisableDepthStore; + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.bDisableSStore = bDisableStencilStore; + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.ui32ExtJobRefToDisableZSStore = ui32ExtJobRefToDisableZSStore; + + if (psRenderContext->ui32CleanupStatus & (RC_CLEANUP_TA_COMPLETE | RC_CLEANUP_3D_COMPLETE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: submit disable depth and stencil store command to render context that has been cleaned up (%u)", + __func__, PVRSRV_ERROR_INVALID_CONTEXT)); + return PVRSRV_ERROR_INVALID_CONTEXT; + } + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_3D, + &sDisableZSStoreCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to submit disable depth and stencil store command (%u)", + __func__, + eError)); + } + + return eError; +} + PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - RGX_SERVER_RENDER_CONTEXT *psRenderContext, - IMG_INT32 i32Priority) + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_INT32 i32Priority) { PVRSRV_ERROR eError; @@ -5279,10 +5642,10 @@ PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, if (psRenderContext->sTAData.i32Priority != i32Priority) { eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext, - psConnection, - psRenderContext->psDeviceNode->pvDevice, - i32Priority, - RGXFWIF_DM_GEOM); + psConnection, + psRenderContext->psDeviceNode->pvDevice, + i32Priority, + RGXFWIF_DM_GEOM); if 
(eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -5296,10 +5659,10 @@ PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, if (psRenderContext->s3DData.i32Priority != i32Priority) { eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext, - psConnection, - psRenderContext->psDeviceNode->pvDevice, - i32Priority, - RGXFWIF_DM_3D); + psConnection, + psRenderContext->psDeviceNode->pvDevice, + i32Priority, + RGXFWIF_DM_3D); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -5356,7 +5719,6 @@ PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRe return eError; } - void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, @@ -5367,7 +5729,7 @@ void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) { RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = - IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); + IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); @@ -5387,7 +5749,7 @@ IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) { RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = - IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); + IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext) { if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.h index d6da644cabac..ff8c33d90893 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxta3d.h @@ -52,7 +52,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgx_fwif_resetframework.h" #include "sync_server.h" #include "connection_server.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; @@ -118,6 +118,7 @@ typedef struct _RGX_HWRTDATA_COMMON_COOKIE_ { DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + DEVMEMINT_RESERVATION *psPMMListsReservation; IMG_UINT32 ui32RefCount; } RGX_HWRTDATA_COMMON_COOKIE; @@ -148,6 +149,8 @@ struct _RGX_FREELIST_ { PMR *psFreeListPMR; IMG_DEVMEM_OFFSET_T uiFreeListPMROffset; + DEVMEMINT_RESERVATION* psFreeListReservation; + /* Freelist config */ IMG_UINT32 ui32MaxFLPages; IMG_UINT32 ui32InitFLPages; @@ -200,11 +203,10 @@ typedef struct { DEVMEMINT_RESERVATION *psReservation; PMR *psPMR; - DEVMEMINT_MAPPING *psMapping; - PVRSRV_MEMALLOCFLAGS_T uiMapFlags; IMG_UINT32 ui32ZSBufferID; IMG_UINT32 ui32RefCount; IMG_BOOL bOnDemand; + IMG_BOOL bIsBacked; IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */ IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */ @@ -223,35 +225,65 @@ IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); /* Create set of HWRTData(s) */ -PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], - IMG_DEV_VIRTADDR psPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], - RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], - IMG_UINT32 ui32ScreenPixelMax, - IMG_UINT64 ui64MultiSampleCtl, - IMG_UINT64 ui64FlippedMultiSampleCtl, - IMG_UINT32 ui32TPCStride, - IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], - IMG_UINT32 ui32TPCSize, - IMG_UINT32 ui32TEScreen, - IMG_UINT32 ui32TEAA, - IMG_UINT32 ui32TEMTILE1, - IMG_UINT32 ui32TEMTILE2, - IMG_UINT32 ui32MTileStride, - IMG_UINT32 ui32ISPMergeLowerX, - IMG_UINT32 ui32ISPMergeLowerY, - IMG_UINT32 ui32ISPMergeUpperX, - IMG_UINT32 ui32ISPMergeUpperY, - IMG_UINT32 ui32ISPMergeScaleX, - IMG_UINT32 ui32ISPMergeScaleY, - IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], - IMG_DEV_VIRTADDR sRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], - IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], - IMG_UINT32 uiRgnHeaderSize, - IMG_UINT32 ui32ISPMtileSize, - IMG_UINT16 ui16MaxRTs, - RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_DEV_VIRTADDR psPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64MultiSampleCtl, + IMG_UINT64 ui64FlippedMultiSampleCtl, + IMG_UINT32 ui32TPCStride, + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32MTileStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT32 uiRgnHeaderSize, + IMG_UINT32 ui32ISPMtileSize, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET 
*pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); + +PVRSRV_ERROR RGXCreateHWRTDataSet2(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], + DEVMEMINT_RESERVATION *psPMMListsReservation, + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64MultiSampleCtl, + IMG_UINT64 ui64FlippedMultiSampleCtl, + IMG_UINT32 ui32TPCStride, + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32MTileStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR sRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT32 uiRgnHeaderSize, + IMG_UINT32 ui32ISPMtileSize, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); /* Destroy HWRTDataSet */ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet); @@ -259,12 +291,12 @@ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet); /* RGXCreateZSBufferKM */ -PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEMINT_RESERVATION *psReservation, - PMR *psPMR, +PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - RGX_ZSBUFFER_DATA **ppsZSBuffer); + RGX_ZSBUFFER_DATA **ppsZSBuffer); /* RGXDestroyZSBufferKM @@ -322,19 +354,17 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, PDLLIST_NODE pListHeader); /* Create free list */ -PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_HANDLE hMemCtxPrivData, - IMG_UINT32 ui32MaxFLPages, - IMG_UINT32 ui32InitFLPages, - IMG_UINT32 ui32GrowFLPages, - IMG_UINT32 ui32GrowParamThreshold, - RGX_FREELIST *psGlobalFreeList, - IMG_BOOL bCheckFreelist, - IMG_DEV_VIRTADDR sFreeListDevVAddr, - PMR *psFreeListPMR, - IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, - RGX_FREELIST **ppsFreeList); +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + DEVMEMINT_RESERVATION* psFreeListReservation, + RGX_FREELIST **ppsFreeList); /* Destroy free list */ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); @@ -478,6 +508,14 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, IMG_UINT64 ui64DeadlineInus); +PVRSRV_ERROR PVRSRVRGXSendZSStoreDisableKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_BOOL bDisableDepthStore, + IMG_BOOL bDisableStencilStore, + IMG_UINT32 ui32ExtJobRefToDisableZSStore); + + PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE * psDevNode, RGX_SERVER_RENDER_CONTEXT *psRenderContext, diff --git 
a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.c index 6d05e52e5d91..63379f57a032 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.c @@ -45,7 +45,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "rgxccb.h" #include "rgxutils.h" -#include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "rgxtransfer.h" #include "rgx_tq_shared.h" #include "rgxmem.h" @@ -59,7 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_memallocflags.h" #include "rgxhwperf.h" #include "ospvr_gputrace.h" -#include "htbuffer.h" +#include "htbserver.h" #include "rgxshader.h" #include "pdump_km.h" @@ -78,6 +78,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxtimerquery.h" + /* Enable this to dump the compiled list of UFOs prior to kick call */ #define ENABLE_TQ_UFO_DUMP 0 @@ -143,14 +144,24 @@ static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection, PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; IMG_UINT ui3DRegISPStateStoreSize = 0; - IMG_UINT uiNumISPStoreRegs = 1; /* default value 1 expected */ + IMG_UINT uiNumISPStoreRegs; /* Allocate device memory for the firmware GPU context suspend state. Note: the FW reads/writes the state to memory by accessing the GPU register interface. */ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TQ/3D context suspend state"); - if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_RASTER_PIPES_IDX); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + uiNumISPStoreRegs *= (1U + psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_XPU_MAX_SLAVES_IDX)); + } + } + else { uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); @@ -303,17 +314,10 @@ static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData, ps2DData->psServerCommonContext, RGXFWIF_DM_2D, ui32PDumpFlags); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); /* ... 
it has so we can free its resources */ FWCommonContextFree(ps2DData->psServerCommonContext); @@ -324,7 +328,7 @@ static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData, ps2DData->psBufferSyncContext = NULL; #endif - return PVRSRV_OK; + return eError; } #endif /* #if defined(RGX_FEATURE_TLA_BIT_MASK) */ @@ -339,7 +343,7 @@ static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, ps3DData->psServerCommonContext, RGXFWIF_DM_3D, ui32PDumpFlags); - if (eError == PVRSRV_ERROR_RETRY) + if (PVRSRVIsRetryError(eError)) { return eError; } @@ -348,7 +352,6 @@ static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", __func__, PVRSRVGetErrorString(eError))); - return eError; } /* ... it has so we can free its resources */ @@ -361,7 +364,7 @@ static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, ps3DData->psBufferSyncContext = NULL; #endif - return PVRSRV_OK; + return eError; } @@ -377,9 +380,7 @@ PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32PackedCCBSizeU8888, IMG_UINT32 ui32ContextFlags, IMG_UINT64 ui64RobustnessAddress, - RGX_SERVER_TQ_CONTEXT **ppsTransferContext, - PMR **ppsCLIPMRMem, - PMR **ppsUSCPMRMem) + RGX_SERVER_TQ_CONTEXT **ppsTransferContext) { RGX_SERVER_TQ_CONTEXT *psTransferContext; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; @@ -494,8 +495,6 @@ PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, } #endif - PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); - { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; @@ -608,6 +607,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransfer /* * PVRSRVSubmitTQ3DKickKM */ +/* Old bridge call for backwards compatibility. 
*/ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT32 ui32PrepareCount, IMG_UINT32 *paui32ClientUpdateCount, @@ -627,6 +627,49 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT32 ui32SyncPMRCount, IMG_UINT32 *paui32SyncPMRFlags, PMR **ppsSyncPMRs) +{ + return PVRSRVRGXSubmitTransfer3KM(psTransferContext, + ui32PrepareCount, + paui32ClientUpdateCount, + papauiClientUpdateUFODevVarBlock, + papaui32ClientUpdateSyncOffset, + papaui32ClientUpdateValue, + iCheckFence, + i2DUpdateTimeline, + pi2DUpdateFence, + i3DUpdateTimeline, + pi3DUpdateFence, + szFenceName, + PVRSRV_NO_FENCE, + paui32FWCommandSize, + papaui8FWCommand, + pui32TQPrepareFlags, + ui32ExtJobRef, + ui32SyncPMRCount, + paui32SyncPMRFlags, + ppsSyncPMRs); +} + +PVRSRV_ERROR PVRSRVRGXSubmitTransfer3KM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32PrepareCount, + IMG_UINT32 *paui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, + IMG_UINT32 **papaui32ClientUpdateSyncOffset, + IMG_UINT32 **papaui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE i2DUpdateTimeline, + PVRSRV_FENCE *pi2DUpdateFence, + PVRSRV_TIMELINE i3DUpdateTimeline, + PVRSRV_FENCE *pi3DUpdateFence, + IMG_CHAR szFenceName[32], + PVRSRV_FENCE iExportFenceToSignal, + IMG_UINT32 *paui32FWCommandSize, + IMG_UINT8 **papaui8FWCommand, + IMG_UINT32 *pui32TQPrepareFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs) { PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; @@ -634,13 +677,13 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, #if defined(RGX_FEATURE_TLA_BIT_MASK) RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; #endif - IMG_UINT32 ui323DCmdCount = 0; - IMG_UINT32 ui323DCmdLast = 0; - IMG_UINT32 ui323DCmdOffset = 0; + IMG_UINT32 ui323DCmdCount = 0; + IMG_UINT32 ui323DCmdLast = 0; + __maybe_unused IMG_UINT32 ui323DCmdOffset = 0; #if defined(RGX_FEATURE_TLA_BIT_MASK) - IMG_UINT32 ui322DCmdCount = 0; - IMG_UINT32 ui322DCmdLast = 0; - IMG_UINT32 ui322DCmdOffset = 0; + IMG_UINT32 ui322DCmdCount = 0; + IMG_UINT32 ui322DCmdLast = 0; + __maybe_unused IMG_UINT32 ui322DCmdOffset = 0; #endif IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE; IMG_UINT32 i; @@ -651,11 +694,14 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT64 ui3DUpdateFenceUID = 0; PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT ps3DExportFenceSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *ppsExportFenceSyncCheckpoint = NULL; PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; IMG_UINT32 ui32FenceSyncCheckpointCount = 0; IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL; #if defined(RGX_FEATURE_TLA_BIT_MASK) PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT ps2DExportFenceSyncCheckpoint = NULL; IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL; PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL; IMG_UINT32 ui322DFenceTimelineUpdateValue = 0; @@ -741,7 +787,7 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, } /* Ensure the string is null-terminated (Required for safety) */ - szFenceName[31] = '\0'; + szFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT)) { @@ -859,6 +905,7 @@ PVRSRV_ERROR 
PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL; IMG_BOOL bCheckFence = IMG_FALSE; IMG_BOOL bUpdateFence = IMG_FALSE; + IMG_BOOL bExportFence = IMG_FALSE; IMG_UINT64 *puiUpdateFenceUID = NULL; IMG_BOOL bCCBStateOpen = IMG_FALSE; @@ -876,6 +923,8 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, bCheckFence = ui323DCmdCount == 1; bUpdateFence = ui323DCmdCount == ui323DCmdLast && i3DUpdateTimeline != PVRSRV_NO_TIMELINE; + bExportFence = ui323DCmdCount == ui323DCmdLast + && iExportFenceToSignal != PVRSRV_NO_FENCE; if (bUpdateFence) { @@ -888,6 +937,11 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues; puiUpdateFenceUID = &ui3DUpdateFenceUID; } + + if (bExportFence) + { + ppsExportFenceSyncCheckpoint = &ps3DExportFenceSyncCheckpoint; + } } else #if defined(RGX_FEATURE_TLA_BIT_MASK) @@ -905,6 +959,8 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, bCheckFence = ui322DCmdCount == 1; bUpdateFence = ui322DCmdCount == ui322DCmdLast && i2DUpdateTimeline != PVRSRV_NO_TIMELINE; + bExportFence = ui322DCmdCount == ui322DCmdLast + && iExportFenceToSignal != PVRSRV_NO_FENCE; if (bUpdateFence) { @@ -917,6 +973,11 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues; puiUpdateFenceUID = &ui2DUpdateFenceUID; } + + if (bExportFence) + { + ppsExportFenceSyncCheckpoint = &ps2DExportFenceSyncCheckpoint; + } } else #endif @@ -1067,6 +1128,47 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence)); + /* Resolve the iExportFenceToSignal (if required) */ + if (bExportFence) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncCheckpointResolveExportFence(iExportFenceToSignal=%d), ui32FenceSyncCheckpointCount=%d", __func__, iExportFenceToSignal, ui32FenceSyncCheckpointCount)); + eError = SyncCheckpointResolveExportFence(iExportFenceToSignal, + psDeviceNode->hSyncCheckpointContext, + ppsExportFenceSyncCheckpoint, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%s) psExportFenceSyncCheckpoint=<%p>", __func__, PVRSRVGetErrorString(eError), *ppsExportFenceSyncCheckpoint)); + goto fail_prepare_loop; + } + + /* Check that the export fence was not also included as part of the + * check fence (which is an error and would lead to a stalled kick). 
+ */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Checking export fence is not part of check fence...", __func__)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32FenceSyncCheckpointCount=%d", + __func__, ui32FenceSyncCheckpointCount)); + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 iii; + + for (iii=0; iii, FWAddr=0x%x", __func__, iii, apsFenceSyncCheckpoints[iii], SyncCheckpointGetFirmwareAddr(apsFenceSyncCheckpoints[iii]))); + if (apsFenceSyncCheckpoints[iii] == *ppsExportFenceSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ERROR psExportFenceSyncCheckpoint=<%p>", __func__, *ppsExportFenceSyncCheckpoint)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, " %s - iCheckFence includes iExportFenceToSignal", PVRSRVGetErrorString(eError))); + goto fail_prepare_loop; + } + } + } + } + } + /* Append the sync prim update for the timeline (if required) */ if (*ppsFenceTimelineUpdateSync) { @@ -1186,6 +1288,35 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, } #endif } + + if (bExportFence && *ppsExportFenceSyncCheckpoint) + { + /* Append the update (from export fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Transfer Update (psSyncAddrListUpdate=<%p>, psExportFenceSyncCheckpoint=<%p>)...", __func__, (void*)psSyncAddrListUpdate , (void*)*ppsExportFenceSyncCheckpoint)); + SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, + 1, + ppsExportFenceSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); #if (ENABLE_TQ_UFO_DUMP == 1) @@ -1249,6 +1380,7 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, pszCommandName, bCCBStateOpen, psCmdHelper); + } /* @@ -1281,6 +1413,20 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, and we would have to roll back all the syncs */ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + if (ui323DCmdCount) { ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext)); @@ -1311,8 +1457,10 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, * this is used to obtain the frame num. Each command should share the same * frame number so we can just get the first. 
*/ +#if defined(PVRSRV_ENABLE_HTB) RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas3DCmdHelper[0]; CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); +#endif /* Construct the kernel 3D CCB command. */ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; @@ -1325,6 +1473,7 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; #endif +#if defined(PVRSRV_ENABLE_HTB) HTBLOGK(HTB_SF_MAIN_KICK_3D, s3DKCCBCmd.uCmdData.sCmdKickData.psContext, ui323DCmdOffset, @@ -1332,6 +1481,7 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ui32ExtJobRef, ui32IntJobRef ); +#endif RGXSRV_HWPERF_ENQ(psTransferContext, OSGetCurrentClientProcessIDKM(), @@ -1347,9 +1497,9 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, NO_DEADLINE, NO_CYCEST); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psDevInfo, + eError2 = RGXScheduleCommandWithoutPowerLock(psDevInfo, RGXFWIF_DM_3D, &s3DKCCBCmd, ui32PDumpFlags); @@ -1358,13 +1508,13 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError2 != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2)); eError = eError2; - goto fail_cmdacquire; + goto fail_cmdsubmit; } PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, @@ -1382,8 +1532,10 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, * this is used to obtain the frame num. Each command should share the same * frame number so we can just get the first. */ +#if defined(PVRSRV_ENABLE_HTB) RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas2DCmdHelper[0]; CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); +#endif /* Construct the kernel 2D CCB command. */ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; @@ -1392,12 +1544,14 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps2DTQCCB); s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; +#if defined(PVRSRV_ENABLE_HTB) HTBLOGK(HTB_SF_MAIN_KICK_2D, s2DKCCBCmd.uCmdData.sCmdKickData.psContext, ui322DCmdOffset, psTransferCmdCmn->ui32FrameNum, ui32ExtJobRef, ui32IntJobRef); +#endif RGXSRV_HWPERF_ENQ(psTransferContext, OSGetCurrentClientProcessIDKM(), @@ -1413,9 +1567,9 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, NO_DEADLINE, NO_CYCEST); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psDevInfo, + eError2 = RGXScheduleCommandWithoutPowerLock(psDevInfo, RGXFWIF_DM_2D, &s2DKCCBCmd, ui32PDumpFlags); @@ -1424,13 +1578,13 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError2 != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. 
(0x%x)", eError2)); eError = eError2; - goto fail_cmdacquire; + goto fail_cmdsubmit; } PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, @@ -1445,9 +1599,11 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, */ if (eError != PVRSRV_OK ) { - goto fail_cmdacquire; + goto fail_cmdsubmit; } + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + #if defined(NO_HARDWARE) /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ #if defined(RGX_FEATURE_TLA_BIT_MASK) @@ -1473,6 +1629,16 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); } SyncCheckpointNoHWUpdateTimelines(NULL); + + if (ppsExportFenceSyncCheckpoint && *ppsExportFenceSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, + (void*)*ppsExportFenceSyncCheckpoint, SyncCheckpointGetId(*ppsExportFenceSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(*ppsExportFenceSyncCheckpoint))); + SyncCheckpointSignalNoHW(*ppsExportFenceSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncCheckpointNoHWSignalExportFence(iExportFenceToSignal=%d)", __func__, iExportFenceToSignal)); + SyncCheckpointNoHWSignalExportFence(iExportFenceToSignal); + } #endif /* defined(NO_HARDWARE) */ #if defined(SUPPORT_BUFFER_SYNC) @@ -1546,11 +1712,12 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, If we fail after the client CCB acquire there is still nothing to do as only the client CCB release will modify the client CCB */ +fail_cmdsubmit: + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); +fail_acquirepowerlock: fail_cmdacquire: fail_prepare_loop: - PVR_ASSERT(eError != PVRSRV_OK); - for (i=0;iasSyncAddrListFence[i]); @@ -1577,6 +1744,10 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, OSFreeMem(pui323DIntAllocatedUpdateValues); pui323DIntAllocatedUpdateValues = NULL; } + if (ppsExportFenceSyncCheckpoint && *ppsExportFenceSyncCheckpoint) + { + SyncCheckpointRollbackExportFence(iExportFenceToSignal); + } #if defined(RGX_FEATURE_TLA_BIT_MASK) if (i2DUpdateFence != PVRSRV_NO_FENCE) { @@ -1621,6 +1792,8 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, fail_alloc3dhelper: OSLockRelease(psTransferContext->hLock); + + PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -1795,6 +1968,23 @@ IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) return ui32ContextBitMask; } +PVRSRV_ERROR PVRSRVRGXTQGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem) +{ + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTQReleaseSharedMemoryKM(PMR * psPMRMem) +{ + PVR_UNREFERENCED_PARAMETER(psPMRMem); + + return PVRSRV_OK; +} + /**************************************************************************//** End of file (rgxtransfer.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.h index deb2bd6a5123..3cdc454d3de0 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/rogue/rgxtransfer.h @@ -49,7 +49,7 @@ CONNECTION WITH THE SOFTWARE 
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdevice.h" #include "rgxfwutils.h" #include "rgx_fwif_resetframework.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" #include "sync_server.h" @@ -79,9 +79,7 @@ PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32PackedCCBSizeU8888, IMG_UINT32 ui32ContextFlags, IMG_UINT64 ui64RobustnessAddress, - RGX_SERVER_TQ_CONTEXT **ppsTransferContext, - PMR **ppsCLIPMRMem, - PMR **ppsUSCPMRMem); + RGX_SERVER_TQ_CONTEXT **ppsTransferContext); /*! @@ -131,6 +129,27 @@ PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT32 *paui32SyncPMRFlags, PMR **ppsSyncPMRs); +PVRSRV_ERROR PVRSRVRGXSubmitTransfer3KM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32PrepareCount, + IMG_UINT32 *paui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, + IMG_UINT32 **papaui32ClientUpdateSyncOffset, + IMG_UINT32 **papaui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE i2DUpdateTimeline, + PVRSRV_FENCE *pi2DUpdateFence, + PVRSRV_TIMELINE i3DUpdateTimeline, + PVRSRV_FENCE *pi3DUpdateFence, + IMG_CHAR szFenceName[32], + PVRSRV_FENCE iExportFenceToSignal, + IMG_UINT32 *paui32FWCommandSize, + IMG_UINT8 **papaui8FWCommand, + IMG_UINT32 *pui32TQPrepareFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs); + PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE * psDevNode, RGX_SERVER_TQ_CONTEXT *psTransferContext, @@ -150,4 +169,11 @@ void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, /* Debug/Watchdog - check if client transfer contexts are stalled */ IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR PVRSRVRGXTQGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem); + +PVRSRV_ERROR PVRSRVRGXTQReleaseSharedMemoryKM(PMR * psUSCPMRMem); + #endif /* RGXTRANSFER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.c index c3bb14fa938b..61e808bb976b 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.c @@ -50,14 +50,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "allocmem.h" #include "cache_km.h" #include "osfunc.h" +#include "os_apphint.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvrversion.h" #include "pvr_debug.h" #include "srvkm.h" #include "rgxutils.h" #include "tlstream.h" -#include "rgxfwutils.h" +#include "rgxfwriscv.h" #include "pvrsrv.h" #include "services_km.h" @@ -66,13 +67,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "devicemem_utils.h" #include "rgx_fwif_km.h" #include "rgx_fwif_sf.h" -#include "rgxfw_log_helper.h" -#include "fwtrace_string.h" -#include "rgxfwimageutils.h" -#include "fwload.h" +#include "debug_common.h" #include "rgxta3d.h" +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) #include "rgxkicksync.h" +#endif #include "rgxcompute.h" #include "rgxtdmtransfer.h" #include "rgxtimecorr.h" @@ -80,21 +80,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxinit.h" #include "rgxlayer_impl.h" #include "devicemem_history_server.h" -#include "info_page.h" - -#define PVR_DUMP_FIRMWARE_INFO(x) \ - PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ - PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ - PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ - (x).ui32DDKBuild, \ - ((x).ui32BuildOptions & OPTIONS_DEBUG_EN) ? "debug":"release", \ - (x).ui32BuildOptions); #define DD_SUMMARY_INDENT "" -#define DD_NORMAL_INDENT " " #define RGX_DEBUG_STR_SIZE (150U) -#define MAX_FW_DESCRIPTION_LENGTH (600U) #define RGX_TEXAS_BIF0_ID (0) @@ -118,18 +107,41 @@ static const IMG_CHAR *const pszPowStateName[] = #undef X }; -typedef struct _IMG_FLAGS2DESC_ +static const IMG_FLAGS2DESC asHwrState2Description[] = +{ + {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, + {RGXFWIF_HWR_RESET_IN_PROGRESS, " Reset ongoing;"}, + {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, + {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, + {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, + {RGXFWIF_HWR_FW_FAULT, " FW fault;"}, + {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"}, +}; + +static const IMG_FLAGS2DESC asDmState2Description[] = { - IMG_UINT32 uiFlag; - const IMG_CHAR *pszLabel; -} IMG_FLAGS2DESC; + {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, + {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, + {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, + {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, + {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, + {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, + {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, + {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, + {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"}, + {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, + {RGXFWIF_DM_STATE_GPU_PARITY_HWR, " GPU PARITY hwr;"}, + {RGXFWIF_DM_STATE_GPU_LATENT_HWR, " GPU LATENT hwr;"}, + {RGXFWIF_DM_STATE_ICS_HWR, " ICS hwr;"}, +}; static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = { "offline", "ready", "active", - "offloading" + "offloading", + "cooldown" }; #if defined(PVR_ENABLE_PHR) @@ -137,343 +149,9 @@ static const IMG_FLAGS2DESC asPHRConfig2Description[] = { {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"}, {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"}, - {BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset "}, }; #endif -#if !defined(NO_HARDWARE) -static PVRSRV_ERROR -RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, - IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) -{ - IMG_UINT32 ui32RegValue, ui32NumPolls = 0; - PVRSRV_ERROR eError; - - do - { - eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue); - if (eError != PVRSRV_OK) - { - return eError; - } - } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); - - return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; -} - -static PVRSRV_ERROR -RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) -{ - PVRSRV_ERROR eError; - - /* Core Read Ready? 
*/ - eError = RGXPollMetaRegThroughSP(psDevInfo, - META_CR_TXUXXRXRQ_OFFSET, - META_CR_TXUXXRXRQ_DREADY_BIT, - META_CR_TXUXXRXRQ_DREADY_BIT); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); - - /* Set the reg we are interested in reading */ - eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, - ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr"); - - /* Core Read Done? */ - eError = RGXPollMetaRegThroughSP(psDevInfo, - META_CR_TXUXXRXRQ_OFFSET, - META_CR_TXUXXRXRQ_DREADY_BIT, - META_CR_TXUXXRXRQ_DREADY_BIT); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); - - /* Read the value */ - return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); -} -#endif - -#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) -static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_DEV_VIRTADDR *psFWAddr, - void *pvHostCodeAddr, - IMG_UINT32 ui32MaxLen, - const IMG_CHAR *pszDesc, - IMG_UINT32 ui32StartOffset) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32Value = 0; - IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; - IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset); - IMG_UINT32 i; - -#if defined(EMULATOR) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - return PVRSRV_OK; - } -#endif - - ui32MaxLen -= ui32StartOffset; - ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ - - for (i = 0; i < ui32MaxLen; i++) - { - eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); - return eError; - } - -#if defined(EMULATOR) - if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -#endif - { - PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); - - if (pui32FWCode[i] != ui32Value) - { - PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", - __func__, pszDesc, - (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); - return PVRSRV_ERROR_FW_IMAGE_MISMATCH; - } - } - - ui32FWCodeDevVAAddr += 4; - } - - PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc); - return PVRSRV_OK; -} -#endif - -#if !defined(NO_HARDWARE) -static PVRSRV_ERROR _ValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) - PVRSRV_ERROR eError; - IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; - OS_FW_IMAGE *psRGXFW = NULL; - const IMG_BYTE *pbRGXFirmware = NULL; - RGXFWIF_DEV_VIRTADDR sFWAddr; - IMG_UINT32 ui32StartOffset = 0; - RGX_LAYER_PARAMS sLayerParams; - sLayerParams.psDevInfo = psDevInfo; - -#if defined(EMULATOR) - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator"); - return PVRSRV_OK; - } -#endif - - if (psDevInfo->pvRegsBaseKM == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); - return PVRSRV_ERROR_BAD_MAPPING; - } - - /* Load FW from system for code verification */ - pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); - if (pui32HostFWCode == NULL) - { - PVR_DPF((PVR_DBG_ERROR, 
- "%s: Failed in allocating memory for FW code. " - "So skipping FW code verification", - __func__)); - return PVRSRV_ERROR_OUT_OF_MEMORY; - } - - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); - if (pui32HostFWCoremem == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed in allocating memory for FW core code. " - "So skipping FW code verification", - __func__)); - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto freeHostFWCode; - } - } - - /* Load FW image */ - eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", - __func__, PVRSRVGetErrorString(eError))); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto cleanup_initfw; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware, - (void*) pui32HostFWCode, NULL, - (void*) pui32HostFWCoremem, NULL, NULL); - } - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, - pui32HostFWCode, NULL, - pui32HostFWCoremem, NULL); - } - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); - goto cleanup_initfw; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - /* starting checking after BOOT LOADER config */ - sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; - - ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET; - } - else - { - /* Use bootloader code remap which is always configured before the FW is started */ - sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; - } - - eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, - psDevInfo, &sFWAddr, - pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, - "FW code", ui32StartOffset); - if (eError != PVRSRV_OK) - { - goto cleanup_initfw; - } - - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); - } - else - { - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); - - /* Core must be halted while issuing abstract commands */ - eError = RGXRiscvHalt(psDevInfo); - PVR_GOTO_IF_ERROR(eError, cleanup_initfw); - } - - eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, - psDevInfo, &sFWAddr, - pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, - "FW coremem code", 0); - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eError = RGXRiscvResume(psDevInfo); - PVR_GOTO_IF_ERROR(eError, cleanup_initfw); - } - } - -cleanup_initfw: - if (psRGXFW) - { - OSUnloadFirmware(psRGXFW); - } - - if (pui32HostFWCoremem) - { - OSFreeMem(pui32HostFWCoremem); - } -freeHostFWCode: - if (pui32HostFWCode) - { - OSFreeMem(pui32HostFWCode); - } - return eError; -#else - PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); - PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); - PVR_UNREFERENCED_PARAMETER(psDevInfo); - return PVRSRV_OK; -#endif -} -#endif /* !defined(NO_HARDWARE) */ - -#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) - IMG_PBYTE pbCodeMemoryPointer; - PVRSRV_ERROR eError; - RGXFWIF_DEV_VIRTADDR sFWAddr; - - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); - if 
(eError != PVRSRV_OK) - { - return eError; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; - } - else - { - PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); - sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; - }; - - eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); - if (eError != PVRSRV_OK) - { - goto releaseFWCodeMapping; - } - - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer); - if (eError != PVRSRV_OK) - { - goto releaseFWCoreCodeMapping; - } - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); - } - else - { - PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); - sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); - } - - eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, - psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); - } - -releaseFWCoreCodeMapping: - if (psDevInfo->ui32FWCorememCodeSizeInBytes) - { - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); - } -releaseFWCodeMapping: - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); - - return eError; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - return PVRSRV_OK; -#endif -} -#endif - - /*! ******************************************************************************* @@ -485,7 +163,7 @@ PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) @Input ui32MMULevel - MMU level - @Return IMG_CHAR* to the sting describing the MMU level that faulted. + @Return IMG_CHAR* to the string describing the MMU level that faulted. ******************************************************************************/ static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) @@ -514,104 +192,27 @@ static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and RGX_CR_MMU_FAULT_STATUS regs. 
- @Input ui32TagID - Tag ID value + @Input psDevInfo - RGX device info + @Input ui32Requester - Requester ID value @Input ui32BIFModule - BIF module - @Input bRead - Read flag @Input bWriteBack - Write Back flag - @Output ppszTagID - Decoded string from the Tag ID - @Output ppszTagSB - Decoded string from the Tag SB - @Output pszScratchBuf - Buffer provided to the function to generate the debug strings - @Input ui32ScratchBufSize - Size of the provided buffer + @Input bFBMFault - FBM Fault flag + @Output ppszRequester - Decoded string from the Requester ID @Return void ******************************************************************************/ -#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8 (12) -#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8 (15) -#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX (6) -#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX (9) -#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST (33) -#define RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST (41) -#define RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST (48) -#define RGX_TEXAS_BIF0_TAG_LAST (51) - -#define RGX_TEXAS_BIF1_TAG_LAST (26) - -#define RGX_JONES_BIF_IPP_TAG (0) -#define RGX_JONES_BIF_DCE_TAG_FIRST (1) -#define RGX_JONES_BIF_DCE_TAG_LAST (14) -#define RGX_JONES_BIF_TDM_TAG_FIRST (15) -#define RGX_JONES_BIF_TDM_TAG_LAST (19) -#define RGX_JONES_BIF_PM_TAG (20) -#define RGX_JONES_BIF_CDM_TAG_FIRST (21) -#define RGX_JONES_BIF_CDM_TAG_LAST (31) -#define RGX_JONES_BIF_META_TAG (32) -#define RGX_JONES_BIF_META_DMA_TAG (33) -#define RGX_JONES_BIF_TE_TAG_FIRST (34) -#define RGX_JONES_BIF_TE_TAG_LAST (47) -#define RGX_JONES_BIF_RTU_TAG_FIRST (48) -#define RGX_JONES_BIF_RTU_TAG_LAST (53) -#define RGX_JONES_BIF_RPM_TAG (54) -#define RGX_JONES_BIF_TAG_LAST (54) - - -/* The MCU L1 requestors are common to all Texas BIFs so put them - * in their own function. 
*/ -static INLINE void _RGXDecodeMMUReqMCULevel1(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32TagID, - IMG_CHAR **ppszTagSB) -{ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) - { - switch (ui32TagID) - { - case 0: *ppszTagSB = "IP0 PDS"; break; - case 1: *ppszTagSB = "IP0 Global"; break; - case 2: *ppszTagSB = "IP1 PDS"; break; - case 3: *ppszTagSB = "IP1 Global"; break; - case 4: *ppszTagSB = "IP2 PDS"; break; - case 5: *ppszTagSB = "IP2 Global"; break; - } - } - else - { - switch (ui32TagID) - { - case 0: *ppszTagSB = "IP0 PDS"; break; - case 1: *ppszTagSB = "IP0 Global"; break; - case 2: *ppszTagSB = "IP0 BSC"; break; - case 3: *ppszTagSB = "IP0 Constants"; break; - - case 4: *ppszTagSB = "IP1 PDS"; break; - case 5: *ppszTagSB = "IP1 Global"; break; - case 6: *ppszTagSB = "IP1 BSC"; break; - case 7: *ppszTagSB = "IP1 Constants"; break; - - case 8: *ppszTagSB = "IP2 PDS"; break; - case 9: *ppszTagSB = "IP2 Global"; break; - case 10: *ppszTagSB = "IP2 BSC"; break; - case 11: *ppszTagSB = "IP2 Constants"; break; - } - } -} - -static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32TagID, - IMG_UINT32 ui32BIFModule, - IMG_BOOL bRead, - IMG_BOOL bWriteBack, - IMG_BOOL bFBMFault, - IMG_CHAR **ppszTagID, - IMG_CHAR **ppszTagSB, - IMG_CHAR *pszScratchBuf, - IMG_UINT32 ui32ScratchBufSize) +static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Requester, + IMG_UINT32 ui32BIFModule, + IMG_BOOL bWriteBack, + IMG_BOOL bFBMFault, + IMG_CHAR **ppszRequester) { IMG_UINT32 ui32BIFsPerSPU = 2; - IMG_CHAR *pszTagID = "-"; - IMG_CHAR *pszTagSB = "-"; + IMG_CHAR *pszRequester = "-"; - PVR_ASSERT(ppszTagID != NULL); - PVR_ASSERT(ppszTagSB != NULL); + PVR_ASSERT(ppszRequester != NULL); if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) { @@ -620,755 +221,218 @@ static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, if (bFBMFault) { - pszTagID = "FBM"; if (bWriteBack) { - pszTagSB = "Header/state cache request"; + pszRequester = "FBM (Header/state cache request)"; + } + else + { + pszRequester = "FBM"; } } else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_SPU) && ui32BIFModule < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU) { - if ((ui32BIFModule % ui32BIFsPerSPU) == 0) + IMG_BOOL bTexasBIFA = ((ui32BIFModule % 2) == 0); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) { - IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST = - (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) - ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX - : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8; - IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST = - (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) - ? 
RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX - : RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8; - - /* Texas 0 BIF */ - if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST) + switch (ui32Requester) { - pszTagID = "MCU L1"; - _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB); - } - else if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST) - { - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) - { - switch (ui32TagID) - { - case 6: pszTagID = "TCU L1"; break; - case 7: - case 8: pszTagID = "PBE0"; break; - } - } - else - { - switch (ui32TagID) - { - case 12: pszTagID = "TCU L1"; break; - case 13: - case 14: pszTagID = "PBE0"; break; - } - } - } - else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST) - { - pszTagID = "IPF ID Array"; - } - else if (ui32TagID < RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST) - { - switch (ui32TagID) - { - case 34: pszTagID = "IPF_CPF"; break; - case 35: pszTagID = "PPP"; break; - case 36: - case 37: pszTagID = "ISP0 ID Array"; break; - case 38: - case 39: pszTagID = "ISP2 ID Array"; break; - case 40: pszTagID = "VCE RTC"; break; - } - } - else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST) - { - pszTagID = "RTU RAC"; - } - else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_LAST) - { - switch (ui32TagID) - { - case 49: pszTagID = "VCE AMC"; break; - case 50: - case 51: pszTagID = "SHF"; break; - } - } - else - { - PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID)); + case 0: pszRequester = "MCU L1 (IP0 PDS)"; break; + case 1: pszRequester = "MCU L1 (IP0 Global)"; break; + case 2: pszRequester = "MCU L1 (IP1 PDS)"; break; + case 3: pszRequester = "MCU L1 (IP1 Global)"; break; + case 4: pszRequester = "MCU L1 (IP2 PDS)"; break; + case 5: pszRequester = "MCU L1 (IP2 Global)"; break; + case 6: pszRequester = (bTexasBIFA ? "TCU L1" : "BSC"); break; + case 7: pszRequester = (bTexasBIFA ? "PBE0" : "BSC"); break; + case 8: pszRequester = "PBE0"; break; + case 9: pszRequester = "IPF"; break; + case 10: pszRequester = "IPF"; break; + case 11: pszRequester = "IPF"; break; + case 12: pszRequester = (bTexasBIFA ? "IPF" : "TCU L1"); break; + case 13: pszRequester = (bTexasBIFA ? "IPF" : "TPF"); break; + case 14: pszRequester = (bTexasBIFA ? "IPF" : "TPF CPF"); break; + case 15: pszRequester = (bTexasBIFA ? "IPF" : "PBE1"); break; + case 16: pszRequester = (bTexasBIFA ? "IPF" : "PBE1"); break; + case 17: pszRequester = (bTexasBIFA ? "IPF" : "PDSRW cache"); break; + case 18: pszRequester = (bTexasBIFA ? "IPF" : "PDS"); break; + case 19: pszRequester = (bTexasBIFA ? "IPF" : "ISP1"); break; + case 20: pszRequester = (bTexasBIFA ? "IPF" : "ISP1"); break; + case 21: pszRequester = (bTexasBIFA ? "IPF" : "USC L2"); break; + case 22: pszRequester = (bTexasBIFA ? 
"IPF" : "URI"); break; + case 23: pszRequester = "IPF"; break; + case 24: pszRequester = "IPF"; break; + case 25: pszRequester = "IPF"; break; + case 26: pszRequester = "IPF"; break; + case 27: pszRequester = "IPF"; break; + case 28: pszRequester = "IPF"; break; + case 29: pszRequester = "IPF"; break; + case 30: pszRequester = "IPF"; break; + case 31: pszRequester = "IPF"; break; + case 32: pszRequester = "IPF"; break; + case 33: pszRequester = "IPF_CPF"; break; + case 34: pszRequester = "ISP0"; break; + case 35: pszRequester = "ISP0"; break; + case 36: pszRequester = "ISP2"; break; + case 37: pszRequester = "ISP2"; break; + case 38: pszRequester = "GEOM"; break; + case 39: pszRequester = "GEOM"; break; + case 40: pszRequester = "GEOM"; break; + case 41: pszRequester = "GEOM"; break; + case 42: pszRequester = "BSC"; break; + case 43: pszRequester = "ASC"; break; + + default: + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Requester ID: %d", __func__, ui32Requester)); + break; } } - else if ((ui32BIFModule % ui32BIFsPerSPU) == 1) + else { - IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST = - (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) - ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX - : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8; - - /* Texas 1 BIF */ - if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST) - { - pszTagID = "MCU L1"; - _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB); - } - else if (ui32TagID <= RGX_TEXAS_BIF1_TAG_LAST) - { - switch (ui32TagID) - { - /** Albiorix/NUM_TPU_PER_SPU > 1 **/ - case 6: - case 7: pszTagID = "BSC"; break; - /** All cores **/ - case 12: pszTagID = "TCU L1"; break; - case 13: pszTagID = "TPF"; break; - case 14: pszTagID = "TPF CPF"; break; - case 15: - case 16: pszTagID = "PBE1"; break; - case 17: pszTagID = "PDSRW cache"; break; - case 18: pszTagID = "PDS"; break; - case 19: - case 20: pszTagID = "ISP1 ID Array"; break; - case 21: pszTagID = "USC L2"; break; - case 22: pszTagID = "VDM L2"; break; - case 23: pszTagID = "RTU FBA L2"; break; - case 24: pszTagID = "RTU SHR L2"; break; - case 25: pszTagID = "RTU SHG L2"; break; - case 26: pszTagID = "RTU TUL L2"; break; - } - } - else + switch (ui32Requester) { - PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID)); + case 0: pszRequester = "MCU L1 (IP0 PDS)"; break; + case 1: pszRequester = "MCU L1 (IP0 Global)"; break; + case 2: pszRequester = "MCU L1 (IP0 BSC)"; break; + case 3: pszRequester = "MCU L1 (IP0 Constants)"; break; + case 4: pszRequester = "MCU L1 (IP1 PDS)"; break; + case 5: pszRequester = "MCU L1 (IP1 Global)"; break; + case 6: pszRequester = "MCU L1 (IP1 BSC)"; break; + case 7: pszRequester = "MCU L1 (IP1 Constants)"; break; + case 8: pszRequester = "MCU L1 (IP2 PDS)"; break; + case 9: pszRequester = "MCU L1 (IP2 Global)"; break; + case 10: pszRequester = "MCU L1 (IP2 BSC)"; break; + case 11: pszRequester = "MCU L1 (IP2 Constants)"; break; + case 12: pszRequester = "TCU L1"; break; + case 13: pszRequester = (bTexasBIFA ? "PBE0" : "TPF"); break; + case 14: pszRequester = (bTexasBIFA ? "PBE0" : "TPF CPF"); break; + case 15: pszRequester = (bTexasBIFA ? "IPF" : "PBE1"); break; + case 16: pszRequester = (bTexasBIFA ? "IPF" : "PBE1"); break; + case 17: pszRequester = (bTexasBIFA ? "IPF" : "PDSRW cache"); break; + case 18: pszRequester = (bTexasBIFA ? "IPF" : "PDS"); break; + case 19: pszRequester = (bTexasBIFA ? "IPF" : "ISP1"); break; + case 20: pszRequester = (bTexasBIFA ? 
"IPF" : "ISP1"); break; + case 21: pszRequester = (bTexasBIFA ? "IPF" : "USC L2"); break; + case 22: pszRequester = (bTexasBIFA ? "IPF" : "VDM L2"); break; + case 23: pszRequester = (bTexasBIFA ? "IPF" : "RTU FBA L2"); break; + case 24: pszRequester = (bTexasBIFA ? "IPF" : "RTU SHR L2"); break; + case 25: pszRequester = (bTexasBIFA ? "IPF" : "RTU SHG L2"); break; + case 26: pszRequester = (bTexasBIFA ? "IPF" : "RTU TUL L2"); break; + case 27: pszRequester = "IPF"; break; + case 28: pszRequester = "IPF"; break; + case 29: pszRequester = "IPF"; break; + case 30: pszRequester = "IPF"; break; + case 31: pszRequester = "IPF"; break; + case 32: pszRequester = "IPF"; break; + case 33: pszRequester = "IPF"; break; + case 34: pszRequester = "IPF_CPF"; break; + case 35: pszRequester = "PPP"; break; + case 36: pszRequester = "ISP0"; break; + case 37: pszRequester = "ISP0"; break; + case 38: pszRequester = "ISP2"; break; + case 39: pszRequester = "ISP2"; break; + case 40: pszRequester = "VCE RTC"; break; + case 41: pszRequester = "RTU RAC"; break; + case 42: pszRequester = "RTU RAC"; break; + case 43: pszRequester = "RTU RAC"; break; + case 44: pszRequester = "RTU RAC"; break; + case 45: pszRequester = "RTU RAC"; break; + case 46: pszRequester = "RTU RAC"; break; + case 47: pszRequester = "RTU RAC"; break; + case 48: pszRequester = "RTU RAC"; break; + case 49: pszRequester = "VCE AMC"; break; + case 50: pszRequester = "SHF"; break; + case 51: pszRequester = "SHF"; break; + + default: + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Requester ID: %d", __func__, ui32Requester)); + break; } } } else if (ui32BIFModule == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU) { /* Jones BIF */ - - if ((ui32TagID >= RGX_JONES_BIF_DCE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_DCE_TAG_LAST)) - { - pszTagID = "DCE"; - } - else if ((ui32TagID >= RGX_JONES_BIF_TDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TDM_TAG_LAST)) - { - pszTagID = "TDM"; - } - else if ((ui32TagID >= RGX_JONES_BIF_CDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_CDM_TAG_LAST)) - { - pszTagID = "CDM"; - } - else if ((ui32TagID >= RGX_JONES_BIF_TE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TE_TAG_LAST)) - { - pszTagID = "Tiling Engine (TE3)"; - } - else if ((ui32TagID >= RGX_JONES_BIF_RTU_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_RTU_TAG_LAST)) - { - pszTagID = "RTU"; - } - else if (ui32TagID <= RGX_JONES_BIF_TAG_LAST) - { - switch (ui32TagID) - { - case RGX_JONES_BIF_IPP_TAG: pszTagID = "IPP"; break; - case RGX_JONES_BIF_PM_TAG: pszTagID = "PM"; break; - case RGX_JONES_BIF_META_TAG: pszTagID = "META"; break; - case RGX_JONES_BIF_META_DMA_TAG:pszTagID = "META DMA"; break; - case RGX_JONES_BIF_RPM_TAG: pszTagID = "RPM"; break; - } - } - else - { - PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Jones BIF Tag ID: %d", __func__, ui32TagID)); + switch (ui32Requester) + { + case 0: pszRequester = "IPP"; break; + case 1: pszRequester = "DCE"; break; + case 2: pszRequester = "DCE"; break; + case 3: pszRequester = "DCE"; break; + case 4: pszRequester = "DCE"; break; + case 5: pszRequester = "DCE"; break; + case 6: pszRequester = "DCE"; break; + case 7: pszRequester = "DCE"; break; + case 8: pszRequester = "DCE"; break; + case 9: pszRequester = "DCE"; break; + case 10: pszRequester = "DCE"; break; + case 11: pszRequester = "DCE"; break; + case 12: pszRequester = "DCE"; break; + case 13: pszRequester = "DCE"; break; + case 14: pszRequester = "DCE"; break; + case 15: pszRequester = "TDM"; break; + case 16: pszRequester = "TDM"; break; + case 17: 
pszRequester = "TDM"; break; + case 18: pszRequester = "TDM"; break; + case 19: pszRequester = "TDM"; break; + case 20: pszRequester = "PM"; break; + case 21: pszRequester = "CDM"; break; + case 22: pszRequester = "CDM"; break; + case 23: pszRequester = "CDM"; break; + case 24: pszRequester = "CDM"; break; + case 25: pszRequester = "CDM"; break; + case 26: pszRequester = "CDM"; break; + case 27: pszRequester = "CDM"; break; + case 28: pszRequester = "CDM"; break; + case 29: pszRequester = "CDM"; break; + case 30: pszRequester = "CDM"; break; + case 31: pszRequester = "CDM"; break; + case 32: pszRequester = "META"; break; + case 33: pszRequester = "META DMA"; break; + case 34: pszRequester = "TE"; break; + case 35: pszRequester = "TE"; break; + case 36: pszRequester = "TE"; break; + case 37: pszRequester = "TE"; break; + case 38: pszRequester = "TE"; break; + case 39: pszRequester = "TE"; break; + case 40: pszRequester = "TE"; break; + case 41: pszRequester = "TE"; break; + case 42: pszRequester = "TE"; break; + case 43: pszRequester = "TE"; break; + case 44: pszRequester = "TE"; break; + case 45: pszRequester = "TE"; break; + case 46: pszRequester = "TE"; break; + case 47: pszRequester = "TE"; break; + case 48: pszRequester = "TE"; break; + case 49: pszRequester = "TE"; break; + case 50: pszRequester = "RCE"; break; + case 51: pszRequester = "RCE"; break; + case 52: pszRequester = "RCE"; break; + + default: + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Jones BIF Tag ID: %d", __func__, ui32Requester)); + break; } } else if (bWriteBack) { - pszTagID = ""; - pszTagSB = "Writeback of dirty cacheline"; + pszRequester = "Unknown (Writeback of dirty cacheline)"; } else { PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified BIF Module: %d", __func__, ui32BIFModule)); } - *ppszTagID = pszTagID; - *ppszTagSB = pszTagSB; -} - - -static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, - IMG_UINT64 *pui64Seconds, - IMG_UINT64 *pui64Nanoseconds) -{ - IMG_UINT32 ui32Remainder; - - *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); - *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); + *ppszRequester = pszRequester; } -typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ -{ - DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, - DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, - DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, - DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, -} DEVICEMEM_HISTORY_QUERY_INDEX; - - /*! 
******************************************************************************* - @Function _PrintDevicememHistoryQueryResult - - @Description - - Print details of a single result from a DevicememHistory query - - @Input pfnDumpDebugPrintf - Debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psFaultProcessInfo - The process info derived from the page fault - @Input psResult - The DevicememHistory result to be printed - @Input ui32Index - The index of the result - - @Return void - -******************************************************************************/ -static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - RGXMEM_PROCESS_INFO *psFaultProcessInfo, - DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, - IMG_UINT32 ui32Index, - const IMG_CHAR* pszIndent) -{ - IMG_UINT32 ui32Remainder; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; - - ConvertOSTimestampToSAndNS(psResult->ui64When, - &ui64Seconds, - &ui64Nanoseconds); - - if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) - { - PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC - " Size: " IMG_DEVMEM_SIZE_FMTSPEC - " Operation: %s Modified: %" IMG_UINT64_FMTSPEC - " us ago (OS time %" IMG_UINT64_FMTSPEC - ".%09" IMG_UINT64_FMTSPEC " s)", - pszIndent, - ui32Index, - psResult->szString, - psResult->sBaseDevVAddr.uiAddr, - psResult->uiSize, - psResult->bMap ? "Map": "Unmap", - OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), - ui64Seconds, - ui64Nanoseconds); - } - else - { - PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC - " Size: " IMG_DEVMEM_SIZE_FMTSPEC - " Operation: %s Modified: %" IMG_UINT64_FMTSPEC - " us ago (OS time %" IMG_UINT64_FMTSPEC - ".%09" IMG_UINT64_FMTSPEC - ") PID: %u (%s)", - pszIndent, - ui32Index, - psResult->szString, - psResult->sBaseDevVAddr.uiAddr, - psResult->uiSize, - psResult->bMap ? "Map": "Unmap", - OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), - ui64Seconds, - ui64Nanoseconds, - psResult->sProcessInfo.uiPID, - psResult->sProcessInfo.szProcessName); - } - - if (!psResult->bRange) - { - PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); - } - else - { - PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", - pszIndent, - psResult->ui32StartPage, - psResult->ui32StartPage + psResult->ui32PageCount - 1, - psResult->sMapStartAddr.uiAddr, - psResult->sMapEndAddr.uiAddr, - psResult->bAll ? "(whole allocation) " : "", - psResult->bMap ? "mapped": "unmapped"); - } -} - -/*! 
-******************************************************************************* - - @Function _PrintDevicememHistoryQueryOut - - @Description - - Print details of all the results from a DevicememHistory query - - @Input pfnDumpDebugPrintf - Debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psFaultProcessInfo - The process info derived from the page fault - @Input psQueryOut - Storage for the query results - - @Return void - -******************************************************************************/ -static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - RGXMEM_PROCESS_INFO *psFaultProcessInfo, - DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, - const IMG_CHAR* pszIndent) -{ - IMG_UINT32 i; - - if (psQueryOut->ui32NumResults == 0) - { - PVR_DUMPDEBUG_LOG("%s No results", pszIndent); - } - else - { - for (i = 0; i < psQueryOut->ui32NumResults; i++) - { - _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, - psFaultProcessInfo, - &psQueryOut->sResults[i], - i, - pszIndent); - } - } -} - -/* table of HW page size values and the equivalent */ -static const unsigned int aui32HWPageSizeTable[][2] = -{ - { 0, PVRSRV_4K_PAGE_SIZE }, - { 1, PVRSRV_16K_PAGE_SIZE }, - { 2, PVRSRV_64K_PAGE_SIZE }, - { 3, PVRSRV_256K_PAGE_SIZE }, - { 4, PVRSRV_1M_PAGE_SIZE }, - { 5, PVRSRV_2M_PAGE_SIZE } -}; - -/*! -******************************************************************************* - - @Function _PageSizeHWToBytes - - @Description - - Convert a HW page size value to its size in bytes - - @Input ui32PageSizeHW - The HW page size value - - @Return IMG_UINT32 The page size in bytes - -******************************************************************************/ -static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) -{ - if (ui32PageSizeHW > 5) - { - /* This is invalid, so return a default value as we cannot ASSERT in this code! */ - return PVRSRV_4K_PAGE_SIZE; - } - - return aui32HWPageSizeTable[ui32PageSizeHW][1]; -} - -/*! -******************************************************************************* - - @Function _GetDevicememHistoryData - - @Description - - Get the DevicememHistory results for the given PID and faulting device virtual address. - The function will query DevicememHistory for information about the faulting page, as well - as the page before and after. 
- - @Input psDeviceNode - The device which this allocation search should be made on - @Input uiPID - The process ID to search for allocations belonging to - @Input sFaultDevVAddr - The device address to search for allocations at/before/after - @Input asQueryOut - Storage for the query results - @Input ui32PageSizeBytes - Faulted page size in bytes - - @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault - -******************************************************************************/ -static IMG_BOOL _GetDevicememHistoryData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PID uiPID, - IMG_DEV_VIRTADDR sFaultDevVAddr, - DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], - IMG_UINT32 ui32PageSizeBytes) -{ - DEVICEMEM_HISTORY_QUERY_IN sQueryIn; - IMG_BOOL bAnyHits = IMG_FALSE; - - /* if the page fault originated in the firmware then the allocation may - * appear to belong to any PID, because FW allocations are attributed - * to the client process creating the allocation, so instruct the - * devicemem_history query to search all available PIDs - */ - if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) - { - sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; - } - else - { - sQueryIn.uiPID = uiPID; - } - - sQueryIn.psDevNode = psDeviceNode; - /* Query the DevicememHistory for all allocations in the previous page... */ - sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes; - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING], - ui32PageSizeBytes, IMG_TRUE)) - { - bAnyHits = IMG_TRUE; - } - - /* Query the DevicememHistory for any record at the exact address... */ - sQueryIn.sDevVAddr = sFaultDevVAddr; - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], - ui32PageSizeBytes, IMG_FALSE)) - { - bAnyHits = IMG_TRUE; - } - else - { - /* If not matched then try matching any record in the faulting page... */ - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], - ui32PageSizeBytes, IMG_TRUE)) - { - bAnyHits = IMG_TRUE; - } - } - - /* Query the DevicememHistory for all allocations in the next page... */ - sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; - if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT], - ui32PageSizeBytes, IMG_TRUE)) - { - bAnyHits = IMG_TRUE; - } - - return bAnyHits; -} - -/* stored data about one page fault */ -typedef struct _FAULT_INFO_ -{ - /* the process info of the memory context that page faulted */ - RGXMEM_PROCESS_INFO sProcessInfo; - IMG_DEV_VIRTADDR sFaultDevVAddr; - MMU_FAULT_DATA sMMUFaultData; - DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; - /* the CR timer value at the time of the fault, recorded by the FW. - * used to differentiate different page faults - */ - IMG_UINT64 ui64CRTimer; - /* time when this FAULT_INFO entry was added. used for timing - * reference against the map/unmap information - */ - IMG_UINT64 ui64When; - IMG_UINT32 ui32FaultInfoFlags; -} FAULT_INFO; - -/* history list of page faults. 
- * Keeps the first `n` page faults and the last `n` page faults, like the FW - * HWR log - */ -typedef struct _FAULT_INFO_LOG_ -{ - IMG_UINT32 ui32Head; - /* the number of faults in this log need not correspond exactly to - * the HWINFO number of the FW, as the FW HWINFO log may contain - * non-page fault HWRs - */ - FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; -} FAULT_INFO_LOG; - -#define FAULT_INFO_PROC_INFO (0x1U) -#define FAULT_INFO_DEVMEM_HIST (0x2U) - -static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; - -static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, - FAULT_INFO *psInfo, - RGXMEM_PROCESS_INFO *psProcInfo) -{ - IMG_UINT32 i, j; - - for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) - { - for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) - { - IMG_BOOL bFound; - - RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; - bFound = RGXPCPIDToProcessInfo(psDevInfo, - psProcInfo->uiPID, - psProcInfo); - if (!bFound) - { - OSStringLCopy(psProcInfo->szProcessName, - "(unknown)", - sizeof(psProcInfo->szProcessName)); - } - } - } -} - -/*! -******************************************************************************* - - @Function _PrintFaultInfo - - @Description - - Print all the details of a page fault from a FAULT_INFO structure - - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psInfo - The page fault occurrence to print - - @Return void - -******************************************************************************/ -static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - FAULT_INFO *psInfo, - const IMG_CHAR* pszIndent) -{ - IMG_UINT32 i; - IMG_UINT64 ui64Seconds, ui64Nanoseconds; - - ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); - - if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) - { - IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
- 0 : psInfo->sProcessInfo.uiPID; - - PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC - ", CRTimer: 0x%016" IMG_UINT64_FMTSPECX - ", PID: %u (%s, unregistered: %u) OS time: " - "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, - pszIndent, - psInfo->sFaultDevVAddr.uiAddr, - psInfo->ui64CRTimer, - uiPID, - psInfo->sProcessInfo.szProcessName, - psInfo->sProcessInfo.bUnregistered, - ui64Seconds, - ui64Nanoseconds); - } - else - { - PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); - } - - if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) - { - for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) - { - const IMG_CHAR *pszWhich = NULL; - - switch (i) - { - case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: - pszWhich = "Preceding page"; - break; - case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: - pszWhich = "Faulted page"; - break; - case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: - pszWhich = "Next page"; - break; - } - - PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); - _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, - &psInfo->sProcessInfo, - &psInfo->asQueryOut[i], - pszIndent); - } - } - else - { - PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); - } -} - -static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, - FAULT_INFO *psInfo, - IMG_DEV_VIRTADDR sFaultDevVAddr, - IMG_DEV_PHYADDR sPCDevPAddr, - IMG_UINT64 ui64CRTimer, - IMG_UINT32 ui32PageSizeBytes) -{ - IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; - RGXMEM_PROCESS_INFO sProcessInfo; - - psInfo->ui32FaultInfoFlags = 0; - psInfo->sFaultDevVAddr = sFaultDevVAddr; - psInfo->ui64CRTimer = ui64CRTimer; - psInfo->ui64When = OSClockns64(); - - if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) - { - /* Check if this is PM fault */ - if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) - { - bIsPMFault = IMG_TRUE; - bFound = IMG_TRUE; - sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; - OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); - sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; - sProcessInfo.bUnregistered = IMG_FALSE; - } - else - { - /* look up the process details for the faulting page catalogue */ - bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); - } - - if (bFound) - { - IMG_BOOL bHits; - - psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; - psInfo->sProcessInfo = sProcessInfo; - - if (bIsPMFault) - { - bHits = IMG_TRUE; - } - else - { - /* get any DevicememHistory data for the faulting address */ - bHits = _GetDevicememHistoryData(psDevInfo->psDeviceNode, - sProcessInfo.uiPID, - sFaultDevVAddr, - psInfo->asQueryOut, - ui32PageSizeBytes); - - if (bHits) - { - psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; - - /* if the page fault was caused by the firmware then get information about - * which client application created the related allocations. - * - * Fill in the process info data for each query result. - */ - - if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) - { - _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); - } - } - } - } - } -} - -/*! -******************************************************************************* - - @Function _DumpFaultAddressHostView - - @Description - - Dump FW HWR fault status in human readable form. 
- - @Input ui32Index - Index of global Fault info - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Return void - -******************************************************************************/ -static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - const IMG_CHAR* pszIndent) -{ - MMU_LEVEL eTopLevel; - const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" }; - const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" }; - - eTopLevel = psFaultData->eTopLevel; - - if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN) - { - PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent); - return; - } - else if (psFaultData->eType == MMU_FAULT_TYPE_PM) - { - PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address); - } - else - { - MMU_LEVEL eCurrLevel; - PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST); - - for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--) - { - MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel]; - if (psMMULevelData->ui64Address) - { - if (psMMULevelData->uiBytesPerEntry == 4) - { - PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s", - pszIndent, - szPageLevel[eCurrLevel], - psMMULevelData->ui32Index, - (IMG_UINT) psMMULevelData->ui64Address, - psMMULevelData->psDebugStr); - } - else - { - PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s", - pszIndent, - szPageLevel[eCurrLevel], - psMMULevelData->ui32Index, - psMMULevelData->ui64Address, - psMMULevelData->psDebugStr); - } - } - else - { - PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)", - pszIndent, - szPageError[eCurrLevel], - psMMULevelData->ui32Index, - psMMULevelData->ui32NumOfEntries); - break; - } - } - } - -} - -/*! -******************************************************************************* - - @Function _RGXDumpRGXMMUFaultStatus + @Function _RGXDumpRGXMMUFaultStatus @Description @@ -1412,17 +476,15 @@ static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x3; IMG_UINT32 ui32BIFModule; IMG_BOOL bWriteBack, bFBMFault; - IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; - IMG_CHAR *pszTagID = NULL; - IMG_CHAR *pszTagSB = NULL; + IMG_CHAR *pszRequester = NULL; const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? 
"META" : "RISCV"; if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) { - ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT; - bWriteBack = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN) != 0; - bFBMFault = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN) != 0; + ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__BIF_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__BIF_ID_SHIFT; + bWriteBack = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__WRITEBACK_EN) != 0; + bFBMFault = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__AXT_INFRA__FBM_FAULT_EN) != 0; } else { @@ -1451,19 +513,18 @@ static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } else { - _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32BIFModule, bRead, bWriteBack, bFBMFault, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE); + _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32BIFModule, bWriteBack, bFBMFault, &pszRequester); } PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore); - PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECX " | 0x%08" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s(%s)%s%s%s%s.", + PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECX " | 0x%08" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s%s%s%s%s.", pszIndent, aui64MMUStatus[0], aui64MMUStatus[1], ui32PC, (bRead)?"Reading from":"Writing to", ui64Addr, - (pszTagID)? pszTagID : pszMetaOrRiscv, - (pszTagSB)? pszTagSB : "-", + (pszRequester) ? pszRequester : pszMetaOrRiscv, (bFault)?", Fault":"", (bROFault)?", Read Only fault":"", (bProtFault)?", PM/FW core protection fault":"", @@ -1498,129 +559,34 @@ static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_ME "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -static const IMG_FLAGS2DESC asCswOpts2Description[] = +#if !defined(NO_HARDWARE) +static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause) { - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, - {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, - {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, -}; + switch (ui32Mcause) + { +#define X(value, fatal, description) \ + case value: \ + if (fatal) \ + return description; \ + return NULL; -static const IMG_FLAGS2DESC asMisc2Description[] = -{ - {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, - {RGXFWIF_INICFG_SPU_CLOCK_GATE, " SPU Clock Gating (requires Power Rascal/Dust);"}, - {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, - {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"}, - {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, - {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, - {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, - {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, - {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, - {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, - {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, - {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, - {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, - {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate 
IRQ;"}, - {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, - {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, - {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, - {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, - {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"}, - {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, - {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, - {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, - {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"} -}; + RGXRISCVFW_MCAUSE_TABLE +#undef X + + default: + PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); + return NULL; + } +} +#endif /* !defined(NO_HARDWARE) */ -static const IMG_FLAGS2DESC asFwOsCfg2Description[] = + +static const IMG_FLAGS2DESC asHWErrorState[] = { - {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " GEOM;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, - {RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN, " RDM;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio GEOM;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, - {RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM, " LowPrio RDM;"}, + {RGX_HW_ERR_NA, "N/A"}, + {RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL, "Primitive ID failure during DM kill."}, }; -static const IMG_FLAGS2DESC asHwrState2Description[] = -{ - {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, - {RGXFWIF_HWR_RESET_IN_PROGRESS, " Reset ongoing;"}, - {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, - {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, - {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, - {RGXFWIF_HWR_FW_FAULT, " FW Fault;"}, - {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"}, -}; - -static const IMG_FLAGS2DESC asDmState2Description[] = -{ - {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, - {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, - {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, - {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, - {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, - {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, - {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, - {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, - {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, -}; - -static const IMG_FLAGS2DESC asHWErrorState[] = -{ - {RGX_HW_ERR_NA, "N/A"}, - {RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL, "Primitive ID failure during DM kill."}, -}; - -#if !defined(NO_HARDWARE) -static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause) -{ - switch (ui32Mcause) - { -#define X(value, fatal, description) \ - case value: \ - if (fatal) \ - return description; \ - return NULL; - - RGXRISCVFW_MCAUSE_TABLE -#undef X - - default: - PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); - return NULL; - } -} -#endif // !defined(NO_HARDWARE) - -/* - Appends flags strings to a null-terminated string buffer - each flag - description string starts with a space. 
-*/ -static void _Flags2Description(IMG_CHAR *psDesc, - IMG_UINT32 ui32DescSize, - const IMG_FLAGS2DESC *psConvTable, - IMG_UINT32 ui32TableSize, - IMG_UINT32 ui32Flags) -{ - IMG_UINT32 ui32Idx; - - for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) - { - if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag) - { - OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); - } - } -} - /* * Translate ID code to descriptive string. * Returns on the first match. @@ -1633,38 +599,12 @@ static void _ID2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG { if (ui32ID == psConvTable[ui32Idx].uiFlag) { - OSStringLCopy(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + OSStringSafeCopy(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); return; } } } -/* - Writes flags strings to an uninitialised buffer. -*/ -static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) -{ - const IMG_CHAR szCswLabel[] = "Ctx switch options:"; - size_t uLabelLen = sizeof(szCswLabel) - 1; - const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; - - OSStringLCopy(psDesc, szCswLabel, ui32DescSize); - - _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags); - _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags); -} - -static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) -{ - const IMG_CHAR szCswLabel[] = "Ctx switch:"; - size_t uLabelLen = sizeof(szCswLabel) - 1; - const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; - - OSStringLCopy(psDesc, szCswLabel, ui32DescSize); - - _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags); -} - /*! 
******************************************************************************* @@ -1694,6 +634,7 @@ static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, for (i = 0; i < RGXFW_THREAD_NUM; i++) { + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf, INVALIDATE); pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; @@ -1701,8 +642,9 @@ static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, /* print non-null assert strings */ if (*pszTraceAssertInfo) { - PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)", - i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine); + PVR_DUMPDEBUG_LOG("FW-T%d Assert: %.*s (%.*s:%d)", + i, RGXFW_TRACE_BUFFER_ASSERT_SIZE, pszTraceAssertInfo, + RGXFW_TRACE_BUFFER_ASSERT_SIZE, pszTraceAssertPath, ui32TraceAssertLine); } } } @@ -1745,14 +687,14 @@ static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, IMG_UINT64 ui64Seconds, ui64Nanoseconds; /* Split OS timestamp in seconds and nanoseconds */ - ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + RGXConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); - PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)", - ui32Index+1, psFaultInfo->sFaultBuf.szInfo, - psFaultInfo->sFaultBuf.szPath, + PVR_DUMPDEBUG_LOG("FW Fault %d: %.*s (%.*s:%d)", + ui32Index+1, RGXFW_TRACE_BUFFER_ASSERT_SIZE, psFaultInfo->sFaultBuf.szInfo, + RGXFW_TRACE_BUFFER_ASSERT_SIZE, psFaultInfo->sFaultBuf.szPath, psFaultInfo->sFaultBuf.ui32LineNum); - PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, - psFaultInfo->ui32Data, + PVR_DUMPDEBUG_LOG(" Data = 0x%016"IMG_UINT64_FMTSPECx", CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + psFaultInfo->ui64Data, psFaultInfo->ui64CRTimer, ui64Seconds, ui64Nanoseconds); } @@ -1764,6 +706,7 @@ static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, const RGXFWIF_SYSDATA *psFwSysData) { IMG_UINT32 i; + for (i = 0; i < RGXFW_THREAD_NUM; i++) { if (psFwSysData->aui32CrPollAddr[i]) @@ -1797,6 +740,11 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, IMG_UINT32 ui32HWRRecoveryFlags; IMG_UINT32 ui32ReadIndex; + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) + { + apszDmNames[RGXFWIF_DM_TDM] = "2D"; + } + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || @@ -1807,15 +755,15 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } } - if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) { /* No HWR situation, print nothing */ return; } - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - IMG_BOOL bAnyHWROccured = IMG_FALSE; + IMG_BOOL bAnyHWROccurred = IMG_FALSE; for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { @@ -1823,24 +771,24 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) { - 
bAnyHWROccured = IMG_TRUE; + bAnyHWROccurred = IMG_TRUE; break; } } - if (!bAnyHWROccured) + if (!bAnyHWROccurred) { return; } } +/* <DM name + left parenthesis> + <UINT32 max num of digits> + <slash> + <UINT32 max num of digits> + <plus> + <UINT32 max num of digits> + <right parenthesis + comma + space> */ +#define FWHWRINFO_DM_STR_SIZE (5U + 10U + 1U + 10U + 1U + 10U + 3U) + ui32LineSize = sizeof(IMG_CHAR) * ( ui32MsgHeaderCharCount + - (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ + - 10/*UINT32 max num of digits*/ + - 1/*slash*/ + - 10/*UINT32 max num of digits*/ + - 3/*right parenthesis + comma + space*/)) + + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount * FWHWRINFO_DM_STR_SIZE) + ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ ); @@ -1855,14 +803,13 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, return; } - OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); + OSStringSafeCopy(pszLine, szMsgHeader, ui32LineSize); pszTemp = pszLine + ui32MsgHeaderCharCount; for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) { pszTemp += OSSNPrintf(pszTemp, - 4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 - /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */, + FWHWRINFO_DM_STR_SIZE, "%s(%u/%u+%u), ", apszDmNames[dm], psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], @@ -1894,7 +841,7 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } else { - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm]; IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; @@ -1902,15 +849,16 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING) { - OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); + OSStringSafeCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); } else { - _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, + DebugCommonFlagStrings(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, asDmState2Description, ARRAY_SIZE(asDmState2Description), ui32HWRRecoveryFlags); } - PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, ui32HWRRecoveryFlags, sPerDmHwrDescription); + PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%.*s)", dm, ui32HWRRecoveryFlags, + RGX_DEBUG_STR_SIZE, sPerDmHwrDescription); } else { @@ -1927,6 +875,12 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; + if (ui32ReadIndex >= RGXFWIF_HWINFO_MAX) + { + PVR_DUMPDEBUG_LOG("HWINFO index error: %u", ui32ReadIndex); + break; + } + if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) { IMG_CHAR aui8RecoveryNum[10+10+1]; IMG_UINT64 ui64Seconds, ui64Nanoseconds; IMG_DEV_VIRTADDR sFaultDevVAddr; /* Split OS timestamp in seconds and nanoseconds */ - ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + RGXConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } @@ -1944,15 +898,18 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } else if
(ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_PARITY_HWR) { pszLockupType = ", GPU PARITY HWR"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_LATENT_HWR) { pszLockupType = ", GPU LATENT HWR"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_ICS_HWR) { pszLockupType = ", ICS HWR"; } OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) { - PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %.*s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", aui8RecoveryNum, psHWRInfo->ui32CoreID, psHWRInfo->ui32PID, - psHWRInfo->szProcName, + RGXFW_PROCESS_NAME_LEN, psHWRInfo->szProcName, psHWRInfo->ui32FrameNum, psHWRInfo->ui32ActiveHWRTData, psHWRInfo->ui32EventStatus, @@ -1960,10 +917,10 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } else { - PVR_DUMPDEBUG_LOG(" %s PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + PVR_DUMPDEBUG_LOG(" %s PID = %u / %.*s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", aui8RecoveryNum, psHWRInfo->ui32PID, - psHWRInfo->szProcName, + RGXFW_PROCESS_NAME_LEN, psHWRInfo->szProcName, psHWRInfo->ui32FrameNum, psHWRInfo->ui32ActiveHWRTData, psHWRInfo->ui32EventStatus, @@ -1987,7 +944,7 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, aui8RecoveryNum, @@ -2037,15 +994,19 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } } +#if defined(HW_ERN_65104_BIT_MASK) if (RGX_IS_ERN_SUPPORTED(psDevInfo, 65104)) { PVR_DUMPDEBUG_LOG(" Active PDS DM USCs = 0x%08x", psHWRInfo->ui32PDSActiveDMUSCs); } +#endif +#if defined(HW_ERN_69700_BIT_MASK) if (RGX_IS_ERN_SUPPORTED(psDevInfo, 69700)) { PVR_DUMPDEBUG_LOG(" DMs stalled waiting on PDS Store space = 0x%08x", psHWRInfo->ui32PDSStalledDMs); } +#endif switch (psHWRInfo->eHWRType) { @@ -2057,42 +1018,44 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, case RGX_HWRTYPE_MMUFAULT: { - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, - &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], - "Core", - DD_NORMAL_INDENT); - - bPageFault = IMG_TRUE; - sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; - sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; - sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; - sFaultDevVAddr.uiAddr <<= 4; /* align shift */ - ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >> - RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT; -#if defined(SUPPORT_TRUSTED_DEVICE) - ui32PC = ui32PC - 1; -#endif - bPMFault = (ui32PC <= 8); - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + 
_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], + "Core", + DD_NORMAL_INDENT); + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT; + bPMFault = (ui32PC <= 8); + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + } } break; case RGX_HWRTYPE_MMUMETAFAULT: { - const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "Meta" : "RiscV"; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "Meta" : "RiscV"; - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], pszMetaOrRiscv, DD_NORMAL_INDENT); - bPageFault = IMG_TRUE; - sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; - sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; - sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; - sFaultDevVAddr.uiAddr <<= 4; /* align shift */ - sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + } } break; @@ -2107,6 +1070,33 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } break; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + case RGX_HWRTYPE_MIPSTLBFAULT: + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo; + + /* This is not exactly what the MMU code does, but the result should be the same */ + const IMG_UINT32 ui32UnmappedEntry = + ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED; + + PVR_DUMPDEBUG_LOG(" MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X" + " (page PA 0x%" IMG_UINT64_FMTSPECx", V %u)", + psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr, + ui32EntryLo, + RGXMIPSFW_TLB_GET_PA(ui32EntryLo), + ui32EntryLo & RGXMIPSFW_TLB_VALID ? 
1 : 0); + + if (ui32EntryLo == ui32UnmappedEntry) + { + PVR_DUMPDEBUG_LOG(" Potential use-after-free detected"); + } + } + } + break; +#endif + case RGX_HWRTYPE_OVERRUN: case RGX_HWRTYPE_UNKNOWNFAILURE: { @@ -2123,48 +1113,8 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, if (bPageFault) { - - FAULT_INFO *psInfo; - - OSLockAcquire(psDevInfo->hDebugFaultInfoLock); - - /* Find the matching Fault Info for this HWRInfo */ - psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; - - /* if they do not match, we need to update the psInfo */ - if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || - (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr)) - { - MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; - - psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; - - if (bPMFault) - { - /* PM fault and we dump PC details only */ - psFaultData->eTopLevel = MMU_LEVEL_0; - psFaultData->eType = MMU_FAULT_TYPE_PM; - psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr; - } - else - { - RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData); - } - - _RecordFaultInfo(psDevInfo, psInfo, - sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer, - _PageSizeHWToBytes(ui32PageSize)); - - } - - _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); - - if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) - { - _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); - } - - OSLockRelease(psDevInfo->hDebugFaultInfoLock); + RGXDumpFaultInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psHWRInfo, + ui32ReadIndex, &sFaultDevVAddr, &sPCDevPAddr, bPMFault, ui32PageSize); } } @@ -2177,6 +1127,9 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } } + + + #if !defined(NO_HARDWARE) /*! @@ -2196,9 +1149,24 @@ static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ******************************************************************************/ static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) { +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /* MMU4 doesn't support pending pages */ return (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) < 4) && (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY) & RGX_CR_MMU_ENTRY_PENDING_EN); +#else + IMG_UINT32 ui32BIFMMUEntry; + + ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY); + + if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +#endif } /*! @@ -2237,29 +1205,47 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bRGXPoweredON) { - IMG_CHAR *pszState, *pszReason; const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; /* space for the current clock speed and 3 previous */ RGXFWIF_TIME_CORR asTimeCorrs[4]; IMG_UINT32 ui32NumClockSpeedChanges; + /* Should invalidate all reads below including when passed to functions. 
*/ + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfRuntimeCfg, INVALIDATE); + #if defined(NO_HARDWARE) PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); #else - if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - IMG_UINT64 aui64RegValMMUStatus[2]; - const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "Meta" : "RiscV"; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + IMG_UINT64 aui64RegValMMUStatus[2]; + const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "Meta" : "RiscV"; - aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS1); - aui64RegValMMUStatus[1] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS2); - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], "Core", DD_SUMMARY_INDENT); + /* + * BRN72144 prevents reading RGX_CR_MMU_FAULT_STATUS1/2 + * registers while the FW is running... + */ + if (!RGX_IS_BRN_SUPPORTED(psDevInfo, 72144) || + (psRGXFWIfTraceBufCtl->sTraceBuf[0].sAssertBuf.szInfo[0] != 0)) + { + aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS1); + aui64RegValMMUStatus[1] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS2); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], "Core", DD_SUMMARY_INDENT); + } + else + { + PVR_DUMPDEBUG_LOG("%sMMU (Core) - Unknown", DD_SUMMARY_INDENT); + } - aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); - _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], pszMetaOrRiscv, DD_SUMMARY_INDENT); + aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], pszMetaOrRiscv, DD_SUMMARY_INDENT); + } if (_CheckForPendingPage(psDevInfo)) { @@ -2276,15 +1262,17 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } else { - IMG_UINT64 ui64CBaseMapping; IMG_DEV_PHYADDR sPCDevPAddr; MMU_FAULT_DATA sFaultData; - IMG_BOOL bIsValid; + IMG_BOOL bIsValid = IMG_TRUE; + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + IMG_UINT64 ui64CBaseMapping; IMG_UINT32 ui32CBaseMapCtxReg; if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) { - ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4; + ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase); @@ -2306,49 +1294,27 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT); bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN); } +#else + sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase)); +#endif PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC - " on cat base %u. PC Addr = 0x%llX is %s", - (unsigned long long) sDevVAddr.uiAddr, + " on cat base %u. 
PC Addr = 0x%" IMG_UINT64_FMTSPECx " is %s", + sDevVAddr.uiAddr, ui32CatBase, - (unsigned long long) sPCDevPAddr.uiAddr, + sPCDevPAddr.uiAddr, bIsValid ? "valid":"invalid"); RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); - _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); + RGXDumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); } } } #endif /* NO_HARDWARE */ - /* Firmware state */ - switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)) - { - case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break; - case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break; - default: pszState = "UNKNOWN"; break; - } - - switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason)) - { - case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; - case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break; - case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break; - case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break; - case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break; - case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break; - case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break; - case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break; - case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break; - default: pszReason = " - Unknown reason"; break; - } - #if !defined(NO_HARDWARE) /* Determine the type virtualisation support used */ -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - if (!PVRSRV_VZ_MODE_IS(NATIVE)) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) { #if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) #if defined(SUPPORT_AUTOVZ) @@ -2364,13 +1330,19 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); #endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) - if (!PVRSRV_VZ_MODE_IS(NATIVE)) +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) { - RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo); - RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo); + RGXFWIF_CONNECTION_FW_STATE eFwState; + RGXFWIF_CONNECTION_OS_STATE eOsState; + + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + + eFwState = KM_GET_FW_CONNECTION(psDevInfo); + eOsState = KM_GET_OS_CONNECTION(psDevInfo); PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)", ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? 
("UP") : ("DOWN"), @@ -2380,11 +1352,17 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } #endif -#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - if (!PVRSRV_VZ_MODE_IS(NATIVE)) +#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) { - IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); - IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); + IMG_UINT32 ui32FwAliveTS; + IMG_UINT32 ui32OsAliveTS; + + KM_ALIVE_TOKEN_CACHEOP(Fw, INVALIDATE); + KM_ALIVE_TOKEN_CACHEOP(Os, INVALIDATE); + + ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); + ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); @@ -2392,38 +1370,32 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, #endif #endif /* !defined(NO_HARDWARE) */ - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; - IMG_BOOL bOsIsolationEnabled = IMG_FALSE; - - if (psFwSysData == NULL) - { - /* can't dump any more information */ - PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); - return; - } + IMG_BOOL bDriverIsolationEnabled = IMG_FALSE; + IMG_UINT32 ui32HostIsolationGroup; sHwrStateDescription[0] = '\0'; - _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE, + DebugCommonFlagStrings(sHwrStateDescription, RGX_DEBUG_STR_SIZE, asHwrState2Description, ARRAY_SIZE(asHwrState2Description), psFwSysData->ui32HWRStateFlags); - PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); + PVR_DUMPDEBUG_LOG("RGX HWR State 0x%08x:%s", psFwSysData->ui32HWRStateFlags, sHwrStateDescription); PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)", - pszPowStateName[psFwSysData->ePowState], - (psDevInfo->pvAPMISRData)?"enabled":"disabled", - psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, - psDevInfo->ui32ActivePMReqDenied, - psDevInfo->ui32ActivePMReqNonIdle, - psDevInfo->ui32ActivePMReqRetry, - psDevInfo->ui32ActivePMReqTotal - - psDevInfo->ui32ActivePMReqOk - - psDevInfo->ui32ActivePMReqDenied - - psDevInfo->ui32ActivePMReqRetry - - psDevInfo->ui32ActivePMReqNonIdle, - psDevInfo->ui32ActivePMReqTotal, - psRuntimeCfg->ui32ActivePMLatencyms); + (psFwSysData->ePowState < ARRAY_SIZE(pszPowStateName) ? 
pszPowStateName[psFwSysData->ePowState] : "???"), + (psDevInfo->pvAPMISRData)?"enabled":"disabled", + psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqDenied, + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqRetry, + psDevInfo->ui32ActivePMReqTotal - + psDevInfo->ui32ActivePMReqOk - + psDevInfo->ui32ActivePMReqDenied - + psDevInfo->ui32ActivePMReqRetry - + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqTotal, + psRuntimeCfg->ui32ActivePMLatencyms); ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); @@ -2452,29 +1424,48 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, asTimeCorrs[3].ui64OSTimeStamp); } - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + ui32HostIsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID]; + + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID]; + IMG_UINT32 ui32IsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]; IMG_BOOL bMTSEnabled = IMG_FALSE; #if !defined(NO_HARDWARE) if (bRGXPoweredON) { - bMTSEnabled = (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? IMG_TRUE : - ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0); + bMTSEnabled = ( +#if defined(FIX_HW_BRN_64502_BIT_MASK) + RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || +#endif + !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? + IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32DriverID)) != 0); } #endif - - PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid, + PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %u; Isolation group: %u; Time Slice%s: %u%% (%ums); %s", ui32DriverID, apszFwOsStateName[sFwRunFlags.bfOsState], (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", - psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid], - (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "", + psDevInfo->psRGXFWIfRuntimeCfg->ai32DriverPriority[ui32DriverID], + ui32IsolationGroup, + (psDevInfo->psRGXFWIfRuntimeCfg->aui32TSPercentage[ui32DriverID] != 0) ? "" : "*", + psFwSysData->aui32TSMirror[ui32DriverID], + (psFwSysData->aui32TSMirror[ui32DriverID] * + psDevInfo->psRGXFWIfRuntimeCfg->ui32TSIntervalMs / 100), (bMTSEnabled) ? 
"MTS on;" : "MTS off;" ); - bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS; + if (ui32IsolationGroup != ui32HostIsolationGroup) + { + bDriverIsolationEnabled = IMG_TRUE; + } + + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) + { + /* don't print guest information on native mode drivers */ + break; + } } #if defined(PVR_ENABLE_PHR) @@ -2482,15 +1473,15 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; sPHRConfigDescription[0] = '\0'; - _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, + DebugCommonFlagStrings(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode)); - PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, sPHRConfigDescription); + PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %.*s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, RGX_DEBUG_STR_SIZE, sPHRConfigDescription); } #endif - if (bOsIsolationEnabled) + if (bDriverIsolationEnabled) { PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); } @@ -2505,6 +1496,7 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); } + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, INVALIDATE); _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); #if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) @@ -2539,613 +1531,146 @@ void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, } #if !defined(NO_HARDWARE) -static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) +PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) { -/* List of extra META Slave Port debug registers */ -/* Order in these two initialisers must match */ -#define RGX_META_SP_EXTRA_DEBUG \ - X(RGX_CR_META_SP_MSLVCTRL0) \ - X(RGX_CR_META_SP_MSLVCTRL1) \ - X(RGX_CR_META_SP_MSLVDATAX) \ - X(RGX_CR_META_SP_MSLVIRQSTATUS) \ - X(RGX_CR_META_SP_MSLVIRQENABLE) \ - X(RGX_CR_META_SP_MSLVIRQLEVEL) - -#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_V1_AND_METAREG_UNPACKED_ACCESSES \ - X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED) - -#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_METAREG_UNPACKED_ACCESSES \ - X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \ - X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) - - IMG_UINT32 ui32Idx; - IMG_UINT32 ui32RegVal; - IMG_UINT32 ui32RegAddr; - - const IMG_UINT32* pui32DebugRegAddr; - const IMG_UINT32 
aui32DebugRegAddr[] = { -#define X(A) A, - RGX_META_SP_EXTRA_DEBUG -#undef X - }; - const IMG_UINT32 aui32DebugRegAddrUAHSV1[] = { -#define X(A) A, - RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_V1_AND_METAREG_UNPACKED_ACCESSES -#undef X - }; - - const IMG_UINT32 aui32DebugRegAddrUAHSGT1[] = { -#define X(A) A, - RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_METAREG_UNPACKED_ACCESSES -#undef X - }; - - const IMG_CHAR* apszDebugRegName[] = { -#define X(A) #A, - RGX_META_SP_EXTRA_DEBUG -#undef X - }; + void __iomem *pvRegsBaseKM = psDevInfo->pvSecureRegsBaseKM; + RGXRISCVFW_STATE sRiscvState; + const IMG_CHAR *pszException; + PVRSRV_ERROR eError; - PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); + /* Limit dump to what is currently being used */ +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) + { + DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG4__HOST_SECURITY_GEQ4); + DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG5__HOST_SECURITY_GEQ4); + DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG6__HOST_SECURITY_GEQ4); + DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG12__HOST_SECURITY_GEQ4); + DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG13__HOST_SECURITY_GEQ4); + DDLOGUNCHECKED64(FWCORE_ADDR_REMAP_CONFIG14__HOST_SECURITY_GEQ4); + } + else +#endif + { + DDLOG64(FWCORE_ADDR_REMAP_CONFIG4); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG5); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG6); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG12); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG13); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG14); + } - /* array of register offset values depends on feature. But don't augment names in apszDebugRegName */ - PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSGT1) == sizeof(aui32DebugRegAddr)); - PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSV1) == sizeof(aui32DebugRegAddr)); - pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? - ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? (aui32DebugRegAddrUAHSGT1) : (aui32DebugRegAddrUAHSV1)) : aui32DebugRegAddr; + PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); - /* dump set of Slave Port debug registers */ - for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) +#if defined(SUPPORT_RISCV_GDB) + if (RGXRiscvIsHalted(psDevInfo)) { - const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; - - ui32RegAddr = pui32DebugRegAddr[ui32Idx]; - ui32RegVal = OSReadUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32RegAddr); - PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); + /* Avoid resuming the RISC-V FW as most operations + * on the debug module require a halted core */ + PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)"); + return PVRSRV_OK; } +#endif -} -#endif /* !defined(NO_HARDWARE) */ + eError = RGXRiscvHalt(psDevInfo); + PVR_GOTO_IF_ERROR(eError, _RISCVDMError); -/* - * Array of all the Firmware Trace log IDs used to convert the trace data. 
- */ -typedef struct _TRACEBUF_LOG_ { - RGXFW_LOG_SFids eSFId; - const IMG_CHAR *pszName; - const IMG_CHAR *pszFmt; - IMG_UINT32 ui32ArgNum; -} TRACEBUF_LOG; - -static const TRACEBUF_LOG aLogDefinitions[] = -{ -#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, - RGXFW_LOG_SFIDLIST +#define X(name, address) \ + eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ + PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ + DDLOGVAL32(#name, sRiscvState.name); + + RGXRISCVFW_DEBUG_DUMP_REGISTERS #undef X -}; -#define NARGS_MASK ~(0xF<<16) -static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; - IMG_BOOL bIntegrityOk = IMG_TRUE; + eError = RGXRiscvResume(psDevInfo); + PVR_GOTO_IF_ERROR(eError, _RISCVDMError); - /* - * For every log ID, check the format string and number of arguments is valid. - */ - while (psLogDef->eSFId != RGXFW_SF_LAST) + pszException = _GetRISCVException(sRiscvState.mcause); + if (pszException != NULL) { - const TRACEBUF_LOG *psLogDef2; - const IMG_CHAR *pszString; - IMG_UINT32 ui32Count; - - /* - * Check the number of arguments matches the number of '%' in the string and - * check that no string uses %s which is not supported as it requires a - * pointer to memory that is not going to be valid. - */ - pszString = psLogDef->pszFmt; - ui32Count = 0; - - while (*pszString != '\0') - { - if (*pszString++ == '%') - { - ui32Count++; - if (*pszString == 's') - { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", - psLogDef->pszName, *pszString); - } - else if (*pszString == '%') - { - /* Double % is a printable % sign and not a format string... */ - ui32Count--; - } - } - } - - if (ui32Count != psLogDef->ui32ArgNum) - { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", - psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); - } - - /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ - if (ui32Count > 20) - { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.", - psLogDef->pszName, ui32Count); - } - - /* Check the id number is unique (don't take into account the number of arguments) */ - ui32Count = 0; - psLogDef2 = &aLogDefinitions[0]; + PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); - while (psLogDef2->eSFId != RGXFW_SF_LAST) + eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) { - if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) - { - ui32Count++; - } - psLogDef2++; + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); } + } - if (ui32Count != 1) - { - bIntegrityOk = IMG_FALSE; - PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", - psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); - } + return PVRSRV_OK; - /* Move to the next log ID... 
*/ - psLogDef++; - } +_RISCVDMError: + PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); - return bIntegrityOk; + return eError; } -typedef struct { - IMG_UINT16 ui16Mask; - const IMG_CHAR *pszStr; -} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ +void RGXDumpCoreRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + DDLOG32(CORE_ID); +} -/*! -******************************************************************************* +void RGXDumpMulticoreRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - @Function RGXPrepareExtraDebugInfo + DDLOG64(MULTICORE); + DDLOG32(MULTICORE_SYSTEM); + DDLOG32(MULTICORE_DOMAIN); - @Description +#if !defined(RGX_CR_MULTICORE_AXI) +#define RGX_CR_MULTICORE_AXI (0x2508U) +#define RGX_CR_MULTICORE_AXI_ERROR (0x2510U) +#endif + DDLOG32(MULTICORE_AXI); + DDLOG32(MULTICORE_AXI_ERROR); + DDLOG32(MULTICORE_TDM_CTRL_COMMON); + DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); + DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); +} - Prepares debug info string by decoding ui16DebugInfo value passed - - @Input pszBuffer - pointer to debug info string buffer - - @Return void - -******************************************************************************/ -static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) -{ - const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = - { -#define X(a, b) {a, b}, - RGXFWT_DEBUG_INFO_MSKSTRLIST -#undef X - }; - - IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); - IMG_UINT32 i; - IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; - - /* Add prepend string */ - OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); - - /* Add debug info strings */ - for (i = 0; i < ui32NumFields; i++) - { - if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) - { - if (bHasExtraDebugInfo) - { - OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ - } - OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); - bHasExtraDebugInfo = IMG_TRUE; - } - } - - /* Add append string */ - OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); -} - -void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; - - /* Check that the firmware trace is correctly defined... */ - if (!bIntegrityCheckPassed) - { - bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); - if (!bIntegrityCheckPassed) - { - return; - } - } - - /* Dump FW trace information... */ - if (psRGXFWIfTraceBufCtl != NULL) - { - IMG_UINT32 tid; - IMG_UINT32 ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; - - /* Print the log type settings... 
*/ - if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) - { - PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", - ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), - RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) - ); - } - else - { - PVR_DUMPDEBUG_LOG("Debug log type: none"); - } - - /* Print the decoded log for each thread... */ - for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) - { - volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32WrapCount); - volatile IMG_UINT32 *pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); - IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; - IMG_UINT32 ui32HostWrapCount = *pui32FWWrapCount; - IMG_UINT32 ui32HostTracePtr = *pui32FWTracePtr; - IMG_UINT32 ui32Count = 0; - - if (pui32TraceBuf == NULL) - { - /* trace buffer not yet allocated */ - continue; - } - - while (ui32Count < ui32TraceBufSizeInDWords) - { - IMG_UINT32 ui32Data, ui32DataToId; - - /* Find the first valid log ID, skipping whitespace... */ - do - { - ui32Data = pui32TraceBuf[ui32HostTracePtr]; - ui32DataToId = idToStringID(ui32Data, SFs); - - /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */ - if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) - { - PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); - } - - /* Update the trace pointer... */ - ui32HostTracePtr++; - if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) - { - ui32HostTracePtr = 0; - ui32HostWrapCount++; - } - ui32Count++; - } while ((RGXFW_SF_LAST == ui32DataToId) && - ui32Count < ui32TraceBufSizeInDWords); - - if (ui32Count < ui32TraceBufSizeInDWords) - { - IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; - IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; - IMG_UINT64 ui64Timestamp; - IMG_UINT16 ui16DebugInfo; - - /* If we hit the ASSERT message then this is the end of the log... */ - if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) - { - PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u", - psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo, - psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath, - psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum); - break; - } - - ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | - (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]); - - ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); - ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; - - /* - * Print the trace string and provide up to 20 arguments which - * printf function will be able to use. We have already checked - * that no string uses more than this. 
- */ - OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); - - /* Check and append any extra debug info available */ - if (ui16DebugInfo) - { - /* Prepare debug info string */ - RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); - - /* Append debug info string */ - OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); - } - - PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)], - pui32TraceBuf[(ui32HostTracePtr + 2) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 3) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 4) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 5) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 6) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 7) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 8) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 9) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords], - pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]); - - /* Update the trace pointer... */ - ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data); - if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) - { - ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords; - ui32HostWrapCount++; - } - ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); - - /* Has the FW trace buffer overtaken the host pointer during the last line printed??? */ - if ((*pui32FWWrapCount > ui32HostWrapCount) || - ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr))) - { - /* Move forward to the oldest entry again... */ - PVR_DUMPDEBUG_LOG(". . ."); - ui32HostWrapCount = *pui32FWWrapCount; - ui32HostTracePtr = *pui32FWTracePtr; - } - } - } - } - } -} - -#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) -void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - - /* Print the power monitoring counters... 
*/ - if (psFwSysData != NULL) - { - const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer; - IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer; - IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords; - IMG_UINT32 ui32Count = 0; - IMG_UINT64 ui64Timestamp; - - if (pui32TraceBuf == NULL) - { - /* power monitoring buffer not yet allocated */ - return; - } - - if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER) - { - PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available.")); - return; - } - ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 | - (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]); - - /* Update the trace pointer... */ - ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords; - ui32Count = (ui32Count + 3); - - PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x", - pui32TraceBuf, - ui32TracePtr, - ui32PowerMonBufSizeInDWords)); - - while (ui32Count < ui32PowerMonBufSizeInDWords) - { - /* power monitoring data is (register, value) dword pairs */ - PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x", - ui64Timestamp, - pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords], - pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords], - pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords], - pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]); - - if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID || - pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID) - { - /* end of buffer */ - break; - } - - /* Update the trace pointer... 
*/ - ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords; - ui32Count = (ui32Count + 4); - } - } -} -#endif - -static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) -{ - switch (eDevState) - { - case PVRSRV_DEVICE_STATE_INIT: - return "Initialising"; - case PVRSRV_DEVICE_STATE_ACTIVE: - return "Active"; - case PVRSRV_DEVICE_STATE_DEINIT: - return "De-initialising"; - case PVRSRV_DEVICE_STATE_BAD: - return "Bad"; - case PVRSRV_DEVICE_STATE_UNDEFINED: - PVR_ASSERT(!"Device has undefined state"); - __fallthrough; - default: - return "Unknown"; - } -} - -static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) -{ - switch (ePowerState) - { - case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; - case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; - case PVRSRV_DEV_POWER_STATE_ON: return "ON"; - default: return "UNKNOWN"; - } -} - -/* Helper macros to emit data */ -#define REG32_FMTSPEC "%-30s: 0x%08X" -#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX -#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); -#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); -#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); -#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); -#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); - -#if !defined(NO_HARDWARE) -static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo) +void RGXDumpClkRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) { void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - RGXRISCVFW_STATE sRiscvState; - const IMG_CHAR *pszException; - PVRSRV_ERROR eError; - - /* Limit dump to what is currently being used */ - DDLOG64(FWCORE_ADDR_REMAP_CONFIG4); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG5); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG6); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG12); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG13); - DDLOG64(FWCORE_ADDR_REMAP_CONFIG14); - - PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); - -#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) - if (RGXRiscvIsHalted(psDevInfo)) - { - /* Avoid resuming the RISC-V FW as most operations - * on the debug module require a halted core */ - PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)"); - return PVRSRV_OK; - } -#endif - - eError = RGXRiscvHalt(psDevInfo); - PVR_GOTO_IF_ERROR(eError, _RISCVDMError); - -#define X(name, address) \ - eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ - PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ - DDLOGVAL32(#name, sRiscvState.name); - - RGXRISCVFW_DEBUG_DUMP_REGISTERS -#undef X - - eError = RGXRiscvResume(psDevInfo); - PVR_GOTO_IF_ERROR(eError, _RISCVDMError); - pszException = _GetRISCVException(sRiscvState.mcause); - if (pszException != NULL) - { - PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); - - eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); - } - } - - return PVRSRV_OK; - -_RISCVDMError: - PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); - - return eError; + DDLOG64(CLK_CTRL0); + DDLOG64(CLK_STATUS0); + DDLOG64(CLK_CTRL1); + 
DDLOG64(CLK_STATUS1); + DDLOG32(CLK_CTRL2); + DDLOG32(CLK_STATUS2); } -#endif -PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, +void RGXDumpMMURegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, PVRSRV_RGXDEV_INFO *psDevInfo) { -#if !defined(NO_HARDWARE) - IMG_UINT32 ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; - IMG_UINT32 ui32RegVal; - PVRSRV_ERROR eError; - IMG_BOOL bFirmwarePerf; -#endif - IMG_BOOL bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT); + RGX_LAYER_PARAMS sParams = {.psDevInfo = psDevInfo}; void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; -#if !defined(NO_HARDWARE) - /* Check if firmware perf was set at Init time */ - bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); -#endif - - DDLOG64(CORE_ID); - - if (bMulticore) + /* BRN72144 prevents reading RGX_CR_MMU_FAULT_STATUS1/2 while the FW is running... */ + if (!RGX_IS_BRN_SUPPORTED(psDevInfo, 72144) || + (psDevInfo->psRGXFWIfTraceBufCtl->sTraceBuf[0].sAssertBuf.szInfo[0] != 0)) { - DDLOG64(MULTICORE); - DDLOG32(MULTICORE_SYSTEM); - DDLOG32(MULTICORE_DOMAIN); + DDLOG64(MMU_FAULT_STATUS1); + DDLOG64(MMU_FAULT_STATUS2); } - DDLOG32(EVENT_STATUS); - DDLOG64(TIMER); - DDLOG64(CLK_CTRL0); - DDLOG64(CLK_STATUS0); - DDLOG64(CLK_CTRL1); - DDLOG64(CLK_STATUS1); - DDLOG64(MMU_FAULT_STATUS1); - DDLOG64(MMU_FAULT_STATUS2); DDLOG64(MMU_FAULT_STATUS_PM); DDLOG64(MMU_FAULT_STATUS_META); DDLOG64(SLC_STATUS1); DDLOG64(SLC_STATUS2); + DDLOG64(SLC_STATUS3); DDLOG64(SLC_STATUS_DEBUG); DDLOG64(MMU_STATUS); DDLOG32(BIF_PFS); @@ -3156,25 +1681,28 @@ PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, DDLOG32(BIF_TEXAS1_OUTSTANDING_READ); DDLOG32(FBCDC_IDLE); DDLOG32(FBCDC_STATUS); - DDLOG32(SPU_ENABLE); + DDLOG32(FBCDC_SIGNATURE_STATUS); - DDLOG64(CONTEXT_MAPPING0); - DDLOG64(CONTEXT_MAPPING2); - DDLOG64(CONTEXT_MAPPING3); - DDLOG64(CONTEXT_MAPPING4); + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, POWER_ISLAND_VERSION) && + RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) < 2) + { + DDLOG32(SPU_ENABLE); + } - if (bMulticore) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) { -#if !defined(RGX_CR_MULTICORE_AXI) -#define RGX_CR_MULTICORE_AXI (0x2508U) -#define RGX_CR_MULTICORE_AXI_ERROR (0x2510U) -#endif - DDLOG32(MULTICORE_AXI); - DDLOG32(MULTICORE_AXI_ERROR); - DDLOG32(MULTICORE_TDM_CTRL_COMMON); - DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); - DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); + DDLOG64(CONTEXT_MAPPING0); + DDLOG64(CONTEXT_MAPPING2); + DDLOG64(CONTEXT_MAPPING3); + DDLOG64(CONTEXT_MAPPING4); } +} + +void RGXDumpDMRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; DDLOG32(PERF_PHASE_2D); DDLOG32(PERF_CYCLE_2D_TOTAL); @@ -3191,23 +1719,32 @@ PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, DDLOG32(ISP_RENDER); DDLOG32(ISP_CTL); - DDLOG32(MTS_INTCTX); - DDLOG32(MTS_BGCTX); - DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); - DDLOG32(MTS_SCHEDULE); - DDLOG32(MTS_GPU_INT_STATUS); - - DDLOG32(CDM_CONTEXT_STORE_STATUS); + DDLOG32(CDM_CONTEXT_STORE_STATUS__CDM_CSF_LT5); DDLOG64(CDM_CONTEXT_PDS0); DDLOG64(CDM_CONTEXT_PDS1); DDLOG64(CDM_TERMINATE_PDS); DDLOG64(CDM_TERMINATE_PDS1); DDLOG64(CDM_CONTEXT_LOAD_PDS0); DDLOG64(CDM_CONTEXT_LOAD_PDS1); +} + +void RGXDumpSLCRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, 
+ void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - DDLOG32(JONES_IDLE); DDLOG32(SLC_IDLE); DDLOG32(SLC_FAULT_STOP_STATUS); +} + +void RGXDumpMiscRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + + DDLOG32(JONES_IDLE); DDLOG64(SCRATCH0); DDLOG64(SCRATCH1); @@ -3226,122 +1763,8 @@ PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, DDLOG64(SCRATCH14); DDLOG64(SCRATCH15); DDLOG32(IRQ_OS0_EVENT_STATUS); - -#if !defined(NO_HARDWARE) - if (ui32Meta) - { - IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; - IMG_UINT32 ui32MSlvIrqStatusReg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? - ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? - RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED : - RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED) : - RGX_CR_META_SP_MSLVIRQSTATUS; - - PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, "META_SP_MSLVIRQSTATUS", OSReadUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvIrqStatusReg)); - - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T0 TXENABLE", ui32RegVal); - if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) - { - bIsT0Enabled = IMG_TRUE; - } - - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T0 TXSTATUS", ui32RegVal); - - /* check for FW fault */ - if (((ui32RegVal >> 20) & 0x3) == 0x2) - { - bIsFWFaulted = IMG_TRUE; - } - - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T0 TXDEFR", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T0 PC", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T0 PCX", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T0 SP", ui32RegVal); - - if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) - { - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T1 TXENABLE", ui32RegVal); - - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T1 TXSTATUS", ui32RegVal); - - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("T1 TXDEFR", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T1 PC", ui32RegVal); - - eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T1 PCX", ui32RegVal); - - eError = 
RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); - DDLOGVAL32("T1 SP", ui32RegVal); - } - - if (bFirmwarePerf) - { - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("META_CR_PERF_COUNT0", ui32RegVal); - - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); - DDLOGVAL32("META_CR_PERF_COUNT1", ui32RegVal); - } - - if (bIsT0Enabled & bIsFWFaulted) - { - eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); - } - } - else if (bIsFWFaulted) - { - PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); - } - } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - PVR_RETURN_IF_ERROR(eError); - } -#endif - - return PVRSRV_OK; - -#if !defined(NO_HARDWARE) -_METASPError: - PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); - _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - - return eError; -#endif } +#endif /* !defined(NO_HARDWARE) */ #undef REG32_FMTSPEC #undef REG64_FMTSPEC @@ -3351,497 +1774,23 @@ PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, #undef DDLOG64_DPX #undef DDLOGVAL32 -/*! -******************************************************************************* - - @Function RGXDebugRequestProcess - - @Description - - This function will print out the debug for the specified level of verbosity - - @Input pfnDumpDebugPrintf - Optional replacement print function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - @Input ui32VerbLevel - Verbosity level - - @Return void - -******************************************************************************/ -static -void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32VerbLevel) +void RGXDumpAllContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) { - PVRSRV_ERROR eError; - PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - PVRSRV_DEV_POWER_STATE ePowerState; - IMG_BOOL bRGXPoweredON; - IMG_UINT8 ui8FwOsCount; - RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; - IMG_BOOL bPwrLockAlreadyHeld; - - bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); - if (!bPwrLockAlreadyHeld) - { - /* Only acquire the power-lock if not already held by the calling context */ - eError = PVRSRVPowerLock(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return; - } - } - - ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; - - eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Error retrieving RGX power state. 
No debug info dumped.", - __func__)); - goto Exit; - } - - if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || - (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) - { - PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", - (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount); - } - - PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); - - bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); - - PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); - PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo); - PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, - psDevInfo->sDevFeatureCfg.ui32V, - psDevInfo->sDevFeatureCfg.ui32N, - psDevInfo->sDevFeatureCfg.ui32C, - PVR_ARCH_NAME); - PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState)); - PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); - if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) - { - PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); - } - else - { - PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); - } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION)) - { -#if defined(SUPPORT_TRP) - PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW enabled"); -#else - PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW disabled"); + DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) + DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); #endif - } - else - { - PVR_DUMPDEBUG_LOG("TRP: HW support - No"); - } - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WORKGROUP_PROTECTION)) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) { -#if defined(SUPPORT_WGP) - PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW enabled"); -#else - PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW disabled"); -#endif - } - else - { - PVR_DUMPDEBUG_LOG("WGP: HW support - No"); - } - - RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); - - /* Dump out the kernel CCB. */ - { - const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; - - if (psKCCBCtl != NULL) - { - PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", - psKCCBCtl->ui32WriteOffset, - psKCCBCtl->ui32ReadOffset); - } - } - - /* Dump out the firmware CCB. 
*/
-	{
-		const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
-
-		if (psFCCBCtl != NULL)
-		{
-			PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
-							  psFCCBCtl->ui32WriteOffset,
-							  psFCCBCtl->ui32ReadOffset);
-		}
-	}
-
-	if (psFwOsData != NULL)
-	{
-		IMG_UINT32 ui32TID;
-
-		/* Dump the KCCB commands executed */
-		PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
-						  psFwOsData->ui32KCCBCmdsExecuted);
-
-#if defined(PVRSRV_STALLED_CCB_ACTION)
-		/* Dump the number of times we have performed a forced UFO update,
-		 * and (if non-zero) the timestamp of the most recent occurrence/
-		 */
-		PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d",
-						  psFwOsData->ui32ForcedUpdatesRequested);
-		if (psFwOsData->ui32ForcedUpdatesRequested > 0)
-		{
-			IMG_UINT8 ui8Idx;
-			IMG_UINT64 ui64Seconds, ui64Nanoseconds;
-
-			if (psFwOsData->ui64LastForcedUpdateTime > 0ULL)
-			{
-				ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds);
-				PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")",
-								  ui64Seconds, ui64Nanoseconds);
-			}
-			else
-			{
-				PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)");
-			}
-			/* Dump SLR log */
-			if (psFwOsData->sSLRLogFirst.aszCCBName[0])
-			{
-				ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
-				PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
-								  "} Fence found on context 0x%x '%s' has %d UFOs",
-								  ui64Seconds, ui64Nanoseconds,
-								  psFwOsData->sSLRLogFirst.ui32FWCtxAddr,
-								  psFwOsData->sSLRLogFirst.aszCCBName,
-								  psFwOsData->sSLRLogFirst.ui32NumUFOs);
-			}
-			for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES;ui8Idx++)
-			{
-				if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0])
-				{
-					ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
-					PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
-									  "] Fence found on context 0x%x '%s' has %d UFOs",
-									  ui64Seconds, ui64Nanoseconds,
-									  psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr,
-									  psFwOsData->sSLRLog[ui8Idx].aszCCBName,
-									  psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs);
-				}
-			}
-		}
-#else
-		PVR_DUMPDEBUG_LOG("RGX SLR: Disabled");
-#endif
-
-		/* Dump the error counts */
-		PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d",
-						  psDevInfo->sErrorCounts.ui32WGPErrorCount,
-						  psDevInfo->sErrorCounts.ui32TRPErrorCount);
-
-		for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
-		{
-			/* Dump the IRQ info for threads */
-			PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u",
-							  ui32TID,
-							  psFwOsData->aui32InterruptCount[ui32TID],
-							  psDevInfo->aui32SampleIRQCount[ui32TID]);
-		}
-	}
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
-	/* Dump out the Workload estimation CCB.
*/ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) { - const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; - - if (psWorkEstCCBCtl != NULL) - { - PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", - psWorkEstCCBCtl->ui32WriteOffset, - psWorkEstCCBCtl->ui32ReadOffset); - } + DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); } -#endif - - /* Dump the FW Sys config flags on the Host */ - if (!PVRSRV_VZ_MODE_IS(GUEST)) - { - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; - - if (!psFwSysData) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); - goto Exit; - } - - _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); - PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); - } - - /* Dump the FW OS config flags */ - { - IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; - - if (!psFwOsData) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); - goto Exit; - } - - _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); - PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); - } - - if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) - { - - PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); - PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); - PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); - - if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) - { - PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x%p", - psDevInfo->pvSecureRegsBaseKM); - PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Physical): 0x%08lX", - (unsigned long)psDevInfo->sRegsPhysBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET); - } - -#if !defined(NO_HARDWARE) - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - IMG_UINT32 ui32MSlvCtrl1Reg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? - ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED : - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED) : - RGX_CR_META_SP_MSLVCTRL1; - - /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ - OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvCtrl1Reg, 0x0); - } -#endif - - eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: RGXDumpRGXRegisters failed (%s)", - __func__, - PVRSRVGetErrorString(eError))); -#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); - _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); - } -#endif - } - } - else - { - PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? 
"Guest Mode of operation" : "RGX power is down"); - } - - PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); - - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) - { - IMG_INT tid; - /* Dump FW trace information */ - if (psRGXFWIfTraceBufCtl != NULL) - { - for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) - { - IMG_UINT32 i; - IMG_BOOL bPrevLineWasZero = IMG_FALSE; - IMG_BOOL bLineIsAllZeros = IMG_FALSE; - IMG_UINT32 ui32CountLines = 0; - IMG_UINT32 *pui32TraceBuffer; - IMG_CHAR *pszLine; - - if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) - { - PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", - ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), - RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) - ); - } - else - { - PVR_DUMPDEBUG_LOG("Debug log type: none"); - } - - pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; - - /* Skip if trace buffer is not allocated */ - if (pui32TraceBuffer == NULL) - { - PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); - continue; - } - -/* Max number of DWords to be printed per line, in debug dump output */ -#define PVR_DD_FW_TRACEBUF_LINESIZE 30U - /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. */ - pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); - if (pszLine == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Out of mem allocating line string (size: %d)", - __func__, - 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); - goto Exit; - } - - PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); - PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); - PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords); - - for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) - { - IMG_UINT32 k = 0; - IMG_UINT32 ui32Line = 0x0; - IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); - IMG_CHAR *pszBuf = pszLine; - - for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) - { - if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords) - { - /* Stop reading when the index goes beyond trace buffer size. This condition is - * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not - * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ - break; - } - - ui32Line |= pui32TraceBuffer[i + k]; - - /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ - OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); - pszBuf += 9; /* write over the '\0' */ - } - - bLineIsAllZeros = (ui32Line == 0x0); - - if (bLineIsAllZeros) - { - if (bPrevLineWasZero) - { - ui32CountLines++; - } - else - { - bPrevLineWasZero = IMG_TRUE; - ui32CountLines = 1; - PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); - } - } - else - { - if (bPrevLineWasZero && ui32CountLines > 1) - { - PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); - } - bPrevLineWasZero = IMG_FALSE; - - PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); - } - - } - if (bPrevLineWasZero) - { - PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); - } - - PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); - - OSFreeMem(pszLine); - } - } - - { - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) - { - PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); - } - else - { - PVR_DUMPDEBUG_LOG("------[ FWCtxs Next CMD ]------"); - } - - DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); - DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); - - DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); - - DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); - } - } - - PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); - -Exit: - if (!bPwrLockAlreadyHeld) - { - PVRSRVPowerUnlock(psDeviceNode); - } -} - -/*! - ****************************************************************************** - - @Function RGXDebugRequestNotify - - @Description Dump the debug data for RGX - - ******************************************************************************/ -static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, - IMG_UINT32 ui32VerbLevel, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle; - - /* Only action the request if we've fully init'ed */ - if (psDevInfo->bDevInit2Done) - { - RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); - } -} - -PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify, - psDevInfo->psDeviceNode, - RGXDebugRequestNotify, - DEBUG_REQUEST_RGX, - psDevInfo); -} - -PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - if (psDevInfo->hDbgReqNotify) - { - return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify); - } - - /* No notifier registered */ - return PVRSRV_OK; } /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.h deleted file mode 100644 index d253d899ff55..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdebug.h +++ /dev/null @@ -1,191 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title RGX debug header file -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Header for the RGX debugging functions -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#if !defined(RGXDEBUG_H) -#define RGXDEBUG_H - -#include "pvrsrv_error.h" -#include "img_types.h" -#include "device.h" -#include "pvr_notifier.h" -#include "pvrsrv.h" -#include "rgxdevice.h" - -/** - * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in - * LISR for each RGX FW thread. - * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. - */ -#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \ - do \ - { \ - IMG_UINT32 ui32TID; \ - for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \ - { \ - PVR_DPF((DBGPRIV_VERBOSE, \ - "RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u)", \ - ui32TID, \ - (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32TID], \ - (psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \ - } \ - } while (0) - -/*! -******************************************************************************* - - @Function RGXDumpRGXRegisters - - @Description - - Dumps an extensive list of RGX registers required for debugging - - @Input pfnDumpDebugPrintf - Optional replacement print function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - - @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise - -******************************************************************************/ -PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! 
-******************************************************************************* - - @Function RGXDumpFirmwareTrace - - @Description Dumps the decoded version of the firmware trace buffer. - - Dump useful debugging info - - @Input pfnDumpDebugPrintf - Optional replacement print function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - - @Return void - -******************************************************************************/ -void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo); - -#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) -void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -/*! -******************************************************************************* - - @Function ValidateFWOnLoad - - @Description Compare the Firmware image as seen from the CPU point of view - against the same memory area as seen from the firmware point - of view after first power up. - - @Input psDevInfo - Device Info - - @Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -/*! -******************************************************************************* - - @Function RGXDumpRGXDebugSummary - - @Description - - Dump a summary in human readable form with the RGX state - - @Input pfnDumpDebugPrintf - The debug printf function - @Input pvDumpDebugFile - Optional file identifier to be passed to the - 'printf' function if required - @Input psDevInfo - RGX device info - @Input bRGXPoweredON - IMG_TRUE if RGX device is on - - @Return void - -******************************************************************************/ -void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_BOOL bRGXPoweredON); - -/*! -******************************************************************************* - - @Function RGXDebugInit - - @Description - - Setup debug requests, calls into PVRSRVRegisterDeviceDbgRequestNotify - - @Input psDevInfo RGX device info - @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error - -******************************************************************************/ -PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! 
-******************************************************************************* - - @Function RGXDebugDeinit - - @Description - - Remove debug requests, calls into PVRSRVUnregisterDeviceDbgRequestNotify - - @Output phNotify Points to debug notifier handle - @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error - -******************************************************************************/ -PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo); - -#endif /* RGXDEBUG_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdevice.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdevice.h index 58002401302c..5e197ec8a0f9 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdevice.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxdevice.h @@ -69,11 +69,24 @@ typedef struct { *****************************************************************************/ #define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ #define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ -#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject SPU power state mask change every DM kick */ +#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject power units state change every DM kick */ #define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ #define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN (0x10) /*!< Used for validation to enable SPU power state mask change */ #define RGXKM_DEVICE_STATE_MASK (0x1F) +/*! + ****************************************************************************** + * ECC RAM Fault Validation + *****************************************************************************/ +#define RGXKM_ECC_ERR_INJ_DISABLE 0 +#define RGXKM_ECC_ERR_INJ_SLC 1 +#define RGXKM_ECC_ERR_INJ_USC 2 +#define RGXKM_ECC_ERR_INJ_TPU 3 +#define RGXKM_ECC_ERR_INJ_RASCAL 4 +#define RGXKM_ECC_ERR_INJ_MARS 5 + +#define RGXKM_ECC_ERR_INJ_INTERVAL 10U + /*! ****************************************************************************** * GPU DVFS Table @@ -89,13 +102,44 @@ typedef struct { ****************************************************************************** * Global flags for driver validation *****************************************************************************/ -#define RGX_VAL_LS_EN (0x1U) /*!< Enable dual lockstep firmware */ #define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN (0x2U) /*!< Enable FBDC signature check. Signatures must match */ #define RGX_VAL_FBDC_SIG_CHECK_ERR_EN (0x4U) /*!< Enable FBDC signature check. Signatures must not match */ #define RGX_VAL_GPUSTATEPIN_EN (0x8U) /*!< Enable GPU state pin check */ -#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable KZ signature check. Signatures must match */ -#define RGX_VAL_KZ_SIG_CHECK_ERR_EN (0x20U) /*!< Enable KZ signature check. Signatures must not match */ -#define RGX_VAL_SIG_CHECK_ERR_EN (RGX_VAL_FBDC_SIG_CHECK_ERR_EN) +#define RGX_VAL_WGP_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable WGP signature check. Signatures must match */ +#define RGX_VAL_WGP_SIG_CHECK_ERR_EN (0x20U) /*!< Enable WGP signature check. Signatures must not match */ +#define RGX_VAL_TRP_SIG_CHECK_NOERR_EN (0x40U) /*!< Enable TRP signature check. 
Signatures must match */ +#define RGX_VAL_TRP_SIG_CHECK_ERR_EN (0x80U) /*!< Enable TRP signature check. Signatures must not match */ + +/*! + ****************************************************************************** + * HWPerf L2 Stream ID type definition. + *****************************************************************************/ +typedef IMG_UINT32 RGX_HWPERF_L2_STREAM_ID; +/* HWPerf stream for Client HWPerf access. */ +#define RGX_HWPERF_L2_STREAM_HWPERF 0U +#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) +/* HWPerf stream for FTrace HWPerf access. */ +#define RGX_HWPERF_L2_STREAM_FTRACE 1U +#define RGX_HWPERF_L2_STREAM_LAST 2U +#else +#define RGX_HWPERF_L2_STREAM_LAST 1U +#endif + +/*! + ****************************************************************************** + * Boot-time Faults Injection + *****************************************************************************/ +#define RGX_NO_FAULTS_INJ 0 +#define RGX_AXI_BUS_PARITY_INJ 1 +#define RGX_SAFETY_IRQ_AXI_BUS_PARITY_INJ 2 +#define RGX_XPU_BUS_PARITY_INJ 3 +#define RGX_AXI_MEM_BUS_PARITY_INJ 4 +#define RGX_DCLS_INJ 5 +#define RGX_MMU_PTE_PARITY_INJ 6 +#define RGX_MMU_PTE_PARITY_INJ_KICK_NAME "kick-tq" +#define RGX_MMU_PTE_PARITY_INJ_PMR "tquscpmr" +#define RGX_REST_LATENT_ICS_INJ 7 +#define RGX_REST_LATENT_ICS_KICK_NAME "DoKickTA" typedef struct _GPU_FREQ_TRACKING_DATA_ { @@ -152,6 +196,15 @@ typedef struct _RGX_GPU_DVFS_TABLE_ * GPU utilisation statistics *****************************************************************************/ +typedef struct _RGXFWIF_TEMP_GPU_UTIL_STATS_ +{ + IMG_UINT64 aaaui64DMOSTmpCounters[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; + IMG_UINT64 aaui64DMOSTmpLastWord[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 aaui64DMOSTmpLastState[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 aaui64DMOSTmpLastPeriod[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 aaui64DMOSTmpLastTime[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; +} RGXFWIF_TEMP_GPU_UTIL_STATS; + typedef struct _RGXFWIF_GPU_UTIL_STATS_ { IMG_BOOL bValid; /* If TRUE, statistics are valid. 
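The rgxdevice.h hunks above introduce RGX_HWPERF_L2_STREAM_ID with a per-build stream count (the FTrace stream only exists on Linux builds), and later hunks in this file turn the single HWPerf filter/stream/buffer-size fields into arrays sized by RGX_HWPERF_L2_STREAM_LAST, plus a combined ui64HWPerfFwFilter formed by OR-ing the per-stream filters. A small self-contained sketch of the same indexing idiom follows, with hypothetical names (STREAM_*, hwperf_state), not the driver's real fields.

#include <stdint.h>
#include <stdio.h>

/* Stream indices; the count is a compile-time constant so per-stream
 * state can live in fixed-size arrays. */
#define STREAM_HWPERF 0U
#if defined(__linux__)
#define STREAM_FTRACE 1U
#define STREAM_LAST   2U
#else
#define STREAM_LAST   1U
#endif

struct hwperf_state {
    uint64_t filter[STREAM_LAST];   /* per-stream event filter           */
    void    *stream[STREAM_LAST];   /* per-stream output handle          */
    uint64_t combined_filter;       /* OR of all the per-stream filters  */
};

/* Updating one stream recomputes the combined filter handed to the consumer. */
static void set_filter(struct hwperf_state *s, unsigned int id, uint64_t value)
{
    unsigned int i;

    s->filter[id] = value;
    s->combined_filter = 0;
    for (i = 0; i < STREAM_LAST; i++)
        s->combined_filter |= s->filter[i];
}

int main(void)
{
    struct hwperf_state s = {0};

    set_filter(&s, STREAM_HWPERF, 0x3);
    printf("combined filter = 0x%llx\n",
           (unsigned long long)s.combined_filter);
    return 0;
}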
@@ -161,12 +214,13 @@ typedef struct _RGXFWIF_GPU_UTIL_STATS_ IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ - IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS active statistic */ - IMG_UINT64 aaui64DMOSStatBlocked[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS blocked statistic */ - IMG_UINT64 aaui64DMOSStatIdle[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS idle statistic */ - IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_DM_MAX][RGX_NUM_OS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */ + IMG_UINT64 aaui64DMOSStatInactive[RGXFWIF_GPU_UTIL_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of idle and blocked stats */ + IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_GPU_UTIL_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS active statistic */ + IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_GPU_UTIL_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */ IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ + + RGXFWIF_TEMP_GPU_UTIL_STATS sTempGpuStats; /* Temporary data used to calculate the per-DM per-OS statistics */ } RGXFWIF_GPU_UTIL_STATS; @@ -180,26 +234,6 @@ typedef struct _RGX_REG_CONFIG_ typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; -#if defined(SUPPORT_VALIDATION) -/** - * Structure containing information for calculating next SPU power domain state. - */ -typedef struct _RGX_POWER_DOMAIN_STATE_ -{ - /** - * Total number of power units in the core. - */ - IMG_UINT32 ui32PowUnitsCount; - /** - * Current power domain state - */ - IMG_UINT32 ui32CurrentState; - /** - * Stores last transition that happened for each power domain state. - */ - IMG_UINT32 *paui32LastTransition; -} RGX_POWER_DOMAIN_STATE; -#endif typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ { @@ -212,7 +246,9 @@ typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; IMG_UINT32 ui32MAXDMCount; IMG_UINT32 ui32MAXPowUnitCount; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) IMG_UINT32 ui32MAXRACCount; +#endif IMG_UINT32 ui32SLCSizeInBytes; IMG_PCHAR pszBVNCString; }PVRSRV_DEVICE_FEATURE_CONFIG; @@ -287,11 +323,13 @@ typedef union _RGX_WORKLOAD_ IMG_UINT32 ui32Characteristic2; } sTransfer; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) struct { IMG_UINT32 ui32DispatchSize; IMG_UINT32 ui32AccStructSize; } sRay; +#endif } RGX_WORKLOAD; @@ -344,10 +382,12 @@ typedef struct _WORKEST_HOST_DATA_ WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ } sTransfer; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) struct { WORKLOAD_MATCHING_DATA sDataRDM; /*!< matching data for RDM commands */ } sRay; +#endif } uWorkloadMatchingData; /* @@ -376,8 +416,16 @@ typedef struct _WORKEST_RETURN_DATA_ } WORKEST_RETURN_DATA; #endif - -#define RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES 4 +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +typedef struct +{ +#if defined(PDUMP) + IMG_HANDLE hPdumpPages; +#endif + PG_HANDLE sPages; + IMG_DEV_PHYADDR sPhysAddr; +} RGX_MIPS_ADDRESS_TRAMPOLINE; +#endif /*! @@ -390,6 +438,16 @@ typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_ IMG_UINT32 ui32TRPErrorCount; /*!< count of the number of TRP checksum errors */ } PVRSRV_RGXDEV_ERROR_COUNTS; +/*! 
+ ****************************************************************************** + * RGX Debug dump firmware trace log type + *****************************************************************************/ +typedef IMG_UINT32 RGX_FWT_LOGTYPE; +#define RGX_FWT_LOGTYPE_NONE 0U +#define RGX_FWT_LOGTYPE_BINARY 1U +#define RGX_FWT_LOGTYPE_DECODED 2U +#define RGX_FWT_LOGTYPE_PARTIAL 3U + /*! ****************************************************************************** * RGX Device info @@ -410,8 +468,10 @@ typedef struct _PVRSRV_RGXDEV_INFO_ /* Kernel mode linear address of device registers */ void __iomem *pvRegsBaseKM; +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) /* Kernel mode linear address of device registers */ void __iomem *pvSecureRegsBaseKM; +#endif IMG_HANDLE hRegMapping; @@ -433,6 +493,7 @@ typedef struct _PVRSRV_RGXDEV_INFO_ /* Kernel CCB */ DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ + RGXFWIF_CCB_CTL *psKernelCCBCtlLocal; /*!< cpu local copy of Kernel CCB control */ DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ @@ -441,19 +502,32 @@ typedef struct _PVRSRV_RGXDEV_INFO_ /* Firmware CCB */ DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ + RGXFWIF_CCB_CTL *psFirmwareCCBCtlLocal; /*!< cpu local copy of Firmware CCB control */ DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ /* Workload Estimation Firmware CCB */ DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ + RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtlLocal; /*!< cpu local copy of Workload Estimation Firmware CCB control */ DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + /* Counter dumping */ + DEVMEM_MEMDESC *psCounterBufferMemDesc; /*!< mem desc for counter dumping buffer */ + POS_LOCK hCounterDumpingLock; /*!< Lock for guarding access to counter dumping buffer */ +#endif + PVRSRV_MEMALLOCFLAGS_T uiFWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */ IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ + /* multicore configuration information */ + IMG_UINT32 ui32MultiCoreNumCores; /* total cores primary + secondaries. 
0 for non-multi core */ + IMG_UINT32 ui32MultiCorePrimaryId; /* primary core id for this device */ + IMG_UINT64 *pui64MultiCoreCapabilities; /* capabilities for each core */ + /* if we don't preallocate the pagetables we must insert newly allocated page tables dynamically @@ -469,6 +543,10 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWDataMemDesc; IMG_DEV_VIRTADDR sFWDataDevVAddrBase; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + RGX_MIPS_ADDRESS_TRAMPOLINE *psTrampoline; +#endif + DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; @@ -493,15 +571,12 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWSigTDMChecksMemDesc; IMG_UINT32 ui32SigTDMChecksSize; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) DEVMEM_MEMDESC *psRGXFWSigRDMChecksMemDesc; IMG_UINT32 ui32SigRDMChecksSize; - - -#if defined(SUPPORT_VALIDATION) - DEVMEM_MEMDESC *psRGXFWValidationSigMemDesc; - IMG_UINT32 ui32ValidationSigSize; #endif + IMG_BOOL bDumpedKCCBCtlAlready; POS_SPINLOCK hSyncCheckpointSignalSpinLock; /*!< Guards data shared between an atomic & sleepable-context */ @@ -511,6 +586,8 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ + IMG_PUINT32 apui32TraceBuffer[RGXFW_THREAD_NUM]; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< CPU local copy of FW Trace buffer size in dwords */ DEVMEM_MEMDESC *psRGXFWIfPowMonBufferMemDesc; /*!< memdesc of FW power monitoring data */ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ @@ -531,12 +608,13 @@ typedef struct _PVRSRV_RGXDEV_INFO_ IMG_UINT32 ui32ClockSource; IMG_UINT32 ui32LastClockSource; - DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; - RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; + DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCtlMemDesc; + RGXFWIF_GPU_UTIL_FW *psRGXFWIfGpuUtilFW; DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; IMG_BYTE *psRGXFWIfHWPerfBuf; IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ + IMG_UINT32 ui32RGXL2HWPerfBufSize; /* in bytes */ DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; @@ -555,9 +633,14 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; - /* Additional guest firmware memory context info */ - DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED]; - DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED]; +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + DEVMEM_MEMDESC *psRGXFWIfActiveContextBufDesc; + RGXFWIF_ACTIVE_CONTEXT_BUF_DATA *psRGXFWIfActiveContextBuf; +#endif + + /* Premapped firmware memory context info */ + DEVMEM_HEAP *psPremappedFwRawHeap[RGX_NUM_DRIVERS_SUPPORTED]; + DEVMEM_MEMDESC *psPremappedFwRawMemDesc[RGX_NUM_DRIVERS_SUPPORTED]; #if defined(SUPPORT_WORKLOAD_ESTIMATION) /* Array to store data needed for workload estimation when a workload @@ -593,10 +676,12 @@ typedef struct _PVRSRV_RGXDEV_INFO_ * and loss/freeing of FW & Host resources while in * use in another thread e.g. MSIR. */ - IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */ - IMG_HANDLE hHWPerfStream; /*! 
TL Stream buffer (L2) for firmware event stream */ - IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */ - IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */ + IMG_UINT64 ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_LAST]; /*! Event filter for FW events (settable by AppHint) */ + IMG_HANDLE hHWPerfStream[RGX_HWPERF_L2_STREAM_LAST]; /*! TL Stream buffer (L2) for firmware event stream */ + IMG_UINT32 ui32L2BufMaxPacketSize[RGX_HWPERF_L2_STREAM_LAST]; /*! Max allowed packet size in FW HWPerf TL (L2) buffer */ + IMG_BOOL bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_LAST]; /*! Flag to indicate if copying HWPerf data is suspended */ + IMG_UINT64 ui64HWPerfFwFilter; /*! Event filter for FW events created from OR-ing ui64HWPerfFilter values. */ + IMG_UINT32 uiHWPerfStreamCount; /*! Value indicating if any of the HWPerf streams has been created */ IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */ POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ @@ -697,7 +782,7 @@ typedef struct _PVRSRV_RGXDEV_INFO_ POS_LOCK hGPUUtilLock; /* Register configuration */ - RGX_REG_CONFIG sRegCongfig; + RGX_REG_CONFIG sRegConfig; IMG_BOOL bRGXPowered; DLLIST_NODE sMemoryContextList; @@ -729,13 +814,14 @@ typedef struct _PVRSRV_RGXDEV_INFO_ POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ -#if defined(SUPPORT_VALIDATION) - RGX_POWER_DOMAIN_STATE sPowerDomainState; /*!< Power island sequence */ - IMG_UINT32 ui32PowDomainKickInterval; /*!< Power island transition interval */ - IMG_UINT32 ui32ValidationFlags; /*!< Validation flags for host driver */ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + POS_LOCK hNMILock; /*!< Lock to protect NMI operations */ #endif + +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) IMG_UINT32 ui32AvailablePowUnitsMask; IMG_UINT32 ui32AvailableRACMask; +#endif RGX_LAYER_PARAMS sLayerParams; @@ -758,34 +844,17 @@ typedef struct _PVRSRV_RGXDEV_INFO_ DEVMEM_MEMDESC *psFirmwareGcovBufferMemDesc; /*!< mem desc for Firmware gcov dumping buffer */ IMG_UINT32 ui32FirmwareGcovSize; #endif + +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /* Value to store for each page size range config register in MMU4 */ IMG_UINT64 aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES]; - -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) - struct - { - IMG_UINT64 ui64timerGray; - IMG_UINT64 ui64timerBinary; - IMG_UINT64 *pui64uscTimers; - } sRGXTimerValues; #endif -#if defined(SUPPORT_VALIDATION) - struct - { - IMG_UINT64 ui64RegVal; - struct completion sRegComp; - } sFwRegs; -#endif + IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ - IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ - IMG_UINT32 ui32SLRSkipFWAddr; -#endif #if defined(SUPPORT_SECURITY_VALIDATION) DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; @@ -817,8 +886,6 @@ typedef struct _PVRSRV_RGXDEV_INFO_ RGX_CONTEXT_RESET_REASON eLastDeviceError; /*!< device error reported to client */ - IMG_UINT32 ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */ - #if defined(SUPPORT_SECURE_ALLOC_KM) PMR *psGenHeapSecMem; /*!< An allocation of 
secure memory mapped to the general devmem heap. The allocation is @@ -829,6 +896,19 @@ typedef struct _PVRSRV_RGXDEV_INFO_ #if defined(SUPPORT_SECURE_CONTEXT_SWITCH) DEVMEM_MEMDESC *psRGXFWScratchBufMemDesc; #endif + + RGX_FWT_LOGTYPE eDebugDumpFWTLogType; + + RGX_FW_INFO_HEADER sFWInfoHeader; +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + IMG_UINT32 ui32TFBCLossyGroup; /*!< TFBCCompressionControlGroup + setting for those cores which support + this feature. */ +#endif + RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; /*!< GPU usage statistics */ + POS_LOCK hGpuUtilStatsLock; + + } PVRSRV_RGXDEV_INFO; @@ -838,6 +918,9 @@ typedef struct _RGX_TIMING_INFORMATION_ /*! GPU default core clock speed in Hz */ IMG_UINT32 ui32CoreClockSpeed; + /*! Default SOC clock speed in Hz */ + IMG_UINT32 ui32SOCClockSpeed; + /*! Active Power Management: GPU actively requests the host driver to be powered off */ IMG_BOOL bEnableActivePM; @@ -855,6 +938,12 @@ typedef struct _RGX_DATA_ RGX_TIMING_INFORMATION *psRGXTimingInfo; } RGX_DATA; +typedef enum _RGX_QUERY_TIMESTAMP_TYPE_ +{ + RGX_QUERY_HOST_TIMESTAMP, + RGX_QUERY_DEVICE_TIMESTAMP, +} RGX_QUERY_TIMESTAMP_TYPE; + /* RGX PDUMP register bank name (prefix) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.c index 9c4d5c7e15c2..25ead9588808 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.c @@ -53,9 +53,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_fwif_km.h" #include "pdump_km.h" #include "osfunc.h" -#if defined(__linux__) -#include "km_apphint.h" -#endif +#include "os_apphint.h" #include "cache_km.h" #include "allocmem.h" #include "physheap.h" @@ -66,6 +64,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_debug.h" #include "pvr_notifier.h" #include "rgxfwutils.h" +#include "rgxfwcmnctx.h" +#include "rgxfwriscv.h" #include "rgx_options.h" #include "rgx_fwif_alignchecks.h" #include "rgx_fwif_resetframework.h" @@ -73,7 +73,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "fwtrace_string.h" #include "rgxheapconfig.h" #include "pvrsrv.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgxhwperf.h" #include "rgxccb.h" #include "rgxcompute.h" @@ -84,8 +84,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif #include "rgxmem.h" #include "rgxmmudefs_km.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +#include "rgxmipsmmuinit.h" +#endif #include "rgxta3d.h" +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) #include "rgxkicksync.h" +#endif #include "rgxutils.h" #include "rgxtimecorr.h" #include "rgxfwimageutils.h" @@ -95,16 +100,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "sync_checkpoint_external.h" #include "tlstream.h" #include "devicemem_server_utils.h" -#include "htbuffer.h" +#include "htbserver.h" #include "info_page.h" #include "physmem_lma.h" #include "physmem_osmem.h" -#include "oskm_apphint.h" #ifdef __linux__ #include /* sprintf */ -#include "rogue_trace_events.h" #else #include #endif @@ -120,10 +123,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxpdvfs.h" #endif -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -#include "rgxsoctimer.h" +#if defined(SUPPORT_FW_OPP_TABLE) && defined(CONFIG_OF) +#include "pvr_dvfs_common.h" #endif + #include "vz_vmm_pvz.h" #include "rgx_heaps.h" @@ -136,6 +140,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXFW_HWPERF_L1_SIZE_MIN (16U) #define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB #define RGXFW_HWPERF_L1_SIZE_MAX (12288U) +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) #if defined(DEBUG) /* Catch the use of auto-increment when meta_registers_unpacked_accesses feature is * present in case we ever use it. No WA exists so it must not be used */ @@ -146,6 +151,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #else #define CHECK_HWBRN_68777(v) #endif +#endif /* Firmware CCB length */ #if defined(NO_HARDWARE) && defined(PDUMP) @@ -156,20 +162,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5) #endif -/* - * Maximum length of time a DM can run for before the DM will be marked - * as out-of-time. CDM has an increased value due to longer running kernels. - * - * These deadlines are increased on FPGA, EMU and VP due to the slower - * execution time of these platforms. PDUMPS are also included since they - * are often run on EMU, FPGA or in CSim. - */ -#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP) -#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000) -#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000) -#else -#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (30000) -#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (600000) +#if defined(RGX_FW_IRQ_OS_COUNTERS) +const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS] = {IRQ_COUNTER_STORAGE_REGS}; #endif /* Workload Estimation Firmware CCB length */ @@ -187,6 +181,18 @@ typedef struct PVRSRV_RGXDEV_INFO *psDevInfo; } RGX_DEFERRED_KCCB_CMD; +typedef struct +{ + IMG_INT32 i32Priority; + IMG_UINT32 ui32IsolationGroups; + IMG_UINT32 ui32TSPercentage; +}RGX_QOS_DEFAULTS; + +#define RGX_QOS_DEFAULTS_INIT(osid) \ + {RGX_DRIVERID_##osid##_DEFAULT_PRIORITY,\ + RGX_DRIVERID_##osid##_DEFAULT_ISOLATION_GROUP,\ + RGX_DRIVERID_##osid##_DEFAULT_TIME_SLICE} + #if defined(PDUMP) /* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the * PID filter example entries @@ -196,9 +202,14 @@ static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32), "generates WRW commands for loading the PID values"); #endif +#if (RGXFW_MAX_NUM_OSIDS > 1) +static_assert(RGX_DRIVER_DEFAULT_TIME_SLICES_SUM <= PVRSRV_VZ_TIME_SLICE_MAX, "Invalid driverid time slice aggregate"); +#endif + static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo); static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo); +#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit) { PVRSRV_ERROR eError; @@ -245,14 +256,10 @@ static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo) DevmemFree(psSLC3FenceMemDesc); } } +#endif static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value) { - /* Ensure any uncached/WC memory writes are flushed from CPU write buffers - * before kicking MTS. - */ - OSWriteMemoryBarrier(NULL); - /* This should *NOT* happen. Try to trace what caused this and avoid a NPE * with the Write/Read at the foot of the function. 
*/ @@ -540,6 +547,40 @@ static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo } #endif +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) +/*! + ****************************************************************************** + @Function RGXFWSetupCounterBuffer + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR + *****************************************************************************/ +static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsBufferMemDesc, + IMG_UINT32 ui32CounterDataBufferSize, + RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl) +{ + PVRSRV_ERROR eError; + + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), + ui32CounterDataBufferSize, + "FwCounterBuffer", + ppsBufferMemDesc, + &psCounterDumpCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation"); + + psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2; + RGXFwSharedMemCacheOpValue(psCounterDumpCtl->ui32SizeInDwords, FLUSH); + + return PVRSRV_OK; +} +#endif + /*! ******************************************************************************* @Function RGXFWSetupAlignChecks @@ -551,7 +592,7 @@ static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo The UM array is passed from the user side. Now the firmware is - is responsible for filling this part of the memory. If that + responsible for filling this part of the memory. If that happens the check of the UM structures will be performed by the host driver on client's connect. If the macro is not defined the client driver fills the memory @@ -597,9 +638,13 @@ static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode, paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM); *paui32AlignChecks = 0; + + OSWriteMemoryBarrier(paui32AlignChecks); + RGXFwSharedMemCacheOpExec(paui32AlignChecks - (ARRAY_SIZE(aui32RGXFWAlignChecksKM) + 1), + ui32RGXFWAlignChecksTotal, + PVRSRV_CACHE_OP_FLUSH); } - OSWriteMemoryBarrier(paui32AlignChecks); DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc, 0, @@ -656,7 +701,7 @@ PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; /* Honour the SLC cache flags */ - eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode); + eError = DevmemDeviceCacheMode(uiDevFlags, &uiGPUCacheMode); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode); /* @@ -684,6 +729,15 @@ PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, ppDest->ui32Addr = ui32Offset; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); + PVR_GOTO_IF_ERROR(eError, failDevVAAcquire); + + ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF); + } +#endif else { IMG_UINT32 ui32Offset; @@ -763,787 +817,173 @@ void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc) DevmemReleaseDevVirtAddr(psSrc); } -PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +/*! 
+******************************************************************************* + @Function RGXFreeCCB + @Description Free the kernel or firmware CCB + @Input psDevInfo + @Input ppsCCBCtl + @Input ppvCCBCtlLocal + @Input ppsCCBCtlMemDesc + @Input ppsCCBMemDesc + @Input psCCBCtlFWAddr +******************************************************************************/ +static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_CCB_CTL **ppsCCBCtl, + RGXFWIF_CCB_CTL **ppsCCBCtlLocal, + DEVMEM_MEMDESC **ppsCCBCtlMemDesc, + IMG_UINT8 **ppui8CCB, + DEVMEM_MEMDESC **ppsCCBMemDesc) { - PVRSRV_RGXDEV_INFO *psDevInfo; - PVRSRV_ERROR eError = PVRSRV_OK; - - /* Wait for Slave Port to be Ready */ - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + if (*ppsCCBMemDesc != NULL) { - if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + if (*ppui8CCB != NULL) { - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); - if (eError == PVRSRV_OK) - { - /* Issue a Write */ - CHECK_HWBRN_68777(ui32RegAddr); - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32RegAddr); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32RegValue); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */ - } + DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc); + *ppui8CCB = NULL; } - else + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc); + *ppsCCBMemDesc = NULL; + } + if (*ppsCCBCtlMemDesc != NULL) + { + if (*ppsCCBCtl != NULL) { - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); - if (eError == PVRSRV_OK) - { - /* Issue a Write */ - CHECK_HWBRN_68777(ui32RegAddr); - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32RegAddr); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32RegValue); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */ - } + DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc); + *ppsCCBCtl = NULL; } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); + *ppsCCBCtlMemDesc = NULL; } - else + if (*ppsCCBCtlLocal != NULL) { - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError == PVRSRV_OK) - { - /* 
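RGXFreeCCB above gains a ppsCCBCtlLocal parameter so that, alongside the firmware-visible control structure, the CPU-local shadow copy introduced elsewhere in this patch is freed as well; all parameters are pointers-to-pointers so the caller's fields are left NULL afterwards. A tiny standalone illustration of that free-and-null idiom follows, with plain malloc/free standing in for the devmem unmap-and-free calls (ccb_ctl and ccb_free are hypothetical names).

#include <stdlib.h>

struct ccb_ctl { unsigned int write_off, read_off, wrap_mask; };

/* Free both the shared control structure and its CPU-local shadow,
 * taking pointers-to-pointers so the caller's fields end up NULL. */
static void ccb_free(struct ccb_ctl **shared, struct ccb_ctl **local)
{
    if (*shared) {
        free(*shared);      /* stands in for the devmem unmap-and-free */
        *shared = NULL;
    }
    if (*local) {
        free(*local);       /* plain host allocation, freed separately */
        *local = NULL;
    }
}

int main(void)
{
    struct ccb_ctl *shared = calloc(1, sizeof(*shared));
    struct ccb_ctl *local  = calloc(1, sizeof(*local));

    ccb_free(&shared, &local);
    return (shared == NULL && local == NULL) ? 0 : 1;
}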
Issue a Write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ - } + OSFreeMem(*ppsCCBCtlLocal); + *ppsCCBCtlLocal = NULL; } - - return eError; } -PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue) +/*! +******************************************************************************* + @Function RGXFreeCCBReturnSlots + @Description Free the kernel CCB's return slot array and associated mappings + @Input psDevInfo Device Info struct + @Input ppui32CCBRtnSlots CPU mapping of slot array + @Input ppsCCBRtnSlotsMemDesc Slot array's device memdesc +******************************************************************************/ +static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 **ppui32CCBRtnSlots, + DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc) { - PVRSRV_RGXDEV_INFO *psDevInfo; - PVRSRV_ERROR eError = PVRSRV_OK; - - /* Wait for Slave Port to be Ready */ - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) - { - if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) - { - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); - if (eError == PVRSRV_OK) - { - /* Issue a Read */ - CHECK_HWBRN_68777(ui32RegAddr); - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, - ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_EN); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */ - - /* Wait for Slave Port to be Ready */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; - } - } - else - { - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); - if (eError == PVRSRV_OK) - { - /* Issue a Read */ - CHECK_HWBRN_68777(ui32RegAddr); - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED, - ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_EN); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */ - - /* 
Wait for Slave Port to be Ready */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; - } - } -#if !defined(NO_HARDWARE) - if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) - { - *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); - } - else - { - *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED); - } -#else - *ui32RegValue = 0xFFFFFFFF; -#endif - } - else + /* Free the return slot array if allocated */ + if (*ppsCCBRtnSlotsMemDesc != NULL) { - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError == PVRSRV_OK) + /* Before freeing, ensure the CPU mapping as well is released */ + if (*ppui32CCBRtnSlots != NULL) { - /* Issue a Read */ - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); - (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ - - /* Wait for Slave Port to be Ready */ - eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); - if (eError != PVRSRV_OK) return eError; + DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc); + *ppui32CCBRtnSlots = NULL; } -#if !defined(NO_HARDWARE) - *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); -#else - *ui32RegValue = 0xFFFFFFFF; -#endif + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc); + *ppsCCBRtnSlotsMemDesc = NULL; } - - return eError; } +/*! +******************************************************************************* + @Function RGXSetupCCB + @Description Allocate and initialise a circular command buffer + @Input psDevInfo + @Input ppsCCBCtl + @Input ppsCCBCtlMemDesc + @Input ppui8CCB + @Input ppsCCBMemDesc + @Input psCCBCtlFWAddr + @Input ui32NumCmdsLog2 + @Input ui32CmdSize + @Input uiCCBMemAllocFlags + @Input pszName -struct _RGX_SERVER_COMMON_CONTEXT_ { - PVRSRV_RGXDEV_INFO *psDevInfo; - DEVMEM_MEMDESC *psFWCommonContextMemDesc; - PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; - SERVER_MMU_CONTEXT *psServerMMUContext; - DEVMEM_MEMDESC *psFWMemContextMemDesc; - DEVMEM_MEMDESC *psFWFrameworkMemDesc; - DEVMEM_MEMDESC *psContextStateMemDesc; - RGX_CLIENT_CCB *psClientCCB; - DEVMEM_MEMDESC *psClientCCBMemDesc; - DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; - IMG_BOOL bCommonContextMemProvided; - IMG_UINT32 ui32ContextID; - DLLIST_NODE sListNode; - RGX_CONTEXT_RESET_REASON eLastResetReason; - IMG_UINT32 ui32LastResetJobRef; - IMG_INT32 i32Priority; - RGX_CCB_REQUESTOR_TYPE eRequestor; -}; - -/*************************************************************************/ /*! 
-@Function _CheckPriority -@Description Check if priority is allowed for requestor type -@Input psDevInfo pointer to DevInfo struct -@Input i32Priority Requested priority -@Input eRequestor Requestor type specifying data master -@Return PVRSRV_ERROR PVRSRV_OK on success -*/ /**************************************************************************/ -static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_INT32 i32Priority, - RGX_CCB_REQUESTOR_TYPE eRequestor) + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_CCB_CTL **ppsCCBCtl, + RGXFWIF_CCB_CTL **ppsCCBCtlLocal, + DEVMEM_MEMDESC **ppsCCBCtlMemDesc, + IMG_UINT8 **ppui8CCB, + DEVMEM_MEMDESC **ppsCCBMemDesc, + PRGXFWIF_CCB_CTL *psCCBCtlFWAddr, + PRGXFWIF_CCB *psCCBFWAddr, + IMG_UINT32 ui32NumCmdsLog2, + IMG_UINT32 ui32CmdSize, + PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags, + const IMG_CHAR *pszName) { - /* Only one context allowed with real time priority (highest priority) */ - if (i32Priority == RGX_CTX_PRIORITY_REALTIME) - { - DLLIST_NODE *psNode, *psNext; + PVRSRV_ERROR eError; + RGXFWIF_CCB_CTL *psCCBCtl; + IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2); + IMG_CHAR szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN]; + IMG_INT32 iStrLen; - dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) - { - RGX_SERVER_COMMON_CONTEXT *psThisContext = - IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + /* Append "Control" to the name for the control struct. */ + iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName); + PVR_ASSERT(iStrLen < sizeof(szCCBCtlName)); - if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME && - psThisContext->eRequestor == eRequestor) - { - PVR_LOG(("Only one context with real time priority allowed")); - return PVRSRV_ERROR_INVALID_PARAMS; - } - } + if (unlikely(iStrLen < 0)) + { + OSStringSafeCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN); } - return PVRSRV_OK; -} - -PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, - RGXFWIF_DM eDM, - SERVER_MMU_CONTEXT *psServerMMUContext, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - DEVMEM_MEMDESC *psContextStateMemDesc, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; - RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext; - IMG_UINT32 ui32FWCommonContextOffset; - IMG_UINT8 *pui8Ptr; - PVRSRV_ERROR eError; + /* Allocate memory for the CCB control.*/ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + sizeof(RGXFWIF_CCB_CTL), + szCCBCtlName, + ppsCCBCtlMemDesc, + psCCBCtlFWAddr, + (void**) ppsCCBCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); /* - * Allocate all the resources that are required + * Allocate memory for the CCB. 
+ * (this will reference further command data in non-shared CCBs) */ - psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); - if (psServerCommonContext == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto fail_alloc; - } - - psServerCommonContext->psDevInfo = psDevInfo; - psServerCommonContext->psServerMMUContext = psServerMMUContext; - - if (psAllocatedMemDesc) - { - PDUMPCOMMENT(psDeviceNode, - "Using existing MemDesc for Rogue firmware %s context (offset = %d)", - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - ui32AllocatedOffset); - ui32FWCommonContextOffset = ui32AllocatedOffset; - psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; - psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; - } - else - { - /* Allocate device memory for the firmware context */ - PDUMPCOMMENT(psDeviceNode, - "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); - eError = DevmemFwAllocate(psDevInfo, - sizeof(*psFWCommonContext), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwContext", - &psServerCommonContext->psFWCommonContextMemDesc); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to allocate firmware %s context (%s)", - __func__, - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - PVRSRVGetErrorString(eError))); - goto fail_contextalloc; - } - ui32FWCommonContextOffset = 0; - psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; - } - - /* Record this context so we can refer to it if the FW needs to tell us it was reset. */ - psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; - psServerCommonContext->ui32LastResetJobRef = 0; - psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; + eError = RGXSetupFwAllocation(psDevInfo, + uiCCBMemAllocFlags, + ui32CCBSize * ui32CmdSize, + pszName, + ppsCCBMemDesc, + psCCBFWAddr, + (void**) ppui8CCB, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); /* - * Temporarily map the firmware context to the kernel and initialise it + * Initialise the CCB control. 
*/ - eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, - (void **)&pui8Ptr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware %s context to CPU (%s)", - __func__, - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - PVRSRVGetErrorString(eError))); - goto fail_cpuvirtacquire; - } - - /* Allocate the client CCB */ - eError = RGXCreateCCB(psDevInfo, - ui32CCBAllocSizeLog2, - ui32CCBMaxAllocSizeLog2, - ui32ContextFlags, - psConnection, - eRGXCCBRequestor, - psServerCommonContext, - &psServerCommonContext->psClientCCB, - &psServerCommonContext->psClientCCBMemDesc, - &psServerCommonContext->psClientCCBCtrlMemDesc); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: failed to create CCB for %s context (%s)", - __func__, - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - PVRSRVGetErrorString(eError))); - goto fail_allocateccb; - } + psCCBCtl = OSAllocZMem(sizeof(*psCCBCtl)); + PVR_LOG_GOTO_IF_NOMEM(psCCBCtl, eError, fail); - psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset); - psFWCommonContext->eDM = eDM; + psCCBCtl->ui32WrapMask = ui32CCBSize - 1; - /* Set the firmware CCB device addresses in the firmware common context */ - eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB, - psServerCommonContext->psClientCCBMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); + OSDeviceMemCopy(*ppsCCBCtl, psCCBCtl, sizeof(*psCCBCtl)); + RGXFwSharedMemCacheOpPtr(psCCBCtl, FLUSH); - eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl, - psServerCommonContext->psClientCCBCtrlMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); + *ppsCCBCtlLocal = psCCBCtl; - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) - { - RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr, - psServerCommonContext->psClientCCBMemDesc, - &psFWCommonContext->psCCB, - 0); - } + /* Pdump the CCB control */ + PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName); + DevmemPDumpLoadMem(*ppsCCBCtlMemDesc, + 0, + sizeof(RGXFWIF_CCB_CTL), + 0); - /* Set the memory context device address */ - psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; - eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext, - psFWMemContextMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); - - /* Set the framework register updates address */ - psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc; - if (psInfo->psFWFrameworkMemDesc != NULL) - { - eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd, - psInfo->psFWFrameworkMemDesc, - 0, RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr); - } - else - { - /* This should never be touched in this contexts without a framework - * memdesc, but ensure it is zero so we see crashes if it is. 
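In the RGXSetupCCB hunk above the buffer holds 1 << ui32NumCmdsLog2 commands and the control's ui32WrapMask is set to size - 1: the usual power-of-two ring-buffer trick, where offsets wrap with a bitwise AND instead of a modulo. A tiny standalone sketch of that arithmetic (NUM_CMDS_LOG2 and the offsets are illustrative, not the driver's values):

#include <stdint.h>
#include <stdio.h>

#define NUM_CMDS_LOG2 5U                      /* 32 slots                   */
#define CCB_SIZE      (1U << NUM_CMDS_LOG2)
#define WRAP_MASK     (CCB_SIZE - 1U)         /* valid because size is 2^n  */

int main(void)
{
    uint32_t write_off = 0;
    int i;

    /* Advancing the offset wraps with a mask rather than '%'. */
    for (i = 0; i < 40; i++)
        write_off = (write_off + 1U) & WRAP_MASK;

    printf("offset after 40 increments in a 32-slot CCB: %u\n", write_off); /* 8 */
    return 0;
}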
- */ - psFWCommonContext->psRFCmd.ui32Addr = 0; - } - - eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor); - PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); - - psServerCommonContext->i32Priority = i32Priority; - psServerCommonContext->eRequestor = eRGXCCBRequestor; - - /* Store the FWMemContext device virtual address in server mmu context - * to be used in schedule command path */ - RGXSetFWMemContextDevVirtAddr(psServerMMUContext, psFWCommonContext->psFWMemContext); - - psFWCommonContext->i32Priority = i32Priority; - psFWCommonContext->ui32PrioritySeqNum = 0; - psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, - (eDM == RGXFWIF_DM_CDM ? - RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS : - RGXFWIF_MAX_WORKLOAD_DEADLINE_MS)); - psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress; - - /* Store a references to Server Common Context and PID for notifications back from the FW. */ - psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; - psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM(); - OSCachedMemCopy(psFWCommonContext->szProcName, psConnection->pszProcName, RGXFW_PROCESS_NAME_LEN); - - /* Set the firmware GPU context state buffer */ - psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; - if (psContextStateMemDesc) - { - eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState, - psContextStateMemDesc, - 0, - RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr); - } - - /* - * Dump the created context - */ - PDUMPCOMMENT(psDeviceNode, - "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); - DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, - ui32FWCommonContextOffset, - sizeof(*psFWCommonContext), - PDUMP_FLAGS_CONTINUOUS); - - /* We've finished the setup so release the CPU mapping */ - DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); - - /* Map this allocation into the FW */ - eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, - psServerCommonContext->psFWCommonContextMemDesc, - ui32FWCommonContextOffset, - RFW_FWADDR_FLAG_NONE); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr); - -#if defined(__linux__) - { - IMG_UINT32 ui32FWAddr; - switch (eDM) { - case RGXFWIF_DM_GEOM: - ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) - psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); - break; - case RGXFWIF_DM_3D: - ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) - psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); - break; - default: - ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; - break; - } - - trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), - aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], - psDeviceNode->sDevId.ui32InternalID, - ui32FWAddr); - } -#endif - /*Add the node to the list when finalised */ - OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock); - dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode)); - OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock); - - *ppsServerCommonContext = psServerCommonContext; - return PVRSRV_OK; - -fail_fwcommonctxfwaddr: - if (psContextStateMemDesc) - { - RGXUnsetFirmwareAddress(psContextStateMemDesc); - } -fail_ctxstatefwaddr: 
-fail_checkpriority: - if (psInfo->psFWFrameworkMemDesc != NULL) - { - RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc); - } -fail_fwframeworkfwaddr: - RGXUnsetFirmwareAddress(psFWMemContextMemDesc); -fail_fwmemctxfwaddr: - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); -fail_cccbctrlfwaddr: - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); -fail_cccbfwaddr: - RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB); -fail_allocateccb: - DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); -fail_cpuvirtacquire: - RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); - if (!psServerCommonContext->bCommonContextMemProvided) - { - DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc); - psServerCommonContext->psFWCommonContextMemDesc = NULL; - } -fail_contextalloc: - OSFreeMem(psServerCommonContext); -fail_alloc: - return eError; -} - -void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - - OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); - /* Remove the context from the list of all contexts. */ - dllist_remove_node(&psServerCommonContext->sListNode); - OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); - - /* - Unmap the context itself and then all its resources - */ - - /* Unmap the FW common context */ - RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); - /* Umap context state buffer (if there was one) */ - if (psServerCommonContext->psContextStateMemDesc) - { - RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc); - } - /* Unmap the framework buffer */ - if (psServerCommonContext->psFWFrameworkMemDesc != NULL) - { - RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc); - } - /* Unmap client CCB and CCB control */ - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); - RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); - /* Unmap the memory context */ - RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc); - - /* Destroy the client CCB */ - RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB); - - - /* Free the FW common context (if there was one) */ - if (!psServerCommonContext->bCommonContextMemProvided) - { - DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo, - psServerCommonContext->psFWCommonContextMemDesc); - psServerCommonContext->psFWCommonContextMemDesc = NULL; - } - /* Free the hosts representation of the common context */ - OSFreeMem(psServerCommonContext); -} - -PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->sFWCommonContextFWAddr; -} - -RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->psClientCCB; -} - -SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->psServerMMUContext; -} - -RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 *pui32LastResetJobRef) -{ - RGX_CONTEXT_RESET_REASON eLastResetReason; - - PVR_ASSERT(psServerCommonContext != NULL); - PVR_ASSERT(pui32LastResetJobRef != NULL); - - /* Take the most recent reason & job ref and reset for next time... 
*/ - eLastResetReason = psServerCommonContext->eLastResetReason; - *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; - psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; - psServerCommonContext->ui32LastResetJobRef = 0; - - if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) - { - PVR_DPF((PVR_DBG_WARNING, - "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); - } - - return eLastResetReason; -} - -PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -{ - return psServerCommonContext->psDevInfo; -} - -PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, - SERVER_MMU_CONTEXT *psServerMMUContext, - PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) -{ - DLLIST_NODE *psNode, *psNext; - dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) - { - RGX_SERVER_COMMON_CONTEXT *psThisContext = - IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); - - if (psThisContext->psServerMMUContext == psServerMMUContext) - { - psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; - return PVRSRV_OK; - } - } - return PVRSRV_ERROR_INVALID_PARAMS; -} - -PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 ui32ContextFlags) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - - if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)", - __func__, ui32ContextFlags)); - eError = PVRSRV_ERROR_INVALID_PARAMS; - } - else - { - RGXSetCCBFlags(psServerCommonContext->psClientCCB, - ui32ContextFlags); - } - - return eError; -} - -/*! -******************************************************************************* - @Function RGXFreeCCB - @Description Free the kernel or firmware CCB - @Input psDevInfo - @Input ppsCCBCtl - @Input ppsCCBCtlMemDesc - @Input ppsCCBMemDesc - @Input psCCBCtlFWAddr -******************************************************************************/ -static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_CCB_CTL **ppsCCBCtl, - DEVMEM_MEMDESC **ppsCCBCtlMemDesc, - IMG_UINT8 **ppui8CCB, - DEVMEM_MEMDESC **ppsCCBMemDesc) -{ - if (*ppsCCBMemDesc != NULL) - { - if (*ppui8CCB != NULL) - { - DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc); - *ppui8CCB = NULL; - } - DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc); - *ppsCCBMemDesc = NULL; - } - if (*ppsCCBCtlMemDesc != NULL) - { - if (*ppsCCBCtl != NULL) - { - DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc); - *ppsCCBCtl = NULL; - } - DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); - *ppsCCBCtlMemDesc = NULL; - } -} - -/*! 
-******************************************************************************* - @Function RGXFreeCCBReturnSlots - @Description Free the kernel CCB's return slot array and associated mappings - @Input psDevInfo Device Info struct - @Input ppui32CCBRtnSlots CPU mapping of slot array - @Input ppsCCBRtnSlotsMemDesc Slot array's device memdesc -******************************************************************************/ -static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 **ppui32CCBRtnSlots, - DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc) -{ - /* Free the return slot array if allocated */ - if (*ppsCCBRtnSlotsMemDesc != NULL) - { - /* Before freeing, ensure the CPU mapping as well is released */ - if (*ppui32CCBRtnSlots != NULL) - { - DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc); - *ppui32CCBRtnSlots = NULL; - } - DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc); - *ppsCCBRtnSlotsMemDesc = NULL; - } -} - -/*! -******************************************************************************* - @Function RGXSetupCCB - @Description Allocate and initialise a circular command buffer - @Input psDevInfo - @Input ppsCCBCtl - @Input ppsCCBCtlMemDesc - @Input ppui8CCB - @Input ppsCCBMemDesc - @Input psCCBCtlFWAddr - @Input ui32NumCmdsLog2 - @Input ui32CmdSize - @Input uiCCBMemAllocFlags - @Input pszName - - @Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_CCB_CTL **ppsCCBCtl, - DEVMEM_MEMDESC **ppsCCBCtlMemDesc, - IMG_UINT8 **ppui8CCB, - DEVMEM_MEMDESC **ppsCCBMemDesc, - PRGXFWIF_CCB_CTL *psCCBCtlFWAddr, - PRGXFWIF_CCB *psCCBFWAddr, - IMG_UINT32 ui32NumCmdsLog2, - IMG_UINT32 ui32CmdSize, - PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags, - const IMG_CHAR *pszName) -{ - PVRSRV_ERROR eError; - RGXFWIF_CCB_CTL *psCCBCtl; - IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2); - IMG_CHAR szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN]; - IMG_INT32 iStrLen; - - /* Append "Control" to the name for the control struct. */ - iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName); - PVR_ASSERT(iStrLen < sizeof(szCCBCtlName)); - - if (unlikely(iStrLen < 0)) - { - OSStringLCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN); - } - - /* Allocate memory for the CCB control.*/ - eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, - sizeof(RGXFWIF_CCB_CTL), - szCCBCtlName, - ppsCCBCtlMemDesc, - psCCBCtlFWAddr, - (void**) ppsCCBCtl, - RFW_FWADDR_NOREF_FLAG); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); - - /* - * Allocate memory for the CCB. - * (this will reference further command data in non-shared CCBs) - */ - eError = RGXSetupFwAllocation(psDevInfo, - uiCCBMemAllocFlags, - ui32CCBSize * ui32CmdSize, - pszName, - ppsCCBMemDesc, - psCCBFWAddr, - (void**) ppui8CCB, - RFW_FWADDR_NOREF_FLAG); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); - - /* - * Initialise the CCB control. 
- */ - psCCBCtl = *ppsCCBCtl; - psCCBCtl->ui32WriteOffset = 0; - psCCBCtl->ui32ReadOffset = 0; - psCCBCtl->ui32WrapMask = ui32CCBSize - 1; - psCCBCtl->ui32CmdSize = ui32CmdSize; - - /* Pdump the CCB control */ - PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName); - DevmemPDumpLoadMem(*ppsCCBCtlMemDesc, - 0, - sizeof(RGXFWIF_CCB_CTL), - 0); - - return PVRSRV_OK; + return PVRSRV_OK; fail: RGXFreeCCB(psDevInfo, ppsCCBCtl, + ppsCCBCtlLocal, ppsCCBCtlMemDesc, ppui8CCB, ppsCCBMemDesc); @@ -1552,13 +992,14 @@ static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, return eError; } -#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo) { PMR *psPMR; +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) /* Run-time check feature support */ if (PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) +#endif { if (psDevInfo->psRGXFaultAddressMemDesc) { @@ -1581,11 +1022,13 @@ static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PMR *psPMR; +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) /* Run-time check feature support */ if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) { return PVRSRV_OK; } +#endif /* Allocate page of memory to use for page faults on non-blocking memory transactions. * Doesn't need to be cleared as it is initialised with the 0xDEADBEEF pattern below. */ @@ -1605,31 +1048,32 @@ static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, goto failFaultAddressDescAlloc; } - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, - (void **)&pui32MemoryVirtAddr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to acquire mem for fault address (%u)", - __func__, eError)); - goto failFaultAddressDescAqCpuVirt; - } if (!psDeviceNode->bAutoVzFwIsUp) { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, + (void **)&pui32MemoryVirtAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire mem for fault address (%u)", + __func__, eError)); + goto failFaultAddressDescAqCpuVirt; + } + /* fill the page with a known pattern when booting the firmware */ for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++) { *(pui32MemoryVirtAddr + i) = 0xDEADBEEF; } - } - OSWriteMemoryBarrier(pui32MemoryVirtAddr); + OSWriteMemoryBarrier(pui32MemoryVirtAddr); + RGXFwSharedMemCacheOpExec(pui32MemoryVirtAddr, ui32PageSize, PVRSRV_CACHE_OP_FLUSH); - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + } eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR); - if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1654,7 +1098,7 @@ static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, goto failFaultAddressDescLockPhys; } - eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid); + eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid, DEVICE_USE); if (eError != PVRSRV_OK) { @@ -1700,11 +1144,13 @@ static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) PMR *psFWInitPMR, *psFaultAddrPMR; IMG_UINT32 ui32Dstoffset; +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) /* Run-time check feature support */ if 
(!PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) { return PVRSRV_OK; } +#endif psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR); ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr); @@ -1723,39 +1169,8 @@ static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) return eError; } #endif -#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */ #if defined(SUPPORT_TBI_INTERFACE) -/*************************************************************************/ /*! -@Function RGXTBIBufferIsInitRequired - -@Description Returns true if the firmware tbi buffer is not allocated and - might be required by the firmware soon. TBI buffer allocated - on-demand to reduce RAM footprint on systems not needing - tbi. - -@Input psDevInfo RGX device info - -@Return IMG_BOOL Whether on-demand allocation(s) is/are needed - or not -*/ /**************************************************************************/ -INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - - /* The firmware expects a tbi buffer only when: - * - Logtype is "tbi" - */ - if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) - && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) - && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) - { - return IMG_TRUE; - } - - return IMG_FALSE; -} - /*************************************************************************/ /*! @Function RGXTBIBufferDeinit @@ -1770,6 +1185,7 @@ static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; + psDevInfo->ui32RGXL2HWPerfBufSize = 0; } /*************************************************************************/ /*! @@ -1831,39 +1247,6 @@ PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) } #endif -/*************************************************************************/ /*! -@Function RGXTraceBufferIsInitRequired - -@Description Returns true if the firmware trace buffer is not allocated and - might be required by the firmware soon. Trace buffer allocated - on-demand to reduce RAM footprint on systems not needing - firmware trace. - -@Input psDevInfo RGX device info - -@Return IMG_BOOL Whether on-demand allocation(s) is/are needed - or not -*/ /**************************************************************************/ -INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - - /* The firmware expects a trace buffer only when: - * - Logtype is "trace" AND - * - at least one LogGroup is configured - * - the Driver Mode is not Guest - */ - if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) - && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) - && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) - && !PVRSRV_VZ_MODE_IS(GUEST)) - { - return IMG_TRUE; - } - - return IMG_FALSE; -} - /*************************************************************************/ /*! 
@Function RGXTraceBufferDeinit @@ -1875,17 +1258,16 @@ INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) */ /**************************************************************************/ static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) { - RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; IMG_UINT32 i; for (i = 0; i < RGXFW_THREAD_NUM; i++) { if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i]) { - if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL) + if (psDevInfo->apui32TraceBuffer[i] != NULL) { DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); - psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL; + psDevInfo->apui32TraceBuffer[i] = NULL; } DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); @@ -1916,30 +1298,33 @@ PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0"; /* Check AppHint value for module-param FWTraceBufSizeInDWords */ - OSCreateKMAppHintState(&pvAppHintState); + OSCreateAppHintState(&pvAppHintState); ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWTraceBufSizeInDWords, &ui32DefaultTraceBufSize, - &psTraceBufCtl->ui32TraceBufSizeInDWords); - OSFreeKMAppHintState(pvAppHintState); + &psDevInfo->ui32TraceBufSizeInDWords); + OSFreeAppHintState(pvAppHintState); pvAppHintState = NULL; - if (psTraceBufCtl->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS || - psTraceBufCtl->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS) + /* Write tracebuf size once to devmem */ + psTraceBufCtl->ui32TraceBufSizeInDWords = psDevInfo->ui32TraceBufSizeInDWords; + + if (psDevInfo->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS || + psDevInfo->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS) { PVR_DPF((PVR_DBG_ERROR, "%s: Requested trace buffer size (%u) out of its minimum (%u) & maximum (%u) range. Exiting error.", __func__, - psTraceBufCtl->ui32TraceBufSizeInDWords, + psDevInfo->ui32TraceBufSizeInDWords, RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS, RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS)); eError = PVRSRV_ERROR_OUT_OF_RANGE; goto exit_error; } - uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + uiTraceBufSizeInBytes = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++) { @@ -1959,7 +1344,7 @@ PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, pszBufferName, &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum], &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer, - (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer, + (void**)&psDevInfo->apui32TraceBuffer[ui32FwThreadNum], RFW_FWADDR_NOREF_FLAG); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); } @@ -1972,100 +1357,6 @@ PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, return eError; } -#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) -/*************************************************************************/ /*! -@Function RGXPowmonBufferIsInitRequired - -@Description Returns true if the power monitoring buffer is not allocated and - might be required by the firmware soon. 
Powmon buffer allocated - on-demand to reduce RAM footprint on systems not needing - power monitoring. - -@Input psDevInfo RGX device info - -@Return IMG_BOOL Whether on-demand allocation(s) is/are needed - or not -*/ /**************************************************************************/ -INLINE IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - /* The firmware expects a power monitoring buffer only when: - * - Single-shot power counters are enabled with RGX_HWPERF_PWR_EST_REQUEST - * - the Driver Mode is not Guest - */ - if ((psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL) - && (psDevInfo->ui64HWPerfFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_EST_REQUEST)) - && !PVRSRV_VZ_MODE_IS(GUEST)) - { - return IMG_TRUE; - } - - return IMG_FALSE; -} - -/*************************************************************************/ /*! -@Function RGXPowmonBufferDeinit - -@Description Deinitialises all the allocations and references that are made - for the FW power monitoring buffer - -@Input ppsDevInfo RGX device info -@Return void -*/ /**************************************************************************/ -static void RGXPowmonBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - - if (psDevInfo->psRGXFWIfPowMonBufferMemDesc) - { - if (psFwSysData->sPowerMonBuf.pui32TraceBuffer != NULL) - { - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfPowMonBufferMemDesc); - psFwSysData->sPowerMonBuf.pui32TraceBuffer = NULL; - } - - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfPowMonBufferMemDesc); - psDevInfo->psRGXFWIfPowMonBufferMemDesc = NULL; - } -} - -/*************************************************************************/ /*! -@Function RGXPowmonBufferInitOnDemandResources - -@Description Allocates the power monitoring buffer. - -@Input psDevInfo RGX device info - -@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. -*/ /**************************************************************************/ -PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - PVRSRV_ERROR eError = PVRSRV_OK; - -#define POWER_MON_BUF_SIZE (8192UL) - /* Ensure allocation API is only called when not already allocated */ - PVR_ASSERT(psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL); - - eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, - POWER_MON_BUF_SIZE, - "FwPowMonBuffer", - &psDevInfo->psRGXFWIfPowMonBufferMemDesc, - &psFwSysData->sPowerMonBuf.pui32RGXFWIfTraceBuffer, - (void **)&psFwSysData->sPowerMonBuf.pui32TraceBuffer, - RFW_FWADDR_NOREF_FLAG); - PVR_LOG_GOTO_IF_ERROR(eError, "Power Monitoring Buffer allocation", fail); - - psFwSysData->ui32PowerMonBufSizeInDWords = POWER_MON_BUF_SIZE >> 2; - OSWriteMemoryBarrier(psFwSysData->sPowerMonBuf.pui32TraceBuffer); - - return PVRSRV_OK; -fail: - RGXPowmonBufferDeinit(psDevInfo); - return eError; -} -#endif - #if defined(PDUMP) /*************************************************************************/ /*! 
@Function RGXPDumpLoadFWInitData @@ -2076,16 +1367,14 @@ PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) @Input psDevInfo RGX device info */ /*************************************************************************/ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 ui32RenderKillingCtl, - IMG_UINT32 ui32CDMTDMKillingCtl, - IMG_BOOL bEnableSignatureChecks) + RGX_INIT_APPHINTS *psApphints, + IMG_UINT32 ui32HWPerfCountersDataSize) { - IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; - IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; + IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; + IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump RGXFW Init data"); - if (!bEnableSignatureChecks) + if (!psApphints->bEnableSignatureChecks) { PDUMPCOMMENT(psDevInfo->psDeviceNode, "(to enable rgxfw signatures place the following line after the RTCONF line)"); @@ -2161,12 +1450,10 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, sizeof(RGXFWIF_OSINIT), PDUMP_FLAGS_CONTINUOUS); -#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) /* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address"); RGXPDumpFaultReadRegister(psDevInfo); -#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "RTCONF: run-time configuration"); @@ -2181,16 +1468,8 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN); -#if defined(SUPPORT_VALIDATION) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN); -#endif /* defined(SUPPORT_VALIDATION) */ PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST); -#if defined(SUPPORT_VALIDATION) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( SPU Clock Gating (Needs R+D Power Island) 0x%08x)", RGXFWIF_INICFG_SPU_CLOCK_GATE); -#endif PDUMPCOMMENT(psDevInfo->psDeviceNode, "( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2198,35 +1477,40 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Try overlapping DM pipelines: 0x%08x)", RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES); +#if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX) + "( Try overlapping DM pipelines: 0x%08x)", RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Enable DM pipeline roadblocks: 0x%08x)", RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN); + PDUMPCOMMENT(psDevInfo->psDeviceNode, +#endif "( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( 
Disable HWPerf counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable Ctx Switch profile mode: 0x%08x (none=d'0, fast=d'1, medium=d'2, slow=d'3, nodelay=d'4))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK); + "( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED); + "( ICS Fault Injection: 0x%08x)", RGXFWIF_INICFG_INJECT_ICS_FAULT); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); #if defined(SUPPORT_PDVFS) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); #endif /* defined(SUPPORT_PDVFS) */ + } #endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2243,40 +1527,62 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the FW OS config options here)"); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN); + "( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Ctx Switch GEOM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN); +#endif + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Ctx Switch GEOM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); + "( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN); + "( Lower 
Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN); + "( Lower Priority Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM); +#endif +#if defined(SUPPORT_ICS) PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN); + "( Idle Cycle Stealing TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_ICS_TDM_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM); + "( Idle Cycle Stealing GEOM Enable: 0x%08x)", RGXFWIF_INICFG_OS_ICS_GEOM_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM); + "( Idle Cycle Stealing 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_ICS_3D_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D); + "( Idle Cycle Stealing CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_ICS_CDM_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM); + "( Idle Cycle Stealing RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_ICS_RDM_EN); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Lower Priority Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM); + "( Enable FDTI profile mode: 0x%08x (none=b'00, long=b'01, medium=b'10, short=b'11))", RGXFWIF_INICFG_FDTI_PROFILE_MASK); +#endif DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc, offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags), ui32FwOsCfgFlags, PDUMP_FLAGS_CONTINUOUS); +#if defined(SUPPORT_ICS) + PDUMPCOMMENT(psDevInfo->psDeviceNode,"Set the FDTI value in usec here"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, ui32FDTI), + psDevInfo->psRGXFWIfSysInit->ui32FDTI, + PDUMP_FLAGS_CONTINUOUS); +#endif +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) { PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; -#if defined(SUPPORT_VALIDATION) - IMG_BOOL bRunTimeUpdate = (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1); -#else IMG_BOOL bRunTimeUpdate = IMG_FALSE; -#endif - IMG_UINT32 ui32DstOffset = psDevInfo->psRGXFWIfRuntimeCfgMemDesc->uiOffset + offsetof(RGXFWIF_RUNTIME_CFG, ui32PowUnitsStateMask); + IMG_UINT32 ui32DstOffset = psDevInfo->psRGXFWIfRuntimeCfgMemDesc->uiOffset + offsetof(RGXFWIF_RUNTIME_CFG, ui32PowUnitsState); IMG_CHAR aszPowUnitsMaskRegVar[] = ":SYSMEM:$1"; IMG_CHAR aszPowUnitsEnable[] = "RUNTIME_POW_UNITS_MASK"; PMR *psPMR = (PMR *)(psDevInfo->psRGXFWIfRuntimeCfgMemDesc->psImport->hPMR); @@ -2291,7 +1597,7 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "Load initial value power units mask in FW runtime configuration"); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, ui32DstOffset, - psDevInfo->psRGXFWIfRuntimeCfg->ui32PowUnitsStateMask, + psDevInfo->psRGXFWIfRuntimeCfg->ui32PowUnitsState, ui32PDumpFlags); if (bRunTimeUpdate) @@ -2304,6 +1610,7 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMPFI(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags); } } +#endif #if defined(SUPPORT_SECURITY_VALIDATION) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2334,7 +1641,7 @@ static void 
RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMP_FLAGS_CONTINUOUS); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))", + "( PID filter PID/DriverID list (Up to %u entries. Terminate with a zero PID))", RGXFWIF_PID_FILTER_MAX_NUM_PIDS); { IMG_UINT32 i; @@ -2350,10 +1657,10 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, const IMG_DEVMEM_OFFSET_T uiPIDOff = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID); - const IMG_DEVMEM_OFFSET_T uiOSIDOff - = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID); + const IMG_DEVMEM_OFFSET_T uiDriverIDOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32DriverID); - PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and OSID pair %u)", i); + PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and DriverID pair %u)", i); PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)"); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, @@ -2361,33 +1668,24 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, 0, PDUMP_FLAGS_CONTINUOUS); - PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)"); + PDUMPCOMMENT(psDevInfo->psDeviceNode, "(DriverID)"); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, - uiOSIDOff, + uiDriverIDOff, 0, PDUMP_FLAGS_CONTINUOUS); } } -#if defined(SUPPORT_VALIDATION) - PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the FW GEOM/3D Killing Control.)"); - DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc, - offsetof(RGXFWIF_SYSDATA, ui32RenderKillingCtl), - ui32RenderKillingCtl, - PDUMP_FLAGS_CONTINUOUS); - - PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the FW CDM/TDM Killing Control.)"); - DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc, - offsetof(RGXFWIF_SYSDATA, ui32CDMTDMKillingCtl), - ui32CDMTDMKillingCtl, - PDUMP_FLAGS_CONTINUOUS); -#endif /* defined(SUPPORT_VALIDATION) */ /* * Dump the log config so it can be edited. 
*/ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the log config here)"); PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( Log Type: set bit 0 for TRACE, reset for TBI)"); + "( Log Type: TRACE mode using shared memory buffer: 0x00000001)"); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( TBI mode via external interface or sim support: 0x00000000)"); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( Note: TBI mode will hang on most hardware devices!)"); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN); PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -2410,15 +1708,18 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, "( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP); - - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) - { - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA); - } - + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( VZ Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_VZ); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( SAFETY Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SAFETY); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( VERBOSE Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_VERBOSE); + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "( CUSTOMER Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CUSTOMER); PDUMPCOMMENT(psDevInfo->psDeviceNode, "( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG); DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, @@ -2435,12 +1736,19 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, #if defined(SUPPORT_USER_REGISTER_CONFIGURATION) PDUMPCOMMENT(psDevInfo->psDeviceNode, - "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), TDM(%d))", + "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), " +#if defined(RGX_FEATURE_TLA_BIT_MASK) + "tla(%d), " +#endif + "tdm(%d))", RGXFWIF_REG_CFG_TYPE_PWR_ON, RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, RGXFWIF_REG_CFG_TYPE_TA, RGXFWIF_REG_CFG_TYPE_3D, RGXFWIF_REG_CFG_TYPE_CDM, +#if defined(RGX_FEATURE_TLA_BIT_MASK) + RGXFWIF_REG_CFG_TYPE_TLA, +#endif RGXFWIF_REG_CFG_TYPE_TDM); { @@ -2479,7 +1787,7 @@ static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, @Description Allocate a Guard Page at the start of a Guest's Main Heap - @Input psDevceNode + @Input psDevInfo @Return PVRSRV_ERROR ******************************************************************************/ @@ -2511,40 +1819,21 @@ static PVRSRV_ERROR RGXSetupFwGuardPage(PVRSRV_RGXDEV_INFO *psDevInfo) @Return PVRSRV_ERROR ******************************************************************************/ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 ui32RenderKillingCtl, - IMG_UINT32 ui32CDMTDMKillingCtl, - IMG_UINT32 
*pui32TPUTrilinearFracMask, - IMG_UINT32 *pui32USRMNumRegions, - IMG_UINT64 *pui64UVBRMNumRegions, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, - IMG_BOOL bSPUClockGating, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32AvailablePowUnitsMask, - IMG_UINT32 ui32AvailableRACMask) + IMG_UINT32 ui32HWPerfCountersDataSize) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; - IMG_UINT32 ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; + IMG_UINT32 ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; +#endif RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; -#if defined(SUPPORT_VALIDATION) - /* Create AppHint reference handle for use in SUPPORT_VALIDATION case. - * This is freed on exit from this routine. - */ - IMG_UINT32 ui32ApphintDefault = 0; - void *pvAppHintState = NULL; - OSCreateKMAppHintState(&pvAppHintState); -#endif /* defined(SUPPORT_VALIDATION) */ +#if defined(PDUMP) + IMG_UINT32 ui32SignatureChecksBufSize = psApphints->ui32SignatureChecksBufSize; +#endif psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); @@ -2562,73 +1851,89 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); -#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) /* Setup Fault read register */ eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch); PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail); -#endif #if defined(SUPPORT_AUTOVZ) psFwSysInitScratch->ui32VzWdgPeriod = PVR_AUTOVZ_WDG_PERIOD_MS; #endif +#if defined(SUPPORT_ICS) + psFwSysInitScratch->ui32FDTI = psApphints->ui32FDTI; + psFwSysInitScratch->ui32ICSThreshold = psApphints->ui32ICSThreshold; + psFwSysInitScratch->bTestModeOn = psApphints->bTestModeOn; +#endif + /* RD Power Island */ { RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland; - IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || - (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); + IMG_BOOL bEnableRDPowIsland = ((psApphints->eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || + (psApphints->eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; } -#if defined(SUPPORT_VALIDATION) - ui32ConfigFlags |= bSPUClockGating ? RGXFWIF_INICFG_SPU_CLOCK_GATE : 0; -#else - PVR_UNREFERENCED_PARAMETER(bSPUClockGating); -#endif - - /* Make sure to inform firmware if the device supports fullace fabric coherency */ - ui32ConfigFlags |= (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && - PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ? 
- RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED : 0; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; -#if defined(SUPPORT_PDVFS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; - IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; - - /* Pro-active DVFS depends on Workload Estimation */ - psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; - psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; - PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); - - if (psDVFSDeviceCfg->pasOPPTable != NULL) +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; +#endif +#if defined(SUPPORT_FW_OPP_TABLE) { - if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) + RGXFWIF_OPP_INFO *psOPPInfo; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; + + /* Pro-active DVFS depends on Workload Estimation */ + psOPPInfo = &psFwSysInitScratch->sOPPInfo; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; +#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP) && !defined(NO_HARDWARE) + if (psDVFSDeviceCfg->bDTConfig) { - PVR_DPF((PVR_DBG_ERROR, - "%s: OPP Table too large: Size = %u, Maximum size = %lu", - __func__, - psDVFSDeviceCfg->ui32OPPTableSize, - (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto fail; + /* OPP table configured from Device tree */ + eError = DVFSCopyOPPTable(psDeviceNode, + psOPPInfo, + ARRAY_SIZE(psOPPInfo->asOPPValues)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to copy OPP table to FW init buffer (%u)", eError)); + goto fail; + } } +#endif + if (!psDVFSDeviceCfg->bDTConfig) + { + PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); + + if (psDVFSDeviceCfg->pasOPPTable != NULL) + { + if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psOPPInfo->asOPPValues)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OPP Table too large: Size = %u, Maximum size = %lu", + __func__, + psDVFSDeviceCfg->ui32OPPTableSize, + (unsigned long)(ARRAY_SIZE(psOPPInfo->asOPPValues)))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail; + } - OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, - psDVFSDeviceCfg->pasOPPTable, - sizeof(psPDVFSOPPInfo->asOPPValues)); + OSDeviceMemCopy(psOPPInfo->asOPPValues, + psDVFSDeviceCfg->pasOPPTable, + sizeof(psOPPInfo->asOPPValues)); - psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; + psOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; + } + } +#if defined(SUPPORT_PDVFS) ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; +#endif } +#endif /* defined(SUPPORT_FW_OPP_TABLE) */ } -#endif /* defined(SUPPORT_PDVFS) */ -#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ /* FW trace control structure */ eError = RGXSetupFwAllocation(psDevInfo, @@ -2645,15 +1950,16 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, if (!psDeviceNode->bAutoVzFwIsUp) { /* Set initial firmware log type/group(s) */ - if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) + if (psApphints->ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) { eError = PVRSRV_ERROR_INVALID_PARAMS; PVR_DPF((PVR_DBG_ERROR, "%s: Invalid initial log type (0x%X)", - __func__, ui32LogType)); + __func__, psApphints->ui32LogType)); goto fail; } - psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = psApphints->ui32LogType; + 
RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); } /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource @@ -2664,7 +1970,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(SUPPORT_AUTOVZ) /* always allocate trace buffer for AutoVz Host drivers to allow * deterministic addresses of all SysData structures */ - if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo))) + if ((PVRSRV_VZ_MODE_IS(HOST, DEVINFO, psDevInfo)) || (RGXTraceBufferIsInitRequired(psDevInfo))) #else if (RGXTraceBufferIsInitRequired(psDevInfo)) #endif @@ -2689,116 +1995,17 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, /* GPIO validation setup */ psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF; -#if defined(SUPPORT_VALIDATION) - { - IMG_INT32 ui32GPIOValidationMode; - ui32ApphintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE; - /* Check AppHint for GPIO validation mode */ - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, - pvAppHintState, - GPIOValidationMode, - &ui32ApphintDefault, - &ui32GPIOValidationMode); - - if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.", - __func__, - ui32GPIOValidationMode, - RGXFWIF_GPIO_VAL_LAST)); - } - else - { - psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode; - } - - psFwSysInitScratch->eGPIOValidationMode = ui32GPIOValidationMode; - } - -//if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN)) - { - IMG_BOOL bGPUStatePin; - IMG_BOOL bApphintDefault = IMG_FALSE; - /* Check AppHint for GPU state pin */ - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, - pvAppHintState, - GPUStatePin, - &bApphintDefault, - &bGPUStatePin); - - psDevInfo->ui32ValidationFlags |= (bGPUStatePin) ? 
RGX_VAL_GPUSTATEPIN_EN : 0; - } - - { - IMG_UINT32 ui32EnablePollOnChecksumErrorStatus; - ui32ApphintDefault = 0; - /* Check AppHint for polling on GPU Checksum status */ - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, - pvAppHintState, - EnablePollOnChecksumErrorStatus, - &ui32ApphintDefault, - &ui32EnablePollOnChecksumErrorStatus); +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + eError = RGXFWSetupCounterBuffer(psDevInfo, + &psDevInfo->psCounterBufferMemDesc, + OSGetPageSize(), + &psFwSysInitScratch->sCounterDumpCtl); + PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail); - switch (ui32EnablePollOnChecksumErrorStatus) - { - case 0: /* no checking */ break; - case 1: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_NOERR_EN; break; - case 2: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_ERR_EN; break; - case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break; - case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break; - default: - PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus)); - break; - } - } - - /* Check AppHint for power island transition interval */ - ui32ApphintDefault = 0; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, - pvAppHintState, - PowerDomainKickInterval, - &ui32ApphintDefault, - &psDevInfo->ui32PowDomainKickInterval); - - { - IMG_UINT64 ui64RCEDisableMask; - IMG_UINT64 ui64ApphintDefault = PVRSRV_APPHINT_RCEDISABLEMASK; - OSGetKMAppHintUINT64(APPHINT_NO_DEVICE, - pvAppHintState, - RCEDisableMask, - &ui64ApphintDefault, - &ui64RCEDisableMask); - psFwSysInitScratch->ui64RCEDisableMask = ui64RCEDisableMask; - - } - { - #define PCG_PKT_DROP_THRESH_MAX (0x800U) - #define PCG_PKT_DROP_THRESH_MIN (0xBU) - - IMG_UINT32 ui32PCGPktDropThresh; - IMG_UINT32 ui32ApphintDefault = PCG_PKT_DROP_THRESH_MIN; - - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, - pvAppHintState, - PCGPktDropThresh, - &ui32ApphintDefault, - &ui32PCGPktDropThresh); - - if ((ui32PCGPktDropThresh < PCG_PKT_DROP_THRESH_MIN) || - (ui32PCGPktDropThresh > PCG_PKT_DROP_THRESH_MAX)) - { - ui32PCGPktDropThresh = MAX(PCG_PKT_DROP_THRESH_MIN, MIN(ui32PCGPktDropThresh, PCG_PKT_DROP_THRESH_MAX)); - - PVR_DPF((PVR_DBG_WARNING, - "Clamping value of PCGPktDropThresh apphint to %u (range is %u to %u)", - ui32PCGPktDropThresh, PCG_PKT_DROP_THRESH_MIN, PCG_PKT_DROP_THRESH_MAX)); - } - psFwSysInitScratch->ui32PCGPktDropThresh = ui32PCGPktDropThresh; - } + PVR_DPF((PVR_DBG_WARNING, "Counter buffer allocated at %p, size %zu Bytes.", psDevInfo->psCounterBufferMemDesc, OSGetPageSize())); +#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ -#endif /* defined(SUPPORT_VALIDATION) */ #if defined(SUPPORT_FIRMWARE_GCOV) eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo, @@ -2817,13 +2024,21 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN; } - /* Setup Signature and Checksum Buffers for TDM, GEOM, 3D and CDM */ - eError = RGXFWSetupSignatureChecks(psDevInfo, - &psDevInfo->psRGXFWSigTDMChecksMemDesc, - ui32SignatureChecksBufSize, - &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]); - PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); - psDevInfo->ui32SigTDMChecksSize = ui32SignatureChecksBufSize; + /* Setup Signature and Checksum Buffers */ + psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL; + psDevInfo->ui32SigTDMChecksSize = 0; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + /* Buffer 
allocated only when feature present because all known TDM + * signature registers are dependent on this feature being present */ + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]); + PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); + psDevInfo->ui32SigTDMChecksSize = ui32SignatureChecksBufSize; + } eError = RGXFWSetupSignatureChecks(psDevInfo, &psDevInfo->psRGXFWSigTAChecksMemDesc, @@ -2846,6 +2061,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "CDM Signature check setup", fail); psDevInfo->ui32SigCDMChecksSize = ui32SignatureChecksBufSize; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) { @@ -2856,32 +2072,26 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "RDM Signature check setup", fail); psDevInfo->ui32SigRDMChecksSize = ui32SignatureChecksBufSize; } - -#if defined(SUPPORT_VALIDATION) - eError = RGXFWSetupSignatureChecks(psDevInfo, - &psDevInfo->psRGXFWValidationSigMemDesc, - ui32SignatureChecksBufSize, - &psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D]); - psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_CDM] = psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D]; - PVR_LOG_GOTO_IF_ERROR(eError, "FBCDC/TRP/WGP Signature check setup", fail); - psDevInfo->ui32ValidationSigSize = ui32SignatureChecksBufSize; -#endif #endif - if (!bEnableSignatureChecks) + + if (!psApphints->bEnableSignatureChecks) { psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0; psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM].sBuffer.ui32Addr = 0x0; +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_RAY].sBuffer.ui32Addr = 0x0; +#endif } +#endif /* defined(PDUMP) */ eError = RGXFWSetupAlignChecks(psDeviceNode, &psFwSysInitScratch->sAlignChecks); PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); - psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; + psFwSysInitScratch->ui32FilterFlags = psApphints->ui32FilterFlags; /* Fill the remaining bits of fw the init data */ psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; @@ -2889,13 +2099,25 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE; psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE; psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE; - psFwSysInitScratch->sPDSIndirectHeapBase.uiAddr = RGX_PDS_INDIRECT_STATE_HEAP_BASE; - psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask; + psFwSysInitScratch->sPDSIndirectHeapBase.uiAddr = RGX_PDS_INDIRECT_STATE_HEAP_BASE; - eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); - PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); +#if defined(FIX_HW_BRN_65273_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + /* Fill the remaining bits of fw the init data */ + psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE; + psFwSysInitScratch->sUSCExecBase.uiAddr = 
RGX_USCCODE_BRN_65273_HEAP_BASE; + } +#endif +#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) + { + eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); + } +#endif #if defined(SUPPORT_PDVFS) /* Core clock rate */ eError = RGXSetupFwAllocation(psDevInfo, @@ -2999,7 +2221,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, __func__)); goto fail; } - + } #if !defined(PVRSRV_USE_BRIDGE_LOCK) eError = OSLockCreate(&psDevInfo->hTimerQueryLock); if (eError != PVRSRV_OK) @@ -3010,7 +2232,6 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, goto fail; } #endif - } #if defined(SUPPORT_TBI_INTERFACE) #if !defined(PDUMP) /* allocate only if required */ @@ -3028,15 +2249,19 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer; #endif /* defined(SUPPORT_TBI_INTERFACE) */ - /* Allocate shared buffer for GPU utilisation */ + /* Allocate shared buffer for GPU utilisation. + * Enable FIRMWARE_CACHED to reduce read latency in the FW. + * The FW flushes the cache after any writes. + */ eError = RGXSetupFwAllocation(psDevInfo, - RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), - sizeof(RGXFWIF_GPU_UTIL_FWCB), + sizeof(RGXFWIF_GPU_UTIL_FW), "FwGPUUtilisationBuffer", - &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, - &psFwSysInitScratch->sGpuUtilFWCbCtl, - (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, + &psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc, + &psFwSysInitScratch->sGpuUtilFWCtl, + (void**) &psDevInfo->psRGXFWIfGpuUtilFW, RFW_FWADDR_NOREF_FLAG); PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); @@ -3077,40 +2302,49 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "Firmware scratch buffer allocation", fail); #endif - psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); + psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(psApphints->ui32HWPerfFWBufSize); + + /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer + * accessed by the FW. The MISR may try to write one packet the size of the L1 + * buffer in some scenarios. When logging is enabled in the MISR, it can be seen + * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers + * are the more chance of this happening. + * Size chosen to allow MISR to write an L1 sized packet and for the client + * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. + */ + psDevInfo->ui32RGXL2HWPerfBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize + + (psDevInfo->ui32RGXFWIfHWPerfBufSize>>1); + /* Second stage initialisation or HWPerf, hHWPerfLock created in first * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ - if (psDevInfo->ui64HWPerfFilter == 0) + if (psDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF] == 0) { - psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter; - psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = + RGXHWPerfFwSetEventFilter(psDevInfo, RGX_HWPERF_L2_STREAM_HWPERF, + (IMG_UINT64) psApphints->ui32HWPerfFilter0 | + ((IMG_UINT64) psApphints->ui32HWPerfFilter1 << 32)); } else { /* The filter has already been modified. 
This can happen if * pvr/apphint/EnableFTraceGPU was enabled. */ - psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFwFilter; } #if !defined(PDUMP) /* Allocate if HWPerf filter has already been set. This is possible either * by setting a proper AppHint or enabling GPU ftrace events. */ - if (psDevInfo->ui64HWPerfFilter != 0) + if (psFwSysInitScratch->ui64HWPerfFilter != 0) #endif { /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources * (irrespective of HWPerf enabled or not), given that HWPerf can be * enabled during PDump playback via RTCONF at any point of time. */ - eError = RGXHWPerfInitOnDemandResources(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail); -#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) - if (RGXPowmonBufferIsInitRequired(psDevInfo)) - { - /* Allocate power monitoring log buffer if enabled */ - eError = RGXPowmonBufferInitOnDemandResources(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXPowmonBufferInitOnDemandResources", fail); - } -#endif + eError = RGXHWPerfInitOnDemandL1Buffer(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandL1Buffer", fail); + + eError = RGXHWPerfInitOnDemandL2Stream(psDevInfo, RGX_HWPERF_L2_STREAM_HWPERF); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandL2Stream", fail); } if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) @@ -3142,7 +2376,7 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) ? IMG_FALSE : IMG_TRUE; - psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; + psFwSysInitScratch->eFirmwarePerf = psApphints->eFirmwarePerf; #if defined(PDUMP) /* default: no filter */ @@ -3150,34 +2384,11 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; #endif -#if defined(SUPPORT_VALIDATION) - { - IMG_UINT32 dm; - - /* TPU trilinear rounding mask override */ - for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) - { - psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; - } - - /* USRM Config override */ - for (dm = 0; dm < RGXFWIF_USRM_DM_LAST; dm++) - { - psFwSysInitScratch->aui32USRMNumRegions[dm] = pui32USRMNumRegions[dm]; - } - - /* UVBRM Config override */ - for (dm = 0; dm < RGXFWIF_UVBRM_DM_LAST; dm++) - { - psFwSysInitScratch->aui64UVBRMNumRegions[dm] = pui64UVBRMNumRegions[dm]; - } - } -#endif #if defined(SUPPORT_SECURITY_VALIDATION) { PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS; - PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags); + PVRSRV_SET_PHYS_HEAP_HINT(FW_PRIV_DATA, uiFlags); PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test"); eError = DevmemFwAllocateExportable(psDeviceNode, @@ -3209,44 +2420,118 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, } #endif /* SUPPORT_SECURITY_VALIDATION */ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) + { + psFwSysInitScratch->ui32TFBCCompressionControl = + (ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >> RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT; + } +#endif /* RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK */ + /* Initialize FW started flag */ psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; 
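The HWPerf sizing comment earlier in RGXSetupFwSysData settles on a host-side L2 buffer of roughly 1.5 times the L1 buffer written by the firmware, computed as L1 + (L1 >> 1). A tiny stand-alone illustration of that arithmetic, in plain C with illustrative names only:

#include <assert.h>
#include <stdint.h>

/* Mirrors the L2 sizing above: L2 = L1 + L1/2, i.e. ~1.5 * L1, so the
 * MISR can stage one L1-sized packet while a client drains another. */
static uint32_t l2_size_from_l1(uint32_t l1_bytes)
{
	return l1_bytes + (l1_bytes >> 1);
}

int main(void)
{
	assert(l2_size_from_l1(64U * 1024U) == 96U * 1024U);  /* 64 KiB L1 -> 96 KiB L2 */
	assert(l2_size_from_l1(2048U) == 3072U);
	return 0;
}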
psFwSysInitScratch->ui32MarkerVal = 1; + psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = RGX_VZ_CONNECTION_COOLDOWN_PERIOD; if (!psDeviceNode->bAutoVzFwIsUp) { - IMG_UINT32 ui32OSIndex; + IMG_UINT32 ui32DriverID; RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; /* Required info by FW to calculate the ActivePM idle timer latency */ psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; +#if defined(SUPPORT_SOC_TIMER) + psFwSysInitScratch->ui32InitialSOCClockSpeed = psRGXData->psRGXTimingInfo->ui32SOCClockSpeed; +#endif psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; /* Initialise variable runtime configuration to the system defaults */ psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; +#if defined(SUPPORT_SOC_TIMER) + psRuntimeCfg->ui32SOCClockSpeed = psFwSysInitScratch->ui32InitialSOCClockSpeed; +#endif psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms; psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE; - psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS; - if (PVRSRV_VZ_MODE_IS(NATIVE)) + if ((RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US > 0U) && (RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US < 1000U)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_LOG_GOTO_IF_ERROR(eError, + "RGXSetupFwSysData: RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US must be either 0 (disabled) or greater than 1000", + fail); + } + else + { + psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; + } + + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo)) { - psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0; + psRuntimeCfg->ai32DriverPriority[RGXFW_HOST_DRIVER_ID] = 0; + psRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID] = RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP; + psRuntimeCfg->aui32TSPercentage[RGXFW_HOST_DRIVER_ID] = (IMG_UINT8)RGX_DRIVERID_0_DEFAULT_TIME_SLICE; } else { - for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++) - { - const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] = - {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY, - RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY}; + const RGX_QOS_DEFAULTS asQosDefaults[RGXFW_MAX_NUM_OSIDS] = { + RGX_QOS_DEFAULTS_INIT(0), +#if (RGXFW_MAX_NUM_OSIDS > 1) + RGX_QOS_DEFAULTS_INIT(1), +#if (RGXFW_MAX_NUM_OSIDS > 2) + RGX_QOS_DEFAULTS_INIT(2), + RGX_QOS_DEFAULTS_INIT(3), + RGX_QOS_DEFAULTS_INIT(4), + RGX_QOS_DEFAULTS_INIT(5), + RGX_QOS_DEFAULTS_INIT(6), + RGX_QOS_DEFAULTS_INIT(7), +#if (RGXFW_MAX_NUM_OSIDS > 8) + RGX_QOS_DEFAULTS_INIT(8), + RGX_QOS_DEFAULTS_INIT(9), + RGX_QOS_DEFAULTS_INIT(10), + RGX_QOS_DEFAULTS_INIT(11), + RGX_QOS_DEFAULTS_INIT(12), + RGX_QOS_DEFAULTS_INIT(13), + RGX_QOS_DEFAULTS_INIT(14), + RGX_QOS_DEFAULTS_INIT(15), + RGX_QOS_DEFAULTS_INIT(16), + RGX_QOS_DEFAULTS_INIT(17), + RGX_QOS_DEFAULTS_INIT(18), + RGX_QOS_DEFAULTS_INIT(19), + RGX_QOS_DEFAULTS_INIT(20), + RGX_QOS_DEFAULTS_INIT(21), + RGX_QOS_DEFAULTS_INIT(22), + RGX_QOS_DEFAULTS_INIT(23), + RGX_QOS_DEFAULTS_INIT(24), + RGX_QOS_DEFAULTS_INIT(25), + RGX_QOS_DEFAULTS_INIT(26), + RGX_QOS_DEFAULTS_INIT(27), + RGX_QOS_DEFAULTS_INIT(28), + RGX_QOS_DEFAULTS_INIT(29), + RGX_QOS_DEFAULTS_INIT(30), 
+ RGX_QOS_DEFAULTS_INIT(31), +#if (RGXFW_MAX_NUM_OSIDS > 32) +#error "Support for more than 32 OSIDs not implemented." +#endif +#endif /* RGXFW_MAX_NUM_OSIDS > 8 */ +#endif /* RGXFW_MAX_NUM_OSIDS > 2 */ +#endif /* RGXFW_MAX_NUM_OSIDS > 1 */ + }; + + FOREACH_SUPPORTED_DRIVER(ui32DriverID) + { /* Set up initial priorities between different OSes */ - psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex]; + psRuntimeCfg->ai32DriverPriority[ui32DriverID] = asQosDefaults[ui32DriverID].i32Priority; + psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = asQosDefaults[ui32DriverID].ui32IsolationGroups; + psRuntimeCfg->aui32TSPercentage[ui32DriverID] = (asQosDefaults[ui32DriverID].ui32TSPercentage <= + PVRSRV_VZ_TIME_SLICE_MAX) ? + asQosDefaults[ui32DriverID].ui32TSPercentage:(0); } } + psRuntimeCfg->ui32TSIntervalMs = RGX_DRIVER_DEFAULT_TIME_SLICE_INTERVAL; #if defined(PVR_ENABLE_PHR) && defined(PDUMP) psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET; @@ -3255,25 +2540,26 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, #endif /* Validate the power units mask and initialize to number of units to power up */ - if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + if ((psApphints->ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) { eError = PVRSRV_ERROR_INVALID_SPU_MASK; PVR_DPF((PVR_DBG_ERROR, "%s:Invalid power units mask (All=0x%X, Non Fused=0x%X). At-least one power unit must to be powered up.", __func__, ui32AllPowUnitsMask, - ui32AvailablePowUnitsMask)); + psApphints->ui32AvailablePowUnitsMask)); goto fail; } - psRuntimeCfg->ui32PowUnitsStateMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + psRuntimeCfg->ui32PowUnitsState = psApphints->ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; - psRuntimeCfg->ui32RACStateMask = ui32AvailableRACMask & ui32AllRACMask; + psRuntimeCfg->ui32RACUnitsState = psApphints->ui32AvailableRACMask & ui32AllRACMask; /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfRuntimeCfg); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfRuntimeCfg, FLUSH); /* Setup FW coremem data */ - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) { psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; @@ -3288,44 +2574,45 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL; -#if defined(SUPPORT_VALIDATION) - psDevInfo->psRGXFWIfFwSysData->ui32RenderKillingCtl = ui32RenderKillingCtl; - psDevInfo->psRGXFWIfFwSysData->ui32CDMTDMKillingCtl = ui32CDMTDMKillingCtl; -#else - PVR_UNREFERENCED_PARAMETER(ui32RenderKillingCtl); - PVR_UNREFERENCED_PARAMETER(ui32CDMTDMKillingCtl); -#endif /* Initialise GPU utilisation buffer */ { IMG_UINT64 ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(), RGXFWIF_GPU_UTIL_STATE_IDLE); RGXFWIF_DM eDM; + IMG_UINT64 ui64LastWordTimeShifted = + RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64() >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT, RGXFWIF_GPU_UTIL_STATE_IDLE); + IMG_UINT32 ui32DriverID; - psDevInfo->psRGXFWIfGpuUtilFWCb->ui64GpuLastWord = ui64LastWord; + psDevInfo->psRGXFWIfGpuUtilFW->ui64GpuLastWord = ui64LastWord; - for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - 
IMG_UINT32 ui32OSid; + RGXFWIF_GPU_STATS *psStats = &psDevInfo->psRGXFWIfGpuUtilFW->sStats[ui32DriverID]; - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + for (eDM = 0; eDM < RGXFWIF_GPU_UTIL_DM_MAX; eDM++) { - psDevInfo->psRGXFWIfGpuUtilFWCb->aaui64DMOSLastWord[eDM][ui32OSid] = ui64LastWord; + psStats->aui32DMOSLastWord[eDM] = (IMG_UINT32)(ui64LastWordTimeShifted & IMG_UINT32_MAX); + psStats->aui32DMOSLastWordWrap[eDM] = (IMG_UINT32)(ui64LastWordTimeShifted >> 32); } } + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFW, FLUSH); } /* init HWPERF data */ - psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0; - psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0; - psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0; + psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx = 0; + psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWIdx = 0; + psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount = 0; psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; + psDevInfo->psRGXFWIfFwSysData->ui32MemFaultCheck = 0; + // flush write buffers for psRGXFWIfFwSysData OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwSysData); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, FLUSH); /*Send through the BVNC Feature Flags*/ eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); @@ -3333,13 +2620,12 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, /* populate the real FwOsInit structure with the values stored in the scratch copy */ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfSysInit, + FLUSH); } OSFreeMem(psFwSysInitScratch); -#if defined(SUPPORT_VALIDATION) - OSFreeKMAppHintState(pvAppHintState); -#endif return PVRSRV_OK; @@ -3352,9 +2638,6 @@ static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFreeFwSysData(psDevInfo); PVR_ASSERT(eError != PVRSRV_OK); -#if defined(SUPPORT_VALIDATION) - OSFreeKMAppHintState(pvAppHintState); -#endif return eError; } @@ -3379,7 +2662,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT)); - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { eError = RGXSetupFwGuardPage(psDevInfo); PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware heap guard pages", fail); @@ -3421,6 +2704,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail); OSCachedMemSetWMB(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, FLUSH); /* Allocate a sync for power management */ eError = SyncPrimContextCreate(psDevInfo->psDeviceNode, @@ -3433,6 +2717,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* Set up kernel CCB */ eError = RGXSetupCCB(psDevInfo, &psDevInfo->psKernelCCBCtl, + &psDevInfo->psKernelCCBCtlLocal, &psDevInfo->psKernelCCBCtlMemDesc, &psDevInfo->psKernelCCB, &psDevInfo->psKernelCCBMemDesc, @@ -3461,6 +2746,7 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* Set up firmware CCB */ eError = 
RGXSetupCCB(psDevInfo, &psDevInfo->psFirmwareCCBCtl, + &psDevInfo->psFirmwareCCBCtlLocal, &psDevInfo->psFirmwareCCBCtlMemDesc, &psDevInfo->psFirmwareCCB, &psDevInfo->psFirmwareCCBMemDesc, @@ -3489,31 +2775,42 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, /* flush write buffers for psRGXFWIfFwOsData */ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwOsData); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, + FLUSH); sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Set up Workload Estimation firmware CCB */ - eError = RGXSetupCCB(psDevInfo, - &psDevInfo->psWorkEstFirmwareCCBCtl, - &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, - &psDevInfo->psWorkEstFirmwareCCB, - &psDevInfo->psWorkEstFirmwareCCBMemDesc, - &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, - &sFwOsInitScratch.psWorkEstFirmwareCCB, - RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, - sizeof(RGXFWIF_WORKEST_FWCCB_CMD), - RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, - "FwWEstCCB"); - PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Set up Workload Estimation firmware CCB */ + eError = RGXSetupCCB(psDevInfo, + &psDevInfo->psWorkEstFirmwareCCBCtl, + &psDevInfo->psWorkEstFirmwareCCBCtlLocal, + &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, + &psDevInfo->psWorkEstFirmwareCCB, + &psDevInfo->psWorkEstFirmwareCCBMemDesc, + &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, + &sFwOsInitScratch.psWorkEstFirmwareCCB, + RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, + sizeof(RGXFWIF_WORKEST_FWCCB_CMD), + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + "FwWEstCCB"); + PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); + } #endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ - /* Initialise the compatibility check data */ - RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); - RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Initialise the compatibility check data */ + RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); + RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); + } /* populate the real FwOsInit structure with the values stored in the scratch copy */ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT)); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfOsInit, + FLUSH); return PVRSRV_OK; @@ -3535,40 +2832,27 @@ static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, @Return PVRSRV_ERROR ******************************************************************************/ PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32FwOsCfgFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 ui32RenderKillingCtl, - IMG_UINT32 ui32CDMTDMKillingCtl, - IMG_UINT32 *pui32TPUTrilinearFracMask, - IMG_UINT32 *pui32USRMNumRegions, - IMG_UINT64 *pui64UVBRMNumRegions, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, - IMG_BOOL bSPUClockGating, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2, - IMG_UINT32 
ui32AvailablePowUnitsMask, - IMG_UINT32 ui32AvailableRACMask) + IMG_UINT32 ui32FwOsCfgFlags) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32NumHWPerfBlocks; + IMG_UINT32 ui32HWPerfCountersDataSize; eError = RGXSetupFwOsData(psDeviceNode, - ui32KCCBSizeLog2, - ui32HWRDebugDumpLimit, + psApphints->ui32KCCBSizeLog2, + psApphints->ui32HWRDebugDumpLimit, ui32FwOsCfgFlags); PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail); - if (PVRSRV_VZ_MODE_IS(GUEST)) + ui32NumHWPerfBlocks = RGXHWPerfMaxDefinedBlks((void *)psDevInfo); + ui32HWPerfCountersDataSize = sizeof(RGXFWIF_HWPERF_CTL) + + (ui32NumHWPerfBlocks - 1) * sizeof(RGXFWIF_HWPERF_CTL_BLK); + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* Guest drivers do not configure system-wide firmware data */ psDevInfo->psRGXFWIfSysInit = NULL; @@ -3577,26 +2861,10 @@ PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, { /* Native and Host drivers must initialise the firmware's system data */ eError = RGXSetupFwSysData(psDeviceNode, - bEnableSignatureChecks, - ui32SignatureChecksBufSize, - ui32HWPerfFWBufSizeKB, - ui64HWPerfFilter, + psApphints, ui32ConfigFlags, ui32ConfigFlagsExt, - ui32LogType, - ui32FilterFlags, - ui32JonesDisableMask, - ui32HWPerfCountersDataSize, - ui32RenderKillingCtl, - ui32CDMTDMKillingCtl, - pui32TPUTrilinearFracMask, - pui32USRMNumRegions, - pui64UVBRMNumRegions, - eRGXRDPowerIslandConf, - bSPUClockGating, - eFirmwarePerf, - ui32AvailablePowUnitsMask, - ui32AvailableRACMask); + ui32HWPerfCountersDataSize); PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail); } @@ -3604,10 +2872,8 @@ PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(PDUMP) RGXPDumpLoadFWInitData(psDevInfo, - ui32HWPerfCountersDataSize, - ui32RenderKillingCtl, - ui32CDMTDMKillingCtl, - bEnableSignatureChecks); + psApphints, + ui32HWPerfCountersDataSize); #endif /* PDUMP */ fail: @@ -3632,7 +2898,8 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) } #if defined(PDUMP) - if (psDevInfo->psRGXFWSigTDMChecksMemDesc) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) && + psDevInfo->psRGXFWSigTDMChecksMemDesc) { DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDMChecksMemDesc); psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL; @@ -3656,6 +2923,7 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) psDevInfo->psRGXFWSigCDMChecksMemDesc = NULL; } +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1 && psDevInfo->psRGXFWSigRDMChecksMemDesc) @@ -3663,15 +2931,17 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigRDMChecksMemDesc); psDevInfo->psRGXFWSigRDMChecksMemDesc = NULL; } +#endif + +#endif -#if defined(SUPPORT_VALIDATION) - if (psDevInfo->psRGXFWValidationSigMemDesc) +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + if (psDevInfo->psCounterBufferMemDesc) { - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWValidationSigMemDesc); - psDevInfo->psRGXFWValidationSigMemDesc = NULL; + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCounterBufferMemDesc); + psDevInfo->psCounterBufferMemDesc = NULL; } #endif -#endif #if defined(SUPPORT_FIRMWARE_GCOV) if (psDevInfo->psFirmwareGcovBufferMemDesc) @@ -3681,19 +2951,17 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) } #endif -#if 
defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) RGXSetupFaultReadRegisterRollback(psDevInfo); -#endif - if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc) + if (psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc) { - if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL) + if (psDevInfo->psRGXFWIfGpuUtilFW != NULL) { - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); - psDevInfo->psRGXFWIfGpuUtilFWCb = NULL; + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc); + psDevInfo->psRGXFWIfGpuUtilFW = NULL; } - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); - psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL; + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc); + psDevInfo->psRGXFWIfGpuUtilFWCtlMemDesc = NULL; } if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc) @@ -3720,14 +2988,9 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) { if (psDevInfo->psRGXFWIfTraceBufCtl != NULL) { - /* first deinit/free the tracebuffer allocation */ + /* deinit/free the tracebuffer allocation */ RGXTraceBufferDeinit(psDevInfo); -#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) - /* second free the powmon log buffer if used */ - RGXPowmonBufferDeinit(psDevInfo); -#endif - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc); psDevInfo->psRGXFWIfTraceBufCtl = NULL; } @@ -3781,9 +3044,12 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) } #endif - /* Free the SLC3 fence object */ - _FreeSLC3Fence(psDevInfo); - +#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) + { + _FreeSLC3Fence(psDevInfo); + } +#endif #if defined(SUPPORT_PDVFS) if (psDevInfo->psRGXFWIFCoreClkRateMemDesc) { @@ -3797,6 +3063,15 @@ static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL; } #endif + +#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) + if (psDevInfo->psRGXFWIfActiveContextBufDesc) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc); + psDevInfo->psRGXFWIfActiveContextBufDesc = NULL; + } +#endif } /*! 
@@ -3814,22 +3089,28 @@ static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo) &psDevInfo->psKernelCCBRtnSlotsMemDesc); RGXFreeCCB(psDevInfo, &psDevInfo->psKernelCCBCtl, + &psDevInfo->psKernelCCBCtlLocal, &psDevInfo->psKernelCCBCtlMemDesc, &psDevInfo->psKernelCCB, &psDevInfo->psKernelCCBMemDesc); RGXFreeCCB(psDevInfo, &psDevInfo->psFirmwareCCBCtl, + &psDevInfo->psFirmwareCCBCtlLocal, &psDevInfo->psFirmwareCCBCtlMemDesc, &psDevInfo->psFirmwareCCB, &psDevInfo->psFirmwareCCBMemDesc); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFreeCCB(psDevInfo, - &psDevInfo->psWorkEstFirmwareCCBCtl, - &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, - &psDevInfo->psWorkEstFirmwareCCB, - &psDevInfo->psWorkEstFirmwareCCBMemDesc); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + RGXFreeCCB(psDevInfo, + &psDevInfo->psWorkEstFirmwareCCBCtl, + &psDevInfo->psWorkEstFirmwareCCBCtlLocal, + &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, + &psDevInfo->psWorkEstFirmwareCCB, + &psDevInfo->psWorkEstFirmwareCCBMemDesc); + } #endif if (psDevInfo->psPowSyncPrim != NULL) @@ -3962,6 +3243,50 @@ void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) } } +static INLINE PVRSRV_ERROR RGXUpdateLocalKCCBRoff(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; + IMG_UINT32 ui32ReadOffset; + + barrier(); /* Don't optimise order. Reads from device memory follow. */ + + /* update KCCB read offset */ + RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32ReadOffset, INVALIDATE); + ui32ReadOffset = psKCCBCtl->ui32ReadOffset; + + if (ui32ReadOffset > psKCCBCtlLocal->ui32WrapMask) + { + return PVRSRV_ERROR_KERNEL_CCB_OFFSET; + } + + psKCCBCtlLocal->ui32ReadOffset = ui32ReadOffset; + + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR RGXUpdateLocalFWCCBWoff(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + const RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; + IMG_UINT32 ui32WriteOffset; + + barrier(); /* Don't optimise order. Reads from device memory follow. 
*/ + + /* update FWCCB write offset */ + RGXFwSharedMemCacheOpValue(psFWCCBCtl->ui32WriteOffset, INVALIDATE); + ui32WriteOffset = psFWCCBCtl->ui32WriteOffset; + + if (ui32WriteOffset > psFWCCBCtlLocal->ui32WrapMask) + { + return PVRSRV_ERROR_KERNEL_CCB_OFFSET; + } + + psFWCCBCtlLocal->ui32WriteOffset = ui32WriteOffset; + + return PVRSRV_OK; +} + /****************************************************************************** FUNCTION : RGXAcquireKernelCCBSlot @@ -3973,31 +3298,33 @@ void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) RETURNS : PVRSRV_ERROR ******************************************************************************/ static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, - const RGXFWIF_CCB_CTL *psKCCBCtl, IMG_UINT32 *pui32Offset) { - IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; -#if defined(PDUMP) - const DEVMEM_MEMDESC *psKCCBCtrlMemDesc = psDevInfo->psKernelCCBCtlMemDesc; -#endif + IMG_UINT32 ui32NextWriteOffset; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; - ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; - ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + ui32NextWriteOffset = (psKCCBCtlLocal->ui32WriteOffset + 1) & psKCCBCtlLocal->ui32WrapMask; #if defined(PDUMP) /* Wait for sufficient CCB space to become available */ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, 0, - "Wait for kCCB woff=%u", ui32NextWriteOffset); - DevmemPDumpCBP(psKCCBCtrlMemDesc, + "Wait for space to write kCCB woff=%u", psKCCBCtlLocal->ui32WriteOffset); + DevmemPDumpCBP(psDevInfo->psKernelCCBCtlMemDesc, offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), - ui32NextWriteOffset, + psKCCBCtlLocal->ui32WriteOffset, 1, - (psKCCBCtl->ui32WrapMask + 1)); + (psKCCBCtlLocal->ui32WrapMask + 1)); #endif - if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset) + if (ui32NextWriteOffset == psKCCBCtlLocal->ui32ReadOffset) { - return PVRSRV_ERROR_KERNEL_CCB_FULL; + PVRSRV_ERROR eError = RGXUpdateLocalKCCBRoff(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); + + if (ui32NextWriteOffset == psKCCBCtlLocal->ui32ReadOffset) + { + return PVRSRV_ERROR_KERNEL_CCB_FULL; + } } *pui32Offset = ui32NextWriteOffset; return PVRSRV_OK; @@ -4013,18 +3340,24 @@ static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, RETURNS : PVRSRV_ERROR ******************************************************************************/ -static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc, - const RGXFWIF_CCB_CTL *psKCCBCtl) +static PVRSRV_ERROR RGXPollKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo) { - IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; + IMG_UINT32 ui32NextWriteOffset; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; - ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; - ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + ui32NextWriteOffset = (psKCCBCtlLocal->ui32WriteOffset + 1) & psKCCBCtlLocal->ui32WrapMask; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + if (ui32NextWriteOffset != psKCCBCtlLocal->ui32ReadOffset) { + return PVRSRV_OK; + } + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + PVRSRV_ERROR eError = RGXUpdateLocalKCCBRoff(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); - if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset) + if (ui32NextWriteOffset != psKCCBCtlLocal->ui32ReadOffset) { return PVRSRV_OK; } @@ -4039,7 +3372,7 @@ static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc } 
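/*
 * Minimal model of the kCCB slot acquisition logic above, assuming a
 * power-of-two ring: the producer computes the next write offset with the
 * wrap mask, treats "next == read" as full, then refreshes its locally
 * cached copy of the firmware-owned read offset and re-checks before giving
 * up. Types and names are simplified stand-ins for RGXFWIF_CCB_CTL and the
 * RGXUpdateLocalKCCBRoff()/RGXAcquireKernelCCBSlot() pair; cache
 * maintenance and PDump handling are omitted.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    uint32_t write_offset;     /* owned by the host (producer)     */
    uint32_t read_offset;      /* cached copy of the FW's progress */
    uint32_t wrap_mask;        /* ring size - 1, size is 2^n       */
} ccb_ctl_t;

/* Re-read the offset the consumer publishes in shared memory (here a plain
 * variable); reject values that fall outside the ring. */
static bool refresh_read_offset(ccb_ctl_t *local, const uint32_t *shared_read)
{
    uint32_t roff = *shared_read;
    if (roff > local->wrap_mask)
    {
        return false;          /* corrupt offset, bail out */
    }
    local->read_offset = roff;
    return true;
}

/* Return true and the next write offset, or false when the ring is still
 * full after refreshing the cached read offset. */
static bool acquire_slot(ccb_ctl_t *local, const uint32_t *shared_read, uint32_t *next_woff)
{
    uint32_t next = (local->write_offset + 1u) & local->wrap_mask;

    if (next == local->read_offset &&
        (!refresh_read_offset(local, shared_read) || next == local->read_offset))
    {
        return false;          /* maps to PVRSRV_ERROR_KERNEL_CCB_FULL */
    }
    *next_woff = next;
    return true;
}

int main(void)
{
    uint32_t shared_read = 0;
    ccb_ctl_t ctl = { .write_offset = 15, .read_offset = 0, .wrap_mask = 15 };
    uint32_t next = 0;

    printf("full: acquired=%d\n", acquire_slot(&ctl, &shared_read, &next));
    shared_read = 3;           /* consumer made progress */
    printf("after refresh: acquired=%d next=%u\n",
           acquire_slot(&ctl, &shared_read, &next), next);
    return 0;
}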
OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return PVRSRV_ERROR_KERNEL_CCB_FULL; } @@ -4112,6 +3445,10 @@ static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA); } + case RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA); + } case RGXFWIF_KCCB_CMD_FORCE_UPDATE: { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA); @@ -4132,6 +3469,12 @@ static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA); } +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + case RGXFWIF_KCCB_CMD_COUNTER_DUMP: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA); + } +#endif case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG: { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL); @@ -4148,7 +3491,9 @@ static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA); } - case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE: + case RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE: + case RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE: + case RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL: case RGXFWIF_KCCB_CMD_WDG_CFG: case RGXFWIF_KCCB_CMD_HEALTH_CHECK: case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL: @@ -4161,16 +3506,10 @@ static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) { return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_DEV_VIRTADDR); } -#if defined(SUPPORT_VALIDATION) - case RGXFWIF_KCCB_CMD_RGXREG: - { - return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA); - } - case RGXFWIF_KCCB_CMD_GPUMAP: + case RGXFWIF_KCCB_CMD_CANCEL_WORK: { - return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_GPUMAP_DATA); + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CANCEL_WORK_DATA); } -#endif default: { /* Invalid (OR) Unused (OR) Newly added command type */ @@ -4185,10 +3524,13 @@ PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_ERROR eError; + RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], + INVALIDATE); eError = PVRSRVWaitForValueKM( (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, - RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFwSharedMemCacheOpExecPfn); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM"); #if defined(PDUMP) @@ -4205,7 +3547,6 @@ PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMP_POLL_OPERATOR_EQUAL, ui32PDumpFlags); PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); - } #else PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); @@ -4221,18 +3562,26 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_ERROR eError; PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; IMG_UINT32 ui32NewWriteOffset; - IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + IMG_UINT32 ui32OldWriteOffset; IMG_UINT32 ui32CmdMemCopySize; #if !defined(PDUMP) 
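/*
 * The command copy above moves only the KCCB command header plus the payload
 * of the specific command type; RGXGetCmdMemCopySize() computes that as
 * offsetof(cmd, union) + sizeof(member). A generic, standalone illustration
 * of that pattern with made-up command types (the real RGXFWIF_KCCB_CMD
 * union has many more members and different names):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { CMD_KICK, CMD_CLEANUP } cmd_type_t;
typedef struct { uint32_t context; uint32_t cwoff; } kick_data_t;
typedef struct { uint32_t obj_addr; uint32_t obj_type; uint32_t sync[4]; } cleanup_data_t;

typedef struct {
    cmd_type_t type;
    union {
        kick_data_t    kick;
        cleanup_data_t cleanup;
    } u;
} cmd_t;

/* Copy size per command type: header plus the payload actually used,
 * rather than sizeof(cmd_t), which always includes the largest member. */
static size_t cmd_copy_size(cmd_type_t type)
{
    switch (type)
    {
        case CMD_KICK:    return offsetof(cmd_t, u) + sizeof(kick_data_t);
        case CMD_CLEANUP: return offsetof(cmd_t, u) + sizeof(cleanup_data_t);
        default:          return 0;    /* unknown command: treat as invalid */
    }
}

int main(void)
{
    printf("kick=%zu cleanup=%zu full=%zu\n",
           cmd_copy_size(CMD_KICK), cmd_copy_size(CMD_CLEANUP), sizeof(cmd_t));
    return 0;
}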
PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32OldWriteOffset = psKCCBCtlLocal->ui32WriteOffset; + #else IMG_BOOL bContCaptureOn = PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); /* client connected or in pdump init phase */ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, uiPDumpFlags); /* Are we in capture range or continuous and not in a power transition */ + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32OldWriteOffset = psKCCBCtlLocal->ui32WriteOffset; + if (bContCaptureOn) { /* in capture range */ @@ -4250,12 +3599,13 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRVPollForValueKM(psDeviceNode, (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF, - POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP); + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, + NULL); /* Dump Init state of Kernel CCB control (read and write offset) */ PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "Initial state of kernel CCB Control, roff: %d, woff: %d", - psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset); + psKCCBCtl->ui32ReadOffset, psKCCBCtlLocal->ui32WriteOffset); DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, 0, @@ -4267,22 +3617,31 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, #endif #if defined(SUPPORT_AUTOVZ) - if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || - (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) && - !PVRSRV_VZ_MODE_IS(NATIVE)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" - "driver state = %u / firmware state = %u;" - "expected READY (%u/%u) or ACTIVE (%u/%u);", - __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), - RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, - RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); - eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; - goto _RGXSendCommandRaw_Exit; + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + if (!PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) + { + if ((likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && + (KM_OS_CONNECTION_IS(ACTIVE, psDevInfo) || KM_OS_CONNECTION_IS(READY, psDevInfo)))) || + (KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo))) + { + RGXUpdateAutoVzWdgToken(psDevInfo); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" + "driver state = %u / firmware state = %u;" + "expected READY (%u/%u) or ACTIVE (%u/%u) or in transition (%u/%u);", + __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, + RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE, + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_ACTIVE)); + eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; + goto _RGXSendCommandRaw_Exit; + } } #endif - PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize); if (!OSLockIsLocked(psDeviceNode->hPowerLock)) { PVR_DPF((PVR_DBG_ERROR, @@ -4292,7 +3651,7 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, } /* Acquire a slot in the CCB */ - eError = RGXAcquireKernelCCBSlot(psDevInfo, psKCCBCtl, &ui32NewWriteOffset); + eError = RGXAcquireKernelCCBSlot(psDevInfo, &ui32NewWriteOffset); if (eError != PVRSRV_OK) { goto _RGXSendCommandRaw_Exit; @@ -4303,8 +3662,9 
@@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); /* Copy the command into the CCB */ - OSCachedMemCopyWMB(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize], + OSCachedMemCopy(&pui8KCCB[ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD)], psKCCBCmd, ui32CmdMemCopySize); + RGXFwSharedMemCacheOpExec(&pui8KCCB[ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD)], ui32CmdMemCopySize, PVRSRV_CACHE_OP_FLUSH); /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */ if (pui32CmdKCCBSlot) @@ -4315,6 +3675,8 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, * doesn't get to see stale/false value in allotted slot */ OSWriteDeviceMem32WithWMB(&psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset], RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE); + RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset], + FLUSH); #if defined(PDUMP) PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "Reset kCCB slot number %u", ui32OldWriteOffset); @@ -4327,9 +3689,16 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType)); } + /* Memory barrier before KCCB write offset update. */ + OSWriteMemoryBarrier(NULL); + + /* Move past the current command */ + psKCCBCtlLocal->ui32WriteOffset = ui32NewWriteOffset; psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset; + /* Read-back of memory before Kick MTS */ OSWriteMemoryBarrier(&psKCCBCtl->ui32WriteOffset); + RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32WriteOffset, FLUSH); #if defined(PDUMP) if (bContCaptureOn) @@ -4342,7 +3711,7 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, uiPDumpFlags, "Dump kCCB cmd woff = %d", ui32OldWriteOffset); DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc, - ui32OldWriteOffset * psKCCBCtl->ui32CmdSize, + ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD), ui32CmdMemCopySize, uiPDumpFlags); @@ -4388,7 +3757,7 @@ static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, #if defined(NO_HARDWARE) /* keep the roff updated because fw isn't there to update it */ - psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset; + psKCCBCtl->ui32ReadOffset = psKCCBCtlLocal->ui32WriteOffset; #endif _RGXSendCommandRaw_Exit: @@ -4496,7 +3865,7 @@ PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_ dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { if (dllist_is_empty(&sCommandList)) { @@ -4531,15 +3900,14 @@ PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_ /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the * outer loop times-out, we'll still want to return KCCB_FULL to caller */ - eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, - psDevInfo->psKernelCCBCtl); + eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo); if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) { eError = PVRSRV_ERROR_KERNEL_CCB_FULL; goto cleanup_; } } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); cleanup_: /* if the local list is not empty put it back to the deferred list head @@ -4670,41 +4038,6 @@ void 
RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) OSScheduleMISR(psDevInfo->hProcessQueuesMISR); } -#if defined(SUPPORT_VALIDATION) -PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 ui64RegVal, - IMG_UINT64 ui64Size, - IMG_UINT32 ui32Offset, - IMG_BOOL bWriteOp) -{ - RGXFWIF_KCCB_CMD sRgxRegsCmd = {0}; - IMG_UINT32 ui32kCCBCommandSlot; - PVRSRV_ERROR eError; - - sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG; - sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal; - sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size; - sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset; - sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp; - - eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, - RGXFWIF_DM_GP, - &sRgxRegsCmd, - PDUMP_FLAGS_CONTINUOUS, - &ui32kCCBCommandSlot); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); - - if (bWriteOp) - { - eError = RGXWaitForKCCBSlotUpdate(psDevInfo, - ui32kCCBCommandSlot, - PDUMP_FLAGS_CONTINUOUS); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); - } - - return eError; -} -#endif /*! ******************************************************************************* @@ -4719,7 +4052,7 @@ static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) PVRSRV_DEVICE_NODE *psDeviceNode = pvData; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; - PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_OFF; eError = PVRSRVPowerLock(psDeviceNode); if (eError != PVRSRV_OK) @@ -4729,43 +4062,57 @@ static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) return; } - /* Check whether it's worth waking up the GPU */ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); - if (!PVRSRV_VZ_MODE_IS(GUEST) && - (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - /* For now, guest drivers will always wake-up the GPU */ - RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; - IMG_BOOL bGPUHasWorkWaiting; - - bGPUHasWorkWaiting = - (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); - - if (!bGPUHasWorkWaiting) + /* Guests are not permitted to change the device power state */ + if ((eError != PVRSRV_OK) || (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) { - /* all queues are empty, don't wake up the GPU */ PVRSRVPowerUnlock(psDeviceNode); return; } } + else + { + if ((eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + { + RGXFWIF_GPU_UTIL_FW *psUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; + IMG_BOOL bGPUHasWorkWaiting; - PDUMPPOWCMDSTART(psDeviceNode); - /* wake up the GPU */ - eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, - PVRSRV_DEV_POWER_STATE_ON, - PVRSRV_POWER_FLAGS_NONE); - PDUMPPOWCMDEND(psDeviceNode); + /* Check whether it's worth waking up the GPU */ + RGXFwSharedMemCacheOpValue(psUtilFW->ui64GpuLastWord, INVALIDATE); + bGPUHasWorkWaiting = + (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFW->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", - __func__, PVRSRVGetErrorString(eError))); + if (!bGPUHasWorkWaiting) + { + /* all queues are empty, don't wake up the GPU */ + PVRSRVPowerUnlock(psDeviceNode); + return; + } + } - PVRSRVPowerUnlock(psDeviceNode); - return; + PDUMPPOWCMDSTART(psDeviceNode); + /* wake up the GPU */ + eError = 
PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_NONE); + PDUMPPOWCMDEND(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition device to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + return; + } } + /* Memory barrier before Kick MTS */ + OSWriteMemoryBarrier(NULL); + /* uncounted kick to the FW */ HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); @@ -4781,52 +4128,63 @@ PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE "RGX_ScheduleProcessQueues"); } -PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, +PVRSRV_ERROR _RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_DM eKCCBType, RGXFWIF_KCCB_CMD *psKCCBCmd, IMG_UINT32 ui32PDumpFlags, - IMG_UINT32 *pui32CmdKCCBSlot) + IMG_UINT32 *pui32CmdKCCBSlot, + IMG_BOOL bCallerHasPwrLock) { PVRSRV_ERROR eError; IMG_UINT32 uiMMUSyncUpdate; -#if defined(SUPPORT_VALIDATION) - static IMG_UINT32 ui32PowDomainFrameCounter; -#endif - /* Don't send the command/power up request if the device is de-initialising. - * The de-init thread could destroy the device whilst the power up - * sequence below is accessing the HW registers. - */ + /* Don't send the command/power up request if device not available. */ if (unlikely((psDevInfo == NULL) || (psDevInfo->psDeviceNode == NULL) || - (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR))) { return PVRSRV_ERROR_INVALID_DEVICE; } - /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful - in a scenario with several applications allocating resources. */ - eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); - if (unlikely(eError != PVRSRV_OK)) + /* Don't send the command/power up request if device in deinit phase. + * The de-init thread could destroy the device whilst the power up + * sequence below is accessing the HW registers. + * Not yet safe to free resources. Caller should retry later. + */ + if (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) { - PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", - __func__, PVRSRVGetErrorString(eError))); + return PVRSRV_ERROR_RETRY; + } - /* If system is found powered OFF, Retry scheduling the command */ - if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + + if (!bCallerHasPwrLock) + { + /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful + in a scenario with several applications allocating resources. 
*/ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) { - eError = PVRSRV_ERROR_RETRY; - } + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } - goto RGXScheduleCommand_exit; + goto RGXScheduleCommand_exit; + } } - if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) + if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING)) { /* If we have the power lock the device is valid but the deinit * thread could be waiting for the lock. */ - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - return PVRSRV_ERROR_INVALID_DEVICE; + eError = PVRSRV_ERROR_RETRY; + goto _PVRSRVInvalidDeviceError_Exit; } /* Ensure device is powered up before sending any commands */ @@ -4842,56 +4200,6 @@ PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, goto _PVRSRVSetDevicePowerStateKM_Exit; } -#if defined(SUPPORT_VALIDATION) - /** - * For validation, force the core to different powered units between - * DM kicks. PVRSRVDeviceGPUUnitsPowerChange acquires the power lock, hence - * ensure that this is done after the power lock is released. - */ - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - if ((eError == PVRSRV_OK) && (eKCCBType != RGXFWIF_DM_GP)) - { - IMG_BOOL bInsertPowerDomainTransition = - (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN); - if (psDevInfo->ui32PowDomainKickInterval > 0) - { - if (eKCCBType == RGXFWIF_DM_3D) - { - /* Insert a power domain transition every N '3D' frames */ - ui32PowDomainFrameCounter++; - if ((ui32PowDomainFrameCounter % psDevInfo->ui32PowDomainKickInterval) == 0) - { - bInsertPowerDomainTransition = IMG_TRUE; - } - } - } - - if (bInsertPowerDomainTransition) - { - IMG_UINT32 ui32PowerDomainState; - IMG_BOOL bIsValid; - do { - ui32PowerDomainState = RGXPowerDomainGetNextState(&psDevInfo->sPowerDomainState); - bIsValid = ui32PowerDomainState && - ((ui32PowerDomainState & ~(psDevInfo->ui32AvailablePowUnitsMask)) == 0); - } while (!bIsValid); - - PVR_DPF((PVR_DBG_MESSAGE, "Request GPU power units mask change to 0x%x", ui32PowerDomainState)); - eError = PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32PowerDomainState); - if (eError != PVRSRV_OK) - goto RGXScheduleCommand_exit; - } - } - - /* Re-acquire the power lock. 
*/ - eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); - if (unlikely(eError != PVRSRV_OK)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: failed to re-acquire powerlock after GPU power units command (%s)", - __func__, PVRSRVGetErrorString(eError))); - goto RGXScheduleCommand_exit; - } -#endif eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate); if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; @@ -4900,8 +4208,11 @@ PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; _PVRSRVSetDevicePowerStateKM_Exit: - PVRSRVPowerUnlock(psDevInfo->psDeviceNode); - +_PVRSRVInvalidDeviceError_Exit: + if (!bCallerHasPwrLock) + { + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + } RGXScheduleCommand_exit: return eError; } @@ -4912,19 +4223,32 @@ PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) { RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; + PVRSRV_ERROR eError; -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) || +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE, DEVINFO, psDevInfo) || (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && - KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)), + (KM_OS_CONNECTION_IS(ACTIVE, psDevInfo) || KM_OS_CONNECTION_IS(READY, psDevInfo))), "FW-KM connection is down"); #endif - while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + eError = RGXUpdateLocalFWCCBWoff(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXUpdateLocalFWCCBWoff"); + return; + } + + while (psFWCCBCtlLocal->ui32ReadOffset != psFWCCBCtlLocal->ui32WriteOffset) { /* Point to the next command */ - const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset; + const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtlLocal->ui32ReadOffset; + RGXFwSharedMemCacheOpPtr(psFwCCBCmd, INVALIDATE); + HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); switch (psFwCCBCmd->eCmdType) @@ -4969,7 +4293,7 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists"); } - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists", __func__, @@ -5012,7 +4336,7 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, &sErrorData); } } @@ -5021,57 +4345,13 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: { - DLLIST_NODE *psNode, *psNext; const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = &psFwCCBCmd->uCmdData.sCmdContextResetNotification; - RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; IMG_UINT32 ui32ErrorPid = 0; 
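/*
 * RGXCheckFirmwareCCB() above follows a standard single-consumer ring walk:
 * snapshot the firmware-owned write offset into a local copy, drain entries
 * while read != write, publish the new read offset after each command and
 * re-snapshot the write offset once the ring looks empty. A minimal
 * standalone model with plain memory and simplified types (no cache
 * maintenance, no command decoding):
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u                       /* power of two */
#define WRAP_MASK (RING_SIZE - 1u)

typedef struct {
    uint32_t read_offset;                  /* owned by the host consumer */
    uint32_t write_offset;                 /* owned by the firmware      */
} fwccb_ctl_t;

static void drain_fwccb(fwccb_ctl_t *shared, const uint32_t *ring)
{
    uint32_t roff = shared->read_offset;
    uint32_t local_woff = shared->write_offset;   /* snapshot of producer progress */

    while (roff != local_woff)
    {
        printf("processing cmd %u at slot %u\n", ring[roff], roff);

        roff = (roff + 1u) & WRAP_MASK;
        shared->read_offset = roff;               /* publish progress per command */

        if (roff == local_woff)
        {
            local_woff = shared->write_offset;    /* pick up anything queued meanwhile */
        }
    }
}

int main(void)
{
    uint32_t ring[RING_SIZE] = { 100, 101, 102, 103 };
    fwccb_ctl_t ctl = { .read_offset = 0, .write_offset = 4 };

    drain_fwccb(&ctl, ring);
    printf("read offset now %u\n", ctl.read_offset);
    return 0;
}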
- OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); - dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) - { - RGX_SERVER_COMMON_CONTEXT *psThisContext = - IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); - - /* If the notification applies to all contexts update reset info - * for all contexts, otherwise only do so for the appropriate ID. - */ - if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) - { - /* Notification applies to all contexts */ - psThisContext->eLastResetReason = psCmdContextResetNotification->eResetReason; - psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; - } - else - { - /* Notification applies to one context only */ - if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID) - { - psServerCommonContext = psThisContext; - psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; - psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; - ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext); - break; - } - } - } - - if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)", - __func__, - (IMG_UINT32)(psCmdContextResetNotification->eResetReason), - psCmdContextResetNotification->ui32ResetJobRef)); - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", - __func__, - psServerCommonContext, - psCmdContextResetNotification->ui32ServerCommonContextID, - (IMG_UINT32)(psCmdContextResetNotification->eResetReason), - psCmdContextResetNotification->ui32ResetJobRef)); - } + FWCommonContextListSetLastResetReason(psDevInfo, + &ui32ErrorPid, + psCmdContextResetNotification); /* Increment error counter (if appropriate) */ if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM) @@ -5096,7 +4376,6 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) psDevInfo->sErrorCounts.ui32TRPErrorCount++; } } - OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); /* Notify system layer */ { @@ -5126,7 +4405,7 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) } } - psDevConfig->pfnSysDevErrorNotify(psDevConfig, + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, &sErrorData); } } @@ -5165,48 +4444,56 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) { case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,pidTmp); + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,0,pidTmp); break; } case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,pidTmp); + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,0,pidTmp); break; } case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,pidTmp); + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,0,pidTmp); break; } case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,pidTmp); + 
PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,0,pidTmp); break; } case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,pidTmp); + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,0,pidTmp); break; } case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: { - PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,pidTmp); + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,0,pidTmp); break; } +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_RAY_STORES: + { + PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,0,i32AdjustmentValue,pidTmp); + break; + } +#endif } #endif break; } -#if defined(SUPPORT_PDVFS) +#if defined(SUPPORT_FW_CORE_CLK_RATE_CHANGE_NOTIFY) case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: { - PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, + RGX_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); break; } #endif case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: { + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); if (psDevInfo->psRGXFWIfFwSysData != NULL && psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) { @@ -5221,6 +4508,8 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) /* Clear the FW faulted flags... */ psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags, + FLUSH); /* Power back up again... */ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, @@ -5230,7 +4519,7 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... 
*/ if (eError == PVRSRV_OK) { - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXFWHealthCheckCmd(psDevInfo); if (eError != PVRSRV_ERROR_RETRY) @@ -5238,7 +4527,7 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); } } @@ -5257,7 +4546,7 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, &sErrorData); } } @@ -5270,36 +4559,6 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) } break; } -#if defined(SUPPORT_VALIDATION) - case RGXFWIF_FWCCB_CMD_REG_READ: - { - psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue; - complete(&psDevInfo->sFwRegs.sRegComp); - break; - } -#if defined(SUPPORT_SOC_TIMER) - case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS: - { - if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) - { - PVRSRV_ERROR eSOCtimerErr = RGXValidateSOCUSCTimer(psDevInfo, - PDUMP_NONE, - psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray, - psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary, - psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers); - if (PVRSRV_OK == eSOCtimerErr) - { - PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time")); - } - else - { - PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time")); - } - } - break; - } -#endif -#endif default: { /* unknown command */ @@ -5311,7 +4570,20 @@ void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) } /* Update read offset */ - psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + psFWCCBCtlLocal->ui32ReadOffset = (psFWCCBCtlLocal->ui32ReadOffset + 1) & psFWCCBCtlLocal->ui32WrapMask; + OSMemoryBarrier(NULL); + psFWCCBCtl->ui32ReadOffset = psFWCCBCtlLocal->ui32ReadOffset; + OSWriteMemoryBarrier(NULL); + + if (psFWCCBCtlLocal->ui32ReadOffset == psFWCCBCtlLocal->ui32WriteOffset) + { + eError = RGXUpdateLocalFWCCBWoff(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXUpdateLocalFWCCBWoff"); + return; + } + } } } @@ -5337,6 +4609,7 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, } OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize); + RGXFwSharedMemCacheOpPtr(psRFReg, FLUSH); /* Release the CPU mapping */ DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc); @@ -5347,6 +4620,8 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump FWFramework buffer"); DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS); +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); #endif return PVRSRV_OK; @@ -5357,7 +4632,7 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, */ PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, DEVMEM_MEMDESC **ppsFWFrameworkMemDesc, - IMG_UINT32 ui32FrameworkCommandSize) + IMG_UINT32 ui32FrameworkCommandSize) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; @@ -5366,7 +4641,7 @@ PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, Allocate device memory for the firmware GPU framework state. 
Sufficient info to kick one or more DMs should be contained in this buffer */ - PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate Volcanic firmware framework state"); + PDUMPCOMMENT(psDeviceNode, "Allocate firmware framework state"); eError = DevmemFwAllocate(psDevInfo, ui32FrameworkCommandSize, @@ -5393,11 +4668,14 @@ PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, PVRSRV_ERROR eError = PVRSRV_OK; IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; - const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; + + eError = RGXUpdateLocalKCCBRoff(psDevInfo); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); - ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 + - psKCCBCtl->ui32WriteOffset - - psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask; + ui32CurrentQueueLength = (psKCCBCtlLocal->ui32WrapMask+1 + + psKCCBCtlLocal->ui32WriteOffset - + psKCCBCtlLocal->ui32ReadOffset) & psKCCBCtlLocal->ui32WrapMask; ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; for (ui32MaxRetries = ui32CurrentQueueLength + 1; @@ -5410,7 +4688,7 @@ PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, * does not generate an error message. In this case, the PollForValueKM is expected to * timeout as there is work ongoing on the GPU which may take longer than the timeout period. */ - eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE); + eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE, NULL); if (eError != PVRSRV_ERROR_TIMEOUT) { break; @@ -5424,6 +4702,7 @@ PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", __func__, PVRSRVGetErrorString(eError), pui32LinMemAddr, ui32Value)); + OSDumpStack(); } return eError; @@ -5442,13 +4721,15 @@ PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32kCCBCommandSlot; IMG_BOOL bWaitForFwUpdate = IMG_FALSE; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); - if (!psDevInfo) { return PVRSRV_ERROR_INVALID_PARAMS; } psDeviceNode = psDevInfo->psDeviceNode; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); psFwSysData = psDevInfo->psRGXFWIfFwSysData; if (NULL == psFwSysData) @@ -5471,6 +4752,7 @@ PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, psFwSysData->ui32ConfigFlags &= ~ui32Config; } OSWriteMemoryBarrier(&psFwSysData->ui32ConfigFlags); + RGXFwSharedMemCacheOpValue(psFwSysData->ui32ConfigFlags, FLUSH); /* return current/new value to caller */ if (pui32ConfigState) @@ -5524,6 +4806,14 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, { PVRSRV_ERROR eError; IMG_UINT32 ui32kCCBCommandSlot; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus); + + PVR_LOG_RETURN_IF_FALSE((eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_DEAD) && + (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_FAULT), + "Cleanup aborted: Device in bad state", PVRSRV_OK); +#endif + /* Clean-up commands sent during frame capture intervals must be dumped even when not in capture range... 
*/ ui32PDumpFlags |= PDUMP_FLAGS_INTERVAL; @@ -5543,8 +4833,7 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError != PVRSRV_OK) { /* If caller may retry, fail with no error message */ - if ((eError != PVRSRV_ERROR_RETRY) && - (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + if (!PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR ,"RGXScheduleCommandAndGetKCCBSlot() failed (%s) in %s()", PVRSRVGETERRORSTRING(eError), __func__)); @@ -5609,6 +4898,9 @@ PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, If the command has was run but a resource was busy, then the request will need to be retried. */ + RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot], + INVALIDATE); + if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) { if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) @@ -5644,20 +4936,22 @@ PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, /* Force retry if this context's CCB is currently being dumped * as part of the stalled CCB debug */ - if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB) + if (psDevInfo->pvEarliestStalledClientCCB == (void*)FWCommonContextGetClientCCB(psServerCommonContext)) { PVR_DPF((PVR_DBG_WARNING, - "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>", + "%s: [%u/%d] Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psCtxClientCCB <%p>", __func__, - (void*)psServerCommonContext->psClientCCB)); + psDeviceNode->sDevId.ui32InternalID, + psDeviceNode->sDevId.i32KernelDeviceID, + psDevInfo->pvEarliestStalledClientCCB)); return PVRSRV_ERROR_RETRY; } psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext); #if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, "Common ctx cleanup Request DM%d [context = 0x%08x]", - eDM, psFWCommonContextFWAddr.ui32Addr); - PDUMPCOMMENT(psDevInfo->psDeviceNode, "Wait for CCB to be empty before common ctx cleanup"); + PDUMPCOMMENT(psDeviceNode, "Common ctx cleanup Request DM%d [context = 0x%08x]", + eDM, psFWCommonContextFWAddr.ui32Addr); + PDUMPCOMMENT(psDeviceNode, "Wait for CCB to be empty before common ctx cleanup"); RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags); #endif @@ -5672,7 +4966,7 @@ PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_CLEANUP_FWCOMMONCONTEXT, ui32PDumpFlags); - if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + if ((eError != PVRSRV_OK) && !PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a memory context cleanup with error (%u)", @@ -5705,8 +4999,7 @@ PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError != PVRSRV_OK) { /* If caller may retry, fail with no error message */ - if ((eError != PVRSRV_ERROR_RETRY) && - (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + if (!PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a HWRTData cleanup with error (%u)", @@ -5741,8 +5034,7 @@ PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError != PVRSRV_OK) { /* If caller may retry, fail with no error message */ - if ((eError != PVRSRV_ERROR_RETRY) && - (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + if (!PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a memory context cleanup 
with error (%u)", @@ -5774,7 +5066,7 @@ PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_CLEANUP_ZSBUFFER, PDUMP_FLAGS_NONE); - if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + if ((eError != PVRSRV_OK) && !PVRSRVIsRetryError(eError)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to schedule a memory context cleanup with error (%u)", @@ -5787,10 +5079,11 @@ PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32HCSDeadlineMs) { - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs; OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS, FLUSH); #if defined(PDUMP) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -5804,27 +5097,40 @@ PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, return PVRSRV_OK; } -PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo) +PVRSRV_ERROR RGXFWHealthCheckCmdInt(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bCallerHasPwrLock) { - RGXFWIF_KCCB_CMD sCmpKCCBCmd; + RGXFWIF_KCCB_CMD sCmpKCCBCmd = { 0 }; sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; - return RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GP, - &sCmpKCCBCmd, - PDUMP_FLAGS_CONTINUOUS); + if (bCallerHasPwrLock) + { + return RGXScheduleCommandWithoutPowerLock(psDevInfo, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + PDUMP_FLAGS_CONTINUOUS); + } + else + { + return RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + PDUMP_FLAGS_CONTINUOUS); + } } PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo) { +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) IMG_UINT32 ui32CBaseMapCtxReg; +#endif - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) { - ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4; + ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; /* Set the mapping context */ RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); (void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */ @@ -5837,7 +5143,7 @@ PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo) RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1, RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT, RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT, - 0x0); + 0xDEADBEEF); } else { @@ -5854,22 +5160,39 @@ PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo) RGX_CR_MMU_CBASE_MAPPING, RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, - 0x0); + 0xDEADBEEF); } +#else + /* + * Catbase-0 (FW MMU context) pointing to unmapped mem to make + * FW crash from its memory context + */ + RGXWriteKernelMMUPC64(&psDevInfo->sLayerParams, + FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), + RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, + RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, + ((0xDEADBEEF + >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) + << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) + & 
~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); +#endif return PVRSRV_OK; } PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, - RGXFWIF_OS_STATE_CHANGE eOSOnlineState) + IMG_UINT32 ui32DriverID, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState) { PVRSRV_ERROR eError = PVRSRV_OK; RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + const RGXFWIF_SYSDATA *psFwSysData; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; - sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32DriverID = ui32DriverID; sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; #if defined(SUPPORT_AUTOVZ) @@ -5879,7 +5202,7 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_UNREFERENCED_PARAMETER(psFwSysData); sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { /* Send the offline command regardless if power lock is held or not. * Under AutoVz this is done during regular driver deinit, store-to-ram suspend @@ -5898,55 +5221,46 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); /* Guests and Host going offline should wait for confirmation * from the Firmware of the state change. If this fails, break * the connection on the OS Driver's end as backup. */ - if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo) || (ui32DriverID == RGXFW_HOST_DRIVER_ID)) { - LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2) + LOOP_UNTIL_TIMEOUT_US(SECONDS_TO_MICROSECONDS/2) { + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); if (KM_FW_CONNECTION_IS(READY, psDevInfo)) { bConnectionDown = IMG_TRUE; break; } - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (!bConnectionDown) { KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } } } #else - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { /* no reason for Guests to update their state or any other VM's. * This is the Hypervisor and Host driver's responsibility. 
*/ return PVRSRV_OK; } - else if (eOSOnlineState == RGXFWIF_OS_ONLINE) + else { - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GP, - &sOSOnlineStateCmd, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_ERROR_RETRY) break; + const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags; - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - } - else if (psFwSysData) - { - const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags = - (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + PVR_ASSERT(psFwSysData != NULL); + psFwRunFlags = (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID]; /* Attempt several times until the FW manages to offload the OS */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { IMG_UINT32 ui32kCCBCommandSlot; @@ -5965,8 +5279,9 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, /* read the OS state */ OSMemoryBarrier(NULL); - /* check if FW finished offloading the OSID and is stopped */ - if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) + /* check if FW finished offloading the driver and is stopped */ + if ((eOSOnlineState == RGXFWIF_OS_ONLINE && (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_READY || psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_ACTIVE)) || + (eOSOnlineState == RGXFWIF_OS_OFFLINE && psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE)) { eError = PVRSRV_OK; break; @@ -5977,11 +5292,7 @@ PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - } - else - { - eError = PVRSRV_ERROR_NOT_INITIALISED; + } END_LOOP_UNTIL_TIMEOUT_US(); } return_ : @@ -5989,166 +5300,22 @@ return_ : return eError; } -PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, - IMG_UINT32 ui32Priority) -{ - PVRSRV_ERROR eError; - RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 }; - - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); - - sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE; - psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority; - OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]); - -#if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid); - DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, - offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)), - ui32Priority , - PDUMP_FLAGS_CONTINUOUS); -#endif - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GP, - &sOSidPriorityCmd, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - return eError; -} - -PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, - CONNECTION_DATA *psConnection, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_INT32 i32Priority, - RGXFWIF_DM eDM) -{ - IMG_UINT32 ui32CmdSize; - IMG_UINT8 *pui8CmdPtr; - RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; - RGXFWIF_CCB_CMD_HEADER *psCmdHeader; - RGXFWIF_CMD_PRIORITY *psCmd; - PVRSRV_ERROR eError; - RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); - - eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor); - PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); - - 
/* - Get space for command - */ - ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); - - eError = RGXAcquireCCB(psClientCCB, - ui32CmdSize, - (void **) &pui8CmdPtr, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - if (eError != PVRSRV_ERROR_RETRY) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); - } - goto fail_ccbacquire; - } - - /* - Write the command header and command - */ - psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr; - psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; - psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); - pui8CmdPtr += sizeof(*psCmdHeader); - - psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr; - psCmd->i32Priority = i32Priority; - pui8CmdPtr += sizeof(*psCmd); - - /* - We should reserve space in the kernel CCB here and fill in the command - directly. - This is so if there isn't space in the kernel CCB we can return with - retry back to services client before we take any operations - */ - - /* - Submit the command - */ - RGXReleaseCCB(psClientCCB, - ui32CmdSize, - PDUMP_FLAGS_CONTINUOUS); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); - return eError; - } - - /* Construct the priority command. */ - sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; - sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); - sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); - sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); - sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; - -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; -#endif - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - eError = RGXScheduleCommand(psDevInfo, - eDM, - &sPriorityCmd, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_ERROR_RETRY) - { - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to submit set priority command with error (%u)", - __func__, - eError)); - goto fail_cmdacquire; - } - - psContext->i32Priority = i32Priority; - - return PVRSRV_OK; - -fail_ccbacquire: -fail_checkpriority: -fail_cmdacquire: - PVR_ASSERT(eError != PVRSRV_OK); - return eError; -} - PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PHRMode) { PVRSRV_ERROR eError; RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); + + PVR_LOG_RETURN_IF_FALSE((ui32PHRMode == RGXFWIF_PHR_MODE_OFF) || + (ui32PHRMode == RGXFWIF_PHR_MODE_RD_RESET), + "Invalid PHR Mode.", PVRSRV_ERROR_INVALID_PARAMS); sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode; OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, FLUSH); #if defined(PDUMP) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -6159,7 +5326,7 @@ PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMP_FLAGS_CONTINUOUS); #endif - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, RGXFWIF_DM_GP, @@ -6170,7 +5337,7 @@ PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, 
break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return eError; } @@ -6181,11 +5348,12 @@ PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRV_ERROR eError; RGXFWIF_KCCB_CMD sCfgWdgCmd = { 0 }; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG; psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs; OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs); + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs, FLUSH); #if defined(PDUMP) PDUMPCOMMENT(psDevInfo->psDeviceNode, @@ -6196,7 +5364,7 @@ PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, PDUMP_FLAGS_CONTINUOUS); #endif - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, RGXFWIF_DM_GP, @@ -6207,7 +5375,7 @@ PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); return eError; } @@ -6234,9 +5402,9 @@ void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bI ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo); ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo); - +#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo); - +#endif /* If at least one DM stalled bit is different than before */ if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask)) { @@ -6291,21 +5459,35 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, const RGXFWIF_SYSDATA* psFwSysData; const RGXFWIF_OSDATA* psFwOsData; const RGXFWIF_CCB_CTL* psKCCBCtl; + RGXFWIF_CCB_CTL* psKCCBCtlLocal; IMG_UINT32 ui32ThreadCount; IMG_BOOL bKCCBCmdsWaiting; + PVRSRV_ERROR eError; PVR_ASSERT(psDevNode != NULL); psDevInfo = psDevNode->pvDevice; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevNode, PVRSRV_OK); + /* If the firmware is not yet initialised or has already deinitialised, stop here */ if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || - psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT) + psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) { return PVRSRV_OK; } psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; - psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) + { + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, + INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + } + + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, + INVALIDATE); psFwOsData = psDevInfo->psRGXFWIfFwOsData; /* If this is a quick update, then include the last current value... */ @@ -6321,73 +5503,118 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, psDevInfo->ui32SLRHoldoffCounter--; } + /* Take power lock, retry if it's in use in another task. 
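The LOOP_UNTIL_TIMEOUT_US()/END_LOOP_UNTIL_TIMEOUT_US() pairs in these hunks wrap the same retry idiom each time; a self-contained C analogue, with hypothetical names and timing values, looks roughly like this:

#include <stdio.h>
#include <unistd.h>

/* Assumed, illustrative values; the real driver takes these from its headers. */
#define TIME_BUDGET_US   500000u
#define TRY_COUNT        20u

typedef enum { ERR_OK, ERR_RETRY, ERR_FATAL } ERR_SKETCH;

/* Stand-in for a call such as RGXScheduleCommand(): here it reports
 * "retry" for the first two attempts, then succeeds. */
static ERR_SKETCH ScheduleOnce(void)
{
    static int iCalls;
    return (++iCalls < 3) ? ERR_RETRY : ERR_OK;
}

static ERR_SKETCH ScheduleWithRetry(void)
{
    unsigned uElapsedUs = 0;
    ERR_SKETCH eErr = ERR_RETRY;

    while (uElapsedUs < TIME_BUDGET_US)
    {
        eErr = ScheduleOnce();
        if (eErr != ERR_RETRY)
        {
            break;                            /* success or a non-retryable error */
        }
        usleep(TIME_BUDGET_US / TRY_COUNT);   /* back off before the next attempt */
        uElapsedUs += TIME_BUDGET_US / TRY_COUNT;
    }
    return eErr;                              /* ERR_RETRY here means the budget ran out */
}

int main(void)
{
    printf("result: %d\n", ScheduleWithRetry());  /* prints 0 (ERR_OK) after two retries */
    return 0;
}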
*/ + eError = PVRSRVPowerTryLockWaitForTimeout(psDevNode); + if (eError == PVRSRV_ERROR_TIMEOUT) + { + /* Skip health status update if timeout */ + PVR_DPF((PVR_DBG_WARNING, "%s: Power lock timeout, increase OS_POWERLOCK_TIMEOUT_US.", __func__)); + goto _RGXUpdateHealthStatus_Exit; + } + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerTryLockWaitForTimeout"); + + /* If the firmware is not yet initialised or has already deinitialised, stop here */ + if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || + psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING) + { + PVRSRVPowerUnlock(psDevNode); + return PVRSRV_OK; + } + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) + { + /* On a PCI error all reads from the PCI bar may return 0xFFFFFFFF. + This value is not valid for a core ID. */ + if (psFwSysData->ui32MemFaultCheck == RGX_PCI_ERROR_VALUE_DWORD) + { + PVR_DPF((PVR_DBG_WARNING, "%s: PCI error", __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR; + PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_PCI_ERROR); + PVRSRVPowerUnlock(psDevNode); + goto _RGXUpdateHealthStatus_Exit; + } + } /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */ if (PVRSRVIsDevicePowered(psDevNode)) { - /* - Firmware thread checks... - */ if (psRGXFWIfTraceBufCtl != NULL) { + /* + Firmware thread checks... + */ for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++) { - const IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; + const IMG_CHAR* pszTraceAssertInfo; + + RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf, INVALIDATE); + pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; /* Check if the FW has hit an assert... */ if (*pszTraceAssertInfo != '\0') { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)", - __func__, ui32ThreadCount, pszTraceAssertInfo, + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %.*s (%.*s:%d)", + __func__, ui32ThreadCount, RGXFW_TRACE_BUFFER_ASSERT_SIZE, + pszTraceAssertInfo, RGXFW_TRACE_BUFFER_ASSERT_SIZE, psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath, psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum)); eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED; + PVRSRVPowerUnlock(psDevNode); goto _RGXUpdateHealthStatus_Exit; } - /* - Check the threads to see if they are in the same poll locations as last time... - */ - if (bCheckAfterTimePassed) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) { - if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && - psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) + /* + Check the threads to see if they are in the same poll locations as last time... 
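The change from "%s" to "%.*s" when printing the firmware assert buffer bounds how much of the fixed-size buffer is read; a small sketch of the same idea, with an assumed placeholder size instead of RGXFW_TRACE_BUFFER_ASSERT_SIZE:

#include <stdio.h>
#include <string.h>

#define ASSERT_BUF_SIZE 8   /* assumed size for illustration */

int main(void)
{
    /* A fixed-size buffer that is not guaranteed to be NUL-terminated. */
    char acInfo[ASSERT_BUF_SIZE];
    memcpy(acInfo, "ASSERTED", ASSERT_BUF_SIZE);   /* fills all 8 bytes, no NUL */

    /* "%.*s" caps the number of characters printed at the given precision,
     * so the read never runs past the end of the buffer. */
    printf("FW assert: %.*s\n", ASSERT_BUF_SIZE, acInfo);
    return 0;
}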
+ */ + if (bCheckAfterTimePassed) { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", - __func__, ui32ThreadCount, - ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), - psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, - psFwSysData->aui32CrPollMask[ui32ThreadCount])); - eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; - eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; - goto _RGXUpdateHealthStatus_Exit; + if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && + psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", + __func__, ui32ThreadCount, + ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[ui32ThreadCount])); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; + PVRSRVPowerUnlock(psDevNode); + goto _RGXUpdateHealthStatus_Exit; + } + psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; } - psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; } } - /* - Check if the FW has faulted... - */ - if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode)) { - PVR_DPF((PVR_DBG_WARNING, - "%s: Firmware has faulted and needs to restart", - __func__)); - eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; - if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) - { - eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; - } - else + /* + Check if the FW has faulted... + */ + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) { - eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; + PVR_DPF((PVR_DBG_WARNING, + "%s: Firmware has faulted and needs to restart", + __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; + } + else + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; + } + PVRSRVPowerUnlock(psDevNode); + goto _RGXUpdateHealthStatus_Exit; } - goto _RGXUpdateHealthStatus_Exit; } } @@ -6412,20 +5639,27 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, that some have executed since then. 
*/ bKCCBCmdsWaiting = IMG_FALSE; + + RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); psKCCBCtl = psDevInfo->psKernelCCBCtl; + psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; - if (psKCCBCtl != NULL) + if (psKCCBCtl != NULL && psKCCBCtlLocal != NULL) { - if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask || - psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask) + /* update KCCB read offset */ + RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32ReadOffset, INVALIDATE); + psKCCBCtlLocal->ui32ReadOffset = psKCCBCtl->ui32ReadOffset; + + if (psKCCBCtlLocal->ui32ReadOffset > psKCCBCtlLocal->ui32WrapMask || + psKCCBCtlLocal->ui32WriteOffset > psKCCBCtlLocal->ui32WrapMask) { PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)", - __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset)); + __func__, psKCCBCtlLocal->ui32ReadOffset, psKCCBCtlLocal->ui32WriteOffset)); eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; } - if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset) + if (psKCCBCtlLocal->ui32ReadOffset != psKCCBCtlLocal->ui32WriteOffset) { bKCCBCmdsWaiting = IMG_TRUE; } @@ -6452,9 +5686,15 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, If no commands are currently pending and nothing happened since the last poll, then schedule a dummy command to ping the firmware so we know it is alive and processing. */ - if (!bKCCBCmdsWaiting) + if ((!bKCCBCmdsWaiting) && + (eNewStatus != PVRSRV_DEVICE_HEALTH_STATUS_DEAD) && + (eNewStatus != PVRSRV_DEVICE_HEALTH_STATUS_FAULT)) { - PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); + /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the + * PMR lock itself, because some bridge functions will take the PMR lock + * before calling RGXScheduleCommand + */ + PVRSRV_ERROR eError = RGXFWHealthCheckCmdWithoutPowerLock(psDevNode->pvDevice); if (eError != PVRSRV_OK) { @@ -6481,14 +5721,21 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, IMG_UINT32 ui32LISRCount = 0; IMG_UINT32 ui32FWCount = 0; IMG_UINT32 ui32MissingInts = 0; - IMG_UINT32 ui32Index; /* Add up the total number of interrupts issued, sampled/received and missed... */ +#if defined(RGX_FW_IRQ_OS_COUNTERS) + /* Only the Host OS has a sample count, so only one counter to check. */ + ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_DRIVER_ID]; + ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_DRIVER_ID]); +#else + IMG_UINT32 ui32Index; + for (ui32Index = 0; ui32Index < RGXFW_THREAD_NUM; ui32Index++) { ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index]; ui32FWCount += psFwOsData->aui32InterruptCount[ui32Index]; } +#endif /* RGX_FW_IRQ_OS_COUNTERS */ if (ui32LISRCount < ui32FWCount) { @@ -6515,6 +5762,9 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts; } + /* Release power lock before RGXCheckForStalledClientContexts */ + PVRSRVPowerUnlock(psDevNode); + /* Stalled CCB check... 
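The health check in this hunk totals the host-side (LISR) and firmware-side interrupt counters and flags a shortfall; a toy version of that accounting, with an assumed thread count and sample values:

#include <stdint.h>
#include <stdio.h>

#define THREAD_NUM 2u   /* assumed; the driver uses RGXFW_THREAD_NUM */

int main(void)
{
    /* Host-side (LISR) and firmware-side interrupt counters per FW thread. */
    uint32_t aui32SampleIRQCount[THREAD_NUM] = { 10, 7 };
    uint32_t aui32InterruptCount[THREAD_NUM] = { 12, 7 };
    uint32_t ui32LISRCount = 0, ui32FWCount = 0, ui32MissingInts = 0, i;

    for (i = 0; i < THREAD_NUM; i++)
    {
        ui32LISRCount += aui32SampleIRQCount[i];
        ui32FWCount   += aui32InterruptCount[i];
    }

    /* The host handler should have seen at least as many interrupts as the
     * firmware raised; a shortfall suggests interrupts are being missed. */
    if (ui32LISRCount < ui32FWCount)
    {
        ui32MissingInts = ui32FWCount - ui32LISRCount;
    }
    printf("missing interrupts: %u\n", ui32MissingInts);   /* prints 2 */
    return 0;
}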
*/ @@ -6543,7 +5793,7 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus; sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, &sErrorData); } } @@ -6555,7 +5805,7 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, _RGXUpdateHealthStatus_Exit: OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus); OSAtomicWrite(&psDevNode->eHealthReason, eNewReason); - RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason); + RGXSRV_HWPERF_DEVICE_INFO_HEALTH(psDevInfo, eNewStatus, eNewReason); /* * Attempt to service the HWPerf buffer to regularly transport idle/periodic @@ -6572,1546 +5822,482 @@ PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, } } - /* Attempt to refresh timer correlation data */ - RGXTimeCorrRestartPeriodic(psDevNode); - - return PVRSRV_OK; -} /* RGXUpdateHealthStatus */ - -#if defined(SUPPORT_AUTOVZ) -void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) - { - /* read and write back the alive token value to confirm to the - * virtualisation watchdog that this connection is healthy */ - KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); - } -} - -/* - RGXUpdateAutoVzWatchdog -*/ -void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) -{ - if (likely(psDevNode != NULL)) - { - PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; - - if (unlikely((psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered || - psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) - { - /* If the firmware is not initialised, stop here */ - return; - } - else - { - PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode); - PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock"); - - RGXUpdateAutoVzWdgToken(psDevInfo); - PVRSRVPowerUnlock(psDevNode); - } - } -} -#endif /* SUPPORT_AUTOVZ */ - -PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) -{ - if (psCurrentServerCommonContext == NULL) - { - /* the context has already been freed so there is nothing to do here */ - return PVRSRV_OK; - } - - return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, - psCurrentServerCommonContext->psClientCCB, - eKickTypeDM); -} - -void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - IMG_UINT32 ui32VerbLevel) -{ - if (psCurrentServerCommonContext == NULL) - { - /* the context has already been freed so there is nothing to do here */ - return; - } - - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) - { - /* If high verbosity requested, dump whole CCB */ - DumpCCB(psCurrentServerCommonContext->psDevInfo, - psCurrentServerCommonContext->sFWCommonContextFWAddr, - psCurrentServerCommonContext->psClientCCB, - pfnDumpDebugPrintf, - pvDumpDebugFile); - } - else - { - /* Otherwise, only dump first command in the CCB */ - DumpFirstCCBCmd(psCurrentServerCommonContext->sFWCommonContextFWAddr, - psCurrentServerCommonContext->psClientCCB, - pfnDumpDebugPrintf, - pvDumpDebugFile); - } -} - -PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL 
*apsCleanupCtl, - IMG_UINT32 *pui32NumCleanupCtl, - RGXFWIF_DM eDM, - IMG_BOOL bKick, - RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, - RGX_ZSBUFFER_DATA *psZSBuffer, - RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) -{ - PVRSRV_ERROR eError; - PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; - - PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); - PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); - - if (bKick) - { - if (psKMHWRTDataSet) - { - PRGXFWIF_CLEANUP_CTL psCleanupCtl; - - eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, - offsetof(RGXFWIF_HWRTDATA, sCleanupState), - RFW_FWADDR_NOREF_FLAG); - PVR_RETURN_IF_ERROR(eError); - - *(psCleanupCtlWrite++) = psCleanupCtl; - } - - if (eDM == RGXFWIF_DM_3D) - { - RGXFWIF_PRBUFFER_TYPE eBufferType; - RGX_ZSBUFFER_DATA *psBuffer = NULL; - - for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) - { - switch (eBufferType) - { - case RGXFWIF_PRBUFFER_ZSBUFFER: - psBuffer = psZSBuffer; - break; - case RGXFWIF_PRBUFFER_MSAABUFFER: - psBuffer = psMSAAScratchBuffer; - break; - case RGXFWIF_PRBUFFER_MAXSUPPORTED: - psBuffer = NULL; - break; - } - if (psBuffer) - { - (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + - offsetof(RGXFWIF_PRBUFFER, sCleanupState); - psBuffer = NULL; - } - } - } - } - - *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; - PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - RGXFWIF_HWRINFOBUF *psHWRInfoBuf; - IMG_UINT32 i; - - if (psDevNode->pvDevice == NULL) - { - return PVRSRV_ERROR_INVALID_DEVINFO; - } - psDevInfo = psDevNode->pvDevice; - - psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; - - for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++) - { - /* Reset the HWR numbers */ - psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; - psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; - psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; - psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; - } - - for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) - { - psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; - } - - psHWRInfoBuf->ui32WriteIndex = 0; - psHWRInfoBuf->ui32DDReqCount = 0; - - OSWriteMemoryBarrier(&psHWRInfoBuf->ui32DDReqCount); - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, - IMG_DEV_PHYADDR *psPhyAddr, - IMG_UINT32 ui32LogicalOffset, - IMG_UINT32 ui32Log2PageSize, - IMG_UINT32 ui32NumOfPages, - IMG_BOOL *bValid) -{ - - PVRSRV_ERROR eError; - - eError = PMRLockSysPhysAddresses(psPMR); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: PMRLockSysPhysAddresses failed (%u)", - __func__, - eError)); - return eError; - } - - eError = PMR_DevPhysAddr(psPMR, - ui32Log2PageSize, - ui32NumOfPages, - ui32LogicalOffset, - psPhyAddr, - bValid); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: PMR_DevPhysAddr failed (%u)", - __func__, - eError)); - return eError; - } - - - eError = PMRUnlockSysPhysAddresses(psPMR); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: PMRUnLockSysPhysAddresses failed (%u)", - __func__, - eError)); - return eError; - } - - return eError; -} - -#if defined(PDUMP) -PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - if 
(psDevInfo->bDumpedKCCBCtlAlready) - { - /* exiting capture range or pdump block */ - psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; - - /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, - "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", - psDevInfo->psKernelCCBCtl, - ui32WriteOffset, - ui32WriteOffset); - eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, - offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), - ui32WriteOffset, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError)); - } - } - - return eError; - -} -#endif - -/*! -******************************************************************************* - - @Function RGXClientConnectCompatCheck_ClientAgainstFW - - @Description - - Check compatibility of client and firmware (build options) - at the connection time. - - @Input psDeviceNode - device node - @Input ui32ClientBuildOptions - build options for the client - - @Return PVRSRV_ERROR - depending on mismatch found - -******************************************************************************/ -PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) -{ -#if !defined(NO_HARDWARE) || defined(PDUMP) -#if !defined(NO_HARDWARE) - IMG_UINT32 ui32BuildOptionsMismatch; - IMG_UINT32 ui32BuildOptionsFW; -#endif - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -#endif - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - -#if !defined(NO_HARDWARE) - if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", - __func__)); - return PVRSRV_ERROR_NOT_INITIALISED; - } - - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) - { - if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) - { - /* No need to wait if the FW has already updated the values */ - break; - } - OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); -#endif - -#if defined(PDUMP) - { - PVRSRV_ERROR eError; - - PDUMPCOMMENT(psDeviceNode, "Compatibility check: client and FW build options"); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions), - ui32ClientBuildOptions, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", - __func__, - eError)); - return eError; - } - } -#endif - -#if !defined(NO_HARDWARE) - ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions; - ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; - - if (ui32BuildOptionsMismatch != 0) - { - if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) - { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " - "extra options present in client: (0x%x). 
Please check rgx_options.h", - ui32ClientBuildOptions & ui32BuildOptionsMismatch )); - } - - if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) - { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " - "extra options present in Firmware: (0x%x). Please check rgx_options.h", - ui32BuildOptionsFW & ui32BuildOptionsMismatch )); - } - - return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); - } -#endif - - return PVRSRV_OK; -} - -/*! -******************************************************************************* - - @Function RGXFwRawHeapAllocMap - - @Description Register firmware heap for the specified guest OSID - - @Input psDeviceNode - device node - @Input ui32OSID - Guest OSID - @Input sDevPAddr - Heap address - @Input ui64DevPSize - Heap size - - @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. - -******************************************************************************/ -PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID, - IMG_DEV_PHYADDR sDevPAddr, - IMG_UINT64 ui64DevPSize) -{ - PVRSRV_ERROR eError; - IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH]; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID)); - PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, - PHYS_HEAP_USAGE_FW_MAIN); - PHYS_HEAP_CONFIG sFwHeapConfig; - - PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK); - - if (psFwMainConfig == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found.")); - return PVRSRV_ERROR_NOT_SUPPORTED; - } - - OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); - - if (!ui64DevPSize || - !sDevPAddr.uiAddr || - ui32OSID >= RGX_NUM_OS_SUPPORTED || - ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE) - { - PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName)); - return PVRSRV_ERROR_INVALID_PARAMS; - } - - sFwHeapConfig = *psFwMainConfig; - sFwHeapConfig.sStartAddr.uiAddr = 0; - sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr; - sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; - - eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, - szRegionRAName, - &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]); - PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID); - - eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]); - PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID); - - psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID]; - - PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID); - -#if (RGX_NUM_OS_SUPPORTED > 1) - /* don't clear the heap of other guests on allocation */ - uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL); -#endif - - /* if the firmware is already powered up, consider the firmware heaps are pre-mapped. 
*/ - if (psDeviceNode->bAutoVzFwIsUp) - { - uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); - DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); - } - - eError = DevmemFwAllocate(psDevInfo, - RGX_FIRMWARE_RAW_HEAP_SIZE, - uiRawFwHeapAllocFlags, - psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName, - &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); - PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); - - /* Mark this devmem heap as premapped so allocations will not require device mapping. */ - DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); - - if (ui32OSID == RGXFW_HOST_OS) - { - /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly - * No memory allocated from these sub-heaps will be individually mapped into the device's - * address space so they can remain marked permanently as premapped. */ - DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); - DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); - } - - return eError; -} - -/*! -******************************************************************************* - - @Function RGXFwRawHeapUnmapFree - - @Description Unregister firmware heap for the specified guest OSID - - @Input psDeviceNode - device node - @Input ui32OSID - Guest OSID - -******************************************************************************/ -void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - /* remove the premap status, so the heap can be unmapped and freed */ - if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID]) - { - DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE); - } - - if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]) - { - DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); - psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL; - } -} - -/*! 
-******************************************************************************* -@Function RGXRiscvHalt - -@Description Halt the RISC-V FW core (required for certain operations - done through Debug Module) - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW"); - - /* Send halt request (no need to select one or more harts on this RISC-V core) */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until hart is halted */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DMSTATUS, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - /* Clear halt request */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Send halt request (no need to select one or more harts on this RISC-V core) */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); - - /* Wait until hart is halted */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32), - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS))); - return PVRSRV_ERROR_TIMEOUT; - } - - /* Clear halt request */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -#endif - - return PVRSRV_OK; -} - -/*! -******************************************************************************* -@Function RGXRiscvIsHalted - -@Description Check if the RISC-V FW is halted - -@Input psDevInfo Pointer to device info - -@Return IMG_BOOL -******************************************************************************/ -IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if defined(NO_HARDWARE) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - /* Assume the core is always halted in nohw */ - return IMG_TRUE; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - return (OSReadHWReg32(pui32RegsBase, RGX_CR_FWCORE_DMI_DMSTATUS) & - RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U; -#endif -} - -/*! 
-******************************************************************************* -@Function RGXRiscvResume - -@Description Resume the RISC-V FW core - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW"); - - /* Send resume request (no need to select one or more harts on this RISC-V core) */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until hart is resumed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DMSTATUS, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - /* Clear resume request */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, - PDUMP_FLAGS_CONTINUOUS); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Send resume request (no need to select one or more harts on this RISC-V core) */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); - - /* Wait until hart is resumed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32), - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS))); - return PVRSRV_ERROR_TIMEOUT; - } - - /* Clear resume request */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, - RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -#endif - - return PVRSRV_OK; -} - -/*! 
-******************************************************************************* -@Function RGXRiscvCheckAbstractCmdError - -@Description Check for RISC-V abstract command errors and clear them - -@Input psDevInfo Pointer to GPU device info - -@Return RGXRISCVFW_ABSTRACT_CMD_ERR -******************************************************************************/ -static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr; - -#if defined(NO_HARDWARE) && defined(PDUMP) - eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR; - - /* Check error status */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT, - ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; - - /* Check error status */ - eCmdErr = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS) - & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK) - >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT; - - if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR) - { - PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr)); - - /* Clear the error (note CMDERR field is write-1-to-clear) */ - OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS, - ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK); - } -#endif - - return eCmdErr; -} - -/*! -******************************************************************************* -@Function RGXRiscvReadReg - -@Description Read a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 *pui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32RegAddr); - PVR_UNREFERENCED_PARAMETER(pui32Value); - - /* Reading HW registers is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Send abstract register read command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr); - - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; - } - - if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) - { - /* Read register value */ - *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); - } - else - { - *pui32Value = 0U; - } - - return PVRSRV_OK; -#endif -} - -/*! 
-******************************************************************************* -@Function RGXRiscvPollReg - -@Description Poll for a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Poll RISC-V register 0x%x (expected 0x%08x)", - ui32RegAddr, ui32Value); - - /* Send abstract register read command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvCheckAbstractCmdError(psDevInfo); - - /* Check read value */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DATA0, - ui32Value, - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32RegAddr); - PVR_UNREFERENCED_PARAMETER(ui32Value); - - /* Polling HW registers is currently not required driverlive */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#endif -} - -/*! 
-******************************************************************************* -@Function RGXRiscvWriteReg - -@Description Write a value to the given RISC-V register (GPR or CSR) + /* Attempt to refresh timer correlation data */ + RGXTimeCorrRestartPeriodic(psDevNode); -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Write value + return PVRSRV_OK; +} /* RGXUpdateHealthStatus */ -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value) +#if defined(SUPPORT_AUTOVZ) +void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Write RISC-V register 0x%x (value 0x%08x)", - ui32RegAddr, ui32Value); - - /* Prepare data to be written to register */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, - ui32Value, PDUMP_FLAGS_CONTINUOUS); - - /* Send abstract register write command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Prepare data to be written to register */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); - - /* Send abstract register write command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | - ui32RegAddr); - - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + KM_CONNECTION_CACHEOP(Fw, INVALIDATE); + KM_CONNECTION_CACHEOP(Os, INVALIDATE); + if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && + (KM_OS_CONNECTION_IS(ACTIVE, psDevInfo) || KM_OS_CONNECTION_IS(READY, psDevInfo)))) { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; + /* read and write back the alive token value to confirm to the + * virtualisation watchdog that this connection is healthy */ + KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); + KM_ALIVE_TOKEN_CACHEOP(Os, FLUSH); } -#endif - - return PVRSRV_OK; } -/*! 
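The new RGXUpdateAutoVzWdgToken() above implements a simple liveness echo: the driver reads the firmware's alive token and writes it back as its own, so the virtualisation watchdog sees matching tokens while the connection is healthy. A stripped-down sketch of that handshake, with illustrative field names and no cache maintenance:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vz_connection {
    volatile uint32_t fw_alive_token;   /* advanced by the firmware            */
    volatile uint32_t os_alive_token;   /* echoed back by the kernel driver    */
};

static void os_kick_watchdog(struct vz_connection *c)
{
    /* the real path invalidates/flushes the shared memory around this access */
    c->os_alive_token = c->fw_alive_token;
}

static bool fw_connection_healthy(const struct vz_connection *c)
{
    return c->os_alive_token == c->fw_alive_token;
}

int main(void)
{
    struct vz_connection conn = { .fw_alive_token = 41, .os_alive_token = 40 };

    conn.fw_alive_token++;              /* firmware ticks its token            */
    os_kick_watchdog(&conn);            /* driver echoes it within the period  */
    printf("healthy: %d\n", fw_connection_healthy(&conn));
    return 0;
}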
-******************************************************************************* -@Function RGXRiscvCheckSysBusError +/* + RGXUpdateAutoVzWatchdog +*/ +void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) +{ + if (likely(psDevNode != NULL)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -@Description Check for RISC-V system bus errors and clear them + if (unlikely((psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered || + psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || + psDevNode->eDevState == PVRSRV_DEVICE_STATE_DESTRUCTING))) + { + /* If the firmware is not initialised, stop here */ + return; + } + else + { + PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock"); -@Input psDevInfo Pointer to GPU device info + RGXUpdateAutoVzWdgToken(psDevInfo); + PVRSRVPowerUnlock(psDevNode); + } + } +} -@Return RGXRISCVFW_SYSBUS_ERR -******************************************************************************/ -static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo) +PVRSRV_ERROR RGXDisconnectAllGuests(PVRSRV_DEVICE_NODE *psDeviceNode) { - RGXRISCVFW_SYSBUS_ERR eSBError; - -#if defined(NO_HARDWARE) && defined(PDUMP) - eSBError = RISCV_SYSBUS_NO_ERROR; - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBCS, - RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT, - ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32DriverID; - eSBError = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS) - & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK) - >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror, + INVALIDATE); - if (eSBError != RISCV_SYSBUS_NO_ERROR) + for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START; + ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; + ui32DriverID++) { - PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError)); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) + psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState; - /* Clear the error (note SBERROR field is write-1-to-clear) */ - OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS, - ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK); + if (eGuestState == RGXFW_CONNECTION_FW_ACTIVE || + eGuestState == RGXFW_CONNECTION_FW_READY) + { + PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, ui32DriverID, RGXFWIF_OS_OFFLINE); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); + } } -#endif - return eSBError; + return PVRSRV_OK; } +#endif /* SUPPORT_AUTOVZ */ -#if !defined(EMULATOR) -/*! 
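RGXDisconnectAllGuests() above walks the mirrored per-driver runtime flags and pushes every ACTIVE or READY guest connection to OFFLINE. The loop below shows the same shape with placeholder state values and a stubbed state-change call; the real path issues a firmware command and can fail:

#include <stdio.h>

enum fw_conn_state { FW_OFFLINE, FW_READY, FW_ACTIVE };
#define NUM_DRIVERS 4                   /* driver 0 is the host, 1..3 guests (example) */

static enum fw_conn_state os_state[NUM_DRIVERS] = {
    FW_ACTIVE, FW_ACTIVE, FW_READY, FW_OFFLINE
};

static int set_fw_os_state(unsigned id, enum fw_conn_state s)
{
    os_state[id] = s;                   /* stub: real code sends a firmware command */
    return 0;
}

int main(void)
{
    for (unsigned id = 1; id < NUM_DRIVERS; id++) {        /* guests only */
        if (os_state[id] == FW_ACTIVE || os_state[id] == FW_READY) {
            if (set_fw_os_state(id, FW_OFFLINE) != 0)
                return 1;                                  /* propagate the error */
        }
    }

    for (unsigned id = 0; id < NUM_DRIVERS; id++)
        printf("driver %u -> %d\n", id, (int)os_state[id]);
    return 0;
}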
-******************************************************************************* -@Function RGXRiscvReadAbstractMem +PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, + IMG_UINT32 *pui32NumCleanupCtl, + RGXFWIF_DM eDM, + IMG_BOOL bKick, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) +{ + PVRSRV_ERROR eError; + PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; -@Description Read a value at the given address in RISC-V memory space - using RISC-V abstract memory commands + PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space + if (bKick) + { + if (psKMHWRTDataSet) + { + PRGXFWIF_CLEANUP_CTL psCleanupCtl; -@Output pui32Value Read value + eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, + offsetof(RGXFWIF_HWRTDATA, sCleanupState), + RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(pui32Value); + *(psCleanupCtlWrite++) = psCleanupCtl; + } - /* Reading memory is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + if (eDM == RGXFWIF_DM_3D) + { + RGXFWIF_PRBUFFER_TYPE eBufferType; + RGX_ZSBUFFER_DATA *psBuffer = NULL; + + for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) + { + switch (eBufferType) + { + case RGXFWIF_PRBUFFER_ZSBUFFER: + psBuffer = psZSBuffer; + break; + case RGXFWIF_PRBUFFER_MSAABUFFER: + psBuffer = psMSAAScratchBuffer; + break; + case RGXFWIF_PRBUFFER_MAXSUPPORTED: + psBuffer = NULL; + break; + } + if (psBuffer) + { + (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + + offsetof(RGXFWIF_PRBUFFER, sCleanupState); + psBuffer = NULL; + } + } + } + } - /* Prepare read address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); + *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; + PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); - /* Send abstract memory read command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); + return PVRSRV_OK; +} - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) +PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_HWRINFOBUF *psHWRInfoBuf; + IMG_UINT32 i; + + if (psDevNode->pvDevice == NULL) { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; 
+ return PVRSRV_ERROR_INVALID_DEVINFO; } + psDevInfo = psDevNode->pvDevice; + + psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; - if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) + for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++) { - /* Read memory value */ - *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); + /* Reset the HWR numbers */ + psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; + psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; + psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; + psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; } - else + + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) { - *pui32Value = 0U; + psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; } - return PVRSRV_OK; -#endif -} -#endif /* !defined(EMULATOR) */ - -/*! -******************************************************************************* -@Function RGXRiscvPollAbstractMem - -@Description Poll for a value at the given address in RISC-V memory space - using RISC-V abstract memory commands - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value + psHWRInfoBuf->ui32WriteIndex = 0; + psHWRInfoBuf->ui32DDReqCount = 0; -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, - PDUMP_FLAGS_CONTINUOUS, - "Poll RISC-V address 0x%x (expected 0x%08x)", - ui32Addr, ui32Value); - - /* Prepare read address */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, - ui32Addr, PDUMP_FLAGS_CONTINUOUS); - - /* Send abstract memory read command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_READ | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvCheckAbstractCmdError(psDevInfo); - - /* Check read value */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_DATA0, - ui32Value, - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); + OSWriteMemoryBarrier(&psHWRInfoBuf->ui32DDReqCount); return PVRSRV_OK; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(ui32Value); - - /* Polling memory is currently not required driverlive */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#endif } -#if !defined(EMULATOR) -/*! 
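AttachKickResourcesCleanupCtls() above appends optional cleanup controls through a write cursor into a fixed-size array and derives the attached count from the pointer difference at the end. A tiny standalone example of that idiom, with plain integers standing in for the firmware cleanup-control addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CLEANUP_CTLS 4

int main(void)
{
    uint32_t cleanup[MAX_CLEANUP_CTLS];
    uint32_t *write = cleanup;                       /* write cursor */
    uint32_t hwrt_addr = 0x1000, zs_addr = 0, msaa_addr = 0x3000; /* 0 = not supplied */

    if (hwrt_addr) *write++ = hwrt_addr;             /* only attach what was provided */
    if (zs_addr)   *write++ = zs_addr;
    if (msaa_addr) *write++ = msaa_addr;

    uint32_t count = (uint32_t)(write - cleanup);    /* pointer difference = entries written */
    assert(count <= MAX_CLEANUP_CTLS);
    printf("attached %u cleanup controls\n", count);
    return 0;
}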
-******************************************************************************* -@Function RGXRiscvReadSysBusMem - -@Description Read a value at the given address in RISC-V memory space - using the RISC-V system bus - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) +PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid) { -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(pui32Value); - /* Reading memory is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; - - /* Configure system bus to read 32 bit every time a new address is provided */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_SBCS, - (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | - RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN); - - /* Perform read */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); + PVRSRV_ERROR eError; - /* Wait until system bus is idle */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); - return PVRSRV_ERROR_TIMEOUT; + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; } - if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR) + eError = PMR_DevPhysAddr(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + ui32LogicalOffset, + psPhyAddr, + bValid, + DEVICE_USE); + + if (eError != PVRSRV_OK) { - /* Read value from debug system bus */ - *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0); + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR_DevPhysAddr failed (%u)", + __func__, + eError)); + return eError; } - else + + + eError = PMRUnlockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) { - *pui32Value = 0U; + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRUnLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; } - return PVRSRV_OK; -#endif + return eError; } -#endif /* !defined(EMULATOR) */ -/*! 
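RGXGetPhyAddr() above brackets the physical-address lookup with PMRLockSysPhysAddresses()/PMRUnlockSysPhysAddresses(). The sketch below shows the general lock -> query -> unlock shape with stub functions; it funnels both success and failure of the query through a single release point, which is one common way to structure this pattern:

#include <stdint.h>
#include <stdio.h>

static int  pmr_lock(void)                  { return 0; }              /* stub */
static void pmr_unlock(void)                { }                        /* stub */
static int  pmr_dev_phys_addr(uint64_t *pa) { *pa = 0x80001000ull; return 0; }

static int get_phys_addr(uint64_t *pa)
{
    int err = pmr_lock();
    if (err)
        return err;

    err = pmr_dev_phys_addr(pa);            /* query while the addresses are pinned */

    pmr_unlock();                           /* single release point */
    return err;
}

int main(void)
{
    uint64_t pa = 0;
    int err = get_phys_addr(&pa);
    printf("err=%d pa=0x%llx\n", err, (unsigned long long)pa);
    return 0;
}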
-******************************************************************************* -@Function RGXRiscvPollSysBusMem +#if defined(PDUMP) +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_OK); -@Description Poll for a value at the given address in RISC-V memory space - using the RISC-V system bus + if (psDevInfo->bDumpedKCCBCtlAlready) + { + /* exiting capture range or pdump block */ + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value + /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ + PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, + "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", + psDevInfo->psKernelCCBCtl, + ui32WriteOffset, + ui32WriteOffset); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), + ui32WriteOffset, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Poll RISC-V address 0x%x (expected 0x%08x)", - ui32Addr, ui32Value); - - /* Configure system bus to read 32 bit every time a new address is provided */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, - (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | - RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN, - PDUMP_FLAGS_CONTINUOUS); - - /* Perform read */ - PDUMPREG32(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, - ui32Addr, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until system bus is idle */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBCS, - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvCheckSysBusError(psDevInfo); - - /* Check read value */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBDATA0, - ui32Value, - 0xFFFFFFFF, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError)); + } + } - return PVRSRV_OK; -#else - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(ui32Addr); - PVR_UNREFERENCED_PARAMETER(ui32Value); + return eError; - /* Polling memory is currently not required driverlive */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#endif } +#endif -#if !defined(EMULATOR) /*! ******************************************************************************* -@Function RGXRiscvReadMem -@Description Read a value at the given address in RISC-V memory space + @Function RGXClientConnectCompatCheck_ClientAgainstFW + + @Description + + Check compatibility of client and firmware (build options) + at the connection time. 
-@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space + @Input psDeviceNode - device node + @Input ui32ClientBuildOptions - build options for the client -@Output pui32Value Read value + @Return PVRSRV_ERROR - depending on mismatch found -@Return PVRSRV_ERROR ******************************************************************************/ -static PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 *pui32Value) +PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) { - if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + IMG_UINT32 ui32BuildOptionsMismatch; + IMG_UINT32 ui32BuildOptionsFW; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_FW_INFO_HEADER *psFWInfoHeader; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); + + if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) { - return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value); + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", + __func__)); + return PVRSRV_ERROR_NOT_INITIALISED; } - return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value); -} -#endif /* !defined(EMULATOR) */ + psFWInfoHeader = &psDevInfo->sFWInfoHeader; -/*! -******************************************************************************* -@Function RGXRiscvPollMem +#if !defined(NO_HARDWARE) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, + INVALIDATE); + if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + } +#endif + + ui32BuildOptionsFW = psFWInfoHeader->ui32Flags; + ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; -@Description Poll a value at the given address in RISC-V memory space + if (ui32BuildOptionsMismatch != 0) + { + if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in client: (0x%x). Please check rgx_options.h", + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + } -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value + if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFW & ui32BuildOptionsMismatch )); + } -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 ui32Value) -{ - if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + else { - return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. 
[ OK ]", __func__)); } - return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value); + return PVRSRV_OK; } -#if !defined(EMULATOR) /*! ******************************************************************************* -@Function RGXRiscvWriteAbstractMem -@Description Write a value at the given address in RISC-V memory space - using RISC-V abstract memory commands + @Function RGXFwRawHeapAllocMap -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value + @Description Register firmware heap for the specified driver -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Write RISC-V address 0x%x (value 0x%08x)", - ui32Addr, ui32Value); - - /* Prepare write address */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, - ui32Addr, PDUMP_FLAGS_CONTINUOUS); - - /* Prepare write data */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, - ui32Value, PDUMP_FLAGS_CONTINUOUS); - - /* Send abstract register write command */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, - PDUMP_FLAGS_CONTINUOUS); - - /* Wait until abstract command is completed */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_ABSTRACTCS, - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + @Input psDeviceNode - device node + @Input ui32DriverID - Guest driver + @Input sDevPAddr - Heap address + @Input ui64DevPSize - Heap size - /* Prepare write address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); + @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. 
- /* Prepare write data */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); +******************************************************************************/ +PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DriverID, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT64 ui64DevPSize) +{ + PVRSRV_ERROR eError; + IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH]; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32DriverID)); + PHYS_HEAP_CONFIG *psFwHeapConfig = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_SHARED); + PHYS_HEAP_CONFIG sFwHeapConfig; - /* Send abstract memory write command */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_COMMAND, - (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | - RGXRISCVFW_DMI_COMMAND_WRITE | - RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); - /* Wait until abstract command is completed */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + if (psFwHeapConfig == NULL) { - PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); - return PVRSRV_ERROR_TIMEOUT; + PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found.")); + return PVRSRV_ERROR_NOT_SUPPORTED; } -#endif - return PVRSRV_OK; -} + OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); -/*! 
-******************************************************************************* -@Function RGXRiscvWriteSysBusMem + if (!ui64DevPSize || + !sDevPAddr.uiAddr || + ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED || + ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName)); + return PVRSRV_ERROR_INVALID_PARAMS; + } -@Description Write a value at the given address in RISC-V memory space - using the RISC-V system bus + sFwHeapConfig = *psFwHeapConfig; + sFwHeapConfig.eType = PHYS_HEAP_TYPE_LMA; + sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP; + sFwHeapConfig.uConfig.sLMA.sStartAddr.uiAddr = 0; + sFwHeapConfig.uConfig.sLMA.sCardBase.uiAddr = sDevPAddr.uiAddr; + sFwHeapConfig.uConfig.sLMA.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value + eError = PhysmemCreateHeapLMA(psDeviceNode, + RGXPhysHeapGetLMAPolicy(sFwHeapConfig.ui32UsageFlags, psDeviceNode), + &sFwHeapConfig, + szRegionRAName, + &psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]); + PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32DriverID); -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR -RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -{ -#if defined(NO_HARDWARE) && defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Write RISC-V address 0x%x (value 0x%08x)", - ui32Addr, ui32Value); - - /* Configure system bus to read 32 bit every time a new address is provided */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, - RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT, - PDUMP_FLAGS_CONTINUOUS); - - /* Prepare write address */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, - ui32Addr, PDUMP_FLAGS_CONTINUOUS); - - /* Prepare write data and initiate write */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBDATA0, - ui32Value, PDUMP_FLAGS_CONTINUOUS); - - /* Wait until system bus is idle */ - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FWCORE_DMI_SBCS, - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - PDUMP_FLAGS_CONTINUOUS, - PDUMP_POLL_OPERATOR_EQUAL); -#else - IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]); + PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32DriverID); - /* Configure system bus for 32 bit accesses */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - RGX_CR_FWCORE_DMI_SBCS, - RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT); + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID] = psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]; - /* Prepare write address */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); + PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for DriverID: [%d]", ui32DriverID); - /* Prepare write data and initiate write */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value); +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* don't clear the heap of other guests on allocation */ + uiRawFwHeapAllocFlags &= (ui32DriverID > RGXFW_HOST_DRIVER_ID) ? 
(~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL); +#endif - /* Wait until system bus is idle */ - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), - 0U, - RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + /* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */ + if (psDeviceNode->bAutoVzFwIsUp) { - PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", - __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); - return PVRSRV_ERROR_TIMEOUT; + uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE); } -#endif - return PVRSRV_OK; -} - -/*! -******************************************************************************* -@Function RGXRiscvWriteMem - -@Description Write a value to the given address in RISC-V memory space +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + PVR_DPF((PVR_DBG_MESSAGE, "%s: Allocation and mapping for Firmware heaps done by TEE.", __func__)); +#else + eError = DevmemFwAllocate(psDevInfo, + RGX_FIRMWARE_RAW_HEAP_SIZE, + uiRawFwHeapAllocFlags, + psDevInfo->psPremappedFwRawHeap[ui32DriverID]->pszName, + &psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]); + PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); +#endif -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value + /* Mark this devmem heap as premapped so allocations will not require device mapping. */ + DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE); -@Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 ui32Value) -{ - if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + if (ui32DriverID == RGXFW_HOST_DRIVER_ID) { - return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value); + /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly + * No memory allocated from these sub-heaps will be individually mapped into the device's + * address space so they can remain marked permanently as premapped. */ + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); } - return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value); + return eError; } -#endif /* !defined(EMULATOR) */ /*! ******************************************************************************* -@Function RGXRiscvDmiOp -@Description Acquire the powerlock and perform an operation on the RISC-V - Debug Module Interface, but only if the GPU is powered on. + @Function RGXFwRawHeapUnmapFree -@Input psDevInfo Pointer to device info -@InOut pui64DMI Encoding of a request for the RISC-V Debug - Module with same format as the 'dmi' register - from the RISC-V debug specification (v0.13+). - On return, this is updated with the result of - the request, encoded the same way. 
+ @Description Unregister firmware heap for the specified guest driver + + @Input psDeviceNode - device node + @Input ui32DriverID -@Return PVRSRV_ERROR ******************************************************************************/ -PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 *pui64DMI) +void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DriverID) { -#if defined(NO_HARDWARE) && defined(PDUMP) - PVR_UNREFERENCED_PARAMETER(psDevInfo); - PVR_UNREFERENCED_PARAMETER(pui64DMI); - - /* Accessing DM registers is not supported in nohw/pdump */ - return PVRSRV_ERROR_NOT_SUPPORTED; -#else -#define DMI_BASE RGX_CR_FWCORE_DMI_RESERVED00 -#define DMI_STRIDE (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00) -#define DMI_REG(r) ((DMI_BASE) + (DMI_STRIDE) * (r)) - -#define DMI_OP_SHIFT 0U -#define DMI_OP_MASK 0x3ULL -#define DMI_DATA_SHIFT 2U -#define DMI_DATA_MASK 0x3FFFFFFFCULL -#define DMI_ADDRESS_SHIFT 34U -#define DMI_ADDRESS_MASK 0xFC00000000ULL - -#define DMI_OP_NOP 0U -#define DMI_OP_READ 1U -#define DMI_OP_WRITE 2U -#define DMI_OP_RESERVED 3U - -#define DMI_OP_STATUS_SUCCESS 0U -#define DMI_OP_STATUS_RESERVED 1U -#define DMI_OP_STATUS_FAILED 2U -#define DMI_OP_STATUS_BUSY 3U - - PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - PVRSRV_DEV_POWER_STATE ePowerState; - PVRSRV_ERROR eError; - IMG_UINT64 ui64Op, ui64Address, ui64Data; - - ui64Op = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT; - ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT; - ui64Data = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT; - - eError = PVRSRVPowerLock(psDeviceNode); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)", - __func__, PVRSRVGetErrorString(eError))); - ui64Op = DMI_OP_STATUS_FAILED; - goto dmiop_update; - } - - eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - if (eError != PVRSRV_OK) + /* remove the premap status, so the heap can be unmapped and freed */ + if (psDevInfo->psPremappedFwRawHeap[ui32DriverID]) { - PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)", - __func__, PVRSRVGetErrorString(eError))); - ui64Op = DMI_OP_STATUS_FAILED; - goto dmiop_release_lock; + DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_FALSE); } - if (ePowerState == PVRSRV_DEV_POWER_STATE_ON) - { - switch (ui64Op) - { - case DMI_OP_NOP: - ui64Op = DMI_OP_STATUS_SUCCESS; - break; - case DMI_OP_WRITE: - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, - DMI_REG(ui64Address), - (IMG_UINT32)ui64Data); - ui64Op = DMI_OP_STATUS_SUCCESS; - break; - case DMI_OP_READ: - ui64Data = (IMG_UINT64)OSReadHWReg32(psDevInfo->pvRegsBaseKM, - DMI_REG(ui64Address)); - ui64Op = DMI_OP_STATUS_SUCCESS; - break; - default: - PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op)); - ui64Op = DMI_OP_STATUS_FAILED; - break; - } - } - else + if (psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]) { - PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not " - "possible while the GPU is powered off", __func__)); - - ui64Op = DMI_OP_STATUS_FAILED; + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]); + psDevInfo->psPremappedFwRawMemDesc[ui32DriverID] = NULL; } - -dmiop_release_lock: - PVRSRVPowerUnlock(psDeviceNode); - -dmiop_update: - *pui64DMI = (ui64Op << DMI_OP_SHIFT) | - (ui64Address << DMI_ADDRESS_SHIFT) | - (ui64Data << DMI_DATA_SHIFT); - - return 
eError; -#endif } /* @@ -8119,6 +6305,7 @@ PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, */ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) { +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM; IMG_UINT8 __iomem *pui8RegBase = pvRegBase; IMG_UINT32 ui32PollValue; @@ -8132,27 +6319,27 @@ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui { if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) { - ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN; - ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN; - ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED; - ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED; - ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_EN; + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN; CHECK_HWBRN_68777(ui32WriteValue); - ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED; + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA; } else { - ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN; - ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN; - ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED; - ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED; - ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_EN; + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN; CHECK_HWBRN_68777(ui32WriteValue); - ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED; + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA; } } else @@ -8170,7 +6357,8 @@ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), ui32PollValue, 
ui32PollMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } @@ -8184,13 +6372,47 @@ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), ui32PollValue, ui32PollMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR, + NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } /* Read the value */ *pui32Value = OSReadUncheckedHWReg32(pvRegBase, ui32ReadOffset); +#else + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Read */ + OSWriteHWReg32( + psDevInfo->pvRegsBaseKM, + RGX_CR_META_SP_MSLVCTRL0, + ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); + (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); + + /* Wait for Slave Port to be Ready: read complete */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Read the value */ + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX); +#endif return PVRSRV_OK; } @@ -8200,49 +6422,49 @@ static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui */ static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) { +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM; IMG_UINT8 __iomem *pui8RegBase = pvRegBase; - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) { if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) { /* Wait for Slave Port to be Ready */ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED), - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA), + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } /* Issue the Write */ CHECK_HWBRN_68777(ui32METAAddr); - OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32METAAddr); - OSWriteUncheckedHWReg32(pvRegBase, 
RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32Value); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA, ui32METAAddr); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA, ui32Value); } else { /* Wait for Slave Port to be Ready */ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED), - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA), + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } /* Issue the Write */ CHECK_HWBRN_68777(ui32METAAddr); - OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32METAAddr); - OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32Value); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA, ui32METAAddr); + OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA, ui32Value); } } else @@ -8252,7 +6474,7 @@ static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 u (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) { return PVRSRV_ERROR_TIMEOUT; } @@ -8263,53 +6485,59 @@ static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 u OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT, ui32Value); (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ } +#else + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); +#endif return PVRSRV_OK; } PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) { - PVRSRV_ERROR eError; - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { - eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); + return RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); } + #if !defined(EMULATOR) - else if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { - eError = RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value); + return RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value); } #endif - else - { - eError = PVRSRV_ERROR_NOT_SUPPORTED; - } - return eError; + return PVRSRV_ERROR_NOT_SUPPORTED; } PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) { - PVRSRV_ERROR eError; - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { - eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); + return RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); } + #if !defined(EMULATOR) - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) { - eError = RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value); + return RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value); } #endif - else - { - eError = PVRSRV_ERROR_NOT_SUPPORTED; - } - - return eError; + return PVRSRV_ERROR_NOT_SUPPORTED; } PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, @@ -8325,18 +6553,22 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, MMU_FAULT_DATA sFaultData = {0U}; MMU_CONTEXT *psFwMMUCtx = psDevInfo->psKernelMMUCtx; IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX); - IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); - IMG_UINT32 ui32OSID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE; + IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); + IMG_UINT32 ui32DriverID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE; IMG_UINT32 ui32HeapId; PHYS_HEAP *psPhysHeap; - IMG_UINT64 ui64FwDataBaseMask; - IMG_DEV_VIRTADDR sDevVAddr; - +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + /* MIPS uses the same page size as the OS, while others default to 4K pages */ + IMG_UINT32 ui32FwPageSize = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? + OSGetPageSize() : BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); +#else /* default to 4K pages */ IMG_UINT32 ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); +#endif + IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1)); - PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED), + PVR_LOG_GOTO_IF_INVALID_PARAM((ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED), eError, ErrorExit); PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) || @@ -8348,45 +6580,74 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, (ui32FwVA < ui32FwHeapEnd)), eError, ErrorExit); - ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ? - PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID); + ui32HeapId = (ui32DriverID == RGXFW_HOST_DRIVER_ID) ? 
+ PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID); psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId]; - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { - ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK | - RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK | - RGXFW_SEGMMU_DATA_BASE_ADDRESS); - } - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - ui64FwDataBaseMask = ~(RGXRISCVFW_GET_REGION_BASE(0xF)); + /* MIPS is equipped with a dedicated MMU */ + RGXMipsCheckFaultAddress(psFwMMUCtx, ui32FwVA, &sFaultData); } else +#endif { - PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); - } + IMG_UINT64 ui64FwDataBaseMask; + IMG_DEV_VIRTADDR sDevVAddr; - sDevVAddr.uiAddr = (ui32FwVA & ui64FwDataBaseMask) | RGX_FIRMWARE_RAW_HEAP_BASE; + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK | + RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK | + RGXFW_SEGMMU_DATA_BASE_ADDRESS); + } +#if !defined(EMULATOR) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + ui64FwDataBaseMask = ~RGXRISCVFW_REGION_MASK; + } +#endif + else + { + PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); + } + + sDevVAddr.uiAddr = (ui32FwVA & ui64FwDataBaseMask) | RGX_FIRMWARE_RAW_HEAP_BASE; - /* Fw CPU shares a subset of the GPU's VA space */ - MMU_CheckFaultAddress(psFwMMUCtx, &sDevVAddr, &sFaultData); + /* Fw CPU shares a subset of the GPU's VA space */ + MMU_CheckFaultAddress(psFwMMUCtx, &sDevVAddr, &sFaultData); + } ui64RawPTE = sFaultData.sLevelData[MMU_LEVEL_1].ui64Address; if (eError == PVRSRV_OK) { - if (!BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN)) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + IMG_BOOL bValidPage = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? + BITMASK_HAS(ui64RawPTE, RGXMIPSFW_TLB_VALID) : + BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); +#else + IMG_BOOL bValidPage = BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); +#endif + + if (!bValidPage) { /* don't report invalid pages */ eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING; } else { +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + sDevPA.uiAddr = ui32PageOffset + ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? + RGXMIPSFW_TLB_GET_PA(ui64RawPTE) : + (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)); +#else sDevPA.uiAddr = ui32PageOffset + (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK); +#endif /* Only the Host's Firmware heap is present in the Host's CPU IPA space */ - if (ui32OSID == RGXFW_HOST_OS) + if (ui32DriverID == RGXFW_HOST_DRIVER_ID) { PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA); } @@ -8416,34 +6677,26 @@ PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, return eError; } -#if defined(SUPPORT_WORKLOAD_ESTIMATION) -/*! 
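RGXGetFwMapping() above recovers a physical address by adding the page offset of the firmware VA to the page frame taken from the level-1 page-table entry, after first deriving the owning driver ID from the VA's position inside the raw firmware heap. A worked example of that arithmetic with example constants; the real heap base/size, page size and PTE layout come from the RGX headers:

#include <stdint.h>
#include <stdio.h>

#define FW_HEAP_BASE   0x7E000000u
#define FW_HEAP_SIZE   0x02000000u          /* 32 MB per driver (example)    */
#define FW_PAGE_SIZE   0x1000u              /* 4 KB firmware MMU pages       */
#define PTE_VALID      0x1ull
#define PTE_PFN_MASK   0xFFFFFFFFF000ull    /* bits [47:12] hold the page PA */

int main(void)
{
    uint32_t fw_va   = FW_HEAP_BASE + 0x00012345u;
    uint64_t raw_pte = 0x000080045000ull | PTE_VALID;   /* pretend page-table walk result */

    uint32_t driver_id   = (fw_va - FW_HEAP_BASE) / FW_HEAP_SIZE;
    uint32_t page_offset = fw_va & (FW_PAGE_SIZE - 1u);

    if (!(raw_pte & PTE_VALID)) {
        puts("no mapping");                 /* mirrors the "don't report invalid pages" case */
        return 1;
    }

    uint64_t dev_pa = (raw_pte & PTE_PFN_MASK) + page_offset;
    printf("driver %u, offset 0x%x, PA 0x%llx\n",
           driver_id, page_offset, (unsigned long long)dev_pa);
    return 0;
}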
-******************************************************************************* -@Function RGXIsValidWorkloadEstCCBCommand - -@Description Checks if command type can be used for workload estimation +PVRSRV_ERROR +RGXFWSetVzConnectionCooldownPeriod(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VzConnectionCooldownPeriodInSec) +{ + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVINFO, psDevInfo, PVRSRV_ERROR_NOT_SUPPORTED); -@Input eType Command type to check + psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = ui32VzConnectionCooldownPeriodInSec; + OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec); +#if defined(PDUMP) + PDUMPCOMMENT(psDevInfo->psDeviceNode, + "Updating the Vz reconnect request cooldown period inside RGXFWIfRuntimeCfg"); + DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + offsetof(RGXFWIF_RUNTIME_CFG, ui32VzConnectionCooldownPeriodInSec), + ui32VzConnectionCooldownPeriodInSec, + PDUMP_FLAGS_CONTINUOUS); +#endif -@Return IMG_BOOL -******************************************************************************/ -INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType) -{ - switch (eType) - { - case RGXFWIF_CCB_CMD_TYPE_GEOM: - case RGXFWIF_CCB_CMD_TYPE_3D: - case RGXFWIF_CCB_CMD_TYPE_CDM: - case RGXFWIF_CCB_CMD_TYPE_RAY: - case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: - return IMG_TRUE; - default: - PVR_ASSERT(IMG_FALSE); - return IMG_FALSE; - } + return PVRSRV_OK; } -#endif /****************************************************************************** End of file (rgxfwutils.c) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.h deleted file mode 100644 index 687d31077392..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxfwutils.h +++ /dev/null @@ -1,1418 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title RGX firmware utility routines -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description RGX firmware utility routines -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. 
If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#ifndef RGXFWUTILS_H -#define RGXFWUTILS_H - -#include "rgx_memallocflags.h" -#include "rgxdevice.h" -#include "rgxccb.h" -#include "devicemem.h" -#include "device.h" -#include "pvr_notifier.h" -#include "pvrsrv.h" -#include "connection_server.h" -#include "rgxta3d.h" -#include "devicemem_utils.h" -#include "rgxmem.h" - -#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */ - -static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, - PVRSRV_MEMALLOCFLAGS_T *puiFlags, - DEVMEM_HEAP **ppsFwHeap) -{ - PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags); - PVRSRV_ERROR eError = PVRSRV_OK; - - switch (ePhysHeap) - { -#if defined(SUPPORT_SECURITY_VALIDATION) - /* call with GPU_SECURE from RGXSetupFwSysData */ - case PVRSRV_PHYS_HEAP_GPU_SECURE: -#endif - case PVRSRV_PHYS_HEAP_FW_CODE: - case PVRSRV_PHYS_HEAP_FW_PRIV_DATA: - case PVRSRV_PHYS_HEAP_FW_MAIN: - { - *ppsFwHeap = psDevInfo->psFirmwareMainHeap; - break; - } - case PVRSRV_PHYS_HEAP_FW_CONFIG: - { - *ppsFwHeap = psDevInfo->psFirmwareConfigHeap; - break; - } - case PVRSRV_PHYS_HEAP_FW_PREMAP0: - case PVRSRV_PHYS_HEAP_FW_PREMAP1: - case PVRSRV_PHYS_HEAP_FW_PREMAP2: - case PVRSRV_PHYS_HEAP_FW_PREMAP3: - case PVRSRV_PHYS_HEAP_FW_PREMAP4: - case PVRSRV_PHYS_HEAP_FW_PREMAP5: - case PVRSRV_PHYS_HEAP_FW_PREMAP6: - case PVRSRV_PHYS_HEAP_FW_PREMAP7: - { - IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0; - - PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID"); - *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID]; - break; - } - default: - { - PVR_DPF((PVR_DBG_ERROR, "%s: invalid phys heap", __func__)); - eError = PVRSRV_ERROR_INVALID_PARAMS; - break; - } - } - - return eError; -} - -/* - * Firmware-only allocation (which are initialised by the host) must be aligned to the SLC cache line size. - * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems - * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't - * make it into the SLC cache because it has been already loaded when accessing the content of the first allocation. 
- */ -static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_DEVMEM_SIZE_T uiSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - const IMG_CHAR *pszText, - DEVMEM_MEMDESC **ppsMemDescPtr) -{ - IMG_DEV_VIRTADDR sTmpDevVAddr; - PVRSRV_ERROR eError; - DEVMEM_HEAP *psFwHeap; - IMG_DEVMEM_ALIGN_T uiAlign; - - PVR_DPF_ENTERED; - - /* Enforce the standard pre-fix naming scheme callers must follow */ - PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); - - /* Imported from AppHint , flag to poison allocations when freed */ - uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; - - eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); - if (eError != PVRSRV_OK) - { - PVR_DPF_RETURN_RC(eError); - } - - if (psFwHeap == psDevInfo->psFirmwareConfigHeap) - { - /* - * All structures allocated from the Firmware Config subheap must start at the same pre-determined - * offsets, regardless of the system's page size (e.g. 4k,16k,64k). The alignment requirement is - * satisfied for virtual addresses during the mapping stage. Physical allocations do not take - * alignment into consideration. - * VZ drivers usually preallocate and premap the entire Firmware heap range. Any allocations from - * this heap are physical alloc only, having their device VAs derived from their PAs. This makes - * it impossible to fulfil alignment requirements. - * To work around this limitation, allocation sizes are rounded to the nearest multiple of 64kb, - * regardless of the actual size of object. - */ - uiAlign = RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY; - - uiSize = ((uiSize + RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY - 1) / - RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) * - RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY; - } - else - { - uiAlign = (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))); - } - - eError = DevmemAllocateAndMap(psFwHeap, - uiSize, - uiAlign, - uiFlags, - pszText, - ppsMemDescPtr, - &sTmpDevVAddr); - - PVR_DPF_RETURN_RC(eError); -} - -static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEVMEM_SIZE_T uiSize, - IMG_DEVMEM_ALIGN_T uiAlign, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - const IMG_CHAR *pszText, - DEVMEM_MEMDESC **ppsMemDescPtr) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; - IMG_DEV_VIRTADDR sTmpDevVAddr; - PVRSRV_ERROR eError; - DEVMEM_HEAP *psFwHeap; - - PVR_DPF_ENTERED; - - /* Enforce the standard pre-fix naming scheme callers must follow */ - PVR_ASSERT((pszText != NULL) && - (pszText[0] == 'F') && (pszText[1] == 'w') && - (pszText[2] == 'E') && (pszText[3] == 'x')); - - /* Imported from AppHint , flag to poison allocations when freed */ - uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; - - eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); - if (eError != PVRSRV_OK) - { - PVR_DPF_RETURN_RC(eError); - } - - eError = DevmemAllocateExportable(psDeviceNode, - uiSize, - uiAlign, - DevmemGetHeapLog2PageSize(psFwHeap), - uiFlags, - pszText, - ppsMemDescPtr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError)); - PVR_DPF_RETURN_RC(eError); - } - - /* - We need to map it so the heap for this allocation - is set - */ - eError = DevmemMapToDevice(*ppsMemDescPtr, - psDevInfo->psFirmwareMainHeap, - &sTmpDevVAddr); - if (eError != PVRSRV_OK) - { - DevmemFree(*ppsMemDescPtr); - PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError)); - } - - PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr); -} - 
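/*
 * Illustrative sketch only, not part of this patch nor of the rgxfwutils.h
 * being removed here: a minimal, hypothetical caller of the DevmemFwAllocate()
 * helper shown above, assuming the usual PVR services headers are in scope.
 * It shows the two conventions the helper enforces: the allocation label must
 * carry the "Fw" prefix (checked by the PVR_ASSERT), and the
 * PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT encoded in the flags steers
 * _SelectDevMemHeap() to the firmware heap. RGXFWIF_EXAMPLE is an invented
 * structure name used purely for illustration.
 */
static PVRSRV_ERROR ExampleAllocFwStruct(PVRSRV_RGXDEV_INFO *psDevInfo,
                                         DEVMEM_MEMDESC **ppsMemDesc)
{
	PVRSRV_ERROR eError;

	/* RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (defined further down this header) is
	 * GPU uncached, CPU write-combined, zeroed on alloc and hinted at the
	 * FW_MAIN physical heap. */
	eError = DevmemFwAllocate(psDevInfo,
	                          sizeof(RGXFWIF_EXAMPLE),        /* hypothetical FW structure */
	                          RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
	                          "FwExampleStruct",              /* label must start with "Fw" */
	                          ppsMemDesc);
	PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");

	/* The matching teardown for such an allocation is DevmemFwUnmapAndFree(). */
	return PVRSRV_OK;
}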
-static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - const IMG_CHAR *pszText, - DEVMEM_MEMDESC **ppsMemDescPtr) -{ - IMG_DEV_VIRTADDR sTmpDevVAddr; - PVRSRV_ERROR eError; - DEVMEM_HEAP *psFwHeap; - IMG_UINT32 ui32Align; - - PVR_DPF_ENTERED; - - /* Enforce the standard pre-fix naming scheme callers must follow */ - PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); - ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); - - /* Imported from AppHint , flag to poison allocations when freed */ - uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; - - eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); - if (eError != PVRSRV_OK) - { - PVR_DPF_RETURN_RC(eError); - } - - eError = DevmemAllocateSparse(psDevInfo->psDeviceNode, - uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - pui32MappingTable, - ui32Align, - DevmemGetHeapLog2PageSize(psFwHeap), - uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING, - pszText, - ppsMemDescPtr); - if (eError != PVRSRV_OK) - { - PVR_DPF_RETURN_RC(eError); - } - /* - We need to map it so the heap for this allocation - is set - */ - eError = DevmemMapToDevice(*ppsMemDescPtr, - psFwHeap, - &sTmpDevVAddr); - if (eError != PVRSRV_OK) - { - DevmemFree(*ppsMemDescPtr); - PVR_DPF_RETURN_RC(eError); - } - - PVR_DPF_RETURN_RC(eError); -} - - -static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo, - DEVMEM_MEMDESC *psMemDesc) -{ - PVR_DPF_ENTERED1(psMemDesc); - - DevmemReleaseDevVirtAddr(psMemDesc); - DevmemFree(psMemDesc); - - PVR_DPF_RETURN; -} - - -/* - * This function returns the value of the hardware register RGX_CR_TIMER - * which is a timer counting in ticks. - */ - -static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); - - /* - * In order to avoid having to issue three 32-bit reads to detect the - * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated - * in the MSB of the high 32-bit word. If the wrap happens, we just read - * the register again (it will not wrap again so soon). - */ - if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK) - { - ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); - } - - return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT; -} - -/* - * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes. - * Otherwise this allocation is only used by the FW. 
- * Therefore the GPU cache doesn't need coherency, and write-combine will - * suffice on the CPU side (WC buffer will be flushed at the first kick) - */ -#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \ - PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ - PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) - -#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ - PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ - PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) - -#define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ - PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ - PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG)) - -#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ - PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ - PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) - -/* Firmware memory that is not accessible by the CPU. */ -#define RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ - PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) - -/* Firmware shared memory that is supposed to be read-only to the CPU. - * In reality it isn't due to ZERO_ON_ALLOC which enforces CPU_WRITEABLE - * flag on the allocations. */ -#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | \ - PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ - PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) - -/* data content being kept from previous boot cycles from physical memory must not be cleared during allocation */ -#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? 
(~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL)) - -/****************************************************************************** - * RGXSetFirmwareAddress Flags - *****************************************************************************/ -#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */ -#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer, - otherwise RGXUnsetFirmwareAddress() must be call when finished. */ - -IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); -PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); - -#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) -IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); -PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -#if defined(SUPPORT_TBI_INTERFACE) -IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); -PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, - IMG_UINT32 ui32ConfigFlags, - IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32FwOsCfgFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32HWPerfCountersDataSize, - IMG_UINT32 ui32RenderKillingCtl, - IMG_UINT32 ui32CDMTDMKillingCtl, - IMG_UINT32 *pui32TPUTrilinearFracMask, - IMG_UINT32 *pui32USRMNumRegions, - IMG_UINT64 *pui64UVBRMNumRegions, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, - IMG_BOOL bSPUClockGating, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2, - IMG_UINT32 ui32AvailableSPUMask, - IMG_UINT32 ui32AvailableRACMask); - - - -void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*************************************************************************/ /*! -@Function RGXSetupFwAllocation - -@Description Sets a pointer in a firmware data structure. - -@Input psDevInfo Device Info struct -@Input uiAllocFlags Flags determining type of memory allocation -@Input ui32Size Size of memory allocation -@Input pszName Allocation label -@Input psFwPtr Address of the firmware pointer to set -@Input ppvCpuPtr Address of the cpu pointer to set -@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo, - PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, - IMG_UINT32 ui32Size, - const IMG_CHAR *pszName, - DEVMEM_MEMDESC **ppsMemDesc, - RGXFWIF_DEV_VIRTADDR *psFwPtr, - void **ppvCpuPtr, - IMG_UINT32 ui32DevVAFlags); - -/*************************************************************************/ /*! -@Function RGXSetFirmwareAddress - -@Description Sets a pointer in a firmware data structure. 
- -@Input ppDest Address of the pointer to set -@Input psSrc MemDesc describing the pointer -@Input ui32Flags Any combination of RFW_FWADDR_*_FLAG - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, - DEVMEM_MEMDESC *psSrc, - IMG_UINT32 uiOffset, - IMG_UINT32 ui32Flags); - - -/*************************************************************************/ /*! -@Function RGXSetMetaDMAAddress - -@Description Fills a Firmware structure used to setup the Meta DMA with two - pointers to the same data, one on 40 bit and one on 32 bit - (pointer in the FW memory space). - -@Input ppDest Address of the structure to set -@Input psSrcMemDesc MemDesc describing the pointer -@Input psSrcFWDevVAddr Firmware memory space pointer - -@Return void -*/ /**************************************************************************/ -void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, - DEVMEM_MEMDESC *psSrcMemDesc, - RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, - IMG_UINT32 uiOffset); - - -/*************************************************************************/ /*! -@Function RGXUnsetFirmwareAddress - -@Description Unsets a pointer in a firmware data structure - -@Input psSrc MemDesc describing the pointer - -@Return void -*/ /**************************************************************************/ -void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc); - -PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue); -PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue); - -/*************************************************************************/ /*! -@Function FWCommonContextAllocate - -@Description Allocate a FW common context. This allocates the HW memory - for the context, the CCB and wires it all together. - -@Input psConnection Connection this context is being created on -@Input psDeviceNode Device node to create the FW context on - (must be RGX device node) -@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant which - which represents the requestor of this FWCC -@Input eDM Data Master type -@Input psServerMMUContext Server MMU memory context. -@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use - as the FW context or NULL if this function - should allocate it -@Input ui32AllocatedOffset Offset into pre-allocate MemDesc to use - as the FW context. 
If psAllocatedMemDesc - is NULL then this parameter is ignored -@Input psFWMemContextMemDesc MemDesc of the FW memory context this - common context resides on -@Input psContextStateMemDesc FW context state (context switch) MemDesc -@Input ui32CCBAllocSizeLog2 Size of the CCB for this context -@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context -@Input ui32ContextFlags Flags which specify properties of the context -@Input i32Priority Priority of the context -@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run -@Input ui64RobustnessAddress Address for FW to signal a context reset -@Input psInfo Structure that contains extra info - required for the creation of the context - (elements might change from core to core) -@Return PVRSRV_OK if the context was successfully created -*/ /**************************************************************************/ -PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, - RGXFWIF_DM eDM, - SERVER_MMU_CONTEXT *psServerMMUContext, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - DEVMEM_MEMDESC *psContextStateMemDesc, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); - -void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 *pui32LastResetJobRef); - -PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); - -PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, - SERVER_MMU_CONTEXT *psServerMMUContext, - PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); - -PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - IMG_UINT32 ui32ContextFlags); - -/*! -******************************************************************************* -@Function RGXScheduleProcessQueuesKM - -@Description Software command complete handler - (sends uncounted kicks for all the DMs through the MISR) - -@Input hCmdCompHandle RGX device node - -@Return None -******************************************************************************/ -void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); - -#if defined(SUPPORT_VALIDATION) -/*! 
-******************************************************************************* -@Function RGXScheduleRgxRegCommand - -@Input psDevInfo Device Info struct -@Input ui64RegVal Value to write into FW register -@Input ui64Size Register size -@Input ui32Offset Register Offset -@Input bWriteOp Register Write or Read toggle - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 ui64RegVal, - IMG_UINT64 ui64Size, - IMG_UINT32 ui32Offset, - IMG_BOOL bWriteOp); - -#endif - -/*! -******************************************************************************* - -@Function RGXInstallProcessQueuesMISR - -@Description Installs the MISR to handle Process Queues operations - -@Input phMISR Pointer to the MISR handler -@Input psDeviceNode RGX Device node - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode); - -PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll); - -/*************************************************************************/ /*! -@Function RGXSendCommandWithPowLockAndGetKCCBSlot - -@Description Sends a command to a particular DM without honouring - pending cache operations but taking the power lock. - -@Input psDevInfo Device Info -@Input psKCCBCmd The cmd to send. -@Input ui32PDumpFlags Pdump flags -@Output pui32CmdKCCBSlot When non-NULL: - - Pointer on return contains the kCCB slot - number in which the command was enqueued. - - Resets the value of the allotted slot to - RGXFWIF_KCCB_RTN_SLOT_RST -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_KCCB_CMD *psKCCBCmd, - IMG_UINT32 ui32PDumpFlags, - IMG_UINT32 *pui32CmdKCCBSlot); - -#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ - RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) - -/*************************************************************************/ /*! -@Function RGXSendCommandAndGetKCCBSlot - -@Description Sends a command to a particular DM without honouring - pending cache operations or the power lock. - The function flushes any deferred KCCB commands first. - -@Input psDevInfo Device Info -@Input psKCCBCmd The cmd to send. -@Input uiPdumpFlags PDump flags. -@Output pui32CmdKCCBSlot When non-NULL: - - Pointer on return contains the kCCB slot - number in which the command was enqueued. - - Resets the value of the allotted slot to - RGXFWIF_KCCB_RTN_SLOT_RST -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_KCCB_CMD *psKCCBCmd, - PDUMP_FLAGS_T uiPdumpFlags, - IMG_UINT32 *pui32CmdKCCBSlot); - -#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ - RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) - -/*************************************************************************/ /*! -@Function RGXScheduleCommandAndGetKCCBSlot - -@Description Sends a command to a particular DM and kicks the firmware but - first schedules any commands which have to happen before - handle - -@Input psDevInfo Device Info -@Input eDM To which DM the cmd is sent. 
-@Input psKCCBCmd The cmd to send. -@Input ui32PDumpFlags PDump flags -@Output pui32CmdKCCBSlot When non-NULL: - - Pointer on return contains the kCCB slot - number in which the command was enqueued. - - Resets the value of the allotted slot to - RGXFWIF_KCCB_RTN_SLOT_RST - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_DM eKCCBType, - RGXFWIF_KCCB_CMD *psKCCBCmd, - IMG_UINT32 ui32PDumpFlags, - IMG_UINT32 *pui32CmdKCCBSlot); -#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \ - RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL) - -/*************************************************************************/ /*! -@Function RGXWaitForKCCBSlotUpdate - -@Description Waits until the required kCCB slot value is updated by the FW - (signifies command completion). Additionally, dumps a relevant - PDump poll command. - -@Input psDevInfo Device Info -@Input ui32SlotNum The kCCB slot number to wait for an update on -@Input ui32PDumpFlags - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32SlotNum, - IMG_UINT32 ui32PDumpFlags); - -PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*************************************************************************/ /*! -@Function PVRSRVRGXFrameworkCopyCommand - -@Description Copy framework command into FW addressable buffer - -@param psDeviceNode -@param psFWFrameworkMemDesc -@param pbyGPUFRegisterList -@param ui32FrameworkRegisterSize - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEM_MEMDESC *psFWFrameworkMemDesc, - IMG_PBYTE pbyGPUFRegisterList, - IMG_UINT32 ui32FrameworkRegisterSize); - - -/*************************************************************************/ /*! -@Function PVRSRVRGXFrameworkCreateKM - -@Description Create FW addressable buffer for framework - -@param psDeviceNode -@param ppsFWFrameworkMemDesc -@param ui32FrameworkRegisterSize - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode, - DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc, - IMG_UINT32 ui32FrameworkRegisterSize); - - -/*************************************************************************/ /*! -@Function RGXPollForGPCommandCompletion - -@Description Polls for completion of a submitted GP command. Poll is done - on a value matching a masked read from the address. - -@Input psDevNode Pointer to device node struct -@Input pui32LinMemAddr CPU linear address to poll -@Input ui32Value Required value -@Input ui32Mask Mask - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, - volatile IMG_UINT32 __iomem *pui32LinMemAddr, - IMG_UINT32 ui32Value, - IMG_UINT32 ui32Mask); - -/*************************************************************************/ /*! -@Function RGXStateFlagCtrl - -@Description Set and return FW internal state flags. 
- -@Input psDevInfo Device Info -@Input ui32Config AppHint config flags -@Output pui32State Current AppHint state flag configuration -@Input bSetNotClear Set or clear the provided config flags - -@Return PVRSRV_ERROR -*/ /**************************************************************************/ -PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Config, - IMG_UINT32 *pui32State, - IMG_BOOL bSetNotClear); - -/*! -******************************************************************************* -@Function RGXFWRequestCommonContextCleanUp - -@Description Schedules a FW common context cleanup. The firmware doesn't - block waiting for the resource to become idle but rather - notifies the host that the resources is busy. - -@Input psDeviceNode pointer to device node -@Input psServerCommonContext context to be cleaned up -@Input eDM Data master, to which the cleanup command should - be sent -@Input ui32PDumpFlags PDump continuous flag - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, - RGXFWIF_DM eDM, - IMG_UINT32 ui32PDumpFlags); - -/*! -******************************************************************************* -@Function RGXFWRequestHWRTDataCleanUp - -@Description Schedules a FW HWRTData memory cleanup. The firmware doesn't - block waiting for the resource to become idle but rather - notifies the host that the resources is busy. - -@Input psDeviceNode pointer to device node -@Input psHWRTData firmware address of the HWRTData for clean-up - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, - PRGXFWIF_HWRTDATA psHWRTData); - -/*! -******************************************************************************* -@Function RGXFWRequestFreeListCleanUp - -@Description Schedules a FW FreeList cleanup. The firmware doesn't block - waiting for the resource to become idle but rather notifies the - host that the resources is busy. - -@Input psDeviceNode pointer to device node -@Input psFWFreeList firmware address of the FreeList for clean-up - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, - PRGXFWIF_FREELIST psFWFreeList); - -/*! -******************************************************************************* -@Function RGXFWRequestZSBufferCleanUp - -@Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block - waiting for the resource to become idle but rather notifies the - host that the resources is busy. - -@Input psDevInfo pointer to device node -@Input psFWZSBuffer firmware address of the ZS Buffer for clean-up - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, - PRGXFWIF_ZSBUFFER psFWZSBuffer); - -PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, - CONNECTION_DATA *psConnection, - PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_INT32 i32Priority, - RGXFWIF_DM eDM); - -/*! -******************************************************************************* -@Function RGXFWSetHCSDeadline - -@Description Requests the Firmware to set a new Hard Context Switch timeout - deadline. 
Context switches that surpass that deadline cause the - system to kill the currently running workloads. - -@Input psDeviceNode pointer to device node -@Input ui32HCSDeadlineMs The deadline in milliseconds. - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32HCSDeadlineMs); - -/*! -******************************************************************************* -@Function RGXFWChangeOSidPriority - -@Description Requests the Firmware to change the priority of an operating - system. Higher priority number equals higher priority on the - scheduling system. - -@Input psDevInfo pointer to device info -@Input ui32OSid The OSid whose priority is to be altered -@Input ui32Priority The new priority number for the specified OSid - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, - IMG_UINT32 ui32Priority); - -/*! -******************************************************************************* -@Function RGXFWHealthCheckCmd - -@Description Ping the firmware to check if it is responsive. - -@Input psDevInfo pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXFWSetFwOsState - -@Description Requests the Firmware to change the guest OS Online states. - This should be initiated by the VMM when a guest VM comes - online or goes offline. If offline, the FW offloads any current - resource from that OSID. The request is repeated until the FW - has had time to free all the resources or has waited for - workloads to finish. - -@Input psDevInfo pointer to device info -@Input ui32OSid The Guest OSid whose state is being altered -@Input eOSOnlineState The new state (Online or Offline) - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32OSid, - RGXFWIF_OS_STATE_CHANGE eOSOnlineState); - -#if defined(SUPPORT_AUTOVZ) -/*! -******************************************************************************* -@Function RGXUpdateAutoVzWdgToken - -@Description If the driver-firmware connection is active, read the - firmware's watchdog token and copy its value back into the OS - token. This indicates to the firmware that this driver is alive - and responsive. - -@Input psDevInfo pointer to device info -******************************************************************************/ -void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo); -#endif - -/*! -******************************************************************************* -@Function RGXFWConfigPHR - -@Description Configure the Periodic Hardware Reset functionality - -@Input psDevInfo pointer to device info -@Input ui32PHRMode desired PHR mode - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32PHRMode); -/*! 
-******************************************************************************* -@Function RGXFWConfigWdg - -@Description Configure the Safety watchdog trigger period - -@Input psDevInfo pointer to device info -@Input ui32WdgPeriodUs requested period in microseconds - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32WdgPeriod); - -/*! -******************************************************************************* -@Function RGXCheckFirmwareCCB - -@Description Processes all commands that are found in the Firmware CCB. - -@Input psDevInfo pointer to device - -@Return None -******************************************************************************/ -void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXCheckForStalledClientContexts - -@Description Checks all client contexts, for the device with device info - provided, to see if any are waiting for a fence to signal and - optionally force signalling of the fence for the context which - has been waiting the longest. - This function is called by RGXUpdateHealthStatus() and also - may be invoked from other trigger points. - -@Input psDevInfo pointer to device info -@Input bIgnorePrevious If IMG_TRUE, any stalled contexts will be - indicated immediately, rather than only - checking against any previous stalled contexts - -@Return None -******************************************************************************/ -void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious); - -/*! -******************************************************************************* -@Function RGXUpdateHealthStatus - -@Description Tests a number of conditions which might indicate a fatal error - has occurred in the firmware. The result is stored in the - device node eHealthStatus. - -@Input psDevNode Pointer to device node structure. -@Input bCheckAfterTimePassed When TRUE, the function will also test - for firmware queues and polls not changing - since the previous test. - - Note: if not enough time has passed since the - last call, false positives may occur. - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, - IMG_BOOL bCheckAfterTimePassed); - - -PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); - -#if defined(SUPPORT_AUTOVZ) -/*! -******************************************************************************* -@Function RGXUpdateAutoVzWatchdog - -@Description Updates AutoVz watchdog that maintains the fw-driver connection - -@Input psDevNode Pointer to device node structure. -******************************************************************************/ -void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode); -#endif /* SUPPORT_AUTOVZ */ - -void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile, - IMG_UINT32 ui32VerbLevel); - -/*! 
-******************************************************************************* -@Function AttachKickResourcesCleanupCtls - -@Description Attaches the cleanup structures to a kick command so that - submission reference counting can be performed when the - firmware processes the command - -@Output apsCleanupCtl Array of CleanupCtl structure pointers to populate. -@Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out. -@Input eDM Which data master is the subject of the command. -@Input bKick TRUE if the client originally wanted to kick this DM. -@Input psRTDataCleanup Optional RTData cleanup associated with the command. -@Input psZBuffer Optional ZSBuffer associated with the command. - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, - IMG_UINT32 *pui32NumCleanupCtl, - RGXFWIF_DM eDM, - IMG_BOOL bKick, - RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, - RGX_ZSBUFFER_DATA *psZSBuffer, - RGX_ZSBUFFER_DATA *psMSAAScratchBuffer); - -/*! -******************************************************************************* -@Function RGXResetHWRLogs - -@Description Resets the HWR Logs buffer - (the hardware recovery count is not reset) - -@Input psDevNode Pointer to the device - -@Return PVRSRV_ERROR PVRSRV_OK on success. - Otherwise, a PVRSRV error code -******************************************************************************/ -PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode); - -/*! -******************************************************************************* -@Function RGXGetPhyAddr - -@Description Get the physical address of a PMR at an offset within it - -@Input psPMR PMR of the allocation -@Input ui32LogicalOffset Logical offset - -@Output psPhyAddr Physical address of the allocation - -@Return PVRSRV_ERROR PVRSRV_OK on success. - Otherwise, a PVRSRV error code -******************************************************************************/ -PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, - IMG_DEV_PHYADDR *psPhyAddr, - IMG_UINT32 ui32LogicalOffset, - IMG_UINT32 ui32Log2PageSize, - IMG_UINT32 ui32NumOfPages, - IMG_BOOL *bValid); - -#if defined(PDUMP) -/*! -******************************************************************************* -@Function RGXPdumpDrainKCCB - -@Description Wait for the firmware to execute all the commands in the kCCB - -@Input psDevInfo Pointer to the device -@Input ui32WriteOffset Woff we have to POL for the Roff to be equal to - -@Return PVRSRV_ERROR PVRSRV_OK on success. - Otherwise, a PVRSRV error code -******************************************************************************/ -PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32WriteOffset); -#endif /* PDUMP */ - -/*! -******************************************************************************* -@Function RGXFwRawHeapAllocMap - -@Description Register and maps to device, a raw firmware physheap - -@Return PVRSRV_ERROR PVRSRV_OK on success. - Otherwise, a PVRSRV error code -******************************************************************************/ -PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID, - IMG_DEV_PHYADDR sDevPAddr, - IMG_UINT64 ui64DevPSize); - -/*! 
-******************************************************************************* -@Function RGXFwRawHeapUnmapFree - -@Description Unregister and unmap from device, a raw firmware physheap - -******************************************************************************/ -void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32OSID); - -/*! -******************************************************************************* -@Function RGXRiscvHalt - -@Description Halt the RISC-V FW core (required for certain operations - done through Debug Module) - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXRiscvIsHalted - -@Description Check if the RISC-V FW is halted - -@Input psDevInfo Pointer to device info - -@Return IMG_BOOL -******************************************************************************/ -IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXRiscvResume - -@Description Resume the RISC-V FW core - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo); - -/*! -******************************************************************************* -@Function RGXRiscvReadReg - -@Description Read a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 *pui32Value); - -/*! -******************************************************************************* -@Function RGXRiscvPollReg - -@Description Poll for a value from the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value); - -/*! -******************************************************************************* -@Function RGXRiscvWriteReg - -@Description Write a value to the given RISC-V register (GPR or CSR) - -@Input psDevInfo Pointer to device info -@Input ui32RegAddr RISC-V register address -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32Value); - -/*! 
-******************************************************************************* -@Function RGXRiscvPollMem - -@Description Poll for a value at the given address in RISC-V memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Expected value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 ui32Value); - -/*! -******************************************************************************* -@Function RGXRiscvDmiOp - -@Description Acquire the powerlock and perform an operation on the RISC-V - Debug Module Interface, but only if the GPU is powered on. - -@Input psDevInfo Pointer to device info -@InOut pui64DMI Encoding of a request for the RISC-V Debug - Module with same format as the 'dmi' register - from the RISC-V debug specification (v0.13+). - On return, this is updated with the result of - the request, encoded the same way. - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 *pui64DMI); - -/*! -******************************************************************************* -@Function RGXReadFWModuleAddr - -@Description Read a value at the given address in META or RISCV memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in META or RISCV memory space - -@Output pui32Value Read value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32Addr, - IMG_UINT32 *pui32Value); - -/*! -******************************************************************************* -@Function RGXWriteFWModuleAddr - -@Description Write a value to the given address in META or RISC memory space - -@Input psDevInfo Pointer to device info -@Input ui32Addr Address in RISC-V memory space -@Input ui32Value Write value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32MemAddr, - IMG_UINT32 ui32Value); - -/*! -******************************************************************************* -@Function RGXGetFwMapping - -@Description Retrieve any of the CPU Physical Address, Device Physical - Address or the raw value of the page table entry associated - with the firmware virtual address given. - -@Input psDevInfo Pointer to device info -@Input ui32FwVA The Fw VA that needs decoding -@Output psCpuPA Pointer to the resulting CPU PA -@Output psDevPA Pointer to the resulting Dev PA -@Output pui64RawPTE Pointer to the raw Page Table Entry value - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT32 ui32FwVA, - IMG_CPU_PHYADDR *psCpuPA, - IMG_DEV_PHYADDR *psDevPA, - IMG_UINT64 *pui64RawPTE); - -#if defined(SUPPORT_WORKLOAD_ESTIMATION) -/*! 
-******************************************************************************* -@Function RGXIsValidWorkloadEstCCBCommand - -@Description Checks if command type can be used for workload estimation - -@Input eType Command type to check - -@Return IMG_BOOL -******************************************************************************/ -INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType); - -#endif - -/*! -******************************************************************************* -@Function RGXFWInjectFault - -@Description Injecting firmware fault to validate recovery through Host - -@Input psDevInfo Pointer to device info - -@Return PVRSRV_ERROR -******************************************************************************/ -PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo); - -#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) -#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." -#endif - -#if defined(SUPPORT_AUTOVZ_HW_REGS) -/* AutoVz with hw support */ -#define KM_GET_FW_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3) -#define KM_GET_OS_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2) -#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val) - -#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1) -#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) -#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) -#else - -#if defined(SUPPORT_AUTOVZ) -#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) -#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) -#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val) -#endif /* defined(SUPPORT_AUTOVZ) */ - -#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1))) -/* native, static-vz and AutoVz using shared memory */ -#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) -#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) -#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val) -#else -/* dynamic-vz & nohw */ -#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) -#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) -#define KM_SET_OS_CONNECTION(val, psDevInfo) -#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */ -#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ - -#if defined(SUPPORT_AUTOVZ) -#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS -#else -#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START -#endif - -#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) -#define KM_FW_CONNECTION_IS(val, psDevInfo) (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val) - -#endif /* RGXFWUTILS_H */ -/****************************************************************************** - End of file (rgxfwutils.h) 
-******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.c index 444f3453ed3b..67812e3c5d0a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxhwperf.c @@ -76,7 +76,89 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) -IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); +static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = +{ +#define X(a, b, c, d, e, f, g) {a, b, 0xFF, d, e, f, NULL} +RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST, +RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST +#undef X +}; + +IMG_INTERNAL IMG_UINT32 +RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) +{ + *ppsModel = gasCntBlkTypeModel; + return ARRAY_SIZE(gasCntBlkTypeModel); +} + +/*! +******************************************************************************* + @Function RGXHWPerfMaxDefinedBlks + + @Description Return the number of valid block-IDs for the given device node + + @Input (PVRSRV_RGXDEV_INFO *) pvDevice device-node to query + + @Returns (IMG_UINT32) Number of block-IDs (RGX_CNTBLK_ID) + valid for this device. +******************************************************************************/ +IMG_INTERNAL IMG_UINT32 +RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; + IMG_UINT32 uiRetVal; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psHWPBlkConfig; + IMG_UINT32 uiNumArrayEls, ui; + + uiRetVal = RGX_CNTBLK_ID_DIRECT_LAST; + + uiNumArrayEls = RGXGetHWPerfBlockConfig(&psHWPBlkConfig); + + if (psHWPBlkConfig == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL Config Block", __func__)); + return 0; + } + PVR_ASSERT(uiNumArrayEls > 0); + + /* Iterate over each block-ID and find the number of instances of each + * block which are present for this device type. We only query the + * Indirect blocks as their presence varies according to GPU. All direct + * blocks have an entry - but they may not be physically present. 
+ */ + for (ui = RGX_CNTBLK_ID_DIRECT_LAST; ui < uiNumArrayEls; ui++) + { + if (rgx_hwperf_blk_present(&psHWPBlkConfig[ui], (void *)psDevInfo, &sRtInfo)) + { + uiRetVal += sRtInfo.uiNumUnits; + PVR_DPF((PVR_DBG_VERBOSE, "%s: Block %u, NumUnits %u, Total %u", + __func__, ui, sRtInfo.uiNumUnits, uiRetVal)); + } +#ifdef DEBUG + else + { + if (psHWPBlkConfig[ui].uiCntBlkIdBase == RGX_CNTBLK_ID_RAC0) + { + if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, + RAY_TRACING_ARCH) > 2U) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present", + __func__, ui)); + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present", + __func__, ui)); + } + } +#endif + } + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Num Units = %u", __func__, uiRetVal)); + + return uiRetVal; +} static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock( RGX_HWPERF_BVNC_BLOCK * const psBlocks, @@ -126,7 +208,7 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R if ((pszBVNC = RGXDevBVNCString(psDevInfo))) { size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1); - OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); + OSStringSafeCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength); } else @@ -159,8 +241,11 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R } #ifdef SUPPORT_WORKLOAD_ESTIMATION - /* Not a part of BVNC feature line and so doesn't need the feature supported check */ - psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + /* Not a part of BVNC feature line and so doesn't need the feature supported check */ + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; + } #endif /* Define the HW counter block counts. 
*/ @@ -233,6 +318,9 @@ PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, R OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks)); } + /* The GPU core count is overwritten by the FW */ + psBVNC->ui16BvncGPUCores = 0; + return PVRSRV_OK; } @@ -246,12 +334,12 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( IMG_UINT32 ui32ArrayLen, RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs) { - PVRSRV_ERROR eError = PVRSRV_OK; - RGXFWIF_KCCB_CMD sKccbCmd; - DEVMEM_MEMDESC* psFwBlkConfigsMemDesc; - RGX_HWPERF_CONFIG_CNTBLK* psFwArray; - IMG_UINT32 ui32kCCBCommandSlot; - PVRSRV_RGXDEV_INFO *psDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sKccbCmd; + DEVMEM_MEMDESC *psFwBlkConfigsMemDesc; + RGX_HWPERF_CONFIG_CNTBLK *psFwArray; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevice; PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", PVRSRV_ERROR_INVALID_PARAMS); @@ -260,7 +348,7 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", PVRSRV_ERROR_INVALID_PARAMS); @@ -269,15 +357,14 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( PVR_DPF_ENTERED; - /* Fill in the command structure with the parameters needed - */ + /* Fill in the command structure with the parameters needed */ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS; sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32CtrlWord = ui32CtrlWord; sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen; /* used for passing counters config to the Firmware, write-only for the CPU */ eError = DevmemFwAllocate(psDevice, - sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, + sizeof(RGX_HWPERF_CONFIG_CNTBLK) * ui32ArrayLen, PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | @@ -291,7 +378,7 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs, - psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); + psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); @@ -303,26 +390,26 @@ PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, PDUMP_FLAGS_CONTINUOUS); - /* Ask the FW to carry out the HWPerf configuration command - */ + /* Ask the FW to carry out the HWPerf configuration command. */ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, - RGXFWIF_DM_GP, - &sKccbCmd, - PDUMP_FLAGS_CONTINUOUS, - &ui32kCCBCommandSlot); + RGXFWIF_DM_GP, + &sKccbCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); /* Wait for FW to complete */ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); - /* Release temporary memory used for block configuration - */ + /* Release temporary memory used for block configuration. 
*/ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); - PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf %d counter blocks configured and ENABLED", + ui32ArrayLen)); PVR_DPF_RETURN_OK; @@ -376,7 +463,6 @@ PVRSRV_ERROR RGXHWPerfConfigureCounters( RGX_KM_HWPERF_DEVDATA* psDevData; RGX_HWPERF_DEVICE *psHWPerfDev; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); /* Validate input argument values supplied by the caller */ if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) @@ -395,6 +481,8 @@ PVRSRV_ERROR RGXHWPerfConfigureCounters( { psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevData->psRgxDevNode, PVRSRV_ERROR_NOT_IMPLEMENTED); + /* Call the internal server API */ eError = PVRSRVRGXConfigureHWPerfBlocksKM(NULL, psDevData->psRgxDevNode, @@ -480,7 +568,7 @@ PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 *pui32BlockCount, IMG_UINT32 *pui32EnabledBlockIDs) { - IMG_UINT32 ui32LastIDIdx = 0; + IMG_UINT32 ui32LastIdx = 0; IMG_UINT32 i; PVRSRV_ERROR eError = PVRSRV_OK; @@ -507,14 +595,14 @@ PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, continue; } - if (ui32LastIDIdx + 1 > ui32ArrayLen) + if (ui32LastIdx > ui32ArrayLen) { PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks.")); PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error); } - pui32EnabledBlockIDs[ui32LastIDIdx] = psHWPerfCtl->sBlkCfg[i].uiBlockID; - ui32LastIDIdx += 1; + pui32EnabledBlockIDs[ui32LastIdx] = psHWPerfCtl->sBlkCfg[i].uiBlockID; + ui32LastIdx++; } } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.c index 4a30e3c4f36b..eca671110a4f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxinit.c @@ -47,19 +47,20 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #endif -#include "log2.h" #include "img_defs.h" #include "pvr_notifier.h" #include "pvrsrv.h" #include "pvrsrv_bridge_init.h" #include "rgx_bridge_init.h" #include "syscommon.h" -#include "rgx_heaps.h" +#include "rgx_heaps_server.h" #include "rgxheapconfig.h" -#include "rgxdefs_km.h" #include "rgxpower.h" #include "tlstream.h" #include "pvrsrv_tlstreams.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +#include "pvr_ricommon.h" +#endif #include "rgxinit.h" #include "rgxbvnc.h" @@ -73,21 +74,28 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxmem.h" #include "sync_internal.h" #include "pvrsrv_apphint.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "rgxfwdbg.h" #include "info_page.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +#include "rgxfwimageutils.h" +#endif #include "rgxutils.h" #include "rgxfwutils.h" #include "rgx_fwif_km.h" #include "rgxmmuinit.h" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +#include "rgxmipsmmuinit.h" +#include "physmem.h" +#endif #include "devicemem_utils.h" #include "devicemem_server.h" #include "physmem_osmem.h" #include "physmem_lma.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "rgxhwperf.h" #include "htbserver.h" @@ -96,8 +104,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgx_compat_bvnc.h" -#include "rgx_heaps.h" - #include "rgxta3d.h" #include "rgxtimecorr.h" #include "rgxshader.h" @@ -112,29 +118,31 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #if defined(SUPPORT_WORKLOAD_ESTIMATION) #include "rgxworkest.h" #endif + #if defined(SUPPORT_PDVFS) #include "rgxpdvfs.h" #endif -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -#include "rgxsoctimer.h" -#endif #if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) #include "pdump_physmem.h" #endif +#if defined(PDUMP) +#include "rgxpdump_common.h" +#endif + static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString); static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed); -static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue, IMG_UINT64 ui64SPUResetValue); +static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode); static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); -#if (RGX_NUM_OS_SUPPORTED > 1) -static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid); +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) +static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID); static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); -#endif +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ /* Services internal heap identification used in this file only */ #define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ @@ -151,43 +159,53 @@ static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); #define VAR(x) #x -#define MAX_BVNC_LEN (12) -#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1) - -static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo, PVRSRV_DEVICE_NODE *psDeviceNode); #if !defined(NO_HARDWARE) /*************************************************************************/ /*! @Function SampleIRQCount @Description Utility function taking snapshots of RGX FW interrupt count. -@Input paui32Input A pointer to RGX FW IRQ count array. - Size of the array should be equal to RGX FW thread - count. 
-@Input paui32Output A pointer to array containing sampled RGX FW - IRQ counts -@Return IMG_BOOL Returns IMG_TRUE, if RGX FW IRQ is not equal to +@Input psDevInfo Device Info structure + +@Return IMG_BOOL Returns IMG_TRUE if RGX FW IRQ is not equal to sampled RGX FW IRQ count for any RGX FW thread. -*/ /**************************************************************************/ -static INLINE IMG_BOOL SampleIRQCount(const volatile IMG_UINT32 *paui32Input, - volatile IMG_UINT32 *paui32Output) + */ /**************************************************************************/ +static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) { - IMG_UINT32 ui32TID; IMG_BOOL bReturnVal = IMG_FALSE; + volatile IMG_UINT32 *pui32SampleIrqCount = psDevInfo->aui32SampleIRQCount; + IMG_UINT32 ui32IrqCnt; - for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) +#if defined(RGX_FW_IRQ_OS_COUNTERS) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - if (paui32Output[ui32TID] != paui32Input[ui32TID]) + bReturnVal = IMG_TRUE; + } + else + { + get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_DRIVER_ID, psDevInfo); + + if (ui32IrqCnt != pui32SampleIrqCount[RGXFW_THREAD_0]) { - /** - * we are handling any unhandled interrupts here so align the host - * count with the FW count - */ + pui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt; + bReturnVal = IMG_TRUE; + } + } +#else + IMG_UINT32 ui32TID; + + for_each_irq_cnt(ui32TID) + { + get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo); - /* Sample the current count from the FW _after_ we've cleared the interrupt. */ - paui32Output[ui32TID] = paui32Input[ui32TID]; + /* treat unhandled interrupts here to align host count with fw count */ + if (pui32SampleIrqCount[ui32TID] != ui32IrqCnt) + { + pui32SampleIrqCount[ui32TID] = ui32IrqCnt; bReturnVal = IMG_TRUE; } } +#endif return bReturnVal; } @@ -201,7 +219,7 @@ static INLINE IMG_BOOL SampleIRQCount(const volatile IMG_UINT32 *paui32Input, */ /**************************************************************************/ static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) { - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo) || (psDevInfo->ui32HostSafetyEventMask == 0)) { return 0; } @@ -213,6 +231,43 @@ static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) } } +/*************************************************************************/ /*! 
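The safety handling below distinguishes uncorrected from corrected firmware faults by bit position: each fault appears to have a detect bit in the low part of RGX_CR_FAULT_FW_STATUS and a corrected twin offset by (MMU_CORRECT_SHIFT - MMU_DETECT_SHIFT). A reduced, self-contained model of that decode is sketched here; the reason codes and the offset value used in main() are invented for illustration.

#include <stdint.h>
#include <stdio.h>

enum reset_reason { REASON_NONE, REASON_ECC_OK, REASON_ECC_ERR };

static enum reset_reason decode_fw_fault(uint32_t fault_status, unsigned int corrected_off)
{
    enum reset_reason reason = REASON_NONE;
    unsigned int bit;

    for (bit = 0; bit < corrected_off; bit++) {
        if (fault_status & (1u << bit)) {
            /* Uncorrected fault: the most severe outcome wins. */
            reason = REASON_ECC_ERR;
        } else if (fault_status & (1u << (bit + corrected_off))) {
            /* Corrected fault: only report it if nothing worse was seen. */
            if (reason != REASON_ECC_ERR)
                reason = REASON_ECC_OK;
        }
    }

    return reason;
}

int main(void)
{
    /* Bit 0 set = one uncorrected fault, assuming a corrected-bit offset of 16. */
    printf("reason = %d\n", (int)decode_fw_fault(0x1u, 16u));
    return 0;
}

The real handler refines this further by mapping individual detect bits to parity, lockstep or ECC reset reasons via GetContextResetReason.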
+@Function GetContextResetReason +@Description Maps a bit corresponding to an uncorrected FW fault reported in + RGX_CR_FAULT_FW_STATUS register into RGX_CONTEXT_RESET_REASON value +@Input ui32FaultFlag Offset into the bitmask of uncorrected FW faults + psDevInfo Device Info structure +@Return RGX_CONTEXT_RESET_REASON Reset Reason + */ /**************************************************************************/ +static inline RGX_CONTEXT_RESET_REASON GetContextResetReason(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FaultFlag) +{ + RGX_CONTEXT_RESET_REASON eResetReason; + + if (ui32FaultFlag == RGX_CR_FAULT_FW_STATUS_MMU_PTE_PARITY_DETECT_SHIFT) + { + IMG_UINT32 ui32ParityStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_PT_PARITY_STATUS); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_PT_PARITY_CLEAR, ui32ParityStatus); + + PVR_DPF((PVR_DBG_ERROR, "%s: MMU PTE parity errors: 0x%x", __func__, ui32ParityStatus)); + + eResetReason = RGX_CONTEXT_RESET_REASON_FW_PTE_PARITY_ERR; + } + else if (ui32FaultFlag == RGX_CR_FAULT_FW_STATUS_PARITY_DETECT_SHIFT) + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_PARITY_ERR; + } + else if (ui32FaultFlag == RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_SHIFT) + { + eResetReason = RGX_CONTEXT_RESET_REASON_DCLS_ERR; + } + else + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR; + } + + return eResetReason; +} + /*************************************************************************/ /*! @Function RGXSafetyEventHandler @Description Handles the Safety Events that the Host is responsible for @@ -220,122 +275,160 @@ static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) */ /**************************************************************************/ static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo) { - IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo); - RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire PowerLock (device: %p, error: %s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + return; + } - if (ui32HostSafetyStatus != 0) + if (psDevInfo->bRGXPowered) { - /* clear the safety bus events handled by the Host */ - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, ui32HostSafetyStatus); + RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE; + IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo); + IMG_UINT32 ui32HostSafetyStatusBitPos = 0; - if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT)) + if (ui32HostSafetyStatus != 0) { - IMG_UINT32 ui32FaultFlag; - IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS); - IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT - - RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT; + /* clear the safety bus events handled by the Host */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, ui32HostSafetyStatus); - PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW)); - - for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++) + if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT)) { - if (BIT_ISSET(ui32FaultFW, ui32FaultFlag)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).", - __func__, 
BIT(ui32FaultFlag))); - eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR; - } - else if BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).", - __func__, BIT(ui32FaultFlag))); + IMG_UINT32 ui32FaultFlag; + IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS); + IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT - + RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT; - /* Only report this if we haven't detected a more serious error */ - if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW)); + + for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++) + { + if (BIT_ISSET(ui32FaultFW, ui32FaultFlag)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).", + __func__, BIT(ui32FaultFlag))); + eResetReason = GetContextResetReason(psDevInfo, ui32FaultFlag); + } + else if BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset) { - eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK; + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).", + __func__, BIT(ui32FaultFlag))); + + /* Only report this if we haven't detected a more serious error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR && + eResetReason != RGX_CONTEXT_RESET_REASON_FW_PTE_PARITY_ERR && + eResetReason != RGX_CONTEXT_RESET_REASON_FW_PARITY_ERR && + eResetReason != RGX_CONTEXT_RESET_REASON_DCLS_ERR) + + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK; + } } } - } - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW); - } + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW); + } - if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_WDT_TIMEOUT_SHIFT)) - { - volatile RGXFWIF_POW_STATE ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState; +#if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PIPELINED_DATAMASTERS_VERSION) && + RGX_GET_FEATURE_VALUE(psDevInfo, PIPELINED_DATAMASTERS_VERSION) > 0) + { + ui32HostSafetyStatusBitPos = RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_GT0__WDT_TIMEOUT_SHIFT; + } + else +#endif + { + ui32HostSafetyStatusBitPos = RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_SHIFT; + } - if (ePowState == RGXFWIF_POW_ON) + if (BIT_ISSET(ui32HostSafetyStatus, ui32HostSafetyStatusBitPos)) { - PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__)); + volatile RGXFWIF_POW_STATE ePowState; + + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, + INVALIDATE); + ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState; - /* Only report this if we haven't detected a more serious error */ - if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + if (ePowState != RGXFWIF_POW_OFF) { - eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; + PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__)); + + /* Only report this if we haven't detected a more serious error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR && + eResetReason != RGX_CONTEXT_RESET_REASON_FW_PTE_PARITY_ERR) + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; + } } } - } - /* Notify client and system layer of any error */ - if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) - { - PVRSRV_DEVICE_NODE *psDevNode = 
psDevInfo->psDeviceNode; - PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + /* Notify client and system layer of any error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; - /* Client notification of device error will be achieved by - * clients calling UM function RGXGetLastDeviceError() */ - psDevInfo->eLastDeviceError = eResetReason; + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = eResetReason; - /* Notify system layer of any error */ - if (psDevConfig->pfnSysDevErrorNotify) - { - PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + /* Notify system layer of any error */ + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; - sErrorData.eResetReason = eResetReason; + sErrorData.eResetReason = eResetReason; - psDevConfig->pfnSysDevErrorNotify(psDevConfig, - &sErrorData); + psDevConfig->pfnSysDevErrorNotify(psDevConfig->hSysData, + &sErrorData); + } } } } + + PVRSRVPowerUnlock(psDeviceNode); } static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) { #if defined(PVRSRV_DEBUG_LISR_EXECUTION) PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; - IMG_UINT32 ui32TID; + IMG_UINT32 ui32idx; #endif RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo); #if defined(PVRSRV_DEBUG_LISR_EXECUTION) PVR_DPF((PVR_DBG_ERROR, - "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %llu", + "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %" IMG_UINT64_FMTSPEC, psDeviceNode->sDevId.ui32InternalID, psDeviceNode->sLISRExecutionInfo.ui32Status, psDeviceNode->sLISRExecutionInfo.ui64Clockns)); - for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + for_each_irq_cnt(ui32idx) { PVR_DPF((PVR_DBG_ERROR, - "RGX FW thread %u: InterruptCountSnapshot: 0x%X", - ui32TID, psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID])); + MSG_IRQ_CNT_TYPE " %u: InterruptCountSnapshot: 0x%X", + ui32idx, psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx])); } #else PVR_DPF((PVR_DBG_ERROR, "No further information available. 
Please enable PVRSRV_DEBUG_LISR_EXECUTION")); #endif - return SampleIRQCount(psDevInfo->psRGXFWIfFwOsData->aui32InterruptCount, - psDevInfo->aui32SampleIRQCount); + return SampleIRQCount(psDevInfo); } void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) { IMG_BOOL bScheduleMISR; - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { bScheduleMISR = IMG_TRUE; } @@ -379,6 +472,26 @@ static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo, } } +static __maybe_unused IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + return RGXAckHwIrq(psDevInfo, + RGX_CR_META_SP_MSLVIRQSTATUS, + RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN, + RGX_CR_META_SP_MSLVIRQSTATUS, + RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK); +} + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +static IMG_BOOL RGXAckIrqMIPS(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + return RGXAckHwIrq(psDevInfo, + RGX_CR_MIPS_WRAPPER_IRQ_STATUS, + RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN, + RGX_CR_MIPS_WRAPPER_IRQ_CLEAR, + RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN); +} +#endif + static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) { /* status & clearing registers are available on both Host and Guests @@ -392,19 +505,36 @@ static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) ~RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK); } +static PVRSRV_ERROR RGXSetAckIrq(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if defined(RGX_SINGLE_IRQ_WORKAROUND) + /* Register appropriate mechanism for clearing hw interrupts */ + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + psDevInfo->pfnRGXAckIrq = NULL; + } + else +#endif + { + psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; + } + + return PVRSRV_OK; +} + static IMG_BOOL RGX_LISRHandler(void *pvData) { PVRSRV_DEVICE_NODE *psDeviceNode = pvData; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; IMG_BOOL bIrqAcknowledged = IMG_FALSE; #if defined(PVRSRV_DEBUG_LISR_EXECUTION) - IMG_UINT32 ui32TID; + IMG_UINT32 ui32idx, ui32IrqCnt; - for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + for_each_irq_cnt(ui32idx) { - UPDATE_LISR_DBG_SNAPSHOT(ui32TID, psFwOsData->aui32InterruptCount[ui32TID]); + get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); + UPDATE_LISR_DBG_SNAPSHOT(ui32idx, ui32IrqCnt); } UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT); @@ -421,8 +551,7 @@ static IMG_BOOL RGX_LISRHandler(void *pvData) { bIrqAcknowledged = IMG_TRUE; - if (SampleIRQCount(psFwOsData->aui32InterruptCount, - psDevInfo->aui32SampleIRQCount) || bSafetyEvent) + if (bSafetyEvent || SampleIRQCount(psDevInfo)) { UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED); UPDATE_MISR_DBG_COUNTER(); @@ -449,11 +578,16 @@ static IMG_BOOL RGX_LISRHandler(void *pvData) } else { +#if defined(SUPPORT_AUTOVZ) /* AutoVz drivers rebooting while the firmware is active must acknowledge * and clear the hw IRQ line before the RGXInit() has finished. 
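The RGXAckIrq* helpers above all reduce to the same status/clear handshake: read an IRQ status register, test the event bit, and write the clear register only when the event belongs to this device. A generic sketch of that shape follows; the flat mmio_read32/mmio_write32 accessors and the register offsets are placeholders for the driver's OSReadHWReg32/OSWriteHWReg32 calls.

#include <stdbool.h>
#include <stdint.h>

static uint32_t mmio_read32(volatile uint32_t *regs, uint32_t byte_off)
{
    return regs[byte_off / 4u];
}

static void mmio_write32(volatile uint32_t *regs, uint32_t byte_off, uint32_t val)
{
    regs[byte_off / 4u] = val;
}

static bool ack_hw_irq(volatile uint32_t *regs,
                       uint32_t status_reg, uint32_t event_mask,
                       uint32_t clear_reg, uint32_t clear_val)
{
    uint32_t status = mmio_read32(regs, status_reg);

    if ((status & event_mask) == 0u)
        return false;            /* not our interrupt: leave it pending */

    mmio_write32(regs, clear_reg, clear_val);
    return true;                 /* acknowledged, safe to schedule the MISR */
}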
*/ - if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp && - (psDevInfo->pfnRGXAckIrq != NULL) && - psDevInfo->pfnRGXAckIrq(psDevInfo))) + if ((psDevInfo->pfnRGXAckIrq != NULL) && + psDevInfo->pfnRGXAckIrq(psDevInfo)) + { + bIrqAcknowledged = IMG_TRUE; + } + else +#endif { UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED); } @@ -505,9 +639,13 @@ static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) { PVRSRV_DEVICE_NODE *psDeviceNode = psDevice; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + const RGXFWIF_SYSDATA *psFwSysData; PVRSRV_ERROR eError = PVRSRV_OK; + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, + INVALIDATE); + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE) { RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); @@ -541,40 +679,34 @@ static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) #define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE #define GPU_ACTIVE RGXFWIF_GPU_UTIL_STATE_ACTIVE #define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED +#define GPU_INACTIVE RGXFWIF_GPU_UTIL_STATE_INACTIVE #define MAX_ITERATIONS 64 +#define MAX_DIFF_TIME_NS (300000ULL) +#define MAX_DIFF_DM_TIME_NS (MAX_DIFF_TIME_NS >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT) static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_HANDLE hGpuUtilUser, RGXFWIF_GPU_UTIL_STATS *psReturnStats) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGXFWIF_GPU_STATS sStats; RGXFWIF_GPU_UTIL_STATS *psAggregateStats; - IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_OS_SUPPORTED][RGXFWIF_GPU_UTIL_STATE_NUM]; - IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_OS_SUPPORTED]; - IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_OS_SUPPORTED]; - IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_OS_SUPPORTED]; - IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_OS_SUPPORTED]; + IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM]; + IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_DRIVERS_SUPPORTED]; + IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_DRIVERS_SUPPORTED]; IMG_UINT64 ui64TimeNow; + IMG_UINT64 ui64TimeNowShifted; IMG_UINT32 ui32Attempts; IMG_UINT32 ui32Remainder; - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; IMG_UINT32 ui32MaxDMCount; RGXFWIF_DM eDM; - /***** (1) Initialise return stats *****/ psReturnStats->bValid = IMG_FALSE; - psReturnStats->ui64GpuStatIdle = 0; - psReturnStats->ui64GpuStatActive = 0; - psReturnStats->ui64GpuStatBlocked = 0; - psReturnStats->ui64GpuStatCumulative = 0; - - memset(psReturnStats->aaui64DMOSStatIdle, 0, sizeof(psReturnStats->aaui64DMOSStatIdle)); - memset(psReturnStats->aaui64DMOSStatActive, 0, sizeof(psReturnStats->aaui64DMOSStatActive)); - memset(psReturnStats->aaui64DMOSStatBlocked, 0, sizeof(psReturnStats->aaui64DMOSStatBlocked)); - memset(psReturnStats->aaui64DMOSStatCumulative, 0, sizeof(psReturnStats->aaui64DMOSStatCumulative)); if (hGpuUtilUser == NULL) { @@ -582,81 +714,152 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, } psAggregateStats = hGpuUtilUser; - ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount; + /* decrease by 1 to account for excluding GP DM from the statics */; + 
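The utilisation-statistics hunk that follows replaces the old single snapshot loop with a bounded retry loop: consecutive copies of the firmware-shared block are compared and only accepted when they are close enough in time that the firmware cannot have been updating them in between. Below is a reduced model of that idea, with an invented util_stats layout; the limits mirror the MAX_ITERATIONS and MAX_DIFF_TIME_NS constants added above.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_TRIES        64
#define MAX_DIFF_NS 300000ull

struct util_stats { uint64_t last_time_ns; uint64_t counters[3]; };

static bool snapshot_util_stats(const struct util_stats *shared, /* firmware-updated memory */
                                struct util_stats *out)
{
    struct util_stats prev, cur;
    unsigned int i;

    memcpy(&cur, shared, sizeof(cur));      /* first read */

    for (i = 0; i < MAX_TRIES; i++) {
        prev = cur;
        memcpy(&cur, shared, sizeof(cur));  /* read again */

        /* A large time jump between consecutive reads means the firmware was
         * updating the block under us: discard both copies and retry. */
        if (cur.last_time_ns - prev.last_time_ns > MAX_DIFF_NS)
            continue;

        *out = cur;
        return true;                        /* stable snapshot */
    }

    return false;                           /* caller reports a timeout */
}

The real loop applies the same time-delta test per state counter and per data master, and gives up with PVRSRV_ERROR_TIMEOUT after MAX_ITERATIONS attempts.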
ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1; + + /* Reset temporary counters used in the attempts loop */ + paaui64DMOSTmpCounters = &psAggregateStats->sTempGpuStats.aaaui64DMOSTmpCounters[0]; + paui64DMOSTmpLastWord = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastWord[0]; + paui64DMOSTmpLastState = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastState[0]; + paui64DMOSTmpLastPeriod = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastPeriod[0]; + paui64DMOSTmpLastTime = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastTime[0]; - /* Allocate temporary counters used in the attempts loop */ - paaui64DMOSTmpCounters = OSAllocMem(sizeof(*paaui64DMOSTmpCounters) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paaui64DMOSTmpCounters != NULL, "OSAllocMem:1", failTmpCountersAlloc); - paui64DMOSTmpLastWord = OSAllocMem(sizeof(*paui64DMOSTmpLastWord) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastWord != NULL, "OSAllocMem:2", failTmpLastWordAlloc); - paui64DMOSTmpLastState = OSAllocMem(sizeof(*paui64DMOSTmpLastState) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastState != NULL, "OSAllocMem:3", failTmpLastStateAlloc); - paui64DMOSTmpLastPeriod = OSAllocMem(sizeof(*paui64DMOSTmpLastPeriod) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastPeriod != NULL, "OSAllocMem:4", failTmpLastPeriodAlloc); - paui64DMOSTmpLastTime = OSAllocMem(sizeof(*paui64DMOSTmpLastTime) * ui32MaxDMCount); - PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastTime != NULL, "OSAllocMem:5", failTmpLastTimeAlloc); + RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFW, INVALIDATE); /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */ for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++) { - const volatile IMG_UINT64 *pui64GpuStatsCounters = &psUtilFWCb->aui64GpuStatsCounters[0]; - const volatile IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OS] = &psUtilFWCb->aaui64DMOSLastWord[0]; - const volatile IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OS][RGXFWIF_GPU_UTIL_STATE_NUM] = &psUtilFWCb->aaaui64DMOSStatsCounters[0]; - IMG_UINT64 aui64GpuTmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0}; IMG_UINT64 ui64GpuLastPeriod = 0, ui64GpuLastWord = 0, ui64GpuLastState = 0, ui64GpuLastTime = 0; - IMG_UINT32 i = 0; - /***** (2) Get latest data from shared area *****/ - OSLockAcquire(psDevInfo->hGPUUtilLock); - - /* - * First attempt at detecting if the FW is in the middle of an update. - * This should also help if the FW is in the middle of a 64 bit variable update. 
- */ - while (((ui64GpuLastWord != psUtilFWCb->ui64GpuLastWord) || - (aui64GpuTmpCounters[ui64GpuLastState] != - pui64GpuStatsCounters[ui64GpuLastState])) && - (i < MAX_ITERATIONS)) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - ui64GpuLastWord = psUtilFWCb->ui64GpuLastWord; - ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord); - aui64GpuTmpCounters[GPU_IDLE] = pui64GpuStatsCounters[GPU_IDLE]; - aui64GpuTmpCounters[GPU_ACTIVE] = pui64GpuStatsCounters[GPU_ACTIVE]; - aui64GpuTmpCounters[GPU_BLOCKED] = pui64GpuStatsCounters[GPU_BLOCKED]; + IMG_UINT64 aui64StatsCountersNew[RGXFWIF_GPU_UTIL_STATE_NUM]; + IMG_UINT64 ui64GpuLastWordNew; + RGXFWIF_GPU_STATS sStatsNew; + IMG_UINT32 i = 0; - for (eDM = 0; eDM < ui32MaxDMCount; eDM++) + ui64GpuLastWord = 0; + ui64GpuLastState = 0; + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + /* Copy data from device memory */ + memcpy(&sStatsNew, &psDevInfo->psRGXFWIfGpuUtilFW->sStats[ui32DriverID], sizeof(sStats)); + memcpy(&ui64GpuLastWordNew, &psDevInfo->psRGXFWIfGpuUtilFW->ui64GpuLastWord, sizeof(ui64GpuLastWord)); + memcpy(aui64StatsCountersNew, psDevInfo->psRGXFWIfGpuUtilFW->aui64GpuStatsCounters, sizeof(aui64StatsCountersNew)); + + /* + * First attempt at detecting if the FW is in the middle of an update. + * This should also help if the FW is in the middle of a 64 bit variable update. + * This loop must be fast. Faster than FW updates the stats. + */ + for (i = 0; i < MAX_ITERATIONS; i++) { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + IMG_UINT32 j,k; + IMG_BOOL bRetry = IMG_FALSE; + + if (i > 0) + { + /* On retry keep previous data */ + ui64GpuLastWordNew = ui64GpuLastWord; + memcpy(aui64StatsCountersNew, aui64GpuTmpCounters, sizeof(aui64StatsCountersNew)); + memcpy(&sStatsNew, &sStats, sizeof(sStatsNew)); + } + + /* Copy data from device memory */ + memcpy(&sStats, &psDevInfo->psRGXFWIfGpuUtilFW->sStats[ui32DriverID], sizeof(sStats)); + memcpy(&ui64GpuLastWord, &psDevInfo->psRGXFWIfGpuUtilFW->ui64GpuLastWord, sizeof(ui64GpuLastWord)); + memcpy(aui64GpuTmpCounters, psDevInfo->psRGXFWIfGpuUtilFW->aui64GpuStatsCounters, sizeof(aui64GpuTmpCounters)); + + /* Check for abnormal time difference between reads */ + if (RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWord) - RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWordNew) > MAX_DIFF_TIME_NS) + { + bRetry = IMG_TRUE; + continue; + } + + for (j = 0; j < RGXFWIF_GPU_UTIL_STATE_NUM; j++) + { + /* Check for abnormal time difference between reads */ + if (aui64GpuTmpCounters[j] - aui64StatsCountersNew[j] > MAX_DIFF_TIME_NS) + { + bRetry = IMG_TRUE; + break; + } + } + + if (bRetry) + { + continue; + } + + /* Check for DM counters wrapped or + abnormal time difference between reads. 
+ The DM time is shifted by RGXFWIF_DM_OS_TIMESTAMP_SHIFT */ + for (j = 0; j < RGXFWIF_GPU_UTIL_DM_MAX; j++) + { + if (sStats.aui32DMOSLastWordWrap[j] != sStatsNew.aui32DMOSLastWordWrap[j] || + RGXFWIF_GPU_UTIL_GET_TIME32(sStats.aui32DMOSLastWord[j]) - RGXFWIF_GPU_UTIL_GET_TIME32(sStatsNew.aui32DMOSLastWord[j]) > MAX_DIFF_DM_TIME_NS) + { + bRetry = IMG_TRUE; + break; + } + + for (k = 0; k < RGXFWIF_GPU_UTIL_REDUCED_STATES_NUM; k++) + { + if (sStats.aaui32DMOSCountersWrap[j][k] != sStatsNew.aaui32DMOSCountersWrap[j][k] || + sStats.aaui32DMOSStatsCounters[j][k] - sStatsNew.aaui32DMOSStatsCounters[j][k] > MAX_DIFF_DM_TIME_NS) + { + bRetry = IMG_TRUE; + break; + } + + } + + if (bRetry) + { + break; + } + } + + if (!bRetry) { - paui64DMOSTmpLastWord[eDM][ui32OSid] = paui64DMOSLastWord[eDM][ui32OSid]; - paui64DMOSTmpLastState[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32OSid]); - paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_IDLE] = paaui64DMOSStatsCounters[eDM][ui32OSid][GPU_IDLE]; - paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_ACTIVE] = paaui64DMOSStatsCounters[eDM][ui32OSid][GPU_ACTIVE]; - paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_BLOCKED] = paaui64DMOSStatsCounters[eDM][ui32OSid][GPU_BLOCKED]; + /* Stats are good*/ + break; } } - i++; - } + OSLockRelease(psDevInfo->hGPUUtilLock); - OSLockRelease(psDevInfo->hGPUUtilLock); + ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord); - if (i == MAX_ITERATIONS) - { - PVR_DPF((PVR_DBG_WARNING, - "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); + if (i == MAX_ITERATIONS) + { + PVR_DPF((PVR_DBG_WARNING, + "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); - OSFreeMem(paaui64DMOSTmpCounters); - OSFreeMem(paui64DMOSTmpLastWord); - OSFreeMem(paui64DMOSTmpLastState); - OSFreeMem(paui64DMOSTmpLastPeriod); - OSFreeMem(paui64DMOSTmpLastTime); + return PVRSRV_ERROR_TIMEOUT; + } - return PVRSRV_ERROR_TIMEOUT; - } + for (eDM = 0; eDM < ui32MaxDMCount; eDM++) + { + paui64DMOSTmpLastWord[eDM][ui32DriverID] = + ((IMG_UINT64)sStats.aui32DMOSLastWordWrap[eDM] << 32) + sStats.aui32DMOSLastWord[eDM]; + paui64DMOSTmpLastState[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32DriverID]); + if (paui64DMOSTmpLastState[eDM][ui32DriverID] != GPU_ACTIVE) + { + paui64DMOSTmpLastState[eDM][ui32DriverID] = GPU_INACTIVE; + } + paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_INACTIVE] = (IMG_UINT64)sStats.aaui32DMOSStatsCounters[eDM][GPU_INACTIVE] + + ((IMG_UINT64)sStats.aaui32DMOSCountersWrap[eDM][GPU_INACTIVE] << 32); + paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE] = (IMG_UINT64)sStats.aaui32DMOSStatsCounters[eDM][GPU_ACTIVE] + + ((IMG_UINT64)sStats.aaui32DMOSCountersWrap[eDM][GPU_ACTIVE] << 32); + } + + } /* FOREACH_SUPPORTED_DRIVER(ui32DriverID) */ /***** (3) Compute return stats *****/ @@ -680,24 +883,22 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, psReturnStats->ui64GpuStatActive + psReturnStats->ui64GpuStatBlocked; + /* convert time into the same units as used by fw */ + ui64TimeNowShifted = ui64TimeNow >> RGXFWIF_DM_OS_TIMESTAMP_SHIFT; for (eDM = 0; eDM < ui32MaxDMCount; eDM++) { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - paui64DMOSTmpLastTime[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32OSid]); - paui64DMOSTmpLastPeriod[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, paui64DMOSTmpLastTime[eDM][ui32OSid]); - 
paaui64DMOSTmpCounters[eDM][ui32OSid][paui64DMOSTmpLastState[eDM][ui32OSid]] += paui64DMOSTmpLastPeriod[eDM][ui32OSid]; - + paui64DMOSTmpLastTime[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32DriverID]); + paui64DMOSTmpLastPeriod[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNowShifted , paui64DMOSTmpLastTime[eDM][ui32DriverID]); + paaui64DMOSTmpCounters[eDM][ui32DriverID][paui64DMOSTmpLastState[eDM][ui32DriverID]] += paui64DMOSTmpLastPeriod[eDM][ui32DriverID]; /* Get statistics for a user since its last request */ - psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_IDLE], - psAggregateStats->aaui64DMOSStatIdle[eDM][ui32OSid]); - psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_ACTIVE], - psAggregateStats->aaui64DMOSStatActive[eDM][ui32OSid]); - psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32OSid][GPU_BLOCKED], - psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32OSid]); - psReturnStats->aaui64DMOSStatCumulative[eDM][ui32OSid] = psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid] + - psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid] + - psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid]; + psReturnStats->aaui64DMOSStatInactive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_INACTIVE], + psAggregateStats->aaui64DMOSStatInactive[eDM][ui32DriverID]); + psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE], + psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID]); + psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = psReturnStats->aaui64DMOSStatInactive[eDM][ui32DriverID] + + psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID]; } } @@ -736,11 +937,10 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, for (eDM = 0; eDM < ui32MaxDMCount; eDM++) { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_SUPPORTED_DRIVER(ui32DriverID) { - psAggregateStats->aaui64DMOSStatIdle[eDM][ui32OSid] += psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid]; - psAggregateStats->aaui64DMOSStatActive[eDM][ui32OSid] += psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid]; - psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32OSid] += psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid]; + psAggregateStats->aaui64DMOSStatInactive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatInactive[eDM][ui32DriverID]; + psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID]; } } @@ -751,28 +951,6 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder); psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder); - for (eDM = 0; eDM < ui32MaxDMCount; eDM++) - { - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) - { - psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid] = OSDivide64(psReturnStats->aaui64DMOSStatIdle[eDM][ui32OSid], 1000, &ui32Remainder); - psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid] = OSDivide64(psReturnStats->aaui64DMOSStatActive[eDM][ui32OSid], 1000, &ui32Remainder); - psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid] = 
OSDivide64(psReturnStats->aaui64DMOSStatBlocked[eDM][ui32OSid], 1000, &ui32Remainder); - psReturnStats->aaui64DMOSStatCumulative[eDM][ui32OSid] = OSDivide64(psReturnStats->aaui64DMOSStatCumulative[eDM][ui32OSid], 1000, &ui32Remainder); - } - } - - OSFreeMem(paui64DMOSTmpLastTime); -failTmpLastTimeAlloc: - OSFreeMem(paui64DMOSTmpLastPeriod); -failTmpLastPeriodAlloc: - OSFreeMem(paui64DMOSTmpLastState); -failTmpLastStateAlloc: - OSFreeMem(paui64DMOSTmpLastWord); -failTmpLastWordAlloc: - OSFreeMem(paaui64DMOSTmpCounters); - -failTmpCountersAlloc: /* Check that the return stats make sense */ if (psReturnStats->ui64GpuStatCumulative == 0) { @@ -784,7 +962,23 @@ static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, * When such an event happens frequently, timers or the aggregate * stats might not be accurate... */ +#if defined(VIRTUAL_PLATFORM) + /* To avoid spamming the console logging system on emulated devices, + * we special-case so that we will only produce a single message per + * driver invocation. This should reduce the time spent logging + * information which is not relevant for very slow timers found in + * VP device configurations + */ + static IMG_BOOL bFirstTime = IMG_TRUE; + + if (bFirstTime) + { + bFirstTime = IMG_FALSE; +#endif PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data.")); +#if defined(VIRTUAL_PLATFORM) + } +#endif /* defined(VIRTUAL_PLATFORM) */ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; } @@ -842,20 +1036,23 @@ static void RGX_MISRHandler_Main (void *pvData) PVRSRVNotifyCommandCompletion(psDeviceNode); #if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD) - /* - * Firmware CCB only exists for primary FW thread. Only requirement for - * non primary FW thread(s) to communicate with host driver is in the case - * of PDVFS running on non primary FW thread. 
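The per-DM statistics handled above are kept by the firmware as 32-bit counters plus separate wrap words, and the host widens each pair back to 64 bits before computing periods. The one-liner below shows the reconstruction; the function name is a placeholder.

#include <stdint.h>

static uint64_t widen_counter(uint32_t wrap_count, uint32_t low32)
{
    /* wrap_count records how many times the 32-bit counter rolled over. */
    return ((uint64_t)wrap_count << 32) + low32;
}

The same shift-and-add shape appears for both the aui32DMOSLastWordWrap/aui32DMOSLastWord pair and the per-state counter/wrap pairs in the hunk above.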
- * This requirement is directly handled by the below - */ + /* Normally, firmware CCB only exists for the primary FW thread unless PDVFS + is running on the second[ary] FW thread, here we process said CCB */ RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); #endif - /* Handle Safety events if necessary */ - RGXSafetyEventHandler(psDeviceNode->pvDevice); + /* Only execute SafetyEventHandler if RGX_FEATURE_SAFETY_EVENT is on */ + if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, ECC_RAMS) > 0 || + PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, WATCHDOG_TIMER) || + PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, RISCV_DUAL_LOCKSTEP) || + PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FAULT_DECODE_VERSION) >= 2) + { + /* Handle Safety events if necessary */ + RGXSafetyEventHandler(psDeviceNode->pvDevice); + } /* Signal the global event object */ - PVRSRVSignalGlobalEO(); + PVRSRVSignalDriverWideEO(); /* Process the Firmware CCB for pending commands */ RGXCheckFirmwareCCB(psDeviceNode->pvDevice); @@ -864,8 +1061,11 @@ static void RGX_MISRHandler_Main (void *pvData) RGXTimeCorrRestartPeriodic(psDeviceNode); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Process Workload Estimation Specific commands from the FW */ - WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Process Workload Estimation Specific commands from the FW */ + WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); + } #endif if (psDevInfo->pvAPMISRData == NULL) @@ -875,86 +1075,277 @@ static void RGX_MISRHandler_Main (void *pvData) } #endif /* !defined(NO_HARDWARE) */ -static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, - PVRSRV_DEVICE_CONFIG *psDevConfig) + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(PDUMP) +static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_RGXDEV_INFO *psDevInfo) { - PVRSRV_ERROR eError = PVRSRV_OK; + PMR *psFWDataPMR; + RGXMIPSFW_BOOT_DATA *psBootData; + IMG_DEV_PHYADDR sTmpAddr; + IMG_UINT32 ui32BootConfOffset, ui32ParamOffset, i; + PVRSRV_ERROR eError; + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); - /* Save information used on power transitions for later - * (when RGXStart and RGXStop are executed) + psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); + ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); + ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET; + + /* The physical addresses used by a pdump player will be different + * than the ones we have put in the MIPS bootloader configuration data. + * We have to tell the pdump player to replace the original values with the real ones. 
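RGXPDumpBootldrData below locates each field of the MIPS boot-configuration block with offsetof() and asks PDump to patch the correct physical address at that byte offset inside the firmware data image. A host-side model of the same offset-based patching; the boot_data layout and the example addresses are invented for the sketch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct boot_data {
    uint64_t reg_base;
    uint64_t stack_phys;
    uint64_t pt_phys[4];
};

static void patch_u64(uint8_t *image, size_t offset, uint64_t value)
{
    memcpy(image + offset, &value, sizeof(value));   /* unaligned-safe write */
}

int main(void)
{
    uint8_t image[sizeof(struct boot_data)] = { 0 };

    patch_u64(image, offsetof(struct boot_data, reg_base),   0x90000000ull);
    patch_u64(image, offsetof(struct boot_data, stack_phys), 0xA0000000ull);
    patch_u64(image, offsetof(struct boot_data, pt_phys[1]), 0xB0001000ull);

    printf("boot data image patched (%zu bytes)\n", sizeof(image));
    return 0;
}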
*/ - psDevInfo->sLayerParams.psDevInfo = psDevInfo; - psDevInfo->sLayerParams.psDevConfig = psDevConfig; -#if defined(PDUMP) - psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; -#endif - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || - RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + PDUMPCOMMENT(psDeviceNode, "Pass new boot parameters to the FW"); + + /* Rogue Registers physical address */ + ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64RegBase); + + eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME, + 0x0, + psFWDataPMR, + ui32ParamOffset, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) { - IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; + PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError)); + return eError; + } - if (psDevInfo->psDeviceNode->bAutoVzFwIsUp) - { - /* If AutoVz firmware is up at this stage, the driver initialised it - * during a previous life-cycle. The firmware's memory is already pre-mapped - * and the MMU page tables reside in the predetermined memory carveout. - * The Kernel MMU Context created in this life-cycle is a dummy structure - * that is not used for mapping. - * To program the Device's BIF with the correct PC address, use the base - * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ -#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) - sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; -#else - PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig, - PHYS_HEAP_USAGE_FW_MAIN); - eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG; - PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)"); - - sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr + - (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); -#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */ - } - else - { - eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, - &sKernelMMUCtxPCAddr); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); - return eError; - } - } + /* Page Table physical Address */ + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXBootldrDataInit: MMU_AcquireBaseAddr failed (%u)", + eError)); + return eError; + } - psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, + (void **)&psBootData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire pointer to FW data (%s)", + __func__, PVRSRVGetErrorString(eError))); + return eError; } -#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) - /* Send information used on power transitions to the trusted device as - * in this setup the driver cannot start/stop the GPU and perform resets - */ - if (psDevConfig->pfnTDSetPowerParams) + psBootData = IMG_OFFSET_ADDR(psBootData, ui32BootConfOffset); + + for (i = 0; i < psBootData->ui32PTNumPages; i++) { - PVRSRV_TD_POWER_PARAMS sTDPowerParams; + ui32ParamOffset = ui32BootConfOffset + + offsetof(RGXMIPSFW_BOOT_DATA, aui64PTPhyAddr[0]) + + i * sizeof(psBootData->aui64PTPhyAddr[0]); - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || - RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + eError = 
PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + psFWDataPMR, + 0, + ui32ParamOffset, + PDUMP_FLAGS_CONTINUOUS, + MMU_LEVEL_1, + sTmpAddr.uiAddr, + i << psBootData->ui32PTLog2PageSize); + if (eError != PVRSRV_OK) { - sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; + PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError)); + return eError; } - - eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, - &sTDPowerParams); } - else + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); + + /* Stack physical address */ + ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64StackPhyAddr); + + eError = PDumpMemLabelToMem64(psFWDataPMR, + psFWDataPMR, + RGXGetFWImageSectionOffset(NULL, MIPS_STACK), + ui32ParamOffset, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!")); - eError = PVRSRV_ERROR_NOT_IMPLEMENTED; + PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError)); + return eError; } -#endif return eError; } +#endif /* PDUMP */ + +static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; + IMG_BOOL bPremappedFw; + + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVCFG, psDevConfig, PVRSRV_OK); + + /* Save information used on power transitions for later + * (when RGXStart and RGXStop are executed) + */ +#if defined(PDUMP) + psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + /* Volcanic drivers with security support and premapped fw heaps + * always have their fw heap premapped by the TEE */ + bPremappedFw = IMG_TRUE; +#else + /* If AutoVz firmware is up at this stage, the driver initialised it + * during a previous life-cycle. The firmware's memory is already pre-mapped + * and the MMU page tables reside in the predetermined memory carveout. + * The Kernel MMU Context created in this life-cycle is a dummy structure + * that is not used for mapping. 
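The reworked RGXSetPowerParams around this point picks the kernel MMU page-catalogue address from one of two sources: a premapped firmware heap (AutoVz reboot or TEE-managed security setups) supplies a fixed carveout base, otherwise the catalogue built by this driver instance is queried. A condensed sketch of that selection; the types, helper bodies and the example base address are placeholders.

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t addr; } dev_phys_addr_t;

/* Placeholder sources for the two possible page-catalogue bases. */
static dev_phys_addr_t premap_heap_base(void)
{
    dev_phys_addr_t base = { 0x80000000ull };   /* invented carveout base */
    return base;
}

static int mmu_acquire_base_addr(dev_phys_addr_t *out)
{
    out->addr = 0;                              /* stub for MMU_AcquireBaseAddr */
    return 0;
}

static int select_fw_page_catalogue(bool fw_is_premapped, dev_phys_addr_t *pc_addr)
{
    if (fw_is_premapped) {
        /* Page tables already live in a reserved region set up by an earlier
         * driver life-cycle or by the trusted environment. */
        *pc_addr = premap_heap_base();
        return 0;
    }

    /* Normal case: use the catalogue built by this driver instance. */
    return mmu_acquire_base_addr(pc_addr);
}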
+ * To program the Device's BIF with the correct PC address, use the base + * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ + bPremappedFw = psDevInfo->psDeviceNode->bAutoVzFwIsUp; +#endif + + if (bPremappedFw) + { + IMG_DEV_PHYADDR sDevPAddr; + PHYS_HEAP *psFwPageTableHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; + + PVR_LOG_RETURN_IF_FALSE((NULL != psFwPageTableHeap), + "Firmware Page Table heap not defined.", + PVRSRV_ERROR_INVALID_HEAP); + + PhysHeapGetDevPAddr(psFwPageTableHeap, &sDevPAddr); + sKernelMMUCtxPCAddr.uiAddr = sDevPAddr.uiAddr; + psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, + &sKernelMMUCtxPCAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); + return eError; + } + + psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else + { + PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR); + PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); + IMG_DEV_PHYADDR sPhyAddr; + IMG_BOOL bValid; + +#if defined(SUPPORT_ALT_REGBASE) + psDevInfo->sLayerParams.sGPURegAddr = psDevConfig->sAltRegsGpuPBase; +#else + /* The physical address of the GPU registers needs to be translated + * in case we are in a LMA scenario + */ + PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], + 1, + &sPhyAddr, + &(psDevConfig->sRegsCpuPBase)); + + psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr; +#endif + + /* Register bank must be aligned to 512KB (as per the core integration) to + * prevent the FW accessing incorrect registers */ + if ((psDevInfo->sLayerParams.sGPURegAddr.uiAddr & 0x7FFFFU) != 0U) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Register bank must be aligned to 512KB, but current address (0x%016"IMG_UINT64_FMTSPECX") is not", + psDevInfo->sLayerParams.sGPURegAddr.uiAddr)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + eError = RGXGetPhyAddr(psFWCodePMR, + &sPhyAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE), + OSGetPageShift(), /* FW will be using the same page size as the OS */ + 1, + &bValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address")); + return eError; + } + + psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr; + + eError = RGXGetPhyAddr(psFWDataPMR, + &sPhyAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA), + OSGetPageShift(), + 1, + &bValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address")); + return eError; + } + + psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr; + + eError = RGXGetPhyAddr(psFWCodePMR, + &sPhyAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE), + OSGetPageShift(), + 1, + &bValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address")); + return eError; + } + + psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr; + + psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr; + + psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid; + } +#endif +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && 
!defined(SUPPORT_SECURITY_VALIDATION) + /* Send information used on power transitions to the trusted device as + * in this setup the driver cannot start/stop the GPU and perform resets + */ + if (psDevConfig->pfnTDSetPowerParams) + { + PVRSRV_TD_POWER_PARAMS sTDPowerParams; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; + } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + sTDPowerParams.sGPURegAddr = psDevInfo->sLayerParams.sGPURegAddr; + sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr; + sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr; + sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr; + } +#endif + + eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, + &sTDPowerParams); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!")); + eError = PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif + + return eError; +} + +#if defined(RGX_FEATURE_AXI_ACE_BIT_MASK) /* RGXSystemGetFabricCoherency */ @@ -993,13 +1384,10 @@ PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, return PVRSRV_ERROR_BAD_MAPPING; } - if (psDevConfig->psDevNode != NULL) - { - bPowerDown = (psDevConfig->psDevNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF); - } + bPowerDown = ! PVRSRVIsSystemPowered(psDevConfig->psDevNode); /* Power-up the device as required to read the registers */ - if (bPowerDown) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig) && bPowerDown) { eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_ON); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); @@ -1009,7 +1397,7 @@ PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, value NO_COHERENCY 0x0 {SoC does not support any form of Coherency} value ACE_LITE_COHERENCY 0x1 {SoC supports ACE-Lite or I/O Coherency} value FULL_ACE_COHERENCY 0x2 {SoC supports full ACE or 2-Way Coherency} */ - ui32DeviceFabricCoherency = OSReadHWReg32(pvRegsBaseKM, RGX_CR_SOC_AXI); + ui32DeviceFabricCoherency = OSReadHWReg32((void __iomem *)pvRegsBaseKM, RGX_CR_SOC_AXI); PVR_LOG(("AXI fabric coherency (RGX_CR_SOC_AXI): 0x%x", ui32DeviceFabricCoherency)); #if defined(DEBUG) if (ui32DeviceFabricCoherency & ~((IMG_UINT32)RGX_CR_SOC_AXI_MASKFULL)) @@ -1021,7 +1409,7 @@ PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, ui32DeviceFabricCoherency &= ~((IMG_UINT32)RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK); ui32DeviceFabricCoherency >>= RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT; - if (bPowerDown) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig) && bPowerDown) { eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); @@ -1049,22 +1437,16 @@ PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, break; } #else /* !defined(NO_HARDWARE) */ -#if defined(RGX_FEATURE_GPU_CPU_COHERENCY) - *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE; - eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; - ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; -#else - *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; - eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; - ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY; -#endif + 
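The coherency probe above reads RGX_CR_SOC_AXI, keeps only the coherency-support field and shifts it down to a small value (none, ACE-Lite or full ACE). The helper below shows the generic clear-mask/shift extraction used with these registers; the mask, shift and field width in main() are made up for the example.

#include <stdint.h>
#include <stdio.h>

static uint32_t extract_field(uint32_t reg, uint32_t field_clrmsk, uint32_t field_shift)
{
    /* By the register-header convention, the CLRMSK has the field bits cleared,
     * i.e. it masks everything outside the field. */
    reg &= ~field_clrmsk;
    return reg >> field_shift;
}

int main(void)
{
    /* Assume a 2-bit field at bits [1:0]: 0 = none, 1 = ACE-Lite, 2 = full ACE. */
    uint32_t soc_axi = 0x2u;
    printf("coherency support: %u\n", extract_field(soc_axi, 0xFFFFFFFCu, 0));
    return 0;
}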
*peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY; #endif /* !defined(NO_HARDWARE) */ - OSCreateKMAppHintState(&pvAppHintState); + OSCreateAppHintState(&pvAppHintState); ui32AppHintDefault = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FabricCoherencyOverride, + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FabricCoherencyOverride, &ui32AppHintDefault, &ui32AppHintFabricCoherency); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); #if defined(SUPPORT_SECURITY_VALIDATION) /* Temporarily disable coherency */ @@ -1116,42 +1498,41 @@ PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, *peCacheSnoopingMode = eDeviceCacheSnoopingMode; return PVRSRV_OK; } +#endif /* RGXSystemHasFBCDCVersion31 */ static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) { -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32FBCDCVersionOverride = 0; -#endif - -#if defined(SUPPORT_VALIDATION) - void *pvAppHintState = NULL; - - IMG_UINT32 ui32AppHintDefault; - OSCreateKMAppHintState(&pvAppHintState); - ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride, - &ui32AppHintDefault, &ui32FBCDCVersionOverride); - OSFreeKMAppHintState(pvAppHintState); +#if defined(HW_ERN_66622_BIT_MASK) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - if (ui32FBCDCVersionOverride > 0) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622)) +#endif { - if (ui32FBCDCVersionOverride == 2) { - return IMG_TRUE; + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + return IMG_TRUE; + } } } +#if defined(HW_ERN_66622_BIT_MASK) else -#endif { + +#if !defined(NO_HARDWARE) if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) { - return IMG_TRUE; + PVR_DPF((PVR_DBG_ERROR, + "%s: System uses FBCDC3.1 but GPU doesn't support it!", + __func__)); } +#endif } +#endif /* defined(HW_ERN_66622_BIT_MASK) */ return IMG_FALSE; } @@ -1161,9 +1542,13 @@ static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) */ static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode) { - /* Volcanic cores don't use TFBC */ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + return psDevInfo->ui32TFBCLossyGroup; +#else PVR_UNREFERENCED_PARAMETER(psDeviceNode); return 0; +#endif } /* @@ -1174,56 +1559,79 @@ static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, { MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL; - /* bKernelMemoryCtx is only used for rogue/oceanic cores */ - PVR_UNREFERENCED_PARAMETER(bKernelFWMemoryCtx); - if (psDeviceNode->pfnCheckDeviceFeature) { - psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + psMMUDevAttrs = bKernelFWMemoryCtx ? 
+ psDeviceNode->psFirmwareMMUDevAttrs : + psDeviceNode->psMMUDevAttrs; + } + else +#endif + { + PVR_UNREFERENCED_PARAMETER(bKernelFWMemoryCtx); + psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; + } } return psMMUDevAttrs; } + +/* + RGXDevSnoopMode +*/ +static PVRSRV_DEVICE_SNOOP_MODE RGXDevSnoopMode(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + return PVRSRV_DEVICE_SNOOP_NONE; +} + /* * RGXInitDevPart2 */ -PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32DeviceFlags, - IMG_UINT32 ui32HWPerfHostFilter, - RGX_ACTIVEPM_CONF eActivePMConf, - IMG_UINT32 ui32AvailablePowUnitsMask, - IMG_UINT32 ui32AvailableRACMask) +PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_INIT_APPHINTS *psApphints) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; IMG_UINT32 ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; - - /* Assume system layer has turned power on by this point, required before powering device */ - psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; +#endif +#if defined(PDUMP) && defined(NO_HARDWARE) + IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_DEINIT; +#endif PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 2"); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(PDUMP) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + RGXPDumpBootldrData(psDeviceNode, psDevInfo); + } +#endif #if defined(TIMING) || defined(DEBUG) OSUserModeAccessToPerfCountersEn(); #endif /* Initialise Device Flags */ psDevInfo->ui32DeviceFlags = 0; - RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); + RGXSetDeviceFlags(psDevInfo, psApphints->ui32DeviceFlags, IMG_TRUE); /* Allocate DVFS Table (needs to be allocated before GPU trace events * component is initialised because there is a dependency between them) */ psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable))); PVR_LOG_GOTO_IF_NOMEM(psDevInfo->psGpuDVFSTable, eError, ErrorExit); - if (psDevInfo->ui32HWPerfHostFilter == 0) { - RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); + RGXHWPerfHostSetEventFilter(psDevInfo, psApphints->ui32HWPerfHostFilter); } /* If HWPerf enabled allocate all resources for the host side buffer. 
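The power-unit handling in the RGXInitDevPart2 hunk here builds an all-units mask as (1 << unit count) - 1, rejects an AppHint mask that would leave no SPU powered, and masks off bits for units the part does not have. A stand-alone version of that check, with placeholder names:

#include <stdint.h>
#include <stdio.h>

static int clamp_power_unit_mask(uint32_t requested, unsigned int unit_count,
                                 uint32_t *effective)
{
    uint32_t all_units = (1u << unit_count) - 1u;

    if ((requested & all_units) == 0u)
        return -1;                    /* at least one unit must stay powered */

    *effective = requested & all_units;
    return 0;
}

int main(void)
{
    uint32_t eff;

    /* 4 units present, caller asks for bits 0..7: bits 4..7 are dropped. */
    if (clamp_power_unit_mask(0xFFu, 4u, &eff) == 0)
        printf("effective mask: 0x%X\n", eff);   /* prints 0xF */
    return 0;
}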
*/ @@ -1237,9 +1645,12 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Initialise work estimation lock */ - eError = OSLockCreate(&psDevInfo->hWorkEstLock); - PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Initialise work estimation lock */ + eError = OSLockCreate(&psDevInfo->hWorkEstLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); + } #endif /* Initialise lists of ZSBuffers */ @@ -1263,6 +1674,14 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = OSLockCreate(&psDevInfo->hNMILock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(NMILock)", ErrorExit); + } +#endif + /* Setup GPU utilisation stats update callback */ eError = OSLockCreate(&psDevInfo->hGPUUtilLock); PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(GPUUtilLock)", ErrorExit); @@ -1271,39 +1690,41 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, #endif eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; - psDevInfo->eActivePMConf = eActivePMConf; + psDevInfo->eActivePMConf = psApphints->eRGXActivePMConf; +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) /* Validate the SPU mask and initialize to number of SPUs to power up */ - if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + if ((psApphints->ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) { PVR_DPF((PVR_DBG_ERROR, "%s:Invalid SPU mask (All=0x%X, Non Fused=0x%X). At-least one SPU must to be powered up.", __func__, ui32AllPowUnitsMask, - ui32AvailablePowUnitsMask)); + psApphints->ui32AvailablePowUnitsMask)); PVR_LOG_GOTO_WITH_ERROR("ui32AvailablePowUnitsMask", eError, PVRSRV_ERROR_INVALID_SPU_MASK, ErrorExit); } - psDevInfo->ui32AvailablePowUnitsMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + psDevInfo->ui32AvailablePowUnitsMask = psApphints->ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; - psDevInfo->ui32AvailableRACMask = ui32AvailableRACMask & ui32AllRACMask; + psDevInfo->ui32AvailableRACMask = psApphints->ui32AvailableRACMask & ui32AllRACMask; +#endif #if !defined(NO_HARDWARE) /* set-up the Active Power Mgmt callback */ { RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM; - IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || - (eActivePMConf == RGX_ACTIVEPM_FORCE_ON); + IMG_BOOL bEnableAPM = ((psApphints->eRGXActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || + (psApphints->eRGXActivePMConf == RGX_ACTIVEPM_FORCE_ON); - if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE))) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) + /* The AutoVz driver enables a virtualisation watchdog not compatible with APM */ + if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode))) { - PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__)); + PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in AutoVz mode", __func__)); bEnableAPM = IMG_FALSE; } -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) - /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */ PVR_ASSERT(bEnableAPM 
== IMG_FALSE); #endif @@ -1321,6 +1742,8 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, } #endif + psDevInfo->eDebugDumpFWTLogType = psApphints->eDebugDumpFWTLogType; + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM, RGXQueryAPMState, RGXSetAPMState, @@ -1331,40 +1754,34 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, /* Register the device with the power manager */ eError = PVRSRVRegisterPowerDevice(psDeviceNode, - (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPrePowerState : &RGXVzPrePowerState, - (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState, - psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, - &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, - &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, - &RGXPowUnitsStateMaskChange, - (IMG_HANDLE)psDeviceNode, - PVRSRV_DEV_POWER_STATE_OFF, - eDefaultPowerState); + (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) ? &RGXPrePowerState : &RGXVzPrePowerState, + (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode)) ? &RGXPostPowerState : &RGXVzPostPowerState, + psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, + &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, &RGXCancelForcedIdleRequestAsync, + &RGXPowUnitsChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit); eError = RGXSetPowerParams(psDevInfo, psDevConfig); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit); -#if defined(SUPPORT_VALIDATION) - { - void *pvAppHintState = NULL; - - IMG_UINT32 ui32AppHintDefault; - - OSCreateKMAppHintState(&pvAppHintState); - ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL; - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TestSLRInterval, - &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval); - PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d", - ui32AppHintDefault, psDevInfo->ui32TestSLRInterval)); - OSFreeKMAppHintState(pvAppHintState); - psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; - psDevInfo->ui32SLRSkipFWAddr = 0; - } -#endif #if defined(PDUMP) #if defined(NO_HARDWARE) + /* Capture the HWPERF buffer as the last thing we do */ + PDUMPIF(psDeviceNode, "ENABLE_HWPERF", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "** Dump HWPerf Buffer"); + PDumpSaveToFileVirtualNoValidate(psDeviceNode, + psDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, + psDevInfo->ui32RGXFWIfHWPerfBufSize, + "out.hwperf", + 0, + ui32PDumpFlags); + PDUMPFI(psDeviceNode, "ENABLE_HWPERF", ui32PDumpFlags); PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle"); /* Kick the FW once, in case it still needs to detect and set the idle state */ @@ -1388,7 +1805,7 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW; - if (! PVRSRV_VZ_MODE_IS(GUEST)) + if (! 
PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { eError = RGXStop(&psDevInfo->sLayerParams); PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit); @@ -1411,8 +1828,9 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, "RGX_Main"); PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit); - /* only the HOST_IRQ bus is supported on volcanic for IRQ delivery */ - psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; + /* Register appropriate mechanism for clearing hw interrupts */ + eError = RGXSetAckIrq(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetAckIrq", ErrorExit); eError = SysInstallDeviceLISR(psDevConfig->hSysData, psDevConfig->ui32IRQ, @@ -1424,40 +1842,17 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, #endif /* !defined(NO_HARDWARE) */ #if defined(PDUMP) -/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside - * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the - * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its - * last parameter which will not exist on architectures which do not have this - * feature. - * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for - * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this - * means we can build the kernel driver without having to worry about the BVNC - * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given - * architecture, whereas the FEATURE is only defined for those BVNCs that - * support it). - */ -#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) -#endif { - if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && - !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) { PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "System has NO cache snooping"); + "System has CPU cache snooping"); } else { - if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) - { - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "System has CPU cache snooping"); - } - if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) - { - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "System has DEVICE cache snooping"); - } + PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "System has NO cache snooping"); } } #endif @@ -1466,7 +1861,7 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit); #if defined(SUPPORT_SECURE_ALLOC_KM) - eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES, "SharedSecMem", &psDevInfo->psGenHeapSecMem); + eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE, "SharedSecMem", &psDevInfo->psGenHeapSecMem); PVR_LOG_GOTO_IF_ERROR(eError, "OSAllocateSecBuf", ErrorExit); #endif @@ -1481,8 +1876,14 @@ PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, } #define VZ_RGX_FW_FILENAME_SUFFIX ".vz" +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +#define RGX_64K_FW_FILENAME_SUFFIX ".64k" +#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX) + sizeof(RGX_64K_FW_FILENAME_SUFFIX))) +#else #define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX))) +#endif static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR *pszFWFilenameStr, @@ -1490,26 +1891,32 @@ static void 
_GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; const IMG_CHAR * const pszFWFilenameSuffix = - PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; + PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + const IMG_CHAR * const pszFWFilenameSuffix2 = + ((OSGetPageSize() == RGX_MMU_PAGE_SIZE_64KB) && + RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + ? RGX_64K_FW_FILENAME_SUFFIX : ""; +#else + const IMG_CHAR * const pszFWFilenameSuffix2 = ""; +#endif OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, - "%s." RGX_BVNC_STR_FMTSPEC "%s", - RGX_FW_FILENAME, + RGX_FW_FILENAME "." RGX_BVNC_STR_FMTSPEC "%s%s", psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, - pszFWFilenameSuffix); + pszFWFilenameSuffix, pszFWFilenameSuffix2); OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, - "%s." RGX_BVNC_STRP_FMTSPEC "%s", - RGX_FW_FILENAME, + RGX_FW_FILENAME "." RGX_BVNC_STRP_FMTSPEC "%s%s", psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, - pszFWFilenameSuffix); + pszFWFilenameSuffix, pszFWFilenameSuffix2); } PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, - OS_FW_IMAGE **ppsRGXFW, - const IMG_BYTE **ppbFWData) + OS_FW_IMAGE **ppsRGXFW) { IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; @@ -1541,15 +1948,9 @@ PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, if (eErr == PVRSRV_OK) { PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); - *ppbFWData = (const IMG_BYTE*)OSFirmwareData(*ppsRGXFW); - } - else - { - *ppbFWData = NULL; } return eErr; - } #if defined(PDUMP) @@ -1558,7 +1959,7 @@ PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) RGXFWIF_KCCB_CMD sKccbCmd; PVRSRV_ERROR eError; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_ERROR_NOT_SUPPORTED); /* Fill in the command structure with the parameters needed */ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT; @@ -1575,23 +1976,34 @@ PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode { /* set up fw memory contexts */ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + __maybe_unused PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; PVRSRV_ERROR eError; -#if defined(SUPPORT_AUTOVZ) +#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + IMG_BOOL bNativeFwUMAHeap = PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDeviceNode) && + (PhysHeapGetType(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]) == PHYS_HEAP_TYPE_UMA); +#endif + +#if defined(RGX_PREMAP_FW_HEAPS) PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; - if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) + if ((!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) && (!psDeviceNode->bAutoVzFwIsUp) && (!bNativeFwUMAHeap)) { + PHYS_HEAP *psFwPageTableHeap = + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; + + PVR_LOG_GOTO_IF_INVALID_PARAM((psFwPageTableHeap != NULL), + eError, failed_to_create_ctx); + /* Temporarily swap the MMU and default GPU physheap to allow the page * tables of all memory mapped by the FwKernel 
context to be placed * in a dedicated memory carveout. This should allow the firmware mappings to * persist after a Host kernel crash or driver reset. */ - - psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap; + psDeviceNode->psMMUPhysHeap = psFwPageTableHeap; } #endif +#if defined(RGX_FEATURE_AXI_ACE_BIT_MASK) /* Set the device fabric coherency before FW context creation */ eError = RGXSystemGetFabricCoherency(psDevConfig, psDevConfig->sRegsCpuPBase, @@ -1606,11 +2018,14 @@ PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode eError)); goto failed_to_create_ctx; } +#endif /* Register callbacks for creation of device memory contexts */ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + RGXFwSharedMemCheckSnoopMode(psDevConfig); + /* Create the memory context for the firmware. */ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_FORFW, &psDevInfo->psKernelDevmemCtx); @@ -1645,63 +2060,81 @@ PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode goto failed_to_find_heap; } -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if (defined(RGX_PREMAP_FW_HEAPS)) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - IMG_UINT32 ui32OSID; - for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + IMG_UINT32 ui32DriverID; + + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH]; - OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, - &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]); + &psDevInfo->psPremappedFwRawHeap[ui32DriverID]); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); } } #endif -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && !bNativeFwUMAHeap) { IMG_DEV_PHYADDR sPhysHeapBase; - IMG_UINT32 ui32OSID; - - eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase); + IMG_UINT32 ui32DriverID; + void *pvAppHintState = NULL; + IMG_UINT64 ui64DefaultHeapStride; + IMG_UINT64 ui64GuestHeapDevBaseStride; + + OSCreateAppHintState(&pvAppHintState); + ui64DefaultHeapStride = PVRSRV_APPHINT_GUESTFWHEAPSTRIDE; + OSGetAppHintUINT64(APPHINT_NO_DEVICE, + pvAppHintState, + GuestFWHeapStride, + &ui64DefaultHeapStride, + &ui64GuestHeapDevBaseStride); + OSFreeAppHintState(pvAppHintState); + pvAppHintState = NULL; + + eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM], &sPhysHeapBase); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap); - for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { - IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; + IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32DriverID * ui64GuestHeapDevBaseStride)}; eError = RGXFwRawHeapAllocMap(psDeviceNode, - ui32OSID, + 
ui32DriverID, sRawFwHeapBase, RGX_FIRMWARE_RAW_HEAP_SIZE); if (eError != PVRSRV_OK) { - for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--) + for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--) { - RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); } PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); } } -#if defined(SUPPORT_AUTOVZ) +#if defined(RGX_PREMAP_FW_HEAPS) /* restore default Px setup */ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; #endif } -#else - if (PVRSRV_VZ_MODE_IS(GUEST)) +#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + +#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* On setups with dynamically mapped Guest heaps, the Guest makes + * a PVZ call to the Host to request the mapping during init. */ + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); + eError = PvzClientMapDevPhysHeap(psDevConfig); PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); } -#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ +#endif /* !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); @@ -1726,31 +2159,32 @@ void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; +#if defined(RGX_PREMAP_FW_HEAPS) + PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; +#endif -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { -#if defined(SUPPORT_AUTOVZ) - PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; - - psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap; +#if defined(RGX_PREMAP_FW_HEAPS) + psDeviceNode->psMMUPhysHeap = + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; if (!psDeviceNode->bAutoVzFwIsUp) #endif { - IMG_UINT32 ui32OSID; + IMG_UINT32 ui32DriverID; - for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { - RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); } } -#if defined(SUPPORT_AUTOVZ) - psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; -#endif } -#else - if (PVRSRV_VZ_MODE_IS(GUEST)) +#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + +#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); @@ -1775,8 +2209,12 @@ void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) if (psDevInfo->psKernelDevmemCtx) { eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); - PVR_ASSERT(eError == PVRSRV_OK); + PVR_LOG_IF_ERROR(eError, "DevmemDestroyContext"); } + +#if defined(RGX_PREMAP_FW_HEAPS) + psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; +#endif } static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, @@ -1791,7 +2229,7 @@ static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, /* Skip the alignment check if the driver 
is guest since there is no firmware to check against */ - PVRSRV_VZ_RET_IF_MODE(GUEST, eError); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDevNode, eError); if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) { @@ -1813,6 +2251,8 @@ static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, } paui32FWAlignChecks += ui32UMChecksOffset; + /* Invalidate the size value, check the next region size (UM) and invalidate */ + RGXFwSharedMemCacheOpPtr(paui32FWAlignChecks, INVALIDATE); if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM) { PVR_DPF((PVR_DBG_ERROR, @@ -1825,6 +2265,10 @@ static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, goto return_; } + RGXFwSharedMemCacheOpExec(paui32FWAlignChecks, + ui32AlignChecksSizeUM * sizeof(IMG_UINT32), + PVRSRV_CACHE_OP_INVALIDATE); + for (i = 0; i < ui32AlignChecksSizeUM; i++) { if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i]) @@ -1858,6 +2302,16 @@ PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, { PVRSRV_ERROR eError = PVRSRV_OK; IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift(); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K; + } +#else + PVR_UNREFERENCED_PARAMETER(uiLog2Align); +#endif uiMemAllocFlags = (uiMemAllocFlags | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | @@ -1868,8 +2322,6 @@ PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, uiMemAllocFlags &= PVRSRV_MEMALLOCFLAGS_TDFWMASK; #endif - PVR_UNREFERENCED_PARAMETER(uiLog2Align); - PDUMPCOMMENT(psDeviceNode, "Allocate FW %s memory", pszText); eError = DevmemFwAllocate(psDeviceNode->pvDevice, @@ -1896,22 +2348,56 @@ PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, This check is left for clarity in error messages if any incompatibility occurs. 
- @Input psFwOsInit - FW init data + @Input psDevInfo - device info @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch; + RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; + RGXFWIF_OSINIT *psFwOsInit = NULL; + IMG_UINT8 ui8FwOsCount; - if (psFwOsInit == NULL) + if (psDevInfo == NULL) return PVRSRV_ERROR_INVALID_PARAMS; ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW); - ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + psFwOsInit = psDevInfo->psRGXFWIfOsInit; + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + if (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + __func__, RGX_NUM_DRIVERS_SUPPORTED, ui8FwOsCount)); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + } + else + { + psFWInfoHeader = &psDevInfo->sFWInfoHeader; + ui32BuildOptionsFWKMPart = psFWInfoHeader->ui32Flags & RGX_BUILD_OPTIONS_MASK_FW; + if (PVRSRV_VZ_MODE_IS(HOST, DEVINFO, psDevInfo) && BITMASK_HAS(psFWInfoHeader->ui32Flags, OPTIONS_NUM_DRIVERS_SUPPORTED_CHECK_EN)) + { + ui8FwOsCount = (psFWInfoHeader->ui32Flags & OPTIONS_NUM_DRIVERS_SUPPORTED_MASK) >> OPTIONS_NUM_DRIVERS_SUPPORTED_SHIFT; + ui8FwOsCount++; + if (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + __func__, RGX_NUM_DRIVERS_SUPPORTED, ui8FwOsCount)); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + } + } /* Check if the FW is missing support for any features required by the driver */ if (~ui32BuildOptionsFWKMPart & ui32BuildOptions) @@ -1919,21 +2405,21 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart; #if !defined(PVRSRV_STRICT_COMPAT_CHECK) /*Mask non-critical options out as we do support combining them in UM & KM */ - ui32BuildOptionsMismatch &= ~FW_OPTIONS_STRICT; + ui32BuildOptionsMismatch &= FW_OPTIONS_STRICT; #endif - if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; " - "extra options present in the KM driver: (0x%x). Please check rgx_options.h", - ui32BuildOptions & ui32BuildOptionsMismatch )); + "extra options present in the KM driver: (0x%x). 
Please check rgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch)); return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; } - if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) + if ((ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) { PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " - "extra options present in Firmware: (0x%x). Please check rgx_options.h", - ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; } PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); @@ -1942,7 +2428,6 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF { PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]")); } -#endif return PVRSRV_OK; } @@ -1957,59 +2442,54 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF Validate FW DDK version against driver DDK version @Input psDevInfo - device info - @Input psFwOsInit - FW init data @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - IMG_UINT32 ui32DDKVersion; + IMG_UINT32 ui32KMDDKVersion; + IMG_UINT32 ui32FWDDKVersion; PVRSRV_ERROR eError; + RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; + RGXFWIF_OSINIT *psFwOsInit = NULL; - ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); -#endif + if (psDevInfo == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; -#if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW DDK version"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion), - ui32DDKVersion, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) + ui32KMDDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; + psFwOsInit = psDevInfo->psRGXFWIfOsInit; + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32FWDDKVersion = psFwOsInit->sRGXCompChecks.ui32DDKVersion; } -#endif + else + { + psFWInfoHeader = &psDevInfo->sFWInfoHeader; -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; + ui32FWDDKVersion = PVRVERSION_PACK(psFWInfoHeader->ui16PVRVersionMajor, psFWInfoHeader->ui16PVRVersionMinor); + } - if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion) + if (ui32FWDDKVersion != ui32KMDDKVersion) { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK revision (%u.%u).", - PVRVERSION_MAJ, PVRVERSION_MIN, - PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion), - PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion))); + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version 
(%u.%u) / Firmware DDK version (%u.%u).", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(ui32FWDDKVersion), + PVRVERSION_UNPACK_MIN(ui32FWDDKVersion))); eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; PVR_DBG_BREAK; return eError; } else { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK revision (%u.%u) match. [ OK ]", - PVRVERSION_MAJ, PVRVERSION_MIN, - PVRVERSION_MAJ, PVRVERSION_MIN)); + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. [ OK ]", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_MAJ, PVRVERSION_MIN)); } -#endif return PVRSRV_OK; } @@ -2024,46 +2504,41 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXD Validate FW DDK build against driver DDK build @Input psDevInfo - device info - @Input psFwOsInit - FW init data @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { - PVRSRV_ERROR eError=PVRSRV_OK; -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - IMG_UINT32 ui32DDKBuild; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32KMDDKBuild; + IMG_UINT32 ui32FWDDKBuild; + RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; + RGXFWIF_OSINIT *psFwOsInit = NULL; - ui32DDKBuild = PVRVERSION_BUILD; -#endif + ui32KMDDKBuild = PVRVERSION_BUILD; -#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW DDK build"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild), - ui32DDKBuild, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) + if (psDevInfo == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } -#endif + psFwOsInit = psDevInfo->psRGXFWIfOsInit; + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; + ui32FWDDKBuild = psFwOsInit->sRGXCompChecks.ui32DDKBuild; + } + else + { + psFWInfoHeader = &psDevInfo->sFWInfoHeader; + ui32FWDDKBuild = psFWInfoHeader->ui32PVRVersionBuild; + } - if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild) + if (ui32FWDDKBuild != ui32KMDDKBuild) { PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", - ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + ui32KMDDKBuild, ui32FWDDKBuild)); #if defined(PVRSRV_STRICT_COMPAT_CHECK) eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; PVR_DBG_BREAK; @@ -2073,9 +2548,8 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV else { PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. 
[ OK ]", - ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + ui32KMDDKBuild, ui32FWDDKBuild)); } -#endif return eError; } @@ -2089,440 +2563,57 @@ static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV Validate FW BVNC against driver BVNC @Input psDevInfo - device info - @Input psFwOsInit - FW init data @Return PVRSRV_ERROR - depending on mismatch found ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) { -#if !defined(NO_HARDWARE) - IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; -#endif -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC); - PVRSRV_ERROR eError; - - sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, - psDevInfo->sDevFeatureCfg.ui32V, - psDevInfo->sDevFeatureCfg.ui32N, - psDevInfo->sDevFeatureCfg.ui32C); -#endif - -#if defined(PDUMP) - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW BVNC (struct version)"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), - sBVNC.ui32LayoutVersion, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - } - - - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), - (IMG_UINT32)sBVNC.ui64BVNC, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - } - - PDUMPCOMMENT(psDevInfo->psDeviceNode, - "Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + - sizeof(IMG_UINT32), - (IMG_UINT32)(sBVNC.ui64BVNC >> 32), - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - } -#endif - -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - - RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); - - if (PVRSRV_VZ_MODE_IS(GUEST)) - { - bCompatibleAll = IMG_TRUE; - } - - if (!bCompatibleAll) - { - if (!bCompatibleVersion) - { - PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).", - __func__, - sBVNC.ui32LayoutVersion, - psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion)); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } - - if (!bCompatibleBVNC) - { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM 
driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", - RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC))); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BNVC match. [ OK ]")); - } -#endif - return PVRSRV_OK; -} - -/*! - ******************************************************************************* - - @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver - - @Description - - Validate HW BVNC against driver BVNC - - @Input psDevInfo - device info - @Input psFwOsInit - FW init data - - @Return PVRSRV_ERROR - depending on mismatch found - - ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) -{ -#if defined(PDUMP) || !defined(NO_HARDWARE) - IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B | - RGX_BVNC_PACK_MASK_V | - RGX_BVNC_PACK_MASK_N | - RGX_BVNC_PACK_MASK_C; - - PVRSRV_ERROR eError; - RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC); -#endif - -#if defined(PDUMP) - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -#endif - -#if !defined(NO_HARDWARE) - RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC); - IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; -#endif - - if (psDevInfo->bIgnoreHWReportedBVNC) - { - PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)")); - return PVRSRV_OK; - } - -#if defined(PDUMP) || !defined(NO_HARDWARE) -#if defined(COMPAT_BVNC_MASK_B) - ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_B; -#endif -#if defined(COMPAT_BVNC_MASK_V) - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V; -#endif -#if defined(COMPAT_BVNC_MASK_N) - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N; -#endif -#if defined(COMPAT_BVNC_MASK_C) - ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; -#endif - - sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, - psDevInfo->sDevFeatureCfg.ui32V, - psDevInfo->sDevFeatureCfg.ui32N, - psDevInfo->sDevFeatureCfg.ui32C); - - - - if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) - { - PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.", - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")), - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")), - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")), - ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):("")))); - } -#endif - -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: Layout version of compchecks struct"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), - sSWBVNC.ui32LayoutVersion, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } - - 
PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check started"); - if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) - { - PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); - PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: HW BNC and FW BNC (Lower 32 bits)"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), - (IMG_UINT32)sSWBVNC.ui64BVNC , - (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V), - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } - - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: HW BNC and FW BNC (Higher 32 bits)"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + - sizeof(IMG_UINT32), - (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32), - (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32), - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } - - PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); - } - if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V) - { - PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); - PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); + PVRSRV_ERROR eError = PVRSRV_OK; - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: HW V and FW V"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + - offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + - ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0), - (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)), - RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 
32 : 0), - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; - } - PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); - } - PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check finished"); -#endif + RGX_FW_INFO_HEADER *psFWInfoHeader; + IMG_UINT64 ui64KMBVNC; -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - { + if (psDevInfo == NULL) return PVRSRV_ERROR_INVALID_PARAMS; - } - sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC; + psFWInfoHeader = &psDevInfo->sFWInfoHeader; - sHWBVNC.ui64BVNC &= ui64MaskBVNC; - sSWBVNC.ui64BVNC &= ui64MaskBVNC; + ui64KMBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); - RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); - - if (PVRSRV_VZ_MODE_IS(GUEST)) - { - bCompatibleAll = IMG_TRUE; - } - - if (!bCompatibleAll) + if (ui64KMBVNC != psFWInfoHeader->ui64BVNC) { - if (!bCompatibleVersion) - { - PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).", - __func__, - sHWBVNC.ui32LayoutVersion, - sSWBVNC.ui32LayoutVersion)); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } - - if (!bCompatibleBVNC) - { - PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).", - RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); - eError = PVRSRV_ERROR_BVNC_MISMATCH; - return eError; - } - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]", - RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), - RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); - } -#endif - - return PVRSRV_OK; -} - -/*! 
- ******************************************************************************* - - @Function RGXDevInitCompatCheck_METACoreVersion_AgainstDriver - - @Description - - Validate HW META version against driver META version - - @Input psDevInfo - device info - @Input psFwOsInit - FW init data - - @Return PVRSRV_ERROR - depending on mismatch found - - ******************************************************************************/ -static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, - RGXFWIF_OSINIT *psFwOsInit) -{ -#if defined(PDUMP)||(!defined(NO_HARDWARE)) - PVRSRV_ERROR eError; -#endif + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", + RGX_BVNC_PACKED_EXTR_B(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_V(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_N(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_C(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_B(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFWInfoHeader->ui64BVNC))); -#if defined(PDUMP) - PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -#endif - - IMG_UINT32 ui32FWCoreIDValue = 0; - IMG_CHAR *pcRGXFW_PROCESSOR = NULL; - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - switch (RGX_GET_FEATURE_VALUE(psDevInfo, META)) - { - case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break; - case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break; - case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break; - case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break; - default: - PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); - PVR_ASSERT(0); - } - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; - } - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE; - pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + eError = PVRSRV_ERROR_BVNC_MISMATCH; } else { - PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); - PVR_ASSERT(0); - } + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: KM driver BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. 
[ OK ]", + RGX_BVNC_PACKED_EXTR_B(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_V(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_N(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_C(ui64KMBVNC), + RGX_BVNC_PACKED_EXTR_B(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFWInfoHeader->ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFWInfoHeader->ui64BVNC))); -#if defined(PDUMP) - PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags); - PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, - "Compatibility check: KM driver and HW FW Processor version"); - eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, - offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + - offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion), - ui32FWCoreIDValue, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - ui32PDumpFlags); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); - return eError; + eError = PVRSRV_OK; } - PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags); -#endif - -#if !defined(NO_HARDWARE) - if (psFwOsInit == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue) - { - PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).", - pcRGXFW_PROCESSOR, - ui32FWCoreIDValue, - pcRGXFW_PROCESSOR, - psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); - eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH; - PVR_DBG_BREAK; - return eError; - } - else - { - PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].", - pcRGXFW_PROCESSOR, - ui32FWCoreIDValue, - pcRGXFW_PROCESSOR, - psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); - } -#endif - return PVRSRV_OK; + return eError; } /*! @@ -2545,105 +2636,65 @@ static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; #if !defined(NO_HARDWARE) - IMG_UINT32 ui32RegValue; - IMG_UINT8 ui8FwOsCount; - IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; - - LOOP_UNTIL_TIMEOUT(ui32FwTimeout) - { - if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) - { - /* No need to wait if the FW has already updated the values */ - break; - } - OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - - ui32RegValue = 0; - - if ((!PVRSRV_VZ_MODE_IS(GUEST)) && - RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue); + IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; - if (eError != PVRSRV_OK) + LOOP_UNTIL_TIMEOUT_US(ui32FwTimeout) { - PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? 
(%u)", - __func__, eError)); - goto chk_exit; - } + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, + INVALIDATE); + if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); - if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT)) + /* Flush covers this instance and the reads in the functions below */ + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, + INVALIDATE); + if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) { - eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED; - PVR_DPF((PVR_DBG_ERROR, - "%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)", - __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError)); - goto chk_exit; - } - } + eError = PVRSRV_ERROR_TIMEOUT; + PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", + __func__, eError)); - if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) - { - eError = PVRSRV_ERROR_TIMEOUT; - PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", - __func__, eError)); - if (PVRSRV_VZ_MODE_IS(GUEST)) - { PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's " "OsConfig initialisation data was not accepted by the firmware", __func__)); + goto chk_exit; } - goto chk_exit; - } - - ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; - if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || - (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", - __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? 
(1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount)); } #endif /* defined(NO_HARDWARE) */ - eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); - if (eError != PVRSRV_OK) - { - goto chk_exit; - } - - eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); + eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo); if (eError != PVRSRV_OK) { goto chk_exit; } } - eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit); - if (eError != PVRSRV_OK) - { - goto chk_exit; - } - eError = PVRSRV_OK; chk_exit: @@ -2651,13 +2702,13 @@ static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) } static void _RGXSoftResetToggle(PVRSRV_RGXDEV_INFO *psDevInfo, - IMG_UINT64 ui64ResetValue, - IMG_UINT64 ui64SPUResetValue) + IMG_UINT64 ui64ResetValue1, + IMG_UINT64 ui64ResetValue2) { - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue); + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1); if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1) { - OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU, ui64SPUResetValue); + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU, ui64ResetValue2); } /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ @@ -2672,35 +2723,59 @@ static void _RGXSoftResetToggle(PVRSRV_RGXDEV_INFO *psDevInfo, @Function RGXSoftReset @Description Resets some modules of the RGX device @Input psDeviceNode Device node -@Input ui64ResetValue A mask for which each bit set corresponds +@Input ui64ResetValue1 A mask for which each bit set corresponds to a module to reset (via the SOFT_RESET register). -@Input ui64SPUResetValue A mask for which each bit set corresponds +@Input ui64ResetValue2 A mask for which each bit set corresponds to a module to reset (via the SOFT_RESET_SPU register). 
@Return PVRSRV_ERROR */ /***************************************************************************/ static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT64 ui64ResetValue, - IMG_UINT64 ui64SPUResetValue) + IMG_UINT64 ui64ResetValue1, + IMG_UINT64 ui64ResetValue2) { PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_BOOL bSoftReset = IMG_FALSE; + IMG_UINT64 ui64SoftResetMask = 0; PVR_ASSERT(psDeviceNode != NULL); PVR_ASSERT(psDeviceNode->pvDevice != NULL); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); + + psDevInfo = psDeviceNode->pvDevice; - if (((ui64ResetValue & RGX_CR_SOFT_RESET_MASKFULL) != ui64ResetValue) - || (ui64SPUResetValue & RGX_CR_SOFT_RESET_SPU_MASKFULL) != ui64SPUResetValue) +#if defined(RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) { - return PVRSRV_ERROR_INVALID_PARAMS; + ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL; + } + else +#endif + { + ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL; } - /* The device info */ - psDevInfo = psDeviceNode->pvDevice; +#if defined(RGX_CR_SOFT_RESET2_MASKFULL) + if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) && + ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2)) + { + bSoftReset = IMG_TRUE; + } +#elif defined(RGX_CR_SOFT_RESET_SPU_MASKFULL) + if ((ui64ResetValue2 & RGX_CR_SOFT_RESET_SPU_MASKFULL) != ui64ResetValue2) + { + bSoftReset = IMG_TRUE; + } +#endif + + if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } /* Set in soft-reset */ - _RGXSoftResetToggle(psDevInfo, ui64ResetValue, ui64SPUResetValue); + _RGXSoftResetToggle(psDevInfo, ui64ResetValue1, ui64ResetValue2); /* Take the modules out of reset... */ _RGXSoftResetToggle(psDevInfo, 0, 0); @@ -2708,6 +2783,119 @@ static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_OK; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline; + +static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + DevPhysMemFree(psDeviceNode, +#if defined(PDUMP) + psDevInfo->psTrampoline->hPdumpPages, +#endif + &psDevInfo->psTrampoline->sPages); + + if (psDevInfo->psTrampoline != &sNullTrampoline) + { + OSFreeMem(psDevInfo->psTrampoline); + } + psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; +} + +#define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size)) +#define TRAMPOLINE_ALLOC_MAX_RETRIES (3) + +static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + IMG_INT32 i, j; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETRIES]; + + PDUMPCOMMENT(psDeviceNode, "Allocate pages for trampoline"); + + /* Retry the allocation of the trampoline block (16KB), retaining any + * previous allocations overlapping with the target range until we get an + * allocation that doesn't overlap with the target range. + * Any allocation like this will require a maximum of 3 tries as we are + * allocating a physical contiguous block of memory, not individual pages. + * Free the unused allocations at the end only after the desired range + * is obtained to prevent the alloc function from returning the same bad + * range repeatedly. 
+ */ + for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETRIES; i++) + { + pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE)); + eError = DevPhysMemAlloc(psDeviceNode, + RGXMIPSFW_TRAMPOLINE_SIZE, + RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE, + 0, // (init) u8Value + IMG_FALSE, // bInitPage, +#if defined(PDUMP) + psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + "TrampolineRegion", + &pasTrampoline[i]->hPdumpPages, +#endif + PVR_SYS_ALLOC_PID, + &pasTrampoline[i]->sPages, + &pasTrampoline[i]->sPhysAddr); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed (%u)", + __func__, + eError)); + goto fail; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Set the persistent uiOSid value so that we free from the correct + * base arena when unloading the driver and freeing the trampoline. + */ + pasTrampoline[i]->sPages.uiOSid = 0; /* Firmware global arena */ +#endif + + if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr, + RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, + RGXMIPSFW_TRAMPOLINE_SIZE)) + { + break; + } + } + if (TRAMPOLINE_ALLOC_MAX_RETRIES == i) + { + /* Failed to find a physical allocation after 3 attempts */ + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + PVR_DPF((PVR_DBG_ERROR, + "%s failed to allocate non-overlapping pages (%u)", + __func__, eError)); + /* Fall through, clean up and return error. */ + } + else + { + /* Remember the last physical block allocated, it will not be freed */ + psDevInfo->psTrampoline = pasTrampoline[i]; + } + +fail: + /* free all unused allocations */ + for (j = 0; j < i; j++) + { + DevPhysMemFree(psDeviceNode, +#if defined(PDUMP) + pasTrampoline[j]->hPdumpPages, +#endif + &pasTrampoline[j]->sPages); + OSFreeMem(pasTrampoline[j]); + } + + return eError; +} + +#undef RANGES_OVERLAP +#endif + PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_DEVMEM_SIZE_T uiFWCodeLen, IMG_DEVMEM_SIZE_T uiFWDataLen, @@ -2717,20 +2905,29 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + IMG_DEVMEM_SIZE_T uiDummyLen; + DEVMEM_MEMDESC *psDummyMemDesc = NULL; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) + { + eError = RGXAllocTrampoline(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate trampoline region (%u)", + eError)); + goto failTrampolineMemDescAlloc; + } + } +#endif /* * Set up Allocation for FW code section */ - uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + uiMemAllocFlags = RGX_FWCODEDATA_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE); - eError = RGXAllocateFWMemoryRegion(psDeviceNode, uiFWCodeLen, uiMemAllocFlags, @@ -2755,31 +2952,55 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, goto failFWCodeMemDescAqDevVirt; } - /* - * The FW code must be the first allocation in the firmware heap, otherwise - * the bootloader will not work (the FW will not be able to find the bootloader). 
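/*
 * Illustrative sketch of the retry scheme used by RGXAllocTrampoline above:
 * every allocation that overlaps the forbidden physical window is retained
 * until a non-overlapping block is found, and only then are the rejected
 * attempts released, so the allocator cannot hand back the same bad range
 * twice. Standalone C; alloc_block/free_block are simplified stand-ins for
 * the real physical allocator.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_RETRIES 3
#define RANGES_OVERLAP(x, y, size) ((x) < ((y) + (size)) && (y) < ((x) + (size)))

struct block { void *cpu_va; uintptr_t phys; };

/* Toy allocator: reuse the CPU VA as a fake physical address. */
static int alloc_block(size_t size, struct block *out)
{
    out->cpu_va = malloc(size);
    if (out->cpu_va == NULL)
        return -1;
    out->phys = (uintptr_t)out->cpu_va;
    return 0;
}

static void free_block(struct block *blk)
{
    free(blk->cpu_va);
}

static int alloc_outside_window(size_t size, uintptr_t bad_base,
                                struct block *result)
{
    struct block tries[MAX_RETRIES];
    int i, j, err = -1;

    for (i = 0; i < MAX_RETRIES; i++)
    {
        if (alloc_block(size, &tries[i]) != 0)
            break;                              /* allocator failure          */

        if (!RANGES_OVERLAP(tries[i].phys, bad_base, size))
        {
            *result = tries[i];                 /* keep the good block        */
            err = 0;
            break;
        }
    }

    /* Free only the rejected (overlapping) attempts; the kept block, if any,
     * sits at index i and is excluded by the j < i bound. */
    for (j = 0; j < i; j++)
        free_block(&tries[j]);

    return err;
}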
- */ - PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_MAIN_HEAP_BASE); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)))) +#endif + { + /* + * The FW code must be the first allocation in the firmware heap, otherwise + * the bootloader will not work (the FW will not be able to find the bootloader). + */ + PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE); + } + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + /* + * Allocate Dummy Pages so that Data segment allocation gets the same + * device virtual address as specified in MIPS firmware linker script + */ + uiDummyLen = RGXGetFWImageSectionMaxSize(NULL, MIPS_CODE) + + RGXGetFWImageSectionMaxSize(NULL, MIPS_EXCEPTIONS_CODE) + + RGXGetFWImageSectionMaxSize(NULL, MIPS_BOOT_CODE) - + uiFWCodeLen; /* code actual size */ + + if (uiDummyLen > 0) + { + eError = DevmemFwAllocateExportable(psDeviceNode, + uiDummyLen, + OSGetPageSize(), + uiMemAllocFlags, + "FwExDummyPages", + &psDummyMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to allocate fw dummy mem (%u)", + eError)); + goto failDummyMemDescAlloc; + } + } + } +#endif /* * Set up Allocation for FW data section */ - uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) & - RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); - eError = RGXAllocateFWMemoryRegion(psDeviceNode, uiFWDataLen, - uiMemAllocFlags, + RGX_FWCODEDATA_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA), "FwDataRegion", &psDevInfo->psRGXFWDataMemDesc); @@ -2806,18 +3027,9 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Set up Allocation for FW coremem code section */ - uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)) & - RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); - + uiMemAllocFlags = (RGX_FWCODEDATA_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)) & + ~PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE; eError = RGXAllocateFWMemoryRegion(psDeviceNode, uiFWCorememCodeLen, uiMemAllocFlags, @@ -2829,7 +3041,7 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_DPF((PVR_DBG_ERROR, "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", uiFWCorememCodeLen, uiMemAllocFlags, eError)); - goto failFWCorememMemDescAlloc; + goto failFWCorememCodeMemDescAlloc; } eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, @@ -2858,17 +3070,8 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, /* * Set up Allocation for FW coremem 
data section */ - uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) & - RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); - + uiMemAllocFlags = RGX_FWCODEDATA_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA); eError = RGXAllocateFWMemoryRegion(psDeviceNode, uiFWCorememDataLen, uiMemAllocFlags, @@ -2907,6 +3110,14 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + /* Free Dummy Pages */ + if (psDummyMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); + } +#endif + return PVRSRV_OK; failFWCorememDataMemDescFwAddr: @@ -2924,15 +3135,30 @@ PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); psDevInfo->psRGXFWCorememCodeMemDesc = NULL; } -failFWCorememMemDescAlloc: +failFWCorememCodeMemDescAlloc: failFWDataMemDescAqDevVirt: DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); psDevInfo->psRGXFWDataMemDesc = NULL; failFWDataMemDescAlloc: +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (psDummyMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); + } +failDummyMemDescAlloc: +#endif failFWCodeMemDescAqDevVirt: DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); psDevInfo->psRGXFWCodeMemDesc = NULL; failFWCodeMemDescAlloc: +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) + { + RGXFreeTrampoline(psDeviceNode); + } +failTrampolineMemDescAlloc: +#endif return eError; } @@ -3063,60 +3289,24 @@ PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, */ PVRSRV_ERROR RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_BOOL bEnableSignatureChecks, - IMG_UINT32 ui32SignatureChecksBufSize, - IMG_UINT32 ui32HWPerfFWBufSizeKB, - IMG_UINT64 ui64HWPerfFilter, + RGX_INIT_APPHINTS *psApphints, IMG_UINT32 ui32ConfigFlags, - IMG_UINT32 ui32LogType, - IMG_UINT32 ui32FilterFlags, - IMG_UINT32 ui32JonesDisableMask, - IMG_UINT32 ui32HWRDebugDumpLimit, - IMG_UINT32 ui32RenderKillingCtl, - IMG_UINT32 ui32CDMTDMKillingCtl, - IMG_UINT32 *pui32TPUTrilinearFracMask, - IMG_UINT32 *pui32USRMNumRegions, - IMG_UINT64 *pui64UVBRMNumRegions, - IMG_UINT32 ui32HWPerfCountersDataSize, - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, - IMG_BOOL bSPUClockGating, - FW_PERF_CONF eFirmwarePerf, - IMG_UINT32 ui32KCCBSizeLog2, IMG_UINT32 ui32ConfigFlagsExt, - IMG_UINT32 ui32AvailablePowUnitsMask, - IMG_UINT32 ui32AvailableRACMask, IMG_UINT32 ui32FwOsCfgFlags) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; #if defined(DEBUG) + void *pvAppHintState = NULL; + IMG_BOOL bAppHintDefault; IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE; #endif eError = RGXSetupFirmware(psDeviceNode, - bEnableSignatureChecks, - ui32SignatureChecksBufSize, - ui32HWPerfFWBufSizeKB, - ui64HWPerfFilter, + psApphints, ui32ConfigFlags, ui32ConfigFlagsExt, - ui32FwOsCfgFlags, - ui32LogType, - ui32FilterFlags, - ui32JonesDisableMask, - ui32HWRDebugDumpLimit, - ui32HWPerfCountersDataSize, 
- ui32RenderKillingCtl, - ui32CDMTDMKillingCtl, - pui32TPUTrilinearFracMask, - pui32USRMNumRegions, - pui64UVBRMNumRegions, - eRGXRDPowerIslandingConf, - bSPUClockGating, - eFirmwarePerf, - ui32KCCBSizeLog2, - ui32AvailablePowUnitsMask, - ui32AvailableRACMask); + ui32FwOsCfgFlags); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -3125,7 +3315,7 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, goto failed_init_firmware; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, RGXFWTraceQueryFilter, @@ -3140,7 +3330,16 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, } #if defined(DEBUG) - bEnableFWPoisonOnFree = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; + OSCreateAppHintState(&pvAppHintState); + + bAppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; + OSGetAppHintBOOL(psDeviceNode, + pvAppHintState, + EnableFWPoisonOnFree, + &bAppHintDefault, + &bEnableFWPoisonOnFree); + + OSFreeAppHintState(pvAppHintState); PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, RGXQueryFWPoisonOnFree, @@ -3155,6 +3354,9 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, psDevInfo->uiFWPoisonOnFreeFlag = 0ULL; #endif + psDevInfo->ui32ClockSource = PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE; + psDevInfo->ui32LastClockSource = PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE; + return PVRSRV_OK; failed_init_firmware: @@ -3164,6 +3366,7 @@ RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, /* See device.h for function declaration */ static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RequestedSize, DEVMEM_MEMDESC **psMemDesc, IMG_UINT32 *puiSyncPrimVAddr, IMG_UINT32 *puiSyncPrimBlockSize) @@ -3171,35 +3374,24 @@ static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_ERROR eError; RGXFWIF_DEV_VIRTADDR pFirmwareAddr; - IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); - IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); - IMG_UINT32 ui32CoherencyFlag = 0; + IMG_DEVMEM_ALIGN_T uiUFOBlockAlign = MAX(sizeof(IMG_UINT32), sizeof(SYNC_CHECKPOINT_FW_OBJ)); + IMG_DEVMEM_SIZE_T uiUFOBlockSize = PVR_ALIGN(ui32RequestedSize, uiUFOBlockAlign); psDevInfo = psDeviceNode->pvDevice; /* Size and align are 'expanded' because we request an Exportalign allocation */ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), - &uiUFOBlockSize, - &ui32UFOBlockAlign); + &uiUFOBlockSize, + &uiUFOBlockAlign); if (eError != PVRSRV_OK) { goto e0; } - if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && - PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) - { - ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; - } - else - { - ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED; - } - eError = DevmemFwAllocateExportable(psDeviceNode, uiUFOBlockSize, - ui32UFOBlockAlign, + uiUFOBlockAlign, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | @@ -3208,91 +3400,34 @@ static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - ui32CoherencyFlag, + PVRSRV_MEMALLOCFLAG_UNCACHED, "FwExUFOBlock", psMemDesc); - if (eError != PVRSRV_OK) - { - goto e0; - } - - eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); - PVR_GOTO_IF_ERROR(eError, 
e1); - - *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr; - *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); - - return PVRSRV_OK; - -e1: - DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); -e0: - return eError; -} - -/* See device.h for function declaration */ -static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEM_MEMDESC *psMemDesc) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - /* - If the system has snooping of the device cache then the UFO block - might be in the cache so we need to flush it out before freeing - the memory - - When the device is being shutdown/destroyed we don't care anymore. - Several necessary data structures to issue a flush were destroyed - already. - */ - if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && - psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT) - { - RGXFWIF_KCCB_CMD sFlushInvalCmd; - PVRSRV_ERROR eError; - IMG_UINT32 ui32kCCBCommandSlot; - - /* Schedule the SLC flush command ... */ -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate"); -#endif - sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; - sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; - - eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, - &sFlushInvalCmd, - PDUMP_FLAGS_CONTINUOUS, - &ui32kCCBCommandSlot); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to schedule SLC flush command with error (%u)", - __func__, - eError)); - } - else - { - /* Wait for the SLC flush to complete */ - eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: SLC flush and invalidate aborted with error (%u)", - __func__, - eError)); - } - else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & - RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); - } - } + if (eError != PVRSRV_OK) + { + goto e0; } + eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_GOTO_IF_ERROR(eError, e1); + + *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr; + *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); + + return PVRSRV_OK; + +e1: + DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); +e0: + return eError; +} + +/* See device.h for function declaration */ +static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXUnsetFirmwareAddress(psMemDesc); DevmemFwUnmapAndFree(psDevInfo, psMemDesc); } @@ -3310,8 +3445,9 @@ static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) } #endif + if ((psDevInfo->hTQUSCSharedMem != NULL) && - (psDevInfo->hTQCLISharedMem != NULL)) + (psDevInfo->hTQCLISharedMem != NULL)) { PVRSRVTQUnloadShaders(psDeviceNode); } @@ -3335,6 +3471,7 @@ static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) } if (psDeviceNode->hCmdCompNotify != NULL) { + /* Cancel notifications to this device */ PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify); psDeviceNode->hCmdCompNotify = NULL; } @@ -3349,6 +3486,14 @@ static void 
DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) OSLockDestroy(psDevInfo->hGPUUtilLock); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + (psDevInfo->hNMILock != NULL)) + { + OSLockDestroy(psDevInfo->hNMILock); + } +#endif + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && (psDevInfo->hMMUCtxUnregLock != NULL)) { @@ -3372,10 +3517,13 @@ static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* De-init work estimation lock */ - if (psDevInfo->hWorkEstLock != NULL) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - OSLockDestroy(psDevInfo->hWorkEstLock); + /* De-init work estimation lock */ + if (psDevInfo->hWorkEstLock != NULL) + { + OSLockDestroy(psDevInfo->hWorkEstLock); + } } #endif @@ -3395,7 +3543,6 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; PVRSRV_ERROR eError; DEVICE_MEMORY_INFO *psDevMemoryInfo; - IMG_UINT32 ui32Temp=0; if (!psDevInfo) { @@ -3407,76 +3554,20 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) if (psDevInfo->psRGXFWIfOsInit) { KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + KM_CONNECTION_CACHEOP(Os, FLUSH); } - DeviceDepBridgeDeInit(psDevInfo); - -#if defined(PDUMP) - DevmemIntFreeDefBackingPage(psDeviceNode, - &psDeviceNode->sDummyPage, - DUMMY_PAGE); - DevmemIntFreeDefBackingPage(psDeviceNode, - &psDeviceNode->sDevZeroPage, - DEV_ZERO_PAGE); -#endif - -#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) - if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) - { - OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0); - PVR_UNREFERENCED_PARAMETER(ui32Temp); - } - else -#else - { - /*Delete the Dummy page related info */ - ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter); - if (0 != ui32Temp) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Dummy page reference counter is non zero (%u)", - __func__, - ui32Temp)); - PVR_ASSERT(0); - } - } -#endif - - /*Delete the Zero page related info */ - ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter); - if (0 != ui32Temp) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Zero page reference counter is non zero (%u)", - __func__, - ui32Temp)); - } - -#if defined(PDUMP) - if (NULL != psDeviceNode->sDummyPage.hPdumpPg) - { - PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active"); - } + RGXUnregisterBridges(psDevInfo); - if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg) - { - PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active"); - } +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + OSLockDestroy(psDevInfo->hCounterDumpingLock); #endif - /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */ - OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); - - /* Destroy the zero page lock */ - OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); - /* Unregister debug request notifiers first as they could depend on anything. */ RGXDebugDeinit(psDevInfo); - /* - * De-initialise in reverse order, so stage 2 init is undone first. - */ + /* De-initialise in reverse order, so stage 2 init is undone first. 
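/*
 * Illustrative sketch of the "de-initialise in reverse order" rule stated in
 * the comment above, written in the same goto-unwind style as the fail*
 * labels used elsewhere in this file. init_a/init_b/init_c are hypothetical
 * stages, stubbed so the sketch is self-contained.
 */
#include <stdbool.h>

static bool init_a(void) { return true; }  static void deinit_a(void) { }
static bool init_b(void) { return true; }  static void deinit_b(void) { }
static bool init_c(void) { return true; }  static void deinit_c(void) { }

static bool module_init(void)
{
    if (!init_a()) goto fail_a;
    if (!init_b()) goto fail_b;
    if (!init_c()) goto fail_c;
    return true;

    /* Each label unwinds exactly the stages that had already succeeded. */
fail_c:
    deinit_b();
fail_b:
    deinit_a();
fail_a:
    return false;
}

static void module_deinit(void)
{
    /* Full teardown mirrors the init order, last stage first. */
    deinit_c();
    deinit_b();
    deinit_a();
}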
*/ if (psDevInfo->bDevInit2Done) { DevPart2DeInitRGX(psDeviceNode); @@ -3491,6 +3582,20 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) eError)); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + /* Unregister MMU related stuff */ + eError = RGXMipsMMUInit_Unregister(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", + eError)); + } + } +#endif + /* UnMap Regs */ if (psDevInfo->pvRegsBaseKM != NULL) { @@ -3501,6 +3606,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) psDevInfo->pvRegsBaseKM = NULL; } +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) if (psDevInfo->pvSecureRegsBaseKM != NULL) { #if !defined(NO_HARDWARE) @@ -3508,12 +3614,13 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) { /* undo the VA offset performed in RGXRegisterDevice() to allow the allocation to be unmapped */ - psDevInfo->pvSecureRegsBaseKM = (void *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); + psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, RGX_HOST_SECURE_REGBANK_SIZE); } #endif /* !NO_HARDWARE */ psDevInfo->pvSecureRegsBaseKM = NULL; } +#endif #if 0 /* not required at this time */ if (psDevInfo->hTimer) @@ -3531,7 +3638,7 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; - RGXDeInitHeaps(psDevMemoryInfo); + RGXDeInitHeaps(psDevMemoryInfo, psDeviceNode); if (psDevInfo->psRGXFWCodeMemDesc) { @@ -3541,6 +3648,26 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); psDevInfo->psRGXFWCodeMemDesc = NULL; } +#if !defined(NO_HARDWARE) + else if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PVR_DPF((PVR_DBG_WARNING, "No firmware code memory to free")); + } +#endif /* !defined(NO_HARDWARE) */ + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) + { + if (psDevInfo->psTrampoline->sPages.u.pvHandle) + { + /* Free trampoline region */ + PDUMPCOMMENT(psDeviceNode, "Freeing trampoline memory"); + RGXFreeTrampoline(psDeviceNode); + } + } +#endif + if (psDevInfo->psRGXFWDataMemDesc) { /* Free fw data */ @@ -3549,6 +3676,13 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); psDevInfo->psRGXFWDataMemDesc = NULL; } +#if !defined(NO_HARDWARE) + else if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PVR_DPF((PVR_DBG_WARNING, "No firmware data memory to free")); + } +#endif /* !defined(NO_HARDWARE) */ + if (psDevInfo->psRGXFWCorememCodeMemDesc) { /* Free fw core mem code */ @@ -3572,14 +3706,19 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) */ RGXFreeFirmware(psDevInfo); -#if defined(SUPPORT_VALIDATION) - RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState); -#endif RGXDeInitMultiCoreInfo(psDeviceNode); /* De-initialise non-device specific (TL) users of RGX device memory */ - RGXHWPerfDeinit(psDevInfo); + { + IMG_UINT32 i; + for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) + { + RGXHWPerfDeinitL2Stream(psDevInfo, i); + } + + RGXHWPerfDeinit(psDevInfo); + } 
RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); @@ -3587,12 +3726,14 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) eError = HTBDeInit(); PVR_LOG_IF_ERROR(eError, "HTBDeInit"); + OSLockDestroy(psDevInfo->hGpuUtilStatsLock); + /* destroy the stalled CCB locks */ OSLockDestroy(psDevInfo->hCCBRecoveryLock); OSLockDestroy(psDevInfo->hCCBStallCheckLock); /* destroy the context list locks */ - OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->sRegConfig.hLock); OSLockDestroy(psDevInfo->hBPLock); OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); OSWRLockDestroy(psDevInfo->hRenderCtxListLock); @@ -3610,13 +3751,6 @@ PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); } -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) - if (NULL != psDevInfo->sRGXTimerValues.pui64uscTimers) - { - OSFreeMem(psDevInfo->sRGXTimerValues.pui64uscTimers); - psDevInfo->sRGXTimerValues.pui64uscTimers = NULL; - } -#endif /* DeAllocate devinfo */ OSFreeMem(psDevInfo); @@ -3640,9 +3774,10 @@ PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) /* Takes a log2 page size parameter and calculates a suitable page size * for the RGX heaps. Returns 0 if parameter is wrong.*/ -static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) +IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) { IMG_BOOL bFound = IMG_FALSE; + IMG_UINT32 ui32PageSizeMask = RGXGetValidHeapPageSizeMask(); /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ @@ -3660,24 +3795,18 @@ static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) do { - switch (uiLog2PageSize) + if ((IMG_PAGE2BYTES32(uiLog2PageSize) & ui32PageSizeMask) == 0) { - case RGX_HEAP_4KB_PAGE_SHIFT: - case RGX_HEAP_16KB_PAGE_SHIFT: - case RGX_HEAP_64KB_PAGE_SHIFT: - case RGX_HEAP_256KB_PAGE_SHIFT: - case RGX_HEAP_1MB_PAGE_SHIFT: - case RGX_HEAP_2MB_PAGE_SHIFT: - /* All good, RGX page size equals given page size - * => use it as default for heaps */ - bFound = IMG_TRUE; - break; - default: - /* We have to fall back to a smaller device - * page size than given page size because there - * is no exact match for any supported size. */ - uiLog2PageSize -= 1U; - break; + /* We have to fall back to a smaller device + * page size than given page size because there + * is no exact match for any supported size. */ + uiLog2PageSize -= 1U; + } + else + { + /* All good, RGX page size equals given page size + * => use it as default for heaps */ + bFound = IMG_TRUE; } } while (!bFound); @@ -3700,6 +3829,7 @@ static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*); +typedef void (*PFN_HEAP_DYNAMIC)(PVRSRV_DEVICE_NODE*, RGX_HEAP_INFO*); struct RGX_HEAP_INFO_TAG { @@ -3709,52 +3839,57 @@ struct RGX_HEAP_INFO_TAG IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength; IMG_UINT32 ui32Log2ImportAlignment; PFN_IS_PRESENT pfnIsHeapPresent; + PFN_HEAP_DYNAMIC pfnDynamicBaseSize; /* May modify the psHeapInfo's base & length. May be NULL. + Only called once if the heap is present otherwise never. */ PFN_HEAP_INIT pfnInit; PFN_HEAP_DEINIT pfnDeInit; IMG_UINT32 ui32HeapInstanceFlags; }; -#if defined(SUPPORT_SECURE_ALLOC_KM) -/* Private data struct for general heap. 
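/*
 * Illustrative sketch of the page-size fallback performed by the reworked
 * RGXHeapDerivePageSize above: starting from the OS page shift, step down one
 * bit at a time until the size hits a bit in the device's supported-page-size
 * mask. The mask below (4KB/16KB/64KB/256KB/1MB/2MB) is a stand-in, not a
 * value queried from hardware.
 */
#include <stdint.h>

#define SUPPORTED_PAGE_SIZES ((1u << 12) | (1u << 14) | (1u << 16) | \
                              (1u << 18) | (1u << 20) | (1u << 21))

static uint32_t derive_heap_page_shift(uint32_t os_page_shift)
{
    uint32_t shift = os_page_shift;

    if (shift < 12 || shift > 21)
        return 0;                       /* outside the device's legal range */

    /* Fall back to the next smaller size until a supported one is found;
     * terminates because 4KB (bit 12) is always in the mask. */
    while (((1u << shift) & SUPPORTED_PAGE_SIZES) == 0)
        shift--;

    return shift;
}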
*/ -typedef struct RGX_GENERAL_HEAP_DATA_TAG -{ - DEVMEMINT_RESERVATION *psSecMemReservation; - DEVMEMINT_MAPPING *psSecMemMapping; -} RGX_GENERAL_HEAP_DATA; +/* RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES is the total amount of reserved space, to be specified in gasRGXHeapLayoutApp[] */ +#define RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE) -/* Init callback function for general heap. */ -static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, - DEVMEMINT_HEAP *psDevmemHeap, - IMG_HANDLE *phPrivData) +/* Private heap data struct. */ +typedef struct RGX_HEAP_DATA_TAG { - PVRSRV_RGXDEV_INFO *psDevInfo; - RGX_GENERAL_HEAP_DATA *psHeapData; + DEVMEMINT_RESERVATION *psMemReservation; +} RGX_HEAP_DATA; + +static PVRSRV_ERROR HeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64Offset, + IMG_BOOL bWriteAble, + IMG_HANDLE *phPrivData) +{ + RGX_HEAP_DATA *psHeapData; IMG_DEV_VIRTADDR sCarveOutAddr; PVRSRV_ERROR eError; PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemHeap, "psDevmemHeap"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR, "psPMR"); PVR_LOG_RETURN_IF_INVALID_PARAM(phPrivData, "phPrivData"); - psDevInfo = psDeviceNode->pvDevice; - psHeapData = OSAllocMem(sizeof(*psHeapData)); PVR_LOG_RETURN_IF_NOMEM(psHeapData, "psHeapData"); /* Map the per device secure mem PMR allocation to the general devmem heap carveout. */ sCarveOutAddr = DevmemIntHeapGetBaseAddr(psDevmemHeap); - sCarveOutAddr.uiAddr += RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET; - - eError = DevmemIntReserveRange(psDevmemHeap, - sCarveOutAddr, - RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES, - &psHeapData->psSecMemReservation); + sCarveOutAddr.uiAddr += ui64Offset; + + eError = DevmemIntReserveRange(NULL, + NULL, + psDevmemHeap, + sCarveOutAddr, + uiSize, + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + (bWriteAble ? PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE : 0), + &psHeapData->psMemReservation); PVR_GOTO_IF_ERROR(eError, ErrorFreeHeapData); - eError = DevmemIntMapPMR(psDevmemHeap, psHeapData->psSecMemReservation, psDevInfo->psGenHeapSecMem, - PVRSRV_MEMALLOCFLAG_GPU_READABLE - | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE, - &psHeapData->psSecMemMapping); + eError = DevmemIntMapPMR(psHeapData->psMemReservation, psPMR); PVR_GOTO_IF_ERROR(eError, ErrorUnreserve); *phPrivData = (IMG_HANDLE)psHeapData; @@ -3762,7 +3897,7 @@ static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_OK; ErrorUnreserve: - DevmemIntUnreserveRange(psHeapData->psSecMemReservation); + DevmemIntUnreserveRange(psHeapData->psMemReservation); ErrorFreeHeapData: OSFreeMem(psHeapData); @@ -3770,27 +3905,185 @@ static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, } /* Deinit callback function for general heap. */ -static void GeneralHeapDeInit(IMG_HANDLE hPrivData) +static void HeapDeInit(IMG_HANDLE hPrivData) { - RGX_GENERAL_HEAP_DATA *psHeapData = (RGX_GENERAL_HEAP_DATA*)hPrivData; + RGX_HEAP_DATA *psHeapData = (RGX_HEAP_DATA*)hPrivData; PVR_ASSERT(hPrivData); - DevmemIntUnmapPMR(psHeapData->psSecMemMapping); - DevmemIntUnreserveRange(psHeapData->psSecMemReservation); + DevmemIntUnmapPMR(psHeapData->psMemReservation); + DevmemIntUnreserveRange(psHeapData->psMemReservation); OSFreeMem(psHeapData); } + +/* Init callback function for general heap. 
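/*
 * Illustrative sketch of the reserve-then-map pattern used by the new
 * HeapInit above (DevmemIntReserveRange followed by DevmemIntMapPMR),
 * expressed with POSIX mmap as a stand-in: reserve the whole heap range
 * first, then place the backing object at a fixed offset inside it, and
 * unwind the reservation if the second step fails. Caller is assumed to
 * pass a page-aligned offset with offset + backing_len <= total_len.
 */
#define _DEFAULT_SOURCE        /* for MAP_ANONYMOUS on glibc */
#include <stddef.h>
#include <sys/mman.h>

struct carveout { void *base; size_t len; };

static int map_into_carveout(int backing_fd, size_t backing_len,
                             size_t offset, size_t total_len,
                             struct carveout *out)
{
    /* Reserve the full heap range without access permissions. */
    void *base = mmap(NULL, total_len, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return -1;

    /* Map the backing object read-only at the carveout offset. */
    if (mmap((char *)base + offset, backing_len, PROT_READ,
             MAP_SHARED | MAP_FIXED, backing_fd, 0) == MAP_FAILED)
    {
        munmap(base, total_len);        /* unwind the reservation */
        return -1;
    }

    out->base = base;
    out->len  = total_len;
    return 0;
}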
*/ +static PVRSRV_ERROR USCHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_HANDLE *phPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_BOOL bWriteAble = IMG_FALSE; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); + + psDevInfo = psDeviceNode->pvDevice; + + uiSize = PMR_PhysicalSize(psDevInfo->hTQUSCSharedMem); + + eError = HeapInit(psDeviceNode, + psDevmemHeap, + psDevInfo->hTQUSCSharedMem, + uiSize, + RGX_HEAP_KM_USC_RESERVED_REGION_OFFSET, + bWriteAble, + phPrivData); + + return eError; +} + +#if defined(SUPPORT_SECURE_ALLOC_KM) +/* Init callback function for general heap. */ +static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_HANDLE *phPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_BOOL bWriteAble = IMG_TRUE; + PVRSRV_ERROR eError; + + psDevInfo = psDeviceNode->pvDevice; + + eError = HeapInit(psDeviceNode, + psDevmemHeap, + psDevInfo->psGenHeapSecMem, + RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE, + RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET, + bWriteAble, + phPrivData); + + return eError; +} + +#define GeneralHeapDeInit HeapDeInit #else /* Callbacks not used */ #define GeneralHeapInit NULL #define GeneralHeapDeInit NULL #endif +static void SVMHeapDynamic(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HEAP_INFO *psHeapInfo) +{ + IMG_UINT64 ui64OSPageSize = OSGetPageSize(); +#if defined(FIX_HW_BRN_65273_BIT_MASK) + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; +#endif /* defined(FIX_HW_BRN_65273_BIT_MASK) */ + + /* Ensures the SVM heap has the correct alignment & size for any OS page size. + * + * The SVM heap's base must be the smallest possible address mappable by UM. + * This is 32KB unless the page size is larger than 32KB. [1] + * If the page size > 32KB, raise the SVM heap base to the next page boundary. + * Also reduce the length to ensure it's still page aligned and doesn't go + * into another heap. + * + * [1]: https://chromium.googlesource.com/chromium/src/+/fe24932ee14aa93e1fe4d3e7003b9362591a54d4/docs/security/faq.md#why-aren_t-null-pointer-dereferences-considered-security-bugs + */ + IMG_UINT64 ui64Base = PVR_ALIGN(psHeapInfo->ui64HeapBase, ui64OSPageSize); + IMG_UINT64 ui64BaseDiff = ui64Base - psHeapInfo->ui64HeapBase; + psHeapInfo->ui64HeapBase = ui64Base; + if (psHeapInfo->uiHeapLength >= ui64BaseDiff) + psHeapInfo->uiHeapLength -= ui64BaseDiff; + if (psHeapInfo->uiHeapReservedRegionLength >= ui64BaseDiff) + psHeapInfo->uiHeapReservedRegionLength -= ui64BaseDiff; + + /* The device shared-virtual-memory heap address-space size is stored on the device for + faster look-up without having to walk the device heap configuration structures during + client device connection (i.e. 
this size is relative to a zero-based offset) */ +#if defined(FIX_HW_BRN_65273_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + psDeviceNode->ui64GeneralSVMHeapTopVA = 0; + } + else +#endif /* defined(FIX_HW_BRN_65273_BIT_MASK) */ + { + psDeviceNode->ui64GeneralSVMHeapTopVA = psHeapInfo->ui64HeapBase + psHeapInfo->uiHeapLength; + } +} + /* Feature Present function prototypes */ +#if defined(FIX_HW_BRN_65273_BIT_MASK) +static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) +{ + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) + { + return (((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_ALT_VALUE) || + ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_DEP_VALUE)) ? + IMG_TRUE : IMG_FALSE; + } + else + { + return ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_DEFAULT_VALUE) ? IMG_TRUE : IMG_FALSE; + } +} +#endif + +#if defined(FIX_HW_BRN_63142_BIT_MASK) +static IMG_BOOL BRN63142IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) +{ + PVR_UNREFERENCED_PARAMETER(pksHeapInfo); + + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142)) + { + PVR_ASSERT((pksHeapInfo->ui64HeapBase & IMG_UINT64_C(0x3FFFFFFFF)) + + pksHeapInfo->uiHeapLength == IMG_UINT64_C(0x400000000)); + + return IMG_TRUE; + } + + return IMG_FALSE; +} +#endif + /* FW Feature Present function prototypes */ +#if defined(FIX_HW_BRN_65101_BIT_MASK) +static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) +{ + /* Used to determine the correct table row to instantiate as a heap by checking + * the Heap size and base at run time VS the current table instance + */ + IMG_UINT64 ui64MainSubHeapSize; + + /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo))) + { + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) + { + ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101; + } + else + { + ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL; + } + } + else + { + ui64MainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE; + } + + /* Determine if we should include this entry based upon previous checks */ + return (pksHeapInfo->uiHeapLength == ui64MainSubHeapSize && + pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ? 
+ IMG_TRUE : IMG_FALSE; +} +#endif + static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo) { /* Used to determine the correct table row to instantiate as a heap by checking @@ -3821,34 +4114,100 @@ static IMG_BOOL BPHandlerHeapPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HE #endif } -static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] = +static RGX_HEAP_INFO gasRGXHeapLayoutApp[] = { - /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnInit pfnDeInit HeapInstanceFlags */ - {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE, 0, NULL, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE}, - {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG}, - {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_USCCODE_BPH_HEAP_IDENT, RGX_USCCODE_BPH_HEAP_BASE, RGX_USCCODE_BPH_HEAP_SIZE, 0, 0, BPHandlerHeapPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_COMPONENT_CTRL_HEAP_IDENT, RGX_COMPONENT_CTRL_HEAP_BASE, RGX_COMPONENT_CTRL_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_PDS_INDIRECT_STATE_HEAP_IDENT, RGX_PDS_INDIRECT_STATE_HEAP_BASE, RGX_PDS_INDIRECT_STATE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE} + /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnDynamicBaseSize pfnInit pfnDeInit HeapInstanceFlags */ + {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, SVMHeapDynamic, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES, 0, NULL, NULL, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE }, + {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE | 
HEAP_INST_NON4K_FLAG }, + {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, NULL, NULL, USCHeapInit, HeapDeInit, HEAP_INST_DEFAULT_VALUE }, + {RGX_USCCODE_BPH_HEAP_IDENT, RGX_USCCODE_BPH_HEAP_BASE, RGX_USCCODE_BPH_HEAP_SIZE, 0, 0, BPHandlerHeapPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_COMPONENT_CTRL_HEAP_IDENT, RGX_COMPONENT_CTRL_HEAP_BASE, RGX_COMPONENT_CTRL_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_PDS_INDIRECT_STATE_HEAP_IDENT, RGX_PDS_INDIRECT_STATE_HEAP_BASE, RGX_PDS_INDIRECT_STATE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, + {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, NULL, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, }; -static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] = +static RGX_HEAP_INFO gasRGXHeapLayoutFW[] = { - /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent pfnInit pfnDeInit HeapInstanceFlags*/ - {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWVZMainHeapPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, - {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE} + /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnDynamicBaseSize pfnInit pfnDeInit HeapInstanceFlags*/ + {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWVZMainHeapPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, }; +static INLINE IMG_BOOL IsFwHeapLayout(const RGX_HEAP_INFO *psHeapInfo) +{ + return psHeapInfo->pszName[0] == 'F' && + psHeapInfo->pszName[1] == 'w' ? 
IMG_TRUE : IMG_FALSE; +} + +static INLINE void CheckHeapAlignment(const RGX_HEAP_INFO *psHeapInfo, + const PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT64 uiAlignment = RGX_HEAP_BASE_SIZE_ALIGN - 1; + + PVR_UNREFERENCED_PARAMETER(psDevInfo); + + if (IsFwHeapLayout(psHeapInfo)) + { + /* + * 1) Main heap starts at 2MB offset 0xEC10000000UL + * 2) Config Sub heap is created at the end of the main heap making the entire unit start and end at 2MB offset aligned, + * these 2 heaps will always have the same page size + * There are no other heaps in between these two heaps, there is no risk for another devmem heap be created between them. + */ + return; + } + + /* General SVM Heap must be placed below 2MiB boundary so we need to adjust + * the validity condition. This is because an OS might return virtual + * addresses below 2MiB threshold. By default (based on testing on Linux) + * this is around 32KiB. */ + if (OSStringNCompare(psHeapInfo->pszName, RGX_GENERAL_SVM_HEAP_IDENT, + sizeof(RGX_GENERAL_SVM_HEAP_IDENT)) == 0) + { + uiAlignment = RGX_GENERAL_SVM_BASE_SIZE_ALIGNMENT - 1; + } + + /* All UM accessible heap bases should be aligned to 2MB */ + if (psHeapInfo->ui64HeapBase & uiAlignment) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Heap Base not aligned to RGX_HEAP_BASE_SIZE_ALIGN. " + "Invalid Heap \"%s\" Base: " + "%"IMG_UINT64_FMTSPEC")\n" + "Heap Base (0x%"IMG_UINT64_FMTSPECX") should always be aligned to " + "RGX_HEAP_BASE_ALIGN (0x%" IMG_UINT64_FMTSPECX ")", + __func__, + psHeapInfo->pszName, + psHeapInfo->ui64HeapBase, + psHeapInfo->ui64HeapBase, + uiAlignment + 1)); + } + + /* All UM accessible heaps should also be size aligned to 2MB */ + if (psHeapInfo->uiHeapLength & uiAlignment) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Heap Size not aligned to RGX_HEAP_BASE_SIZE_ALIGN. " + "Invalid Heap \"%s\" Size: " + "%"IMG_UINT64_FMTSPEC")\n" + "Heap Size (0x%"IMG_UINT64_FMTSPECX") should always be aligned to " + "RGX_HEAP_BASE_SIZE_ALIGN (0x%" IMG_UINT64_FMTSPECX ")", + __func__, + psHeapInfo->pszName, + psHeapInfo->uiHeapLength, + psHeapInfo->uiHeapLength, + uiAlignment + 1)); + } +} + /* Generic counting method. 
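/*
 * Illustrative sketch of the base/size rule enforced by CheckHeapAlignment
 * above: UM-visible heap bases and lengths must sit on the 2MB granularity,
 * with a smaller bound for the SVM heap whose base must stay below the first
 * mappable UM address. The constants are stand-ins for
 * RGX_HEAP_BASE_SIZE_ALIGN and the SVM alignment.
 */
#include <stdbool.h>
#include <stdint.h>

#define HEAP_ALIGN_2MB  (UINT64_C(1) << 21)   /* stand-in: 2MB granule */
#define SVM_HEAP_ALIGN  (UINT64_C(1) << 15)   /* stand-in: 32KB for SVM */

static bool heap_layout_is_aligned(uint64_t base, uint64_t length, bool is_svm)
{
    uint64_t align = is_svm ? SVM_HEAP_ALIGN : HEAP_ALIGN_2MB;

    /* Both the start address and the size must be multiples of the granule. */
    return ((base   & (align - 1)) == 0) &&
           ((length & (align - 1)) == 0);
}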
*/ static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO pksHeapInfo[], @@ -3878,20 +4237,60 @@ static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, } /* Generic heap instantiator */ static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, - const RGX_HEAP_INFO pksHeapInfo[], - IMG_UINT32 ui32HeapListSize, - DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor) + RGX_HEAP_INFO psHeapInfos[], + IMG_UINT32 ui32HeapListSize, + const IMG_UINT32 ui32Log2RgxDefaultPageShift, + DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor) { IMG_UINT32 i; + +#if defined DEBUG + IMG_UINT32 ui32heapListCnt; + bool bHeapPageSizeMisMatch = false; + + /* + * To ensure all heaps within a 2MB region have the same page sizes + */ + for (ui32heapListCnt = 0; ui32heapListCnt < (ui32HeapListSize-1); ui32heapListCnt++) + { + const RGX_HEAP_INFO *psHeapInfo1 = &psHeapInfos[ui32heapListCnt]; + const RGX_HEAP_INFO *psHeapInfo2 = &psHeapInfos[ui32heapListCnt+1]; + + if (((psHeapInfo1->uiHeapLength) & (RGX_HEAP_BASE_SIZE_ALIGN - 1)) && + ((psHeapInfo1->ui64HeapBase + psHeapInfo1->uiHeapLength) & (RGX_HEAP_BASE_SIZE_ALIGN - 1)) == + ((psHeapInfo2->ui64HeapBase) & (RGX_HEAP_BASE_SIZE_ALIGN - 1))) + { + if (psHeapInfo1->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) + { + if (!(psHeapInfo2->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG)) + { + bHeapPageSizeMisMatch = true; + } + } + else if (psHeapInfo2->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) + { + bHeapPageSizeMisMatch = true; + } + } + if (bHeapPageSizeMisMatch) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Two Heap with Different Page Size allocated in the same PD space(2MB)\n" + "Invalid Heaps 1) \"%s\" and 2) \"%s\"", + __func__, + psHeapInfo1->pszName, + psHeapInfo2->pszName)); + } + } +#endif + /* We now have a list of the heaps to include and so we should loop over this * list and instantiate. 
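/*
 * Illustrative sketch of the debug-only rule checked in the hunk above: two
 * heaps that can land in the same 2MB page-directory entry must use the same
 * data page size (both NON4K or both default). heap_desc and the non4k flag
 * are simplified stand-ins for RGX_HEAP_INFO / HEAP_INST_NON4K_FLAG.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GRANULE (UINT64_C(1) << 21)   /* 2MB page-directory coverage */

struct heap_desc { uint64_t base; uint64_t length; bool non4k; };

static bool page_sizes_consistent(const struct heap_desc *heaps, size_t count)
{
    size_t i;

    for (i = 0; i + 1 < count; i++)
    {
        const struct heap_desc *a = &heaps[i];
        const struct heap_desc *b = &heaps[i + 1];

        /* Mirrors the condition above: 'a' does not fill its last granule
         * and 'b' starts at the same in-granule offset where 'a' ends, so
         * both heaps may share one page-directory entry. */
        bool same_pde = ((a->length & (GRANULE - 1)) != 0) &&
                        (((a->base + a->length) & (GRANULE - 1)) ==
                         (b->base & (GRANULE - 1)));

        if (same_pde && (a->non4k != b->non4k))
            return false;
    }
    return true;
}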
*/ for (i = 0; i < ui32HeapListSize; i++) { - IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); IMG_UINT32 ui32Log2DataPageSize = 0; - - const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; + RGX_HEAP_INFO *psHeapInfo = &psHeapInfos[i]; if (psHeapInfo->pfnIsHeapPresent) { @@ -3902,24 +4301,31 @@ static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, } } + if (psHeapInfo->pfnDynamicBaseSize != NULL) + { + psHeapInfo->pfnDynamicBaseSize(psDevInfo->psDeviceNode, psHeapInfo); + } + if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) { - ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize; + ui32Log2DataPageSize = psDevInfo->psDeviceNode->ui32Non4KPageSizeLog2; } else { ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift; } + CheckHeapAlignment(psHeapInfo, psDevInfo); + HeapCfgBlueprintInit(psHeapInfo->pszName, - psHeapInfo->ui64HeapBase, - psHeapInfo->uiHeapLength, - psHeapInfo->uiHeapReservedRegionLength, - ui32Log2DataPageSize, - psHeapInfo->ui32Log2ImportAlignment, - psHeapInfo->pfnInit, - psHeapInfo->pfnDeInit, - *psDeviceMemoryHeapCursor); + psHeapInfo->ui64HeapBase, + psHeapInfo->uiHeapLength, + psHeapInfo->uiHeapReservedRegionLength, + ui32Log2DataPageSize, + psHeapInfo->ui32Log2ImportAlignment, + psHeapInfo->pfnInit, + psHeapInfo->pfnDeInit, + *psDeviceMemoryHeapCursor); (*psDeviceMemoryHeapCursor)++; } @@ -3935,8 +4341,31 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW); IMG_UINT32 ui32CountedHeapSize; - IMG_UINT32 ui32AppHeapCount = 0U; - IMG_UINT32 ui32FWHeapCount = 0U; + IMG_UINT32 ui32AppHeapCount = 0; + IMG_UINT32 ui32FWHeapCount = 0; + + IMG_UINT32 ui32Log2DefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); + + if (ui32Log2DefaultPageShift == 0) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + +#if defined(FIX_HW_BRN_71317_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317)) + { + if (ui32Log2DefaultPageShift == RGX_HEAP_2MB_PAGE_SHIFT + || ui32Log2DefaultPageShift == RGX_HEAP_1MB_PAGE_SHIFT) + { + PVR_DPF((PVR_DBG_ERROR, + "OS page size too large for device virtual heaps. 
" + "Maximum page size supported is 256KB when BRN71317 is present.")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + } +#endif /* Count heaps required for the app heaps */ _CountRequiredHeaps(psDevInfo, @@ -3950,7 +4379,7 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, ui32FWHeapListSize, &ui32FWHeapCount); - ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED); + ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_DRIVERS_SUPPORTED); psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize); PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0); @@ -3960,15 +4389,17 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, /* Instantiate App Heaps */ _InstantiateRequiredHeaps(psDevInfo, - gasRGXHeapLayoutApp, - ui32AppHeapListSize, - &psDeviceMemoryHeapCursor); + gasRGXHeapLayoutApp, + ui32AppHeapListSize, + ui32Log2DefaultPageShift, + &psDeviceMemoryHeapCursor); /* Instantiate FW Heaps */ _InstantiateRequiredHeaps(psDevInfo, - gasRGXHeapLayoutFW, - ui32FWHeapListSize, - &psDeviceMemoryHeapCursor); + gasRGXHeapLayoutFW, + ui32FWHeapListSize, + ui32Log2DefaultPageShift, + &psDeviceMemoryHeapCursor); /* set the heap count */ psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap); @@ -3976,7 +4407,7 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, /* Check we have allocated the correct # of heaps, minus any VZ heaps as these * have not been created at this point */ - PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED)); + PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_DRIVERS_SUPPORTED)); /* In the new heap setup, we initialise 2 configurations: @@ -3999,11 +4430,10 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) { IMG_UINT32 i; - const IMG_UINT32 ui32GeneralNon4KHeapPageSize = (1 << psDevInfo->ui32Log2Non4KPgSize); - const IMG_UINT32 ui32RgxDefaultPageSize = (1 << RGXHeapDerivePageSize(OSGetPageShift())); /* * Initialise all MMU Page Size Range Config register to the default page size @@ -4011,48 +4441,28 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, */ for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i) { - psDevInfo->aui64MMUPageSizeRangeValue[i] = - RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, - 0, - (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)); - } - - - /* set the last MMU config range covering the entire virtual memory to the OS's page size */ - psDevInfo->aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES - 1] = - RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, 0, (1ULL << 40)); - - /* - * If the Non4K heap has a different page size than the OS's page size - * (used as default for all other heaps), configure one MMU config range - * for the Non4K heap - */ - if (ui32GeneralNon4KHeapPageSize != ui32RgxDefaultPageSize) - { - psDevInfo->aui64MMUPageSizeRangeValue[0] = - RGXMMUInit_GetConfigRangeValue(ui32GeneralNon4KHeapPageSize, - RGX_GENERAL_NON4K_HEAP_BASE, - 
RGX_GENERAL_NON4K_HEAP_SIZE); + psDevInfo->aui64MMUPageSizeRangeValue[i] = RGXMMUInitRangeValue(i); } } +#endif -#if (RGX_NUM_OS_SUPPORTED > 1) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; /* Create additional raw firmware heaps */ - for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVINFO, psDevInfo) { - if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK) + eError = RGXInitFwRawHeap(psDevInfo, psDeviceMemoryHeapCursor, ui32DriverID); + if (eError != PVRSRV_OK) { /* if any allocation fails, free previously allocated heaps and abandon initialisation */ - for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--) + for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--) { RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); psDeviceMemoryHeapCursor--; } - eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto e1; } @@ -4063,7 +4473,7 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, psDeviceMemoryHeapCursor++; } } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ return PVRSRV_OK; e1: @@ -4072,167 +4482,486 @@ static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, return eError; } -static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo, PVRSRV_DEVICE_NODE *psDeviceNode) { -#if (RGX_NUM_OS_SUPPORTED > 1) - if (PVRSRV_VZ_MODE_IS(HOST)) +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - IMG_UINT32 ui32OSid; + IMG_UINT32 ui32DriverID; DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; /* Delete all guest firmware heaps */ - for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + FOREACH_DRIVER_RAW_HEAP(ui32DriverID, DEVNODE, psDeviceNode) { RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); psDeviceMemoryHeapCursor++; } } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); } -static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +static PVRSRV_ERROR RGXInitSharedFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError = PVRSRV_OK; - PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, - PHYS_HEAP_USAGE_FW_MAIN); + PHYS_HEAP_CONFIG *psSysHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_SHARED); -#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) /* VZ heap validation */ - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL, + PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg != NULL, "FW Main heap is required for VZ Guest.", PVRSRV_ERROR_PHYSHEAP_CONFIG); } #endif - if (psFwMainConfig != NULL) + if (psSysHeapCfg != NULL) { - /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided + /* Check FW_SHARED for multiple usage flags. 
Because FW_SHARED is divided into subheaps, shared usage with other heaps is not allowed. */ - PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN, - "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.", + PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_SHARED, + "FW_SHARED phys heap config not specified with more than one usage." + "FW_SHARED heap must be exclusively used as FW_SHARED.", PVRSRV_ERROR_PHYSHEAP_CONFIG); } - if (psFwMainConfig == NULL) + if (psSysHeapCfg == NULL) { PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__)); + /* Nothing to do. Default to the physheap fallback option */ } - else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA) + else if (psSysHeapCfg->eType == PHYS_HEAP_TYPE_UMA) { PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psSysHeapCfg, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + + psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG] = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; } else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */ { - PHYS_HEAP_CONFIG sFwHeapConfig; + PHYS_HEAP_CONFIG sFwMainHeapCfg, sFwCfgHeapCfg; PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); - PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE, - "Invalid firmware physical heap size.", ErrorDeinit); -#if defined(SUPPORT_AUTOVZ) - if (PVRSRV_VZ_MODE_IS(HOST)) - { - sFwHeapConfig = *psFwMainConfig; + /* Subheap layout: Main + (optional MIPS reserved range) + Config */ + sFwMainHeapCfg = *psSysHeapCfg; + PVR_ASSERT(sFwMainHeapCfg.eType == PHYS_HEAP_TYPE_LMA || + sFwMainHeapCfg.eType == PHYS_HEAP_TYPE_DMA); - /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap. - * If a different base address is specified for this reserved range, use the overriding define instead. 
*/ -#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) - sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; - sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; -#else - sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED; - sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED; -#endif + /* Reserve space for the Config heap */ + sFwMainHeapCfg.uConfig.sLMA.uiSize -= RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.uiSize = RGX_FIRMWARE_MAX_PAGETABLE_SIZE; - sFwHeapConfig.ui32UsageFlags = 0; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, - "Fw MMU subheap", - &psDeviceNode->psFwMMUReservedPhysHeap); - PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit); + /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode))) + { +#if defined(FIX_HW_BRN_65101_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) + { + sFwMainHeapCfg.uConfig.sLMA.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101; + } + else +#endif + { + sFwMainHeapCfg.uConfig.sLMA.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL; + } + } } #endif - /* Subheap layout: Main + (optional MIPS reserved range) + Config */ - sFwHeapConfig = *psFwMainConfig; - sFwHeapConfig.uiSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE; - sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN; - eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, + RGXPhysHeapGetLMAPolicy(sFwMainHeapCfg.ui32UsageFlags, psDeviceNode), + &sFwMainHeapCfg, "Fw Main subheap", - &psDeviceNode->psFWMainPhysHeap); + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit); - sFwHeapConfig = *psFwMainConfig; - sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; - sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG; + sFwCfgHeapCfg = *psSysHeapCfg; + PVR_ASSERT(sFwCfgHeapCfg.eType == PHYS_HEAP_TYPE_LMA || + sFwCfgHeapCfg.eType == PHYS_HEAP_TYPE_DMA); + + sFwCfgHeapCfg.uConfig.sLMA.sStartAddr.uiAddr += psSysHeapCfg->uConfig.sLMA.uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE; + sFwCfgHeapCfg.uConfig.sLMA.sCardBase.uiAddr += psSysHeapCfg->uConfig.sLMA.uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE; + + sFwCfgHeapCfg.uConfig.sLMA.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; eError = PhysmemCreateHeapLMA(psDeviceNode, - PHYSMEM_LMA_POLICY_DEFAULT, - &sFwHeapConfig, + RGXPhysHeapGetLMAPolicy(sFwCfgHeapCfg.ui32UsageFlags, psDeviceNode), + &sFwCfgHeapCfg, "Fw Cfg subheap", - &psDeviceNode->psFWCfgPhysHeap); + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit); } /* Acquire FW heaps */ - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); 
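/* A minimal sketch of the Main/Config subheap carve-out performed above, using
 * hypothetical stand-in types and a placeholder CONFIG_HEAP_SIZE (the DDK's real
 * structures are PHYS_HEAP_CONFIG and RGX_FIRMWARE_CONFIG_HEAP_SIZE): the Config
 * subheap is the fixed-size tail of the FW_SHARED range and the Main subheap
 * takes the remainder, matching the sStartAddr/sCardBase arithmetic in the hunk
 * above. Illustrative only, not part of the patch content. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CONFIG_HEAP_SIZE (128u * 1024u)   /* stand-in for RGX_FIRMWARE_CONFIG_HEAP_SIZE */

typedef struct { uint64_t base; uint64_t size; } heap_range_t; /* simplified PHYS_HEAP_CONFIG */

/* Split one contiguous LMA range into Main + Config, with Config pinned to the end. */
static void split_shared_heap(const heap_range_t *shared,
                              heap_range_t *main_sub, heap_range_t *cfg_sub)
{
    assert(shared->size > CONFIG_HEAP_SIZE);

    main_sub->base = shared->base;
    main_sub->size = shared->size - CONFIG_HEAP_SIZE;

    cfg_sub->base = shared->base + shared->size - CONFIG_HEAP_SIZE;
    cfg_sub->size = CONFIG_HEAP_SIZE;
}

int main(void)
{
    heap_range_t shared = { 0x80000000u, 32u * 1024u * 1024u };
    heap_range_t fw_main, fw_cfg;

    split_shared_heap(&shared, &fw_main, &fw_cfg);
    printf("Main  : base 0x%" PRIx64 " size 0x%" PRIx64 "\n", fw_main.base, fw_main.size);
    printf("Config: base 0x%" PRIx64 " size 0x%" PRIx64 "\n", fw_cfg.base, fw_cfg.size);
    return 0;
}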
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit); - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit); - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); + return eError; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + + return eError; +} + +static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PHYS_HEAP_CONFIG *psFwCodeHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_CODE); + PHYS_HEAP_CONFIG *psFwDataHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_PRIV_DATA); + PHYS_HEAP_CONFIG *psFwPrivateHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_PRIVATE); + PHYS_HEAP_CONFIG sFwPrivateTempCfg; + + if (psFwPrivateHeapCfg != NULL) + { + PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg == NULL) && (psFwDataHeapCfg == NULL), + "FW_PRIVATE and the FW_CODE & FW_PRIV_DATA usage flags " + "achieve the same goal and are mutually exclusive.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + /* Fw code and data are both allocated from this unified heap */ + sFwPrivateTempCfg = *psFwPrivateHeapCfg; + sFwPrivateTempCfg.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; + + psFwCodeHeapCfg = &sFwPrivateTempCfg; + psFwDataHeapCfg = &sFwPrivateTempCfg; + } + + if ((psFwCodeHeapCfg == NULL) || (psFwDataHeapCfg == NULL)) + { + if (psFwCodeHeapCfg != psFwDataHeapCfg) + { + /* Private Firmware code and data heaps must be either both defined + * or both undefined. There is no point in isolating one but not + * the other.*/ + eError = PVRSRV_ERROR_PHYSHEAP_CONFIG; + PVR_LOG_GOTO_IF_ERROR(eError, "PrivateFwPhysHeap check", ErrorDeinit); + } + else + { + /* No dedicated heaps, default to the physheap fallback option */ + } + } + else if (psFwCodeHeapCfg == psFwDataHeapCfg) + { + if (psFwCodeHeapCfg->ui32UsageFlags == + (PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA)) + { + /* Fw code and private data allocations come from the same system heap + * Instantiate one physheap and share it between them. */ + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwCodeHeapCfg, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); + } + else + { + /* Not an exclusive heap, can be used for other purposes (e.g. secure buffers). + * Expect the PVR layer to have already created a heap for the other uses. */ + } + } + else + { + /* + * Separating private Firmware code and data is allowed for backwards compatibility + * purposes. New platforms should use the unified FW_PRIVATE heap instead. + * + * Early security implementations on Rogue cores required separate FW_PRIV_DATA + * and FW_CODE heaps, as access permissions to Firmware were granted differently + * based on the transaction types (code or data). 
+ */ + PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_CODE) && + (psFwDataHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PRIV_DATA), + "Dedicated private heaps for Fw code and " + "data must have one usage flag exclusively.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + /* Dedicated Fw code heap */ + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwCodeHeapCfg, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + + /* Dedicated Fw private data heap */ + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwDataHeapCfg, + NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + } + +#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE) + /* When premapping distinct private and shared Firmware phys heaps + * inside the same virtual devmem heap, their sizes must add up to + * the fixed RGX_FIRMWARE_RAW_HEAP_SIZE for the premapping to work */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PHYS_HEAP_CONFIG *psFwSharedHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_SHARED); + IMG_UINT64 ui64FwCodeHeapSize = PhysHeapConfigGetSize(psFwCodeHeapCfg); + IMG_UINT64 ui64FwDataHeapSize = PhysHeapConfigGetSize(psFwDataHeapCfg); + IMG_UINT64 ui64FwSharedHeapSize = PhysHeapConfigGetSize(psFwSharedHeapCfg); + IMG_UINT64 ui64FwPrivateHeapSize; + + PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL), + "Security support requires Fw code and data memory be" + " separate from the heap shared with the kernel driver.", FailDeinit); + + if (psFwCodeHeapCfg != psFwDataHeapCfg) + { + /* Private Firmware allocations come from 2 different heaps */ + ui64FwPrivateHeapSize = ui64FwCodeHeapSize + ui64FwDataHeapSize; + } + else + { + /* Private Firmware allocations come from a single heap */ + ui64FwPrivateHeapSize = ui64FwCodeHeapSize; + } + + PVR_LOG_GOTO_IF_FALSE((ui64FwSharedHeapSize + + ui64FwPrivateHeapSize) == + RGX_FIRMWARE_RAW_HEAP_SIZE, + "Invalid firmware physical heap size.", FailDeinit); + } +#endif + + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit); - eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, - &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); + eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit); return eError; +#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE) +FailDeinit: + eError = PVRSRV_ERROR_INVALID_PARAMS; +#endif ErrorDeinit: PVR_ASSERT(IMG_FALSE); return eError; } -static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize) +static PVRSRV_ERROR RGXInitFwPageTableHeap(PVRSRV_DEVICE_NODE *psDeviceNode) { - void *pvAppHintState = NULL; - IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; - IMG_UINT32 ui32GeneralNon4KHeapPageSize; - - /* Get the page size for the dummy page from the NON4K heap apphint */ - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, - GeneralNon4KHeapPageSize,&ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); - *pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize); - OSFreeKMAppHintState(pvAppHintState); + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(RGX_PREMAP_FW_HEAPS) + if 
(!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + PHYS_HEAP_CONFIG *psFwPageTableHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_PREMAP_PT); + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg != NULL), + "The Firmware Page Table phys heap config not found.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PREMAP_PT), + "The Firmware Page Table heap must be used exclusively for this purpose", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_LMA) || + (psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_DMA), + "The Firmware Page Table heap must be LMA or DMA memory.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->uConfig.sLMA.uiSize >= RGX_FIRMWARE_MAX_PAGETABLE_SIZE), + "The Firmware Page Table heap must be able to hold the maximum " + "number of pagetables needed to cover the Firmware's VA space.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + psFwPageTableHeapCfg, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig:FwPageTableHeap"); + + eError = PhysHeapAcquire(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire:FwPageTableHeap"); + } +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +#endif /* defined(RGX_PREMAP_FW_HEAPS) */ + + return eError; +} + +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + eError = RGXInitFwPageTableHeap(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitFwPageTableHeap", ErrorDeinit); + eError = RGXInitSharedFwPhysHeaps(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSharedFwPhysHeaps", ErrorDeinit); + eError = RGXInitPrivateFwPhysHeaps(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitPrivateFwPhysHeaps", ErrorDeinit); + +ErrorDeinit: + return eError; +} + +/*************************************************************************/ /*! +@Function RGXDeviceFWMainHeapMemCheck +@Description Checks the free memory in FW Main PhysHeap of a device to ensure + there is enough for a connection to be made. + +@Input psDeviceNode The device of the FW Main PhysHeap to be checked. + +@Return On success PVRSRV_OK, else a PVRSRV_ERROR code. +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXDeviceFWMainHeapMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PHYS_HEAP *psFWMainPhysHeap; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); + + psFWMainPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; + if (psFWMainPhysHeap == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to get device's FW Main PhysHeap")); + return PVRSRV_ERROR_INVALID_HEAP; + } + + if (PhysHeapGetType(psFWMainPhysHeap) == PHYS_HEAP_TYPE_LMA) + { + const IMG_UINT32 ui32MinMemInKBs = RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION; + IMG_UINT64 ui64FreePhysHeapMem; + + eError = PhysHeapFreeMemCheck(psFWMainPhysHeap, + KB2B(ui32MinMemInKBs), + &ui64FreePhysHeapMem); + + if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) + { + PVR_DPF((PVR_DBG_ERROR, "FW_MAIN PhysHeap contains less than the " + "minimum free space required to acquire a connection. 
" + "Free space: %"IMG_UINT64_FMTSPEC"KB " + "Minimum required: %uKB", + B2KB(ui64FreePhysHeapMem), + ui32MinMemInKBs)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXGetNon4KHeapPageShift(const void *hPrivate, IMG_UINT32 *pui32Log2Non4KPgShift) +{ + IMG_UINT32 uiLog2OSPageShift = OSGetPageShift(); + + /* We support Non4K pages only on platforms with 4KB pages. On all platforms + * where OS pages are larger than 4KB we must ensure the non4K device memory + * heap matches the page size used in all other device memory heaps, which + * is the OS page size, see RGXHeapDerivePageSize. */ + if (uiLog2OSPageShift > RGX_HEAP_4KB_PAGE_SHIFT) + { + *pui32Log2Non4KPgShift = RGXHeapDerivePageSize(uiLog2OSPageShift); + } + else + { + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32GeneralNon4KHeapPageSize; + IMG_UINT32 ui32GeneralNon4KValidHeapPageSize; + IMG_UINT32 ui32Log2Non4KPgShift; + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* The TEE-DDK is not aware of REE-DDK apphints. Stick to default */ + ui32GeneralNon4KHeapPageSize = ui32AppHintDefault; +#else + void *pvAppHintState = NULL; + /* Get the page size for the dummy page from the NON4K heap apphint */ + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, + GeneralNon4KHeapPageSize, &ui32AppHintDefault, + &ui32GeneralNon4KHeapPageSize); + OSFreeAppHintState(pvAppHintState); +#endif + /* Validate that the given ui32GeneralNon4KHeapPageSize is one of the + * supported range of values. + */ + ui32Log2Non4KPgShift = RGXHeapDerivePageSize(ExactLog2(ui32GeneralNon4KHeapPageSize)); + ui32GeneralNon4KValidHeapPageSize = (1U << ui32Log2Non4KPgShift); + if (ui32GeneralNon4KValidHeapPageSize != ui32GeneralNon4KHeapPageSize) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Non4K Heap Size: Requested" + " Heap size=%u, actual=%u, pageshift 0x%x", + __func__, ui32GeneralNon4KHeapPageSize, + ui32GeneralNon4KValidHeapPageSize, ui32Log2Non4KPgShift)); + } + + if (ui32Log2Non4KPgShift == 0U) + { + return PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE; + } + + *pui32Log2Non4KPgShift = ui32Log2Non4KPgShift; + } + + +#if defined(FIX_HW_BRN_71317_BIT_MASK) + if (RGX_DEVICE_HAS_BRN(hPrivate, 71317)) + { + if (*pui32Log2Non4KPgShift == RGX_HEAP_2MB_PAGE_SHIFT + || *pui32Log2Non4KPgShift == RGX_HEAP_1MB_PAGE_SHIFT) + { + PVR_DPF((PVR_DBG_ERROR, + "Page sizes of 2MB or 1MB cause page faults.")); + return PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE; + } + } +#else + PVR_UNREFERENCED_PARAMETER(hPrivate); +#endif + + /* Check the Non4k page size is at least the size of the OS page size + * or larger. The Non4k page size also has to be a multiple of the OS page + * size but since we have the log2 value from the apphint we know powers of 2 + * will always be multiples. If the Non4k page size is less than OS page size + * we notify and upgrade the size. + */ + if (*pui32Log2Non4KPgShift < uiLog2OSPageShift) + { + PVR_DPF((PVR_DBG_MESSAGE, "Non4K page size smaller than OS page size, upgrading to " + "match OS page size.")); + *pui32Log2Non4KPgShift = uiLog2OSPageShift; + } + + return PVRSRV_OK; } /* RGXRegisterDevice * * WARNING! * - * No PDUMP statements are allowed in until Part 2 of the device initialisation - * is reached. + * No PDUMP statements are allowed until device initialisation starts. 
*/ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) { @@ -4242,11 +4971,10 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) void *pvAppHintState = NULL; IMG_UINT32 ui32AppHintDefault = HWPERF_HOST_TL_STREAM_SIZE_DEFAULT, ui32HWPerfHostBufSizeKB; - ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, &ui32AppHintDefault, &ui32HWPerfHostBufSizeKB); - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); pvAppHintState = NULL; /********************* @@ -4264,14 +4992,22 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK); OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE); - psDeviceNode->pfnDevSLCFlushRange = RGXSLCFlushRange; psDeviceNode->pfnInvalFBSCTable = RGXInvalidateFBSCTable; psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; + /* Callback for getting the MMU device attributes */ + psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; + psDeviceNode->pfnGetDeviceSnoopMode = RGXDevSnoopMode; psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; - psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; +#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) + psDeviceNode->pfnMMUTopLevelPxWorkarounds = RGXMapBRN71422TargetPhysicalAddress; +#else + psDeviceNode->pfnMMUTopLevelPxWorkarounds = NULL; +#endif + /* pfnMMUTweakProtFlags is set later on once BVNC features are setup */ + psDeviceNode->pfnMMUTweakProtFlags = NULL; psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; @@ -4284,7 +5020,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; /* Register callback for checking the device's health */ - psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXUpdateHealthStatus; + psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) ? 
NULL : RGXUpdateHealthStatus; #if defined(SUPPORT_AUTOVZ) /* Register callback for updating the virtualization watchdog */ @@ -4303,9 +5039,6 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) /* Register callback for soft resetting some device modules */ psDeviceNode->pfnSoftReset = RGXSoftReset; -#if defined(SUPPORT_VALIDATION) - psDeviceNode->pfnValidationGPUUnitsPowerChange = PVRSRVDeviceGPUUnitsPowerChange; -#endif /* Register callback for resetting the HWR logs */ psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs; @@ -4322,62 +5055,16 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue; /* Callback for checking if system layer supports FBC 3.1 */ - psDeviceNode->pfnHasFBCDCVersion31 = NULL; + psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; - /* Callback for getting the MMU device attributes */ - psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; + /* Callback for getting TFBC configuration */ + psDeviceNode->pfnGetTFBCLossyGroup = RGXGetTFBCLossyGroup; /* Register callback for initialising device-specific physical memory heaps */ psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; - /* Set up required support for dummy page */ - OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0); - OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0); - - /* Set the order to 0 */ - psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0; - psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0; - - /* Set the size of the Dummy page to zero */ - psDeviceNode->sDummyPage.ui32Log2PgSize = 0; - - /* Set the size of the Zero page to zero */ - psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0; - - /* Set the Dummy page phys addr */ - psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; - - /* Set the Zero page phys addr */ - psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; - - /* The lock can be acquired from MISR (Z-buffer) path */ - eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__)); - return eError; - } - - /* Create the lock for zero page */ - eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__)); - goto free_dummy_page; - } -#if defined(PDUMP) - psDeviceNode->sDummyPage.hPdumpPg = NULL; - psDeviceNode->sDevZeroPage.hPdumpPg = NULL; -#endif - - psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; - - psDeviceNode->pfnGetTFBCLossyGroup = RGXGetTFBCLossyGroup; - - /* The device shared-virtual-memory heap address-space size is stored here for faster - look-up without having to walk the device heap configuration structures during - client device connection (i.e. 
this size is relative to a zero-based offset) */ - psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; + /* Register callback for checking a device's FW Main physical heap for sufficient free memory */ + psDeviceNode->pfnCheckForSufficientFWPhysMem = RGXDeviceFWMainHeapMemCheck; /********************* * Device info setup * @@ -4391,6 +5078,15 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) return PVRSRV_ERROR_OUT_OF_MEMORY; } + /* initialise the layer parameters needed for early hw feature checks */ + psDevInfo->sLayerParams.psDevInfo = psDevInfo; + psDevInfo->sLayerParams.psDevConfig = psDeviceNode->psDevConfig; + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + /* Default psTrampoline to point to null struct */ + psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; +#endif + /* create locks for the context lists stored in the DevInfo structure. * these lists are modified on context create/destroy and read by the * watchdog thread @@ -4463,7 +5159,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) goto e7; } - eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); + eError = OSLockCreate(&psDevInfo->sRegConfig.hLock); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); @@ -4497,6 +5193,13 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) goto e12; } + eError = OSLockCreate(&psDevInfo->hGpuUtilStatsLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create GPU stats lock", __func__)); + goto e13; + } + dllist_init(&psDevInfo->sMemoryContextList); /* initialise ui32SLRHoldoffCounter */ @@ -4523,20 +5226,17 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) #if !defined(NO_HARDWARE) psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, - psDeviceNode->psDevConfig->ui32RegsSize, - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + psDeviceNode->psDevConfig->ui32RegsSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); if (psDevInfo->pvRegsBaseKM == NULL) { PVR_DPF((PVR_DBG_ERROR, - "PVRSRVRGXInitDevPart2KM: Failed to create RGX register mapping")); + "%s: Failed to create RGX register mapping", + __func__)); eError = PVRSRV_ERROR_BAD_MAPPING; - goto e13; + goto e14; } - psDevInfo->pvSecureRegsBaseKM = NULL; -#else - psDevInfo->pvRegsBaseKM = NULL; - psDevInfo->pvSecureRegsBaseKM = NULL; #endif /* !NO_HARDWARE */ psDeviceNode->pvDevice = psDevInfo; @@ -4550,12 +5250,16 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) goto e15; } +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) /* * We must now setup the SECURITY mappings if supported. We cannot * check on the features until we have reached here as the BVNC is * not setup before now. 
*/ -#if !defined(NO_HARDWARE) +#if defined(NO_HARDWARE) || defined(SUPPORT_TRUSTED_DEVICE) + /* secure register bank not accessible or unused */ + psDevInfo->pvSecureRegsBaseKM = 0; +#else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) { @@ -4570,7 +5274,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXInitDevPart2KM: Failed to create RGX secure register mapping")); eError = PVRSRV_ERROR_BAD_MAPPING; - goto e13; + goto e15; } /* @@ -4580,21 +5284,23 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) * For the RegBankBase+RegOffset computation to still be accurate for host-secure registers, * we need to compensate for offsets of registers in the secure bank */ - psDevInfo->pvSecureRegsBaseKM = (void *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM - RGX_HOST_SECURE_REGBANK_OFFSET); + psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM - RGX_HOST_SECURE_REGBANK_OFFSET); } else { psDevInfo->pvSecureRegsBaseKM = psDevInfo->pvRegsBaseKM; } -#else - psDevInfo->pvSecureRegsBaseKM = NULL; -#endif /* !NO_HARDWARE */ +#endif /* defined(NO_HARDWARE) || defined(SUPPORT_TRUSTED_DEVICE) */ +#endif /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ - _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + psDeviceNode->pfnMMUTweakProtFlags = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? + RGXMMUTweakProtFlags : NULL; +#endif - /*Set the zero & dummy page sizes as needed for the heap with largest page size */ - psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; - psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; + eError = RGXGetNon4KHeapPageShift(&psDevInfo->sLayerParams, + &psDeviceNode->ui32Non4KPageSizeLog2); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXGetNon4KHeapPageSize", e16); /* Configure MMU specific stuff */ RGXMMUInit_Register(psDeviceNode); @@ -4602,70 +5308,58 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo); if (eError != PVRSRV_OK) { - goto e15; + goto e16; } eError = RGXHWPerfInit(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e15); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e16); eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB); PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw); -#if defined(SUPPORT_VALIDATION) - eError = RGXPowerDomainInitState(&psDevInfo->sPowerDomainState, - psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount); - if (eError != PVRSRV_OK) - { - goto ErrorDeInitHWPerfHost; - } - /* This completion will be signaled by the ISR when processing - * the answer CCB command carrying an RGX Register read value */ - init_completion(&psDevInfo->sFwRegs.sRegComp); - psDevInfo->sFwRegs.ui64RegVal = 0; + /* Register callback for dumping debug info */ + eError = RGXDebugInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e17); -#if defined(SUPPORT_SOC_TIMER) +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + /* Register callback for fw mmu init */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { - const IMG_BOOL bDefaultFalse = IMG_FALSE; - IMG_BOOL bInitSocTimer; - void *pvAppHintState = NULL; - - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bDefaultFalse, &bInitSocTimer); - 
OSFreeKMAppHintState(pvAppHintState); - - if (bInitSocTimer) - { - eError = RGXInitSOCUSCTimer(psDeviceNode); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSOCUSCTimer", e16); - } + psDeviceNode->pfnFwMMUInit = RGXMipsMMUInit_Register; } -#endif #endif - /* Register callback for dumping debug info */ - eError = RGXDebugInit(psDevInfo); - PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e16); - /* Initialise the device dependent bridges */ - eError = DeviceDepBridgeInit(psDevInfo); - PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); + eError = RGXRegisterBridges(psDevInfo); + PVR_LOG_IF_ERROR(eError, "RGXRegisterBridges"); + +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) + eError = OSLockCreate(&psDevInfo->hCounterDumpingLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__)); + goto ErrorDeInitDeviceDepBridge; + } +#endif /* Initialise error counters */ memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS)); return PVRSRV_OK; -e16: -#if defined(SUPPORT_VALIDATION) - RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState); -ErrorDeInitHWPerfHost: +#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) +ErrorDeInitDeviceDepBridge: + RGXUnregisterBridges(psDevInfo); #endif + +e17: RGXHWPerfHostDeInit(psDevInfo); ErrorDeInitHWPerfFw: RGXHWPerfDeinit(psDevInfo); -e15: +e16: #if !defined(NO_HARDWARE) +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) if (psDevInfo->pvSecureRegsBaseKM != NULL) { /* Adjust pvSecureRegsBaseKM if device has SECURITY_VERSION > 1 */ @@ -4673,18 +5367,24 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) { /* Undo the VA offset adjustment to unmap correct VAddr */ - psDevInfo->pvSecureRegsBaseKM = (void *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); + psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, - psDevInfo->ui32RegSize); + RGX_HOST_SECURE_REGBANK_SIZE); } } -e13: +#endif +#endif /* !NO_HARDWARE */ +e15: +#if !defined(NO_HARDWARE) if (psDevInfo->pvRegsBaseKM != NULL) { OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, psDevInfo->ui32RegSize); } +e14: #endif /* !NO_HARDWARE */ + OSLockDestroy(psDevInfo->hGpuUtilStatsLock); +e13: OSLockDestroy(psDevInfo->hCCBRecoveryLock); e12: OSLockDestroy(psDevInfo->hCCBStallCheckLock); @@ -4693,7 +5393,7 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) e10: OSLockDestroy(psDevInfo->hBPLock); e9: - OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->sRegConfig.hLock); e8: OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); e7: @@ -4713,13 +5413,6 @@ PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) e0: OSFreeMem(psDevInfo); - /* Destroy the zero page lock created above */ - OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); - -free_dummy_page: - /* Destroy the dummy page lock created above */ - OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); - PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -4769,9 +5462,9 @@ IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) @Return PVRSRV_ERROR */ /**************************************************************************/ static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_CHAR **ppszVersionString) + IMG_CHAR **ppszVersionString) { -#if 
defined(COMPAT_BVNC_MASK_B) || defined(COMPAT_BVNC_MASK_V) || defined(COMPAT_BVNC_MASK_N) || defined(COMPAT_BVNC_MASK_C) || defined(NO_HARDWARE) || defined(EMULATOR) +#if defined(NO_HARDWARE) || defined(EMULATOR) const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)"; #else const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)"; @@ -4816,7 +5509,7 @@ static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, @Return PVRSRV_ERROR */ /***************************************************************************/ static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_PUINT32 pui32RGXClockSpeed) + IMG_PUINT32 pui32RGXClockSpeed) { RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; @@ -4826,7 +5519,7 @@ static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, return PVRSRV_OK; } -#if (RGX_NUM_OS_SUPPORTED > 1) +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) /*! ******************************************************************************* @@ -4834,28 +5527,44 @@ static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Called to perform additional initialisation ******************************************************************************/ -static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid) +static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID) { IMG_UINT32 uiStringLength; IMG_UINT32 uiStringLengthMax = 32; IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); + PVR_RETURN_IF_FALSE(ui32Log2RgxDefaultPageShift != 0, PVRSRV_ERROR_INVALID_PARAMS); + +#if defined(FIX_HW_BRN_71317_BIT_MASK) + if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317)) + { + if (ui32Log2RgxDefaultPageShift == RGX_HEAP_2MB_PAGE_SHIFT + || ui32Log2RgxDefaultPageShift == RGX_HEAP_1MB_PAGE_SHIFT) + { + PVR_DPF((PVR_DBG_ERROR, + "OS page size too large for device virtual heaps. 
" + "Maximum page size supported is 256KB when BRN71317 is present.")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +#endif + uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); - /* Start by allocating memory for this OSID heap identification string */ + /* Start by allocating memory for this DriverID heap identification string */ psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); if (psDevMemHeap->pszName == NULL) { return PVRSRV_ERROR_OUT_OF_MEMORY; } - /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ - OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid); + /* Append the DriverID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ + OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); /* Use the common blueprint template support function to initialise the heap */ HeapCfgBlueprintInit(psDevMemHeap->pszName, - RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE), + RGX_FIRMWARE_RAW_HEAP_BASE + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE), RGX_FIRMWARE_RAW_HEAP_SIZE, 0, ui32Log2RgxDefaultPageShift, @@ -4877,7 +5586,7 @@ static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UI static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) { IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; - IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); + IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_DRIVERS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); /* Safe to do as the guest firmware heaps are last in the list */ if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && @@ -4887,7 +5596,7 @@ static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) OSFreeMem(pszName); } } -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ +#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ /****************************************************************************** End of file (rgxinit.c) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer.h deleted file mode 100644 index 42fdf1e5f677..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer.h +++ /dev/null @@ -1,510 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title Header for Services abstraction layer -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Declaration of an interface layer used to abstract code that - can be compiled outside of the DDK, potentially in a - completely different OS. - All the headers included by this file must also be copied to - the alternative source tree. - All the functions declared here must have a DDK implementation - inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and - another different implementation in case they are used outside - of the DDK. - All of the functions accept as a first parameter a - "const void *hPrivate" argument. It should be used to pass - around any implementation specific data required. -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#if !defined(RGXLAYER_H) -#define RGXLAYER_H - -#if defined(__cplusplus) -extern "C" { -#endif - - -#include "img_defs.h" -#include "img_types.h" -#include "img_elf.h" -#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ -#include "pvrsrv_device.h" -#include "rgx_bvnc_defs_km.h" -#include "rgx_fw_info.h" -#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ -#include "rgx_meta.h" -#include "rgx_riscv.h" - -#include "rgxdefs_km.h" -/* includes: - * rgx_cr_defs_km.h, - * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h), - * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) - */ - - -/*! -******************************************************************************* - - @Function RGXMemCopy - - @Description MemCopy implementation - - @Input hPrivate : Implementation specific data - @Input pvDst : Pointer to the destination - @Input pvSrc : Pointer to the source location - @Input uiSize : The amount of memory to copy in bytes - - @Return void - -******************************************************************************/ -void RGXMemCopy(const void *hPrivate, - void *pvDst, - void *pvSrc, - size_t uiSize); - -/*! 
-******************************************************************************* - - @Function RGXMemSet - - @Description MemSet implementation - - @Input hPrivate : Implementation specific data - @Input pvDst : Pointer to the start of the memory region - @Input ui8Value : The value to be written - @Input uiSize : The number of bytes to be set to ui8Value - - @Return void - -******************************************************************************/ -void RGXMemSet(const void *hPrivate, - void *pvDst, - IMG_UINT8 ui8Value, - size_t uiSize); - -/*! -******************************************************************************* - - @Function RGXCommentLog - - @Description Generic log function used for debugging or other purposes - - @Input hPrivate : Implementation specific data - @Input pszString : Message to be printed - @Input ... : Variadic arguments - - @Return void - -******************************************************************************/ -__printf(2, 3) -void RGXCommentLog(const void *hPrivate, - const IMG_CHAR *pszString, - ...); - -/*! -******************************************************************************* - - @Function RGXErrorLog - - @Description Generic error log function used for debugging or other purposes - - @Input hPrivate : Implementation specific data - @Input pszString : Message to be printed - @Input ... : Variadic arguments - - @Return void - -******************************************************************************/ -__printf(2, 3) -void RGXErrorLog(const void *hPrivate, - const IMG_CHAR *pszString, - ...); - -/* This is used to check if a specific feature is enabled. - * Should be used instead of calling RGXDeviceHasFeature. */ -#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \ - RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK) - -/* This is used to check if a specific feature with value is enabled. - * Should be used instead of calling RGXDeviceGetFeatureValue. */ -#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \ - (RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0) - -/* This is used to get the value of a specific feature from hPrivate. - * Should be used instead of calling RGXDeviceGetFeatureValue. */ -#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \ - RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) - -/* This is used to get the value of a specific ERN from hPrivate. - * Should be used instead of calling RGXDeviceHasErnBrn. */ -#define RGX_DEVICE_HAS_ERN(hPrivate, FixNum) \ - RGXDeviceHasErnBrn(hPrivate, HW_##FixNum##_BIT_MASK) - -/* This is used to get the value of a specific BRN from hPrivate. - * Should be used instead of calling RGXDeviceHasErnBrn. */ -#define RGX_DEVICE_HAS_BRN(hPrivate, FixNum) \ - RGXDeviceHasErnBrn(hPrivate, FIX_HW_##FixNum##_BIT_MASK) - -#define CLK_CTRL_FORCE_ON(X, Module) \ - X = (((X) & RGX_CR_##Module##_CLRMSK) | RGX_CR_##Module##_ON) -/*! -******************************************************************************* - - @Function RGXDeviceGetFeatureValue - - @Description Checks if a device has a particular feature with values - - @Input hPrivate : Implementation specific data - @Input ui64Feature : Feature with values to check - - @Return Value >= 0 if the given feature is available, -1 otherwise - -******************************************************************************/ -IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature); - -/*! 
-******************************************************************************* - - @Function RGXDeviceHasFeature - - @Description Checks if a device has a particular feature - - @Input hPrivate : Implementation specific data - @Input ui64Feature : Feature to check - - @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise - -******************************************************************************/ -IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature); - -/*! -******************************************************************************* - - @Function RGXDeviceHasErnBrn - - @Description Checks if a device has a particular errata - - @Input hPrivate : Implementation specific data - @Input ui64ErnsBrns : Flags to check - - @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise - -******************************************************************************/ -IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); - -/*! -******************************************************************************* - - @Function RGXGetFWCorememSize - - @Description Get the FW coremem size - - @Input hPrivate : Implementation specific data - - @Return FW coremem size - -******************************************************************************/ -IMG_INTERNAL -IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate); - -/*! -******************************************************************************* - - @Function RGXWriteReg32/64 - - @Description Write a value to a 32/64 bit RGX register - - @Input hPrivate : Implementation specific data - @Input ui32RegAddr : Register offset inside the register bank - @Input ui32/64RegValue : New register value - - @Return void - -******************************************************************************/ -void RGXWriteReg32(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32RegValue); - -void RGXWriteReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64RegValue); - -/*! -******************************************************************************* - - @Function RGXReadReg32/64 - - @Description Read a 32/64 bit RGX register - - @Input hPrivate : Implementation specific data - @Input ui32RegAddr : Register offset inside the register bank - - @Return Register value - -******************************************************************************/ -IMG_UINT32 RGXReadReg32(const void *hPrivate, - IMG_UINT32 ui32RegAddr); - -IMG_UINT64 RGXReadReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr); - -/*! -******************************************************************************* - - @Function RGXReadModifyWriteReg32 - - @Description Read-modify-write a 32 bit RGX register - - @Input hPrivate : Implementation specific data. - @Input ui32RegAddr : Register offset inside the register bank. - @Input ui32RegValue : New register value. - @Input ui32RegMask : Keep the bits set in the mask. - - @Return Always returns PVRSRV_OK - -******************************************************************************/ -IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64RegValue, - IMG_UINT64 ui64RegKeepMask); - -/*! 
-******************************************************************************* - - @Function RGXPollReg32/64 - - @Description Poll on a 32/64 bit RGX register until some bits are set/unset - - @Input hPrivate : Implementation specific data - @Input ui32RegAddr : Register offset inside the register bank - @Input ui32/64RegValue : Value expected from the register - @Input ui32/64RegMask : Only the bits set in this mask will be - checked against uiRegValue - - @Return PVRSRV_OK if the poll succeeds, - PVRSRV_ERROR_TIMEOUT if the poll takes too long - -******************************************************************************/ -PVRSRV_ERROR RGXPollReg32(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32RegValue, - IMG_UINT32 ui32RegMask); - -PVRSRV_ERROR RGXPollReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64RegValue, - IMG_UINT64 ui64RegMask); - -/*! -******************************************************************************* - - @Function RGXWaitCycles - - @Description Wait for a number of GPU cycles and/or microseconds - - @Input hPrivate : Implementation specific data - @Input ui32Cycles : Number of GPU cycles to wait for in pdumps, - it can also be used when running driver-live - if desired (ignoring the next parameter) - @Input ui32WaitUs : Number of microseconds to wait for when running - driver-live - - @Return void - -******************************************************************************/ -void RGXWaitCycles(const void *hPrivate, - IMG_UINT32 ui32Cycles, - IMG_UINT32 ui32WaitUs); - -/*! -******************************************************************************* - - @Function RGXAcquireKernelMMUPC - - @Description Acquire the Kernel MMU Page Catalogue device physical address - - @Input hPrivate : Implementation specific data - @Input psPCAddr : Returned page catalog address - - @Return void - -******************************************************************************/ -void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr); - -/*! -******************************************************************************* - - @Function RGXWriteKernelMMUPC32/64 - - @Description Write the Kernel MMU Page Catalogue to the 32/64 bit - RGX register passed as argument. - In a driver-live scenario without PDump these functions - are the same as RGXWriteReg32/64 and they don't need - to be reimplemented. - - @Input hPrivate : Implementation specific data - @Input ui32PCReg : Register offset inside the register bank - @Input ui32AlignShift : PC register alignshift - @Input ui32Shift : PC register shift - @Input ui32/64PCVal : Page catalog value (aligned and shifted) - - @Return void - -******************************************************************************/ -#if defined(PDUMP) -void RGXWriteKernelMMUPC32(const void *hPrivate, - IMG_UINT32 ui32PCReg, - IMG_UINT32 ui32PCRegAlignShift, - IMG_UINT32 ui32PCRegShift, - IMG_UINT32 ui32PCVal); - -#else /* defined(PDUMP) */ -#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ - RGXWriteReg32(priv, pcreg, pcval) -#endif /* defined(PDUMP) */ - -/*! -******************************************************************************* - - @Function RGXDoFWSlaveBoot - - @Description Returns whether or not a FW Slave Boot is required - while powering on - - @Input hPrivate : Implementation specific data - - @Return IMG_BOOL - -******************************************************************************/ -IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); - -/*! 
-******************************************************************************* - - @Function RGXFabricCoherencyTest - - @Description Performs fabric coherency test - - @Input hPrivate : Implementation specific data - - @Return PVRSRV_OK if the test succeeds, - PVRSRV_ERROR_INIT_FAILURE if the test fails at some point - -******************************************************************************/ -PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate); - -/*! -******************************************************************************* - - @Function RGXGetDeviceSLCBanks - - @Description Returns the number of SLC banks used by the device - - @Input hPrivate : Implementation specific data - - @Return Number of SLC banks - -******************************************************************************/ -IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); - -/*! -******************************************************************************* - - @Function RGXGetDeviceCacheLineSize - - @Description Returns the device cache line size - - @Input hPrivate : Implementation specific data - - @Return Cache line size - -******************************************************************************/ -IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); - -/*! -******************************************************************************* - - @Function RGXAcquireBootCodeAddr - - @Description Acquire the device virtual address of the RISCV boot code - - @Input hPrivate : Implementation specific data - @Output psBootCodeAddr : Boot code base address - - @Return void - -******************************************************************************/ -void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); - -/*! -******************************************************************************* - - @Function RGXAcquireBootDataAddr - - @Description Acquire the device virtual address of the RISCV boot data - - @Input hPrivate : Implementation specific data - @Output psBootDataAddr : Boot data base address - - @Return void - -******************************************************************************/ -void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr); - -/*! -******************************************************************************* - - @Function RGXDeviceAckIrq - - @Description Checks the implementation specific IRQ status register, - clearing it if necessary and returning the IRQ status. - - @Input hPrivate : Implementation specific data - - @Return: IRQ status - -******************************************************************************/ -IMG_BOOL RGXDeviceAckIrq(const void *hPrivate); - -#if defined(__cplusplus) -} -#endif - -#endif /* RGXLAYER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.c index 4270d0b2dcef..f95f500dd5b7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxlayer_impl.c @@ -45,7 +45,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "osfunc.h" #include "pdump_km.h" #include "rgxfwutils.h" +#include "rgxinit.h" +#include "rgxfwimageutils.h" #include "cache_km.h" +#include "km/rgxdefs_km.h" +#include "rgx_heaps_server.h" #if defined(PDUMP) #if defined(__linux__) @@ -61,420 +65,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* __linux__ */ #endif -void RGXMemCopy(const void *hPrivate, - void *pvDst, - void *pvSrc, - size_t uiSize) -{ - PVR_UNREFERENCED_PARAMETER(hPrivate); - OSDeviceMemCopy(pvDst, pvSrc, uiSize); -} - -void RGXMemSet(const void *hPrivate, - void *pvDst, - IMG_UINT8 ui8Value, - size_t uiSize) -{ - PVR_UNREFERENCED_PARAMETER(hPrivate); - OSDeviceMemSet(pvDst, ui8Value, uiSize); -} - -void RGXCommentLog(const void *hPrivate, - const IMG_CHAR *pszString, - ...) -{ -#if defined(PDUMP) - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - va_list argList; - va_start(argList, pszString); - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList); - va_end(argList); -#else - PVR_UNREFERENCED_PARAMETER(hPrivate); - PVR_UNREFERENCED_PARAMETER(pszString); -#endif -} - -void RGXErrorLog(const void *hPrivate, - const IMG_CHAR *pszString, - ...) -{ - IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; - va_list argList; - - PVR_UNREFERENCED_PARAMETER(hPrivate); - - va_start(argList, pszString); - vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); - va_end(argList); - - PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); -} - -IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) -{ - IMG_INT32 i32Ret = -1; - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - PVRSRV_DEVICE_NODE *psDeviceNode; - - PVR_ASSERT(hPrivate != NULL); - - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - psDeviceNode = psDevInfo->psDeviceNode; - - if ((psDeviceNode->pfnGetDeviceFeatureValue)) - { - i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); - } - - return i32Ret; -} - -IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; -} - -IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) - { - return RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); - } - return 0; -} - -void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? 
- (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - OSWriteUncheckedHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue); - } - - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); -} - -void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? - (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - OSWriteUncheckedHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); - } - - PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); -} - -IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - IMG_UINT32 ui32RegValue; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? - (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - -#if defined(PDUMP) - if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) - { - ui32RegValue = IMG_UINT32_MAX; - } - else -#endif - { - ui32RegValue = OSReadUncheckedHWReg32(pvRegsBase, ui32RegAddr); - } - - PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, psParams->ui32PdumpFlags); - - return ui32RegValue; -} - -IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - IMG_UINT64 ui64RegValue; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? - (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - -#if defined(PDUMP) - if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) - { - ui64RegValue = IMG_UINT64_MAX; - } - else -#endif - { - ui64RegValue = OSReadUncheckedHWReg64(pvRegsBase, ui32RegAddr); - } - - PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); - - return ui64RegValue; -} - -IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 uiRegValueNew, - IMG_UINT64 uiRegKeepMask) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? 
- (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - - /* only use the new values for bits we update according to the keep mask */ - uiRegValueNew &= ~uiRegKeepMask; - -#if defined(PDUMP) - /* Store register offset to temp PDump variable */ - PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ":SYSMEM:$1", ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); - - /* Keep the bits set in the mask */ - PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - uiRegKeepMask, PDUMP_FLAGS_CONTINUOUS); - - /* OR the new values */ - PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", - uiRegValueNew, PDUMP_FLAGS_CONTINUOUS); - - /* Do the actual register write */ - PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, - ui32RegAddr, ":SYSMEM:$1", 0); - - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - - { - IMG_UINT64 uiRegValue = OSReadUncheckedHWReg64(pvRegsBase, ui32RegAddr); - uiRegValue &= uiRegKeepMask; - OSWriteUncheckedHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); - } - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXPollReg32(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT32 ui32RegValue, - IMG_UINT32 ui32RegMask) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? - (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), - ui32RegValue, - ui32RegMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr)); - return PVRSRV_ERROR_TIMEOUT; - } - } - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32RegAddr, - ui32RegValue, - ui32RegMask, - psParams->ui32PdumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -} - -PVRSRV_ERROR RGXPollReg64(const void *hPrivate, - IMG_UINT32 ui32RegAddr, - IMG_UINT64 ui64RegValue, - IMG_UINT64 ui64RegMask) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - void __iomem *pvRegsBase; - - /* Split lower and upper words */ - IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); - IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); - IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); - IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ? 
- (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM); - -#if defined(PDUMP) - if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -#endif - { - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), - ui32UpperValue, - ui32UpperMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); - return PVRSRV_ERROR_TIMEOUT; - } - - if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, - (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), - ui32LowerValue, - ui32LowerMask, - POLL_FLAG_LOG_ERROR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr)); - return PVRSRV_ERROR_TIMEOUT; - } - } - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32RegAddr + 4, - ui32UpperValue, - ui32UpperMask, - psParams->ui32PdumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - - PDUMPREGPOL(psDevInfo->psDeviceNode, - RGX_PDUMPREG_NAME, - ui32RegAddr, - ui32LowerValue, - ui32LowerMask, - psParams->ui32PdumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - return PVRSRV_OK; -} - -void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - OSWaitus(ui32TimeUs); - PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS); -} - -void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) -{ - PVR_ASSERT(hPrivate != NULL); - *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; -} - -#if defined(PDUMP) -void RGXWriteKernelMMUPC32(const void *hPrivate, - IMG_UINT32 ui32PCReg, - IMG_UINT32 ui32PCRegAlignShift, - IMG_UINT32 ui32PCRegShift, - IMG_UINT32 ui32PCVal) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; - - /* Write the cat-base address */ - OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32PCReg, ui32PCVal); - - /* Pdump catbase address */ - MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, - RGX_PDUMPREG_NAME, - ui32PCReg, - 4, - ui32PCRegAlignShift, - ui32PCRegShift, - PDUMP_FLAGS_CONTINUOUS); -} -#endif /* defined(PDUMP) */ #define MAX_NUM_COHERENCY_TESTS (10) IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) { PVRSRV_RGXDEV_INFO *psDevInfo; - PVRSRV_DEVICE_NODE *psDeviceNode; PVR_ASSERT(hPrivate != NULL); psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; @@ -484,13 +79,7 @@ IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) return IMG_FALSE; } - psDeviceNode = psDevInfo->psDeviceNode; -#if !defined(NO_HARDWARE) - return (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && - PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)); -#else return IMG_FALSE; -#endif } /* @@ -598,7 +187,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; - /* Allocate, acquire cpu address and set firmware address for cc=0 buffer */ + /* Allocate, acquire cpu address and set firmware address for cc=0 buffer */ eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, uiFabricCohTestBlockSize, uiFabricCohTestBlockAlign, @@ -682,8 +271,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) psFabricCohTestBufferDevVA = &sFabricCohCcTestBufferDevVA; } - if 
(eTestType == GPU_WRITE_CPU_READ_SH && - !PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) + if (eTestType == GPU_WRITE_CPU_READ_SH) { /* Cannot perform this test if there is no snooping of device cache */ continue; @@ -691,7 +279,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); - eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); + eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid, CPU_USE); if (eError != PVRSRV_OK || bValid == IMG_FALSE) { PVR_DPF((PVR_DBG_ERROR, @@ -707,6 +295,29 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) { IMG_UINT32 i; +#if defined(DEBUG) + switch (eTestType) + { + case CPU_WRITE_GPU_READ_SM: + PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); + break; + case GPU_WRITE_CPU_READ_SM: + PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); + break; + case CPU_WRITE_GPU_READ_SH: + PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); + break; + case GPU_WRITE_CPU_READ_SH: + PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); + break; + default: + PVR_LOG(("Internal error, exiting test")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } +#endif + /* Do multiple sub-dword cache line tests */ for (i = 0; i < 2 && bExit == IMG_FALSE; i++) { @@ -726,7 +337,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) what we are about to write via slave-port here because if it drains from the CPU dcache before we read it, it would corrupt what we are going to read back via the CPU */ - CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_CLEAN); + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_FLUSH); /* Calculate a new value to write */ ui32FWValue = i + ui32OddEvenSeed; @@ -736,8 +347,8 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "RGXWriteFWModuleAddr error: %s, exiting", - PVRSRVGetErrorString(eError))); + "RGXWriteFWModuleAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); bExit = IMG_TRUE; continue; } @@ -748,8 +359,8 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "RGXReadFWModuleAddr error: %s, exiting", - PVRSRVGetErrorString(eError))); + "RGXReadFWModuleAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); bExit = IMG_TRUE; continue; } @@ -769,15 +380,12 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) continue; } - if (!PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) - { - /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory - region is discarded before we read (i.e. next read must trigger a cache miss). - If there is snooping of device cache, then any prefetching done by the CPU - will reflect the most up to date datum writing by GPU into said location, - that is to say prefetching must be coherent so CPU d-flush is not needed */ - CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE); - } + /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory + region is discarded before we read (i.e. next read must trigger a cache miss). 
+ Previously there was snooping of device cache, where prefetching done by the CPU + would reflect the most up to date datum writing by GPU into said location, + that is to say prefetching was coherent so CPU d-flush was not needed */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE); } else { @@ -793,15 +401,11 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) /* Flush possible cpu store-buffer(ing) on LMA */ OSWriteMemoryBarrier(&pui32FabricCohTestBufferCpuVA[i]); - switch (eTestType) + if (eTestType == CPU_WRITE_GPU_READ_SM) { - case CPU_WRITE_GPU_READ_SM: /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so memory is coherent before the SlavePort reads */ CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_FLUSH); - break; - default: - break; } /* Read back value using RGX slave-port interface */ @@ -809,7 +413,7 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "RGXReadWithSP error: %s, exiting", + "RGXReadFWModuleAddr error: %s, exiting", PVRSRVGetErrorString(eError))); bExit = IMG_TRUE; continue; @@ -869,7 +473,11 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) } #if defined(DEBUG) - bSubTestPassed = bExit ? IMG_FALSE : bSubTestPassed; + if (bExit) + { + continue; + } + switch (eTestType) { case CPU_WRITE_GPU_READ_SM: @@ -932,79 +540,100 @@ PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) return eError; } -IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; -} - -IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) +static IMG_UINT64 RGXMMUComputeRangeValue(IMG_UINT32 ui32DataPageShift, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize) { - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; + /* end address of range is inclusive */ + IMG_UINT64 ui64EndAddress = ui64BaseAddress + ui64RangeSize - (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT); + IMG_UINT64 ui64RegValue = 0; - if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) + switch (ui32DataPageShift) { - return 0; + case RGX_HEAP_16KB_PAGE_SHIFT: + ui64RegValue = 1; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + ui64RegValue = 2; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + ui64RegValue = 3; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + ui64RegValue = 4; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + ui64RegValue = 5; + break; + case RGX_HEAP_4KB_PAGE_SHIFT: + /* fall through */ + default: + /* anything we don't support, use 4K */ + break; } - return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); -} -IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; + /* check that the range is defined by valid 40 bit virtual addresses */ + PVR_ASSERT((ui64BaseAddress & ~((1ULL << 40) - 1)) == 0); + PVR_ASSERT((ui64EndAddress & ~((1ULL << 40) - 1)) == 0); - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) - { - return 0; - } - return RGX_GET_FEATURE_VALUE(psDevInfo, 
SLC_CACHE_LINE_SIZE_BITS); -} - -void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) -{ - PVRSRV_RGXDEV_INFO *psDevInfo; + /* the range config register addresses are in 2MB chunks so check 21 lsb are zero */ + PVR_ASSERT((ui64BaseAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT) - 1)) == 0); + PVR_ASSERT((ui64EndAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT) - 1)) == 0); - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + ui64BaseAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT; + ui64EndAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT; - *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; + ui64RegValue = (ui64RegValue << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT) | + (ui64EndAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) | + (ui64BaseAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT); + return ui64RegValue; } -void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) +IMG_UINT64 RGXMMUInitRangeValue(IMG_UINT32 ui32MMURange) { - PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT64 ui64RegVal; + IMG_UINT32 ui32Non4KHeapPageShift; + IMG_UINT32 ui32RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); + PVRSRV_ERROR eError; - PVR_ASSERT(hPrivate != NULL); - psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + switch (ui32MMURange) + { + case RGX_MMU_RANGE_GLOBAL: + /* set the last MMU config range covering the entire virtual memory to the OS's page size */ + ui64RegVal = RGXMMUComputeRangeValue(ui32RgxDefaultPageShift, 0, (1ULL << 40)); + break; + case RGX_MMU_RANGE_NON4KHEAP: + /* + * If the Non4K heap has a different page size than the OS's page size + * (used as default for all other heaps), configure one MMU config range + * for the Non4K heap + */ + eError = RGXGetNon4KHeapPageShift(NULL, &ui32Non4KHeapPageShift); + PVR_LOG_IF_ERROR(eError, "RGXGetNon4KHeapPageShift"); + + if (eError == PVRSRV_OK) + { + if (ui32Non4KHeapPageShift != ui32RgxDefaultPageShift) + { + ui64RegVal = RGXMMUComputeRangeValue(ui32Non4KHeapPageShift, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE); + } + else + { + ui64RegVal = RGXMMUComputeRangeValue(ui32RgxDefaultPageShift, 0, (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)); + } + } + else + { + /* + * Error from Non4K Heap shift. Default to the normal PageShift + * to attempt to continue. + */ + ui64RegVal = RGXMMUComputeRangeValue(ui32RgxDefaultPageShift, 0, (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)); + } + break; + default: + ui64RegVal = RGXMMUComputeRangeValue(ui32RgxDefaultPageShift, 0, (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)); + break; + } - *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; + return ui64RegVal; } -IMG_BOOL RGXDeviceAckIrq(const void *hPrivate) -{ - RGX_LAYER_PARAMS *psParams; - PVRSRV_RGXDEV_INFO *psDevInfo; - - PVR_ASSERT(hPrivate != NULL); - psParams = (RGX_LAYER_PARAMS*)hPrivate; - psDevInfo = psParams->psDevInfo; - - return (psDevInfo->pfnRGXAckIrq != NULL) ? 
- psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmulticore.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmulticore.c index ab6fe5829107..0c8bc6747805 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmulticore.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmulticore.c @@ -49,6 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "multicore_defs.h" #include "allocmem.h" #include "pvr_debug.h" +#include "rgxfwmemctx.h" @@ -68,19 +69,20 @@ static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps) { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError = PVRSRV_OK; - if (psDeviceNode->ui32MultiCoreNumCores == 0) + if (psDevInfo->ui32MultiCoreNumCores == 0) { /* MULTICORE not supported on this device */ eError = PVRSRV_ERROR_NOT_SUPPORTED; } else { - *pui32NumCores = psDeviceNode->ui32MultiCoreNumCores; + *pui32NumCores = psDevInfo->ui32MultiCoreNumCores; if (ui32CapsSize > 0) { - if (ui32CapsSize < psDeviceNode->ui32MultiCoreNumCores) + if (ui32CapsSize < psDevInfo->ui32MultiCoreNumCores) { PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small")); eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; @@ -89,9 +91,9 @@ static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, { IMG_UINT32 i; - for (i = 0; i < psDeviceNode->ui32MultiCoreNumCores; ++i) + for (i = 0; i < psDevInfo->ui32MultiCoreNumCores; ++i) { - pui64Caps[i] = psDeviceNode->pui64MultiCoreCapabilities[i]; + pui64Caps[i] = psDevInfo->pui64MultiCoreCapabilities[i]; } } } @@ -105,7 +107,7 @@ static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, /* * RGXInitMultiCoreInfo: * Read multicore HW registers and fill in data structure for clients. - * Return not_supported on cores without multicore. + * Return not supported on cores without multicore. */ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) { @@ -119,40 +121,96 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) } /* defaults for non-multicore devices */ - psDeviceNode->ui32MultiCoreNumCores = 0; - psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); - psDeviceNode->pui64MultiCoreCapabilities = NULL; + psDevInfo->ui32MultiCoreNumCores = 0; + psDevInfo->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); + psDevInfo->pui64MultiCoreCapabilities = NULL; psDeviceNode->pfnGetMultiCoreInfo = NULL; if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) { + IMG_BOOL bPowerWasDown; IMG_UINT32 ui32MulticoreInfo; IMG_UINT32 ui32PrimaryCoreIds; IMG_UINT32 ui32PrimaryId; - IMG_UINT32 ui32TotalCores; IMG_UINT32 ui32NumCores; - IMG_UINT32 id, i; + IMG_UINT32 i; + IMG_UINT32 ui32CoresFoundInDomain = 0; + +#if defined(RGX_HOST_SECURE_REGBANK_OFFSET) && defined(XPU_MAX_REGBANKS_ADDR_WIDTH) + IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH)); + + /* Ensure the HOST_SECURITY reg bank definitions are correct */ + if ((RGX_HOST_SECURE_REGBANK_OFFSET + RGX_HOST_SECURE_REGBANK_SIZE) != ui32MulticoreRegBankOffset) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Register bank definitions for HOST_SECURITY don't match core's configuration.", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } +#endif + bPowerWasDown = ! 
PVRSRVIsSystemPowered(psDeviceNode); + + /* Power-up the device as required to read the registers */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && bPowerWasDown) + { + PVRSRVPowerLock(psDeviceNode); + eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVSetSystemPowerState ON failed (%u)", __func__, eError)); + PVRSRVPowerUnlock(psDeviceNode); + return eError; + } + } + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + RGXFWIF_MULTICORE_INFO *psRGXMulticoreInfo; + IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; + + LOOP_UNTIL_TIMEOUT_US(ui32FwTimeout) + { + RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores, + INVALIDATE); + if (*((volatile IMG_UINT32*)&psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); - ui32NumCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_DOMAIN) + if (*((volatile IMG_UINT32*)&psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo.ui32NumCores) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "Multicore info not available for guest")); + return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; + } + + psRGXMulticoreInfo = &psDevInfo->psRGXFWIfOsInit->sRGXMulticoreInfo; + ui32NumCores = psRGXMulticoreInfo->ui32NumCores; + ui32MulticoreInfo = psRGXMulticoreInfo->ui32MulticoreInfo; + + PVR_LOG(("RGX Guest Device initialised with %u %s", + ui32NumCores, (ui32NumCores == 1U) ? "core" : "cores")); + } + else +#endif + { + ui32NumCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_DOMAIN) & ~RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK) >> RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT; - - ui32TotalCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) - & ~RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK) - >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT; - ui32MulticoreInfo = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE); + ui32MulticoreInfo = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE); + } #if defined(NO_HARDWARE) /* override to defaults if no hardware */ - ui32NumCores = 8;//RGX_MULTICORE_MAX_NOHW_CORES; - ui32TotalCores = RGX_MULTICORE_MAX_NOHW_CORES; + ui32NumCores = RGX_MULTICORE_MAX_NOHW_CORES; ui32MulticoreInfo = 0; /* primary id 0 with 7 secondaries */ #endif /* ID for this primary is in this register */ ui32PrimaryId = (ui32MulticoreInfo & ~RGX_CR_MULTICORE_ID_CLRMSK) >> RGX_CR_MULTICORE_ID_SHIFT; /* allocate storage for capabilities */ - psDeviceNode->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDeviceNode->pui64MultiCoreCapabilities[0])); - if (psDeviceNode->pui64MultiCoreCapabilities == NULL) + psDevInfo->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDevInfo->pui64MultiCoreCapabilities[0])); + if (psDevInfo->pui64MultiCoreCapabilities == NULL) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__)); return PVRSRV_ERROR_OUT_OF_MEMORY; @@ -161,31 +219,49 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) ui32PrimaryCoreIds = (ui32MulticoreInfo & ~RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK) >> RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT; - psDeviceNode->ui32MultiCorePrimaryId = ui32PrimaryId; - psDeviceNode->ui32MultiCoreNumCores = ui32NumCores; + psDevInfo->ui32MultiCorePrimaryId 
= ui32PrimaryId; + psDevInfo->ui32MultiCoreNumCores = ui32NumCores; PVR_DPF((PVR_DBG_MESSAGE, "Multicore domain has %d cores with primary id %u\n", ui32NumCores, ui32PrimaryId)); PDUMPCOMMENT(psDeviceNode, "RGX Multicore domain has %d cores with primary id %u\n", ui32NumCores, ui32PrimaryId); - for (i = 0, id = 0; id < ui32TotalCores; ++id) + for (i = 0; i < RGX_MULTICORE_MAX_NOHW_CORES; i++) { if ((ui32PrimaryCoreIds & 0x7) == ui32PrimaryId) { + if (ui32CoresFoundInDomain >= ui32NumCores) + { + /* Enough cores have already been found in the domain, but there is an additional match. + This is an illegal combination. */ + PVR_ASSERT(ui32CoresFoundInDomain < ui32NumCores); + break; + } + /* currently all cores are identical so have the same capabilities */ - psDeviceNode->pui64MultiCoreCapabilities[i] = id - | ((id == ui32PrimaryId) ? RGX_MULTICORE_CAPABILITY_PRIMARY_EN : 0) + psDevInfo->pui64MultiCoreCapabilities[ui32CoresFoundInDomain] = i + | ((i == ui32PrimaryId) ? RGX_MULTICORE_CAPABILITY_PRIMARY_EN : 0) | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN | RGX_MULTICORE_CAPABILITY_COMPUTE_EN | RGX_MULTICORE_CAPABILITY_FRAGMENT_EN; - PDUMPCOMMENT(psDeviceNode, "\tCore %u has caps 0x%08x", id, - (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]); - PVR_DPF((PVR_DBG_MESSAGE, "Core %u has caps 0x%08x", id, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i])); - ++i; + PDUMPCOMMENT(psDeviceNode, "\tCore %u has caps 0x%08x", i, + (IMG_UINT32)psDevInfo->pui64MultiCoreCapabilities[ui32CoresFoundInDomain]); + PVR_DPF((PVR_DBG_MESSAGE, "Core %u has caps 0x%08x", i, (IMG_UINT32)psDevInfo->pui64MultiCoreCapabilities[ui32CoresFoundInDomain])); + ui32CoresFoundInDomain++; } ui32PrimaryCoreIds >>= 3; } + PVR_ASSERT(ui32CoresFoundInDomain == ui32NumCores); + + /* revert power state to what it was on entry to this function */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) && bPowerWasDown) + { + eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); + PVRSRVPowerUnlock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); + } + /* Register callback to return info about multicore setup to client bridge */ psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo; } @@ -205,12 +281,14 @@ PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) */ void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) { - if (psDeviceNode->pui64MultiCoreCapabilities != NULL) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDevInfo->pui64MultiCoreCapabilities != NULL) { - OSFreeMem(psDeviceNode->pui64MultiCoreCapabilities); - psDeviceNode->pui64MultiCoreCapabilities = NULL; - psDeviceNode->ui32MultiCoreNumCores = 0; - psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); + OSFreeMem(psDevInfo->pui64MultiCoreCapabilities); + psDevInfo->pui64MultiCoreCapabilities = NULL; + psDevInfo->ui32MultiCoreNumCores = 0; + psDevInfo->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); } psDeviceNode->pfnGetMultiCoreInfo = NULL; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpdump.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpdump.c index bca6a9bcedad..a844e17e9b97 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpdump.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpdump.c @@ -43,13 +43,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#if defined(PDUMP) #include "pvrsrv.h" +#include "devicemem_utils.h" #include "devicemem_pdump.h" +#include "devicemem_server.h" #include "rgxpdump.h" +#include "rgxpdump_common.h" #include "pdumpdesc.h" -#if defined(SUPPORT_VALIDATION) -#include "validation_soc.h" -#include "rgxtbdefs.h" -#endif /* * There are two different set of functions one for META/RISCV and one for MIPS @@ -61,102 +60,165 @@ static PVRSRV_ERROR _FWDumpSignatureBufferKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32PDumpFlags) { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SUPPORT_FIRMWARE_GCOV) + DECLARE_DLLIST(sFirmwareGcovBufferValidRegions); +#endif + DECLARE_DLLIST(sSigTDMChecksValidRegions); + DECLARE_DLLIST(sSigTAChecksValidRegions); + DECLARE_DLLIST(sSig3DChecksValidRegions); + DECLARE_DLLIST(sSigCDMChecksValidRegions); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + DECLARE_DLLIST(sSigRDMChecksValidRegions); +#endif PVR_UNREFERENCED_PARAMETER(psConnection); +#if defined(SUPPORT_FIRMWARE_GCOV) + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psFirmwareGcovBufferMemDesc, + psDevInfo->ui32FirmwareGcovSize, + &sFirmwareGcovBufferValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrReturnError); +#endif + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigTDMChecksMemDesc, + psDevInfo->ui32SigTDMChecksSize, + &sSigTDMChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeGcovBufferRegions); + } + + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigTAChecksMemDesc, + psDevInfo->ui32SigTAChecksSize, + &sSigTAChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSigTDMChecksRegions); + + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSig3DChecksMemDesc, + psDevInfo->ui32Sig3DChecksSize, + &sSig3DChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSigTAChecksRegions); + + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigCDMChecksMemDesc, + psDevInfo->ui32SigCDMChecksSize, + &sSigCDMChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSig3DChecksRegions); + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && + RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) + { + eError = PDumpGetValidRegion(psDeviceNode, + psDevInfo->psRGXFWSigRDMChecksMemDesc, + psDevInfo->ui32SigRDMChecksSize, + &sSigRDMChecksValidRegions); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpGetValidRegion", ErrFreeSigCDMChecksRegions); + } +#endif + + PDUMPIF(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); PDUMPELSE(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); #if defined(SUPPORT_FIRMWARE_GCOV) /* Gcov */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Gcov Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psFirmwareGcovBufferMemDesc, - 0, - psDevInfo->ui32FirmwareGcovSize, - "firmware_gcov.img", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psFirmwareGcovBufferMemDesc, + &sFirmwareGcovBufferValidRegions, + "firmware_gcov.img", + ui32PDumpFlags); #endif + /* TDM signatures */ - PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); - 
DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTDMChecksMemDesc, - 0, - psDevInfo->ui32SigTDMChecksSize, - "out.2dsig", - 0, - ui32PDumpFlags); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigTDMChecksMemDesc, + &sSigTDMChecksValidRegions, + "out.tdmsig", + ui32PDumpFlags); + } /* TA signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TA signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc, - 0, - psDevInfo->ui32SigTAChecksSize, - "out.tasig", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigTAChecksMemDesc, + &sSigTAChecksValidRegions, + "out.tasig", + ui32PDumpFlags); /* 3D signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc, - 0, - psDevInfo->ui32Sig3DChecksSize, - "out.3dsig", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSig3DChecksMemDesc, + &sSig3DChecksValidRegions, + "out.3dsig", + ui32PDumpFlags); + /* CDM signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump CDM signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigCDMChecksMemDesc, - 0, - psDevInfo->ui32SigCDMChecksSize, - "out.cdmsig", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigCDMChecksMemDesc, + &sSigCDMChecksValidRegions, + "out.cdmsig", + ui32PDumpFlags); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) { /* RDM signatures */ PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump RDM signatures and checksums Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigRDMChecksMemDesc, - 0, - psDevInfo->ui32SigRDMChecksSize, - "out.rdmsig", - 0, - ui32PDumpFlags); + PDumpSaveToFileVirtual(psDeviceNode, + psDevInfo->psRGXFWSigRDMChecksMemDesc, + &sSigRDMChecksValidRegions, + "out.rdmsig", + ui32PDumpFlags); } +#endif PDUMPFI(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags); -#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_TRP) || defined(SUPPORT_WGP) || defined(SUPPORT_FBCDC_SIGNATURE_CHECK)) - /* - * Validation signatures buffer - */ - PDUMPIF(psDeviceNode, "DISABLE_VALIDATION_CHECKSUM_BUFFER_DUMP", ui32PDumpFlags); - PDUMPELSE(psDeviceNode, "DISABLE_VALIDATION_CHECKSUM_BUFFER_DUMP", ui32PDumpFlags); - PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump validation signatures buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWValidationSigMemDesc, - 0, - psDevInfo->ui32ValidationSigSize, - "out.trpsig", - 0, - ui32PDumpFlags); + return PVRSRV_OK; - PDUMPFI(psDeviceNode, "DISABLE_VALIDATION_CHECKSUM_BUFFER_DUMP", ui32PDumpFlags); +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) +ErrFreeSigCDMChecksRegions: #endif - - return PVRSRV_OK; + DevmemIntPDumpFreeValidRegions(&sSigCDMChecksValidRegions); +ErrFreeSig3DChecksRegions: + DevmemIntPDumpFreeValidRegions(&sSig3DChecksValidRegions); +ErrFreeSigTAChecksRegions: + DevmemIntPDumpFreeValidRegions(&sSigTAChecksValidRegions); +ErrFreeSigTDMChecksRegions: + DevmemIntPDumpFreeValidRegions(&sSigTDMChecksValidRegions); +ErrFreeGcovBufferRegions: +#if 
defined(SUPPORT_FIRMWARE_GCOV) + DevmemIntPDumpFreeValidRegions(&sFirmwareGcovBufferValidRegions); +ErrReturnError: +#endif + return eError; } -static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 ui32PDumpFlags) + +static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset; + const IMG_CHAR acOutTraceFileName[] = "out.trace"; + PVR_UNREFERENCED_PARAMETER(psConnection); - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); /* Dump trace buffers */ PDUMPIF(psDeviceNode, "ENABLE_TRACEBUF", ui32PDumpFlags); @@ -168,73 +230,186 @@ static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection, * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is * "expression must have a constant value". */ - const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff + const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff = + (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); + + /* ui32TracePointer tracepointer */ + ui32Size = sizeof(IMG_UINT32); + PDumpSaveToFileVirtualNoValidate(psDeviceNode, + psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + uiTraceBufThreadNumOff, + ui32Size, + acOutTraceFileName, + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* next, dump size of trace buffer in DWords */ + ui32Size = sizeof(IMG_UINT32); + PDumpSaveToFileVirtualNoValidate(psDeviceNode, + psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), + ui32Size, + acOutTraceFileName, + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* trace buffer */ + ui32Size = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); + PDumpSaveToFileVirtualNoValidate(psDeviceNode, + psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], + 0, /* 0 offset in the trace buffer mem desc */ + ui32Size, + acOutTraceFileName, + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + + /* assert info buffer */ + ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + + sizeof(IMG_UINT32); + PDumpSaveToFileVirtualNoValidate(psDeviceNode, + psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + /* move to first element of sTraceBuf */ + offsetof(RGXFWIF_TRACEBUF, sTraceBuf) + /* skip required number of sTraceBuf elements */ + + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) + /* offset into its sAssertBuf, to be pdumped */ + + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), + ui32Size, + acOutTraceFileName, + ui32OutFileOffset, + ui32PDumpFlags); + ui32OutFileOffset += ui32Size; + } + PDUMPFI(psDeviceNode, "ENABLE_TRACEBUF", ui32PDumpFlags); + + /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */ + PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc); + + return PVRSRV_OK; +} + + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) +static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* TA 
signatures */ + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TA signatures and checksums Buffer"); + + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTAChecksMemDesc, + 0, + psDevInfo->ui32SigTAChecksSize, + "out.tasig", + 0); + + /* 3D signatures */ + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer"); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSig3DChecksMemDesc, + 0, + psDevInfo->ui32Sig3DChecksSize, + "out.3dsig", + 0); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) + { + /* TDM signatures */ + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer"); + DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTDMChecksMemDesc, + 0, + psDevInfo->ui32SigTDMChecksSize, + "out.tdmsig", + 0); + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); + + /* Dump trace buffers */ + PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump trace buffers"); + for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++) + { + /* + * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of + * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is + * "expression must have a constant value". + */ + const IMG_DEVMEM_OFFSET_T uiTraceBufOff = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]); + /* Same again... 
*/ + const IMG_DEVMEM_OFFSET_T uiTraceBufSpaceAssertBufOff + = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF_SPACE *)0)->sAssertBuf); + /* ui32TracePointer tracepointer */ ui32Size = sizeof(IMG_UINT32); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, - uiTraceBufThreadNumOff, + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + uiTraceBufOff, ui32Size, "out.trace", - ui32OutFileOffset, - ui32PDumpFlags); + ui32OutFileOffset); ui32OutFileOffset += ui32Size; /* next, dump size of trace buffer in DWords */ ui32Size = sizeof(IMG_UINT32); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), ui32Size, "out.trace", - ui32OutFileOffset, - ui32PDumpFlags); + ui32OutFileOffset); ui32OutFileOffset += ui32Size; /* trace buffer */ - ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); + ui32Size = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum], 0, /* 0 offset in the trace buffer mem desc */ ui32Size, "out.trace", - ui32OutFileOffset, - ui32PDumpFlags); + ui32OutFileOffset); ui32OutFileOffset += ui32Size; /* assert info buffer */ ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR) + sizeof(IMG_UINT32); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, - offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */ - + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */ - + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */ + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + uiTraceBufOff + uiTraceBufSpaceAssertBufOff, ui32Size, "out.trace", - ui32OutFileOffset, - ui32PDumpFlags); + ui32OutFileOffset); ui32OutFileOffset += ui32Size; } - PDUMPFI(psDeviceNode, "ENABLE_TRACEBUF", ui32PDumpFlags); - - /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */ - PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc); /* Dump hwperf buffer */ - PDUMPIF(psDeviceNode, "ENABLE_HWPERF", ui32PDumpFlags); PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump HWPerf Buffer"); - DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc, + DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfHWPerfBufMemDesc, 0, psDevInfo->ui32RGXFWIfHWPerfBufSize, "out.hwperf", - 0, - ui32PDumpFlags); - PDUMPFI(psDeviceNode, "ENABLE_HWPERF", ui32PDumpFlags); + 0); return PVRSRV_OK; - } +#endif /* @@ -246,137 +421,33 @@ PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection, { if (psDeviceNode->pfnCheckDeviceFeature) { - return _FWDumpSignatureBufferKM(psConnection, - psDeviceNode, - ui32PDumpFlags); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + return _MipsDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } + else +#endif + { + return _FWDumpSignatureBufferKM(psConnection, + psDeviceNode, + ui32PDumpFlags); + } } return PVRSRV_OK; } -#if defined(SUPPORT_VALIDATION) -PVRSRV_ERROR PVRSRVPDumpComputeCRCSignatureCheckKM(CONNECTION_DATA * 
psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT32 ui32PDumpFlags) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - - if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))) - { - return PVRSRV_ERROR_NOT_SUPPORTED; - } - - /* - * Add a PDUMP POLL on the KZ signature check status. - */ - if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_NOERR_EN) - { - PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: match required"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_SCRATCH11, - 1U, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - } - else if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_ERR_EN) - { - PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: mismatch required"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_SCRATCH11, - 2U, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - } - PVR_UNREFERENCED_PARAMETER(psConnection); - return PVRSRV_OK; -} -#endif PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32PDumpFlags) { -#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_FBCDC_SIGNATURE_CHECK) - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - - /* - * Add a PDUMP POLL on the FBC/FBDC signature check status. - */ - if (psDevInfo->ui32ValidationFlags & RGX_VAL_FBDC_SIG_CHECK_NOERR_EN) - { - PDUMPCOMMENT(psDeviceNode, "Verify FBCDC Signature: match required"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FBCDC_STATUS, - 0, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - eError = PDUMPREGPOL(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FBCDC_SIGNATURE_STATUS, - 0, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - } - else if (psDevInfo->ui32ValidationFlags & RGX_VAL_FBDC_SIG_CHECK_ERR_EN) - { - static char pszVar1[] = ":SYSMEM:$2"; - static char pszVar2[] = ":SYSMEM:$3"; - char *pszLoopCondition; - - /* - * Do: - * v1 = [RGX_CR_FBCDC_STATUS] - * v2 = [RGX_CR_FBCDC_SIGNATURE_STATUS] - * While (v1 OR v2) == 0 - */ - PDUMPCOMMENT(psDeviceNode, "Verify FBCDC Signature: mismatch required"); - eError = PDumpInternalValCondStr(&pszLoopCondition, - pszVar1, - 0, - 0xFFFFFFFF, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Unable to write pdump verification sequence (%d)", __func__, eError)); - } - else - { - eError = PDumpStartDoLoopKM(psDeviceNode, ui32PDumpFlags); - - eError = PDumpRegRead32ToInternalVar(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FBCDC_STATUS, - pszVar1, - ui32PDumpFlags); - - eError = PDumpRegRead32ToInternalVar(psDeviceNode, - RGX_PDUMPREG_NAME, - RGX_CR_FBCDC_SIGNATURE_STATUS, - pszVar2, - ui32PDumpFlags); - - eError = PDumpWriteVarORVarOp(psDeviceNode, pszVar1, pszVar2, ui32PDumpFlags); - eError = PDumpEndDoWhileLoopKM(psDeviceNode, pszLoopCondition, ui32PDumpFlags); - OSFreeMem(pszLoopCondition); - } - } -#else PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -#endif /* SUPPORT_VALIDATION */ PVR_UNREFERENCED_PARAMETER(psConnection); return PVRSRV_OK; @@ -390,30 +461,9 @@ PVRSRV_ERROR PVRSRVPDumpValCheckPreCommandKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32PDumpFlags) { -#if defined(SUPPORT_VALIDATION) - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - 
- //if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN) && - if (psDevInfo->ui32ValidationFlags & RGX_VAL_GPUSTATEPIN_EN) - { - /* - * Add a PDUMP POLL on the GPU_STATE inactive status. - */ - PDUMPCOMMENT(psDeviceNode, "Verify GPU system status: INACTIVE"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_TB_PDUMPREG_NAME, - RGX_TB_SYSTEM_STATUS, - 0, - ~RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_EQUAL); - } -#else PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); PVR_UNREFERENCED_PARAMETER(psConnection); -#endif return PVRSRV_OK; } @@ -425,30 +475,9 @@ PVRSRV_ERROR PVRSRVPDumpValCheckPostCommandKM(CONNECTION_DATA * psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32PDumpFlags) { -#if defined(SUPPORT_VALIDATION) - PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - - //if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN) && - if (psDevInfo->ui32ValidationFlags & RGX_VAL_GPUSTATEPIN_EN) - { - /* - * Add a PDUMP POLL on the GPU_STATE active status. - */ - PDUMPCOMMENT(psDeviceNode, "Verify GPU system status: ACTIVE"); - eError = PDUMPREGPOL(psDeviceNode, - RGX_TB_PDUMPREG_NAME, - RGX_TB_SYSTEM_STATUS, - 0, - ~RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK, - ui32PDumpFlags, - PDUMP_POLL_OPERATOR_NOTEQUAL); - } -#else PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); PVR_UNREFERENCED_PARAMETER(psConnection); -#endif return PVRSRV_OK; } @@ -460,7 +489,16 @@ PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection, { if (psDeviceNode->pfnCheckDeviceFeature) { - return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) + { + return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } + else +#endif + { + return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags); + } } return PVRSRV_OK; @@ -478,12 +516,17 @@ PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic IMG_FB_COMPRESSION eFBCompression, const IMG_UINT32 *paui32FBCClearColour, PDUMP_FBC_SWIZZLE eFBCSwizzle, - IMG_PBYTE abyPDumpDesc) + IMG_PBYTE pbyPDumpImageHdr) { IMG_PUINT32 pui32Word; IMG_UINT32 ui32HeaderDataSize; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) || defined(RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK) || defined(RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK) + IMG_UINT32 ui32TFBCControl = (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >> + RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT; +#endif + /* Validate parameters */ if (((IMAGE_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) || ((IMAGE_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0)) @@ -491,9 +534,9 @@ PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic return PVRSRV_ERROR_INVALID_PARAMS; } - memset(abyPDumpDesc, 0, IMAGE_HEADER_SIZE); + memset(pbyPDumpImageHdr, 0, IMAGE_HEADER_SIZE); - pui32Word = IMG_OFFSET_ADDR(abyPDumpDesc, 0); + pui32Word = IMG_OFFSET_ADDR(pbyPDumpImageHdr, 0); pui32Word[0] = (IMAGE_HEADER_TYPE << HEADER_WORD0_TYPE_SHIFT); pui32Word[1] = (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) | (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT); @@ -593,6 +636,89 @@ PVRSRV_ERROR 
RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDevic pui32Word[12] = (IMG_UINT32) (psDeviceNode->ui64FBCClearColour & 0xFFFFFFFF); pui32Word[13] = (IMG_UINT32) (psDeviceNode->ui64FBCClearColour >> 32); +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT)) + { + /* Should match current value of RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP */ + IMG_UINT32 ui32TFBCGroup = ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK; + switch (ui32TFBCGroup) + { + case RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0: + pui32Word[14] = IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75; + break; + case RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1: + pui32Word[14] = IMAGE_HEADER_WORD14_TFBC_GROUP_25_37_50; + break; + } + } + else +#endif + { + pui32Word[14] = IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75; + } + +#if defined(RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) + { + /* Should match current value of RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME */ + IMG_UINT32 ui32TFBCScheme = ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK; + switch (ui32TFBCScheme) + { + case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT: + pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_ALL; + break; + case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION: + pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_CORR; + break; + case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD: + pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported TFBC compression control scheme - %d", ui32TFBCScheme)); + return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE; + } + } + else +#endif + { + /* Should always be set to 2 ("TFBC delta standard only") on cores without this feature */ + pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY; + } + +#if defined(RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_NATIVE_YUV10)) + { + IMG_UINT32 ui32TFBCOverrideYUV10 = (ui32TFBCControl & RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN); + + if (ui32TFBCOverrideYUV10) + { + pui32Word[14] |= IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN; + } + } +#endif + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) + { + RGX_LAYER_PARAMS sParams = {.psDevInfo = psDevInfo}; + + if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, TFBC_VERSION) >= 20U) + { + IMG_UINT32 ui32TFBCOverrideLossyMinChannel = (ui32TFBCControl & RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN); + + if (ui32TFBCOverrideLossyMinChannel) + { + pui32Word[14] |= IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_EN; + } + } + else + { + /* Should be set on TFBC version < 2.0 cores */ + pui32Word[14] |= IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_EN; + } + } +#endif /* defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) */ + return PVRSRV_OK; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.c deleted file mode 100644 index 45a83e8e0997..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.c +++ /dev/null @@ -1,1602 +0,0 @@ -/*************************************************************************/ /*! 
-@File -@Title Device specific power routines -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Device specific functions -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ /**************************************************************************/ - -#if defined(__linux__) -#include -#else -#include -#endif - -#include "rgxpower.h" -#include "rgxinit.h" -#include "rgx_fwif_km.h" -#include "rgxfwutils.h" -#include "pdump_km.h" -#include "pvr_debug.h" -#include "osfunc.h" -#include "rgxdebug.h" -#include "devicemem.h" -#include "devicemem_pdump.h" -#include "rgxtimecorr.h" -#include "devicemem_utils.h" -#include "htbserver.h" -#include "rgxstartstop.h" -#include "rgxfwimageutils.h" -#include "sync.h" -#include "rgxdefs_km.h" - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#include "process_stats.h" -#endif -#if defined(SUPPORT_LINUX_DVFS) -#include "pvr_dvfs_device.h" -#endif -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -#include "validation_soc.h" -#include "oskm_apphint.h" -#endif - -static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_KCCB_CMD sCmd; - PVRSRV_ERROR eError; - IMG_UINT32 ui32CmdKCCBSlot; - - /* Send the Timeout notification to the FW */ - sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; - sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; - sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT; - - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, - &sCmd, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - - return eError; -} - -static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; - IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OS]; - IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OS][RGXFWIF_GPU_UTIL_STATE_NUM]; - IMG_UINT64 ui64LastPeriod; - IMG_UINT64 ui64LastState; - IMG_UINT64 ui64LastTime; - IMG_UINT64 ui64TimeNow; - RGXFWIF_DM eDM; - - psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; - paui64DMOSLastWord = &psUtilFWCb->aaui64DMOSLastWord[0]; - paaui64DMOSStatsCounters = &psUtilFWCb->aaaui64DMOSStatsCounters[0]; - - OSLockAcquire(psDevInfo->hGPUUtilLock); - - ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode)); - - /* Update counters to account for the time since the last update */ - ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord); - ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64GpuLastWord); - ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); - psUtilFWCb->aui64GpuStatsCounters[ui64LastState] += ui64LastPeriod; - - /* Update state and time of the latest update */ - psUtilFWCb->ui64GpuLastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); - - for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) - { - IMG_UINT32 ui32OSid; - - for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) - { - ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32OSid]); - ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32OSid]); - ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); - paaui64DMOSStatsCounters[eDM][ui32OSid][ui64LastState] += ui64LastPeriod; - - /* Update state and time of the latest update */ - paui64DMOSLastWord[eDM][ui32OSid] = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); - } - } - - OSLockRelease(psDevInfo->hGPUUtilLock); -} - -static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - PVRSRV_ERROR eError; - -#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) - PVRSRV_DEVICE_CONFIG *psDevConfig = 
psDeviceNode->psDevConfig; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - if (psDevConfig->pfnTDRGXStop == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); - return PVRSRV_ERROR_NOT_IMPLEMENTED; - } - - eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); -#else - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - eError = RGXStop(&psDevInfo->sLayerParams); -#endif - - return eError; -} - -/* - RGXPrePowerState -*/ -PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - - if ((eNewPowerState != eCurrentPowerState) && - (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) - { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_KCCB_CMD sPowCmd; - IMG_UINT32 ui32CmdKCCBSlot; - - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - - /* Send the Power off request to the FW */ - sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; - sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; - sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED); - - eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", - __func__)); - return eError; - } - - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, - &sPowCmd, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", - __func__)); - return eError; - } - - /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies - on the EventObject which is signalled in this MISR */ - eError = RGXPollForGPCommandCompletion(psDeviceNode, - psDevInfo->psPowSyncPrim->pui32LinAddr, - 0x1, 0xFFFFFFFF); - - /* Check the Power state after the answer */ - if (eError == PVRSRV_OK) - { - /* Finally, de-initialise some registers. */ - if (psFwSysData->ePowState == RGXFWIF_POW_OFF) - { -#if !defined(NO_HARDWARE) - IMG_UINT32 ui32TID; - const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; - - /* Driver takes the VZ Fw-KM connection down, preventing the - * firmware from submitting further interrupts */ - KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); - - for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) - { - /* Wait for the pending FW processor to host interrupts to come back. */ - eError = PVRSRVPollForValueKM(psDeviceNode, - (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32TID], - psFwOsData->aui32InterruptCount[ui32TID], - 0xffffffff, - POLL_FLAG_LOG_ERROR); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Wait for pending interrupts failed (DevID %u). 
Thread %u: Host: %u, FW: %u", - __func__, - psDeviceNode->sDevId.ui32InternalID, - ui32TID, - psDevInfo->aui32SampleIRQCount[ui32TID], - psFwOsData->aui32InterruptCount[ui32TID])); - - RGX_WaitForInterruptsTimeout(psDevInfo); - break; - } - } -#endif /* NO_HARDWARE */ - - /* Update GPU frequency and timer correlation related data */ - RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); - - /* Update GPU state counters */ - _RGXUpdateGPUUtilStats(psDevInfo); - -#if defined(SUPPORT_LINUX_DVFS) - eError = SuspendDVFS(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); - return eError; - } -#endif - - psDevInfo->bRGXPowered = IMG_FALSE; - - eError = RGXDoStop(psDeviceNode); - if (eError != PVRSRV_OK) - { - /* Power down failures are treated as successful since the power was removed but logged. */ - PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", - __func__, PVRSRVGetErrorString(eError))); - psDevInfo->ui32ActivePMReqNonIdle++; - eError = PVRSRV_OK; - } - } - else - { - /* the sync was updated but the pow state isn't off -> the FW denied the transition */ - eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; - - if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)) - { /* It is an error for a forced request to be denied */ - PVR_DPF((PVR_DBG_ERROR, - "%s: Failure to power off during a forced power off. FW: %d", - __func__, psFwSysData->ePowState)); - } - } - } - else if (eError == PVRSRV_ERROR_TIMEOUT) - { - /* timeout waiting for the FW to ack the request: return timeout */ - PVR_DPF((PVR_DBG_WARNING, - "%s: Timeout waiting for powoff ack from the FW", - __func__)); - } - else - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Error waiting for powoff ack from the FW (%s)", - __func__, PVRSRVGetErrorString(eError))); - eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; - } - } - - return eError; -} - -#if defined(SUPPORT_AUTOVZ) -static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT; - IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS); - - LOOP_UNTIL_TIMEOUT(ui32FwTimeout) - { - IMG_UINT32 ui32OSid; - IMG_BOOL bGuestOnline = IMG_FALSE; - - for (ui32OSid = RGXFW_GUEST_OSID_START; - ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) - { - RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) - psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState; - - if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) || - (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING)) - { - bGuestOnline = IMG_TRUE; - PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid)); - } - } - - if (!bGuestOnline) - { - /* Allow Guests to finish reading Connection state registers before disconnecting. */ - OSSleepms(100); - - PVR_DPF((PVR_DBG_WARNING, "%s: All Guest connections are down. " - "Host can power down the GPU.", __func__)); - eError = PVRSRV_OK; - break; - } - else - { - PVR_DPF((PVR_DBG_WARNING, "%s: Waiting for Guests to disconnect " - "before powering down GPU.", __func__)); - - if (PVRSRVPwrLockIsLockedByMe(psDeviceNode)) - { - /* Don't wait with the power lock held as this prevents the vz - * watchdog thread from keeping the fw-km connection alive. 
*/ - PVRSRVPowerUnlock(psDeviceNode); - } - } - - OSSleepms(10); - } END_LOOP_UNTIL_TIMEOUT(); - - if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode)) - { - /* Take back power lock after waiting for Guests */ - eError = PVRSRVPowerLock(psDeviceNode); - } - - return eError; -} -#endif /* defined(SUPPORT_AUTOVZ) */ - -/* - RGXVzPrePowerState -*/ -PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - - PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); - - if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) - { - /* powering down */ -#if defined(SUPPORT_AUTOVZ) - if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) - { - /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down. - * Guest drivers regularly access hardware registers during runtime. If an attempt is made to - * access a GPU register while the GPU is down, the SoC might lock up. */ - eError = _RGXWaitForGuestsToDisconnect(psDeviceNode); - PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect"); - - /* Temporarily restore all power callbacks used by the driver to fully power down the GPU. - * Under AutoVz, power transitions requests (e.g. on driver deinitialisation and unloading) - * are generally ignored and the GPU power state is unaffected. Special power requests like - * those triggered by Suspend/Resume calls must reinstate the callbacks when needed. */ - PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, - &RGXVzPrePowerState, &RGXVzPostPowerState, - psDeviceNode->psDevConfig->pfnPrePowerState, - psDeviceNode->psDevConfig->pfnPostPowerState, - &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest); - } - else - { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && - KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)) - { - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, 0, RGXFWIF_OS_OFFLINE); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); - } - } -#endif - PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", - psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); - } - else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) - { - /* powering up */ - PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", - psDeviceNode->bAutoVzFwIsUp ? 
"TRUE" : "FALSE")); - - } - - if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) - { - /* call regular device power function */ - eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); - } - - return eError; -} - -/* - RGXVzPostPowerState -*/ -PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); - - if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) - { - /* call regular device power function */ - eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); - } - - if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) - { - /* powering down */ - PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError); - PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", - psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); - -#if !defined(SUPPORT_AUTOVZ_HW_REGS) - /* The connection states must be reset on a GPU power cycle. If the states are kept - * in hardware scratch registers, they will be cleared on power down. When using shared - * memory the connection data must be explicitly cleared by the driver. */ - OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL)); -#endif /* defined(SUPPORT_AUTOVZ) && !defined(SUPPORT_AUTOVZ_HW_REGS) */ - - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) - { -#if defined(SUPPORT_AUTOVZ) - /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState. - * Skip this redundant register write, as the Host could have powered down the GPU by now. */ - if (psDeviceNode->bAutoVzFwIsUp) -#endif - { - /* Take the VZ connection down to prevent firmware from submitting further interrupts */ - KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); - } - /* Power transition callbacks were not executed, update RGXPowered flag here */ - psDevInfo->bRGXPowered = IMG_FALSE; - } - } - else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) - { - /* powering up */ - IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS); - volatile IMG_BOOL *pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated; - - PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", - __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", - psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); - if (PVRSRV_VZ_MODE_IS(GUEST)) - { - /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */ - psDevInfo->bRGXPowered = IMG_TRUE; - -#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - /* Guest drivers expect the firmware to have set its end of the - * connection to Ready state by now. */ - if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); - } - - LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US) - { - if (KM_FW_CONNECTION_IS(READY, psDevInfo)) - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Connection is Ready. 
Initialisation proceeding.", __func__)); - break; - } - else - { - OSSleepms(10); - } - } END_LOOP_UNTIL_TIMEOUT(); - - if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Ready state.", __func__)); - return PVRSRV_ERROR_TIMEOUT; - } -#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */ - - /* Guests can only access the register holding the connection states, - * after the GPU is confirmed to be powered up */ - KM_SET_OS_CONNECTION(READY, psDevInfo); - - OSWriteDeviceMem32WithWMB(pbUpdatedFlag, IMG_FALSE); - - /* Kick an initial dummy command to make the firmware initialise all - * its internal guest OS data structures and compatibility information. - * Use the lower-level RGXSendCommandAndGetKCCBSlot() for the job, to make - * sure only 1 KCCB command is issued to the firmware. - * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with - * a pre-kick cache command which can interfere with the FW-KM init handshake. */ - { - RGXFWIF_KCCB_CMD sCmpKCCBCmd; - sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; - - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS, NULL); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot()"); - } - } - else - { - KM_SET_OS_CONNECTION(READY, psDevInfo); - - /* Disable power callbacks that should not be run on virtualised drivers after the GPU - * is fully initialised: system layer pre/post functions and driver idle requests. - * The original device RGX Pre/Post functions are called from this Vz wrapper. */ - PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, - &RGXVzPrePowerState, &RGXVzPostPowerState, - NULL, NULL, NULL, NULL); - -#if defined(SUPPORT_AUTOVZ) - /* During first-time boot the flag is set here, while subsequent reboots will already - * have set it earlier in RGXInit. Set to true from this point onwards in any case. */ - psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; -#endif - } - - /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */ - LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US) - { - if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__)); - break; - } - else - { - PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__)); - OSSleepms(10); - } - } END_LOOP_UNTIL_TIMEOUT(); - - if (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Active state.", __func__)); - return PVRSRV_ERROR_TIMEOUT; - } - - /* poll on the Firmware supplying the compatibility data */ - LOOP_UNTIL_TIMEOUT(ui32FwTimeout) - { - if (*pbUpdatedFlag) - { - break; - } - OSSleepms(10); - } END_LOOP_UNTIL_TIMEOUT(); - - PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware does not respond with compatibility data. 
", PVRSRV_ERROR_TIMEOUT); - - KM_SET_OS_CONNECTION(ACTIVE, psDevInfo); - } - - return PVRSRV_OK; -} - -#if defined(TRACK_FW_BOOT) -static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - FW_BOOT_STAGE eStage; - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) - { - /* Boot stage temporarily stored to the register below */ - eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, - RGX_FW_BOOT_STAGE_REGISTER); - } - else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) - { - eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); - } - else - { - return; - } - - PVR_LOG(("%s: FW reached boot stage %i/%i.", - __func__, eStage, FW_BOOT_INIT_DONE)); -} -#endif - -static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - PVRSRV_ERROR eError; - -#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) - PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; - - if (psDevConfig->pfnTDRGXStart == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!")); - return PVRSRV_ERROR_NOT_IMPLEMENTED; - } - - eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); -#else - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - eError = RGXStart(&psDevInfo->sLayerParams); -#endif - - return eError; -} - -#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) -/* - * To validate the MTS unit we do the following: - * - Immediately after firmware loading for each OSID - * - Write the OSid to a memory location shared with FW - * - Kick the register of that OSid - * (Uncounted, DM 0) - * - FW clears the memory location if OSid matches - * - Host checks that memory location is cleared - * - * See firmware/devices/rgx/rgxfw_bg.c - */ -static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, - RGXFWIF_SYSINIT *psFwSysInit, - PVRSRV_RGXDEV_INFO *psDevInfo) -{ - IMG_UINT32 ui32ScheduleRegister; - IMG_UINT32 ui32OSid; - IMG_UINT32 ui32KickType; - IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS); - - /* Nothing to do if device does not support GPU_VIRTUALISATION */ - if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION)) - { - return PVRSRV_OK; - } - - PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:")); - - ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS); - - if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS) - { - PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); - PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped)); - PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); - } - - ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED; - - for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++) - { - /* set Test field */ - psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT; - -#if defined(PDUMP) - DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, - offsetof(RGXFWIF_SYSINIT, ui32OSKickTest), - sizeof(psFwSysInit->ui32OSKickTest), - PDUMP_FLAGS_CONTINUOUS); -#endif - - OSWriteMemoryBarrier(&psFwSysInit->ui32OSKickTest); - - /* kick register */ - ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); - 
PVR_DPF((PVR_DBG_MESSAGE, " Testing OS: %u, Kick Reg: %X", - ui32OSid, - ui32ScheduleRegister)); - OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType); - OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister); - -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid); - - PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, - ui32ScheduleRegister, ui32KickType, PDUMP_FLAGS_CONTINUOUS); - - DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, - offsetof(RGXFWIF_SYSINIT, ui32OSKickTest), - 0, - 0xFFFFFFFF, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); -#endif - - /* Wait test enable bit to be unset */ - if (PVRSRVPollForValueKM(psDeviceNode, - (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest, - 0, - RGXFWIF_KICK_TEST_ENABLED_BIT, - POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", - ui32OSid, - psFwSysInit->ui32OSKickTest)); - - return PVRSRV_ERROR_TIMEOUT; - } - - /* Check that the value is what we expect */ - if (psFwSysInit->ui32OSKickTest != 0) - { - PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location", - ui32OSid, - psFwSysInit->ui32OSKickTest)); - return PVRSRV_ERROR_INIT_FAILURE; - } - - PVR_DPF((PVR_DBG_MESSAGE, " PASS")); - } - - PVR_LOG(("MTS passed sideband tests")); - return PVRSRV_OK; -} -#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */ - -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -#define SCRATCH_VALUE (0x12345678U) - -static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - void *pvAppHintState = NULL; - const IMG_BOOL bDefaultFalse = IMG_FALSE; - IMG_BOOL bRunRiscvDmiTest; - - IMG_UINT32 *pui32FWCode = NULL; - PVRSRV_ERROR eError; - - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest, - &bDefaultFalse, &bRunRiscvDmiTest); - OSFreeKMAppHintState(pvAppHintState); - - if (bRunRiscvDmiTest == IMG_FALSE) - { - return; - } - - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Error acquiring FW code memory pointer (%s)", - __func__, - PVRSRVGetErrorString(eError))); - } - - PDumpIfKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN"); - - RGXRiscvHalt(psDevInfo); - - /* - * Test RISC-V register reads/writes. - * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers - * via debug module. - */ - - /* Write RISC-V mscratch register */ - RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); - /* Read RISC-V misa register (compare against default standard value) */ - RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MISA_ADDR, RGXRISCVFW_MISA_VALUE); - /* Read RISC-V mscratch register (compare against previously written value) */ - RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); - - /* - * Test RISC-V memory reads/writes. - * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module - * (from RISC-V point of view). 
- */ - - if (pui32FWCode != NULL) - { - IMG_UINT32 ui32Tmp; - - /* Acquire pointer to FW code (bootloader) */ - pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32); - /* Save FW code at address (bootloader) */ - ui32Tmp = *pui32FWCode; - - /* Write FW code at address (bootloader) */ - RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); - /* Read FW code at address (bootloader + 4) (compare against value read from Host) */ - RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE + 4, *(pui32FWCode + 1)); - /* Read FW code at address (bootloader) (compare against previously written value) */ - RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); - /* Restore FW code at address (bootloader) */ - RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, ui32Tmp); - - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); - } - - /* - * Test GPU register reads/writes. - * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module - * (from RISC-V point of view). - * Note that system memory and GPU register accesses both use the same - * debug module interface, targeting different address ranges. - */ - - /* Write SCRATCH0 from the Host */ - PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS); - /* Read SCRATCH0 */ - RGXRiscvPollMem(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE); - /* Write SCRATCH0 */ - RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE); - /* Read SCRATCH0 from the Host */ - PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, ~SCRATCH_VALUE, 0xFFFFFFFFU, - PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL); - - RGXRiscvResume(psDevInfo); - - PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END"); - PDumpFiKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); -} -#endif - -/* - RGXPostPowerState -*/ -PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags) -{ - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - - if ((eNewPowerState != eCurrentPowerState) && - (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) - { - PVRSRV_ERROR eError; - - if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) - { - /* Update timer correlation related data */ - RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); - - /* Update GPU state counters */ - _RGXUpdateGPUUtilStats(psDevInfo); - - eError = RGXDoStart(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed")); - return eError; - } - - OSMemoryBarrier(NULL); - - /* - * Check whether the FW has started by polling on bFirmwareStarted flag - */ - if (PVRSRVPollForValueKM(psDeviceNode, - (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, - IMG_TRUE, - 0xFFFFFFFF, - POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); - eError = PVRSRV_ERROR_TIMEOUT; - -#if defined(TRACK_FW_BOOT) - RGXCheckFWBootStage(psDevInfo); -#endif - - /* - * When bFirmwareStarted fails some info may be gained by doing the following - * debug dump but unfortunately it could be potentially dangerous if the reason - * for not booting is the 
GPU power is not ON. However, if we have reached this - * point the System Layer has returned without errors, we assume the GPU power - * is indeed ON. - */ - RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); - RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); - - return eError; - } - -#if defined(PDUMP) - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); - eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, - offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), - IMG_TRUE, - 0xFFFFFFFFU, - PDUMP_POLL_OPERATOR_EQUAL, - PDUMP_FLAGS_CONTINUOUS); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", - eError)); - return eError; - } - -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) - /* Check if the Validation IRQ flag is set */ - if ((psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) != 0) - { - eError = PVRSRVValidateIrqs(psDeviceNode); - if (eError != PVRSRV_OK) - { - return eError; - } - } -#endif /* defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) */ - -#endif /* defined(PDUMP) */ - -#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) - eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); - if (eError != PVRSRV_OK) - { - return eError; - } -#endif - -#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) - RGXRiscvDebugModuleTest(psDevInfo); -#endif - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVSetFirmwareStartTime(psDeviceNode->psPowerDev, - psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); -#endif - - HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); - - psDevInfo->bRGXPowered = IMG_TRUE; - -#if defined(SUPPORT_LINUX_DVFS) - eError = ResumeDVFS(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS")); - return eError; - } -#endif - } - } - - PDUMPCOMMENT(psDeviceNode, - "RGXPostPowerState: Current state: %d, New state: %d", - eCurrentPowerState, eNewPowerState); - - return PVRSRV_OK; -} - -/* - RGXPreClockSpeedChange -*/ -PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eCurrentPowerState) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - PVR_UNREFERENCED_PARAMETER(psRGXData); - - PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz", - psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); - - if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && - (psFwSysData->ePowState != RGXFWIF_POW_OFF)) - { - /* Update GPU frequency and timer correlation related data */ - RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS); - } - - return eError; -} - -/* - RGXPostClockSpeedChange -*/ -PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eCurrentPowerState) -{ - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - PVRSRV_ERROR eError = 
PVRSRV_OK; - IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; - - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); - - /* Update runtime configuration with the new value */ - OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed, - ui32NewClockSpeed); - - if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && - (psFwSysData->ePowState != RGXFWIF_POW_OFF)) - { - RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd; - IMG_UINT32 ui32CmdKCCBSlot; - - RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS); - - sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE; - sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed; - - PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command"); - - PDUMPPOWCMDSTART(psDeviceNode); - eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, - &sCOREClkSpeedChangeCmd, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - PDUMPPOWCMDEND(psDeviceNode); - - if (eError != PVRSRV_OK) - { - PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command failed"); - PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError)); - return eError; - } - - PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", - psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); - } - - return eError; -} - -/*************************************************************************/ /*! -@Function RGXPowUnitsStateMaskChange -@Description Changes power state of power units/islands -@Input hDevHandle RGX Device Node. -@Input ui32PowUnitsStateMask Mask containing power state of PUs. - Each bit corresponds to an PU. - Bit position corresponds to PU number i.e. Bit0 is PU0, Bit1 is PU1 etc. - '1' indicates ON and '0' indicates OFF. - Value must be non-zero. -@Return PVRSRV_ERROR. -*/ /**************************************************************************/ -PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32PowUnitsStateMask) -{ - - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - RGXFWIF_KCCB_CMD sPowUnitsStateMaskChange; - IMG_UINT32 ui32PowUnitsMask = psDevInfo->ui32AvailablePowUnitsMask; - IMG_UINT32 ui32CmdKCCBSlot; - RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - /** - * Validate the input. At-least one PU must be powered on and all requested - * PU's must be a subset of full PU mask. - */ - if ((ui32PowUnitsStateMask == 0) || (ui32PowUnitsStateMask & ~ui32PowUnitsMask)) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Invalid Power Units mask requested (0x%X). 
Value should be non-zero and sub-set of 0x%X mask", - __func__, - ui32PowUnitsStateMask, - ui32PowUnitsMask)); - return PVRSRV_ERROR_INVALID_SPU_MASK; - } - - psRuntimeCfg->ui32PowUnitsStateMask = ui32PowUnitsStateMask; - OSWriteMemoryBarrier(&psRuntimeCfg->ui32PowUnitsStateMask); - -#if !defined(NO_HARDWARE) - { - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - - if (psFwSysData->ePowState == RGXFWIF_POW_OFF) - { - return PVRSRV_OK; - } - - if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) - { - eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; - PVR_DPF((PVR_DBG_ERROR, - "%s: Powered units state can not be changed, when not IDLE", - __func__)); - return eError; - } - } -#endif - - eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", - __func__)); - return eError; - } - - sPowUnitsStateMaskChange.eCmdType = RGXFWIF_KCCB_CMD_POW; - sPowUnitsStateMaskChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; - sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32PowUnitsStateMask = ui32PowUnitsStateMask; - sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32RACStateMask = 0; - - if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) >= 3) - { - sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32RACStateMask = - (1U << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1; - } - - PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change power units state to 0x%X", - ui32PowUnitsStateMask); - eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, - &sPowUnitsStateMaskChange, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - - if (eError != PVRSRV_OK) - { - PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change power units state. Error:%u", - eError); - PVR_DPF((PVR_DBG_ERROR, - "%s: Scheduling KCCB to change power units state. Error:%u", - __func__, eError)); - return eError; - } - - /* Wait for the firmware to answer. 
*/ - eError = RGXPollForGPCommandCompletion(psDeviceNode, - psDevInfo->psPowSyncPrim->pui32LinAddr, - 0x1, 0xFFFFFFFF); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__)); - return eError; - } - -#if defined(PDUMP) - PDUMPCOMMENT(psDeviceNode, - "%s: Poll for Kernel SyncPrim [0x%p] on DM %d", - __func__, psDevInfo->psPowSyncPrim->pui32LinAddr, - RGXFWIF_DM_GP); - - SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, - 1, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - 0); -#endif - - return PVRSRV_OK; -} - -/* - @Function RGXAPMLatencyChange -*/ -PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, - IMG_UINT32 ui32ActivePMLatencyms, - IMG_BOOL bActivePMLatencyPersistant) -{ - - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_ERROR eError; - RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; - IMG_UINT32 ui32CmdKCCBSlot; - PVRSRV_DEV_POWER_STATE ePowerState; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - eError = PVRSRVPowerLock(psDeviceNode); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); - return eError; - } - - /* Update runtime configuration with the new values and ensure the - * new APM latency is written to memory before requesting the FW to - * read it - */ - psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; - psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; - OSWriteMemoryBarrier(&psRuntimeCfg->bActivePMLatencyPersistant); - - eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); - - if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) - { - RGXFWIF_KCCB_CMD sActivePMLatencyChange; - sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW; - sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE; - - PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change APM latency to %u", - ui32ActivePMLatencyms); - eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, - &sActivePMLatencyChange, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - - if (eError != PVRSRV_OK) - { - PDUMPCOMMENT(psDeviceNode, - "Scheduling command to change APM latency failed. Error:%u", - eError); - PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError)); - goto ErrorExit; - } - } - -ErrorExit: - PVRSRVPowerUnlock(psDeviceNode); - - return eError; -} - -/* - RGXActivePowerRequest -*/ -PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - - psDevInfo->ui32ActivePMReqTotal++; - - /* Powerlock to avoid further requests from racing with the FW hand-shake - * from now on (previous kicks to this point are detected by the FW) - * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid - * potential dead lock between PDumpWriteLock and PowerLock - * during 'DriverLive + PDUMP=1 + EnableAPM=1'. 
- */ - eError = PVRSRVPowerTryLock(psDeviceNode); - if (eError != PVRSRV_OK) - { - if (eError != PVRSRV_ERROR_RETRY) - { - PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock"); - } - else - { - psDevInfo->ui32ActivePMReqRetry++; - } - goto _RGXActivePowerRequest_PowerLock_failed; - } - - /* Check again for IDLE once we have the power lock */ - if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) - { -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVSetFirmwareHandshakeIdleTime(psDeviceNode->psPowerDev, - RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime); -#endif - - PDUMPPOWCMDSTART(psDeviceNode); - eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, - PVRSRV_DEV_POWER_STATE_OFF, - PVRSRV_POWER_FLAGS_NONE); - PDUMPPOWCMDEND(psDeviceNode); - - if (eError == PVRSRV_OK) - { - psDevInfo->ui32ActivePMReqOk++; - } - else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) - { - psDevInfo->ui32ActivePMReqDenied++; - } - } - else - { - psDevInfo->ui32ActivePMReqNonIdle++; - } - - PVRSRVPowerUnlock(psDeviceNode); - -_RGXActivePowerRequest_PowerLock_failed: - - return eError; -} -/* - RGXForcedIdleRequest -*/ - -#define RGX_FORCED_IDLE_RETRY_COUNT 10 - -PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted) -{ - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_KCCB_CMD sPowCmd; - PVRSRV_ERROR eError; - IMG_UINT32 ui32RetryCount = 0; - IMG_UINT32 ui32CmdKCCBSlot; -#if !defined(NO_HARDWARE) - const RGXFWIF_SYSDATA *psFwSysData; -#endif - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - -#if !defined(NO_HARDWARE) - psFwSysData = psDevInfo->psRGXFWIfFwSysData; - - /* Firmware already forced idle */ - if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE) - { - return PVRSRV_OK; - } - - /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */ - if (psFwSysData->ePowState == RGXFWIF_POW_OFF) - { - return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; - } -#endif - - eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", - __func__)); - return eError; - } - sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; - sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; - sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE; - - PDUMPCOMMENT(psDeviceNode, - "RGXForcedIdleRequest: Sending forced idle command"); - - /* Send one forced IDLE command to GP */ - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, - &sPowCmd, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__)); - return eError; - } - - /* Wait for GPU to finish current workload */ - do { - eError = RGXPollForGPCommandCompletion(psDeviceNode, - psDevInfo->psPowSyncPrim->pui32LinAddr, - 0x1, 0xFFFFFFFF); - if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT)) - { - break; - } - ui32RetryCount++; - PVR_DPF((PVR_DBG_WARNING, - "%s: Request timeout. Retry %d of %d", - __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT)); - } while (IMG_TRUE); - - if (eError != PVRSRV_OK) - { - RGXFWNotifyHostTimeout(psDevInfo); - PVR_DPF((PVR_DBG_ERROR, - "%s: Idle request failed. 
Firmware potentially left in forced idle state", - __func__)); - return eError; - } - -#if defined(PDUMP) - PDUMPCOMMENT(psDeviceNode, - "RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", - psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); - - SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, - 1, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - 0); -#endif - -#if !defined(NO_HARDWARE) - /* Check the firmware state for idleness */ - if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) - { - return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; - } -#endif - - return PVRSRV_OK; -} - -/* - RGXCancelForcedIdleRequest -*/ -PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) -{ - PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGXFWIF_KCCB_CMD sPowCmd; - PVRSRV_ERROR eError = PVRSRV_OK; - IMG_UINT32 ui32CmdKCCBSlot; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - - eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", - __func__)); - goto ErrorExit; - } - - /* Send the IDLE request to the FW */ - sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; - sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; - sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE; - - PDUMPCOMMENT(psDeviceNode, - "RGXForcedIdleRequest: Sending cancel forced idle command"); - - /* Send cancel forced IDLE command to GP */ - eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, - &sPowCmd, - PDUMP_FLAGS_NONE, - &ui32CmdKCCBSlot); - - if (eError != PVRSRV_OK) - { - PDUMPCOMMENT(psDeviceNode, - "RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", - RGXFWIF_DM_GP); - goto ErrorExit; - } - - /* Wait for the firmware to answer. */ - eError = RGXPollForGPCommandCompletion(psDeviceNode, - psDevInfo->psPowSyncPrim->pui32LinAddr, - 1, 0xFFFFFFFF); - - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__)); - goto ErrorExit; - } - -#if defined(PDUMP) - PDUMPCOMMENT(psDeviceNode, - "RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", - psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); - - SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, - 1, - 0xffffffff, - PDUMP_POLL_OPERATOR_EQUAL, - 0); -#endif - - return eError; - -ErrorExit: - PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__)); - return eError; -} - -#if defined(SUPPORT_VALIDATION) -#define RGX_POWER_DOMAIN_STATE_INVALID (0xFFFFFFFF) - -PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState, - IMG_UINT32 ui32MaxPowUnitsCount) -{ - /* - * Total power domain states = 2^(Max power unit count) - */ - IMG_UINT32 ui32TotalStates = 1 << ui32MaxPowUnitsCount; - IMG_UINT32 i; - - /** - * Allocate memory for storing last transition for each power domain - * state. 
- */
- psState->paui32LastTransition = OSAllocMem(ui32TotalStates *
- sizeof(*psState->paui32LastTransition));
-
- if (!psState->paui32LastTransition)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate memory ", __func__));
- return PVRSRV_ERROR_OUT_OF_MEMORY;
- }
-
- /**
- * Initialize last transition of each state to invalid
- */
- for (i=0; i<ui32TotalStates; i++)
- {
- psState->paui32LastTransition[i] = RGX_POWER_DOMAIN_STATE_INVALID;
- }
-
- psState->ui32PowUnitsCount = ui32MaxPowUnitsCount;
- psState->ui32CurrentState = RGX_POWER_DOMAIN_STATE_INVALID;
-
- return PVRSRV_OK;
-}
-
-void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState)
-{
- psState->ui32PowUnitsCount = 0;
-
- if (psState->paui32LastTransition)
- {
- OSFreeMem(psState->paui32LastTransition);
- }
-}
-
-IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState)
-{
- IMG_UINT32 ui32NextState, ui32CurrentState = psState->ui32CurrentState;
- IMG_UINT32 ui32TotalStates = 1 << psState->ui32PowUnitsCount;
-
- if (ui32CurrentState == RGX_POWER_DOMAIN_STATE_INVALID)
- {
- /**
- * Start with all units powered off.
- */
- ui32NextState = 0;
- }
- else if (psState->paui32LastTransition[ui32CurrentState] == RGX_POWER_DOMAIN_STATE_INVALID)
- {
- ui32NextState = ui32CurrentState;
- psState->paui32LastTransition[ui32CurrentState] = ui32CurrentState;
- }
- else
- {
- ui32NextState = (psState->paui32LastTransition[ui32CurrentState] + 1) % ui32TotalStates;
- psState->paui32LastTransition[ui32CurrentState] = ui32NextState;
- }
-
- psState->ui32CurrentState = ui32NextState;
- return ui32NextState;
-}
-#endif
-/******************************************************************************
- End of file (rgxpower.c)
-******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.h
deleted file mode 100644
index e92938d77a77..000000000000
--- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxpower.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/*************************************************************************/ /*!
-@File
-@Title RGX power header file
-@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-@Description Header for the RGX power
-@License Dual MIT/GPLv2
-
-The contents of this file are subject to the MIT license as set out below.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-Alternatively, the contents of this file may be used under the terms of
-the GNU General Public License Version 2 ("GPL") in which case the provisions
-of GPL are applicable instead of those above.
- -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#if !defined(RGXPOWER_H) -#define RGXPOWER_H - -#include "pvrsrv_error.h" -#include "img_types.h" -#include "servicesext.h" -#include "rgxdevice.h" - - -/*! -****************************************************************************** - - @Function RGXPrePowerState - - @Description - - does necessary preparation before power state transition - - @Input hDevHandle : RGX Device Node - @Input eNewPowerState : New power state - @Input eCurrentPowerState : Current power state - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags); - -/*! -****************************************************************************** - - @Function RGXPostPowerState - - @Description - - does necessary preparation after power state transition - - @Input hDevHandle : RGX Device Node - @Input eNewPowerState : New power state - @Input eCurrentPowerState : Current power state - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags); - -/*! -****************************************************************************** - - @Function RGXVzPrePowerState - - @Description - - does necessary preparation before power state transition on a vz driver - - @Input hDevHandle : RGX Device Node - @Input eNewPowerState : New power state - @Input eCurrentPowerState : Current power state - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags); - -/*! 
-****************************************************************************** - - @Function RGXVzPostPowerState - - @Description - - does necessary preparation after power state transition on a vz driver - - @Input hDevHandle : RGX Device Node - @Input eNewPowerState : New power state - @Input eCurrentPowerState : Current power state - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags); - -/*! -****************************************************************************** - - @Function RGXPreClockSpeedChange - - @Description - - Does processing required before an RGX clock speed change. - - @Input hDevHandle : RGX Device Node - @Input eCurrentPowerState : Power state of the device - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eCurrentPowerState); - -/*! -****************************************************************************** - - @Function RGXPostClockSpeedChange - - @Description - - Does processing required after an RGX clock speed change. - - @Input hDevHandle : RGX Device Node - @Input eCurrentPowerState : Power state of the device - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eCurrentPowerState); - - -/*! -****************************************************************************** - - @Function RGXPowUnitsStateMaskChange - - @Description Changes power state of SPUs - - @Input hDevHandle RGX Device Node. - @Input ui32PowUnitsStateMask Mask containing power state of SPUs. - Each bit corresponds to an SPU. - Bit position corresponds to SPU number - i.e. Bit0 is SPU0, Bit1 is SPU1 etc. - '1' indicates ON and '0' indicates OFF. - Value must be non-zero. - @Return PVRSRV_ERROR. - -******************************************************************************/ -PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, - IMG_UINT32 ui32PowUnitsStateMask); - -/*! -****************************************************************************** - - @Function RGXAPMLatencyChange - - @Description - - Changes the wait duration used before firmware indicates IDLE. - Reducing this value will cause the firmware to shut off faster and - more often but may increase bubbles in GPU scheduling due to the added - power management activity. If bPersistent is NOT set, APM latency will - return back to system default on power up. - - @Input hDevHandle : RGX Device Node - @Input ui32ActivePMLatencyms : Number of milliseconds to wait - @Input bActivePMLatencyPersistant : Set to ensure new value is not reset - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, - IMG_UINT32 ui32ActivePMLatencyms, - IMG_BOOL bActivePMLatencyPersistant); - -/*! 
-****************************************************************************** - - @Function RGXActivePowerRequest - - @Description Initiate a handshake with the FW to power off the GPU - - @Input hDevHandle : RGX Device Node - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); - -/*! -****************************************************************************** - - @Function RGXForcedIdleRequest - - @Description Initiate a handshake with the FW to idle the GPU - - @Input hDevHandle : RGX Device Node - - @Input bDeviceOffPermitted : Set to indicate device state being off is not - erroneous. - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted); - -/*! -****************************************************************************** - - @Function RGXCancelForcedIdleRequest - - @Description Send a request to cancel idle to the firmware. - - @Input hDevHandle : RGX Device Node - - @Return PVRSRV_ERROR : - -******************************************************************************/ -PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle); - - -#if defined(SUPPORT_VALIDATION) -PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState, - IMG_UINT32 ui32MaxPowUnitsCount); - -void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState); - -IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState); -#endif -#endif /* RGXPOWER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.c index ff1acc1e654c..fb5c757d5d80 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.c @@ -46,7 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdump_km.h" #include "pvr_debug.h" #include "rgxutils.h" -#include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "rgxray.h" #include "rgxmem.h" #include "allocmem.h" @@ -56,7 +56,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxccb.h" #include "rgxhwperf.h" #include "ospvr_gputrace.h" -#include "htbuffer.h" #include "sync_server.h" #include "sync_internal.h" @@ -74,12 +73,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* Enable this to dump the compiled list of UFOs prior to kick call */ -#define ENABLE_CMP_UFO_DUMP 0 +#define ENABLE_RAY_UFO_DUMP 0 -//#define CMP_CHECKPOINT_DEBUG 1 -//#define CMP_CHECKPOINT_DEBUG 1 +//#define RAY_CHECKPOINT_DEBUG 1 -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) #define CHKPT_DBG(X) PVR_DPF(X) #else #define CHKPT_DBG(X) @@ -88,7 +86,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
struct _RGX_SERVER_RAY_CONTEXT_ { PVRSRV_DEVICE_NODE *psDeviceNode; DEVMEM_MEMDESC *psFWRayContextMemDesc; - DEVMEM_MEMDESC *psContextStateMemDesc; POS_LOCK hLock; #if defined(SUPPORT_WORKLOAD_ESTIMATION) WORKEST_HOST_DATA sWorkEstData; @@ -150,14 +147,6 @@ PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection, goto fail_createlock; } - PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware ray context suspend state"); - - eError = DevmemFwAllocate(psDevInfo, - sizeof(RGXFWIF_COMPUTECTX_STATE), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwRayContextState", - &psRayContext->psContextStateMemDesc); - eError = FWCommonContextAllocate(psConnection, psDeviceNode, REQ_TYPE_RAY, @@ -166,7 +155,7 @@ PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection, psRayContext->psFWRayContextMemDesc, offsetof(RGXFWIF_FWRAYCONTEXT, sRDMContext), psFWMemContextMemDesc, - psRayContext->psContextStateMemDesc, + NULL, RGX_RDM_CCB_SIZE_LOG2, RGX_RDM_CCB_MAX_SIZE_LOG2, ui32ContextFlags, @@ -192,11 +181,14 @@ PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstInitRay(psDevInfo, &psRayContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + WorkEstInitRay(psDevInfo, &psRayContext->sWorkEstData); + } #endif OSDeviceMemCopy(&psFWRayContext->sStaticRayContextState, pStaticRayContextState, ui32StaticRayContextStateSize); - DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); + DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc, 0, sizeof(RGXFWIF_FWRAYCONTEXT), PDUMP_FLAGS_CONTINUOUS); DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc); SyncAddrListInit(&psRayContext->sSyncAddrListFence); @@ -223,33 +215,36 @@ PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext) PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFWIF_FWRAYCONTEXT *psFWRayContext; - IMG_UINT32 ui32WorkEstCCBSubmitted; - - eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc, - (void **)&psFWRayContext); - if (eError != PVRSRV_OK) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware ray context (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + RGXFWIF_FWRAYCONTEXT *psFWRayContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc, + (void **)&psFWRayContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware ray context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } - ui32WorkEstCCBSubmitted = psFWRayContext->ui32WorkEstCCBSubmitted; + ui32WorkEstCCBSubmitted = psFWRayContext->ui32WorkEstCCBSubmitted; - DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc); + DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc); - /* Check if all of the workload estimation CCB commands for this workload are read */ - if (ui32WorkEstCCBSubmitted != psRayContext->sWorkEstData.ui32WorkEstCCBReceived) - { - PVR_DPF((PVR_DBG_WARNING, - "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", - __func__, ui32WorkEstCCBSubmitted, - psRayContext->sWorkEstData.ui32WorkEstCCBReceived)); + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != 
psRayContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psRayContext->sWorkEstData.ui32WorkEstCCBReceived)); - return PVRSRV_ERROR_RETRY; + return PVRSRV_ERROR_RETRY; + } } #endif @@ -258,25 +253,21 @@ PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext) psRayContext->psServerCommonContext, RGXFWIF_DM_RAY, PDUMP_FLAGS_NONE); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psRayContext->psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); /* ... it has so we can free its resources */ FWCommonContextFree(psRayContext->psServerCommonContext); - DevmemFwUnmapAndFree(psDevInfo, psRayContext->psContextStateMemDesc); + psRayContext->psServerCommonContext = NULL; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstDeInitRay(psDevInfo, &psRayContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + WorkEstDeInitRay(psDevInfo, &psRayContext->sWorkEstData); + } #endif DevmemFwUnmapAndFree(psDevInfo, psRayContext->psFWRayContextMemDesc); @@ -284,7 +275,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext) OSLockDestroy(psRayContext->hLock); OSFreeMem(psRayContext); - return PVRSRV_OK; + return eError; } @@ -409,7 +400,7 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, goto fail_resolve_input_fence; } CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) if (ui32FenceSyncCheckpointCount > 0) { IMG_UINT32 ii; @@ -460,7 +451,7 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); /* Copy the update values into the new memory, then append our timeline update value */ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) if (ui32IntClientUpdateCount > 0) { IMG_UINT32 iii; @@ -481,11 +472,11 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; - CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync)); - /* Now append the timeline sync prim addr to the compute context update list */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the ray context update list", __func__, (void*)psFenceTimelineUpdateSync)); + /* Now append the timeline sync prim addr to the ray context update list */ SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate, psFenceTimelineUpdateSync); -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) if (ui32IntClientUpdateCount > 0) { IMG_UINT32 iii; @@ -508,7 +499,7 @@ PVRSRV_ERROR 
PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, if (ui32FenceSyncCheckpointCount > 0) { CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Ray RDM Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence)); -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) if (ui32IntClientUpdateCount > 0) { IMG_UINT32 iii; @@ -530,7 +521,7 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, } ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; } -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) if (ui32IntClientUpdateCount > 0) { IMG_UINT32 iii; @@ -558,7 +549,7 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs; } ui32IntClientUpdateCount++; -#if defined(CMP_CHECKPOINT_DEBUG) +#if defined(RAY_CHECKPOINT_DEBUG) if (ui32IntClientUpdateCount > 0) { IMG_UINT32 iii; @@ -575,7 +566,7 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, } CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); -#if (ENABLE_CMP_UFO_DUMP == 1) +#if (ENABLE_RAY_UFO_DUMP == 1) PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (RDM) fence/updates syncs...", __func__)); { IMG_UINT32 ii; @@ -630,26 +621,33 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - sWorkloadCharacteristics.sRay.ui32AccStructSize = ui32AccStructSizeInBytes; - sWorkloadCharacteristics.sRay.ui32DispatchSize = ui32DispatchSize; - - /* Prepare workload estimation */ - WorkEstPrepare(psRayContext->psDeviceNode->pvDevice, - &psRayContext->sWorkEstData, - &psRayContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, - RGXFWIF_CCB_CMD_TYPE_RAY, - &sWorkloadCharacteristics, - ui64DeadlineInus, - &sWorkloadKickDataRay); - - if (sWorkloadKickDataRay.ui32CyclesPrediction != 0) - { - PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch size = %u, Acc struct size = %u, prediction = %u", - __func__, - sWorkloadCharacteristics.sRay.ui32DispatchSize, - sWorkloadCharacteristics.sRay.ui32AccStructSize, - sWorkloadKickDataRay.ui32CyclesPrediction)); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + sWorkloadCharacteristics.sRay.ui32AccStructSize = ui32AccStructSizeInBytes; + sWorkloadCharacteristics.sRay.ui32DispatchSize = ui32DispatchSize; + + /* Prepare workload estimation */ + WorkEstPrepare(psRayContext->psDeviceNode->pvDevice, + &psRayContext->sWorkEstData, + &psRayContext->sWorkEstData.uWorkloadMatchingData.sRay.sDataRDM, + RGXFWIF_CCB_CMD_TYPE_RAY, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataRay); + + if (sWorkloadKickDataRay.ui32CyclesPrediction != 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch size = %u, Acc struct size = %u, prediction = %u", + __func__, + sWorkloadCharacteristics.sRay.ui32DispatchSize, + sWorkloadCharacteristics.sRay.ui32AccStructSize, + sWorkloadKickDataRay.ui32CyclesPrediction)); + } } +#else + PVR_UNREFERENCED_PARAMETER(ui32AccStructSizeInBytes); + PVR_UNREFERENCED_PARAMETER(ui32DispatchSize); + PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); #endif RGXCmdHelperInitCmdCCB(psDevInfo, @@ -681,13 +679,30 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); if (eError != PVRSRV_OK) { - goto 
fail_cmdaquire; + goto fail_cmdacquire; + } + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; } if (eError == PVRSRV_OK) { #if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui32RDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + ui32RDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); + } #endif /* All the required resources are ready at this point, we can't fail so @@ -698,22 +713,25 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following is used to determine the offset of the command header containing - the workload estimation data so that can be accessed when the KCCB is read */ - ui32RDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + /* The following is used to determine the offset of the command header containing + * the workload estimation data so that can be accessed when the KCCB is read */ + ui32RDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); - ui32RDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRayContext->psServerCommonContext)); + ui32RDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRayContext->psServerCommonContext)); - /* This checks if the command would wrap around at the end of the CCB and - * therefore would start at an offset of 0 rather than the current command - * offset */ - if (ui32RDMCmdOffset < ui32RDMCmdOffsetWrapCheck) - { - ui32RDMWorkloadDataRO = ui32RDMCmdOffset; - } - else - { - ui32RDMWorkloadDataRO = 0; + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32RDMCmdOffset < ui32RDMCmdOffsetWrapCheck) + { + ui32RDMWorkloadDataRO = ui32RDMCmdOffset; + } + else + { + ui32RDMWorkloadDataRO = 0; + } } #endif @@ -726,9 +744,12 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, /* Add the Workload data into the KCCB kick */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Store the offset to the CCCB command header so that it can be referenced - * when the KCCB command reaches the FW */ - sRayKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32RDMWorkloadDataRO + ui32RDMCmdHeaderOffset; + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sRayKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32RDMWorkloadDataRO + ui32RDMCmdHeaderOffset; + } #endif ui32FWCtx = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr; @@ -747,11 +768,11 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, NO_CYCEST); /* - * Submit the compute command to the firmware. + * Submit the ray command to the firmware. 
*/ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice, + eError2 = RGXScheduleCommandWithoutPowerLock(psRayContext->psDeviceNode->pvDevice, RGXFWIF_DM_RAY, &sRayKCCBCmd, ui32PDumpFlags); @@ -760,7 +781,9 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); + + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); if (eError2 != PVRSRV_OK) { @@ -786,7 +809,7 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, */ if (eError != PVRSRV_OK ) { - goto fail_cmdaquire; + goto fail_cmdsubmit; } #if defined(NO_HARDWARE) @@ -831,7 +854,9 @@ PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, return PVRSRV_OK; -fail_cmdaquire: +fail_cmdsubmit: +fail_acquirepowerlock: +fail_cmdacquire: fail_invalfbsc: SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence); SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.h index 454516cb83ec..ecf23db03a46 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxray.h @@ -47,7 +47,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "devicemem.h" #include "device.h" #include "rgxfwutils.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" #include "sync_server.h" diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxsrvinit.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxsrvinit.c index 2426d9f8a938..709cbfd1002e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxsrvinit.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxsrvinit.c @@ -46,9 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_debug.h" #include "osfunc.h" #include "km_apphint_defs.h" - #include "htbuffer_types.h" -#include "htbuffer_init.h" #include "devicemem.h" #include "devicemem_pdump.h" @@ -60,6 +58,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdefs_km.h" #include "pvrsrv.h" +#include "rgxinit_apphints.h" #include "rgxinit.h" #include "rgxmulticore.h" @@ -74,15 +73,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#endif #include "rgx_fwif_hwperf.h" -#include "rgx_hwperf_table.h" - -static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = -{ -#define X(a, b, c, d, e, f, g) {a, b, 0xFF, d, e, f, NULL} -RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST, -RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST -#undef X -}; +#include "rgxhwperf_common.h" #include "fwload.h" #include "rgxlayer_impl.h" @@ -92,6 +83,7 @@ RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST #include "rgx_bvnc_defs_km.h" #include "rgxdevice.h" + #include "pvrsrv.h" #if defined(SUPPORT_TRUSTED_DEVICE) @@ -99,12 +91,12 @@ RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST #include "pvrsrv_device.h" #endif -#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ - #define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ #define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) #define AVAIL_POW_UNITS_MASK_DEFAULT (PVRSRV_APPHINT_HWVALAVAILABLESPUMASK) #define AVAIL_RAC_MASK_DEFAULT (PVRSRV_APPHINT_HWVALAVAILABLERACMASK) +#endif /* Kernel CCB size */ @@ -121,168 +113,206 @@ RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST #error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high. #endif -#if defined(SUPPORT_VALIDATION) -#include "pvrsrv_apphint.h" -#endif -#include "os_srvinit_param.h" +#include "os_apphint.h" + +/* + * _ParseHTBAppHints: + * + * Generate necessary references to the globally visible AppHints which are + * declared in the above #include "km_apphint_defs.h" + * Without these local references some compiler tool-chains will treat + * unreferenced declarations as fatal errors. This function duplicates the + * HTB_specific apphint references which are made in htbserver.c:HTBInit() + * However, it makes absolutely *NO* use of these hints. + */ +static void +_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvAppHintState) +{ + IMG_UINT32 ui32AppHintDefault; + IMG_UINT32 ui32LogType; + IMG_UINT32 ui32OpMode; + IMG_UINT32 ui32BufferSize; + + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEHTBLOGGROUP; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableHTBLogGroup, + &ui32AppHintDefault, &ui32LogType); + ui32AppHintDefault = PVRSRV_APPHINT_HTBOPERATIONMODE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBOperationMode, + &ui32AppHintDefault, &ui32OpMode); + ui32AppHintDefault = PVRSRV_APPHINT_HTBUFFERSIZE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, + &ui32AppHintDefault, &ui32BufferSize); +} -#if defined(__linux__) -#include "km_apphint.h" -#else /*! ******************************************************************************* - * AppHint mnemonic data type helper tables + + @Function GetFilterFlags + + @Description Initialise and return filter flags + + @Input bFilteringMode : Enable new TPU filtering mode + @Input ui32TruncateMode : TPU Truncate mode + + @Return IMG_UINT32 : Filter flags + ******************************************************************************/ -/* apphint map of name vs. enable flag */ -static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = { -#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, - HTB_LOG_SFGROUPLIST -#undef X -}; -/* apphint map of arg vs. 
OpMode */ -static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = { - { "droplatest", HTB_OPMODE_DROPLATEST}, - { "dropoldest", HTB_OPMODE_DROPOLDEST}, - /* HTB should never be started in HTB_OPMODE_BLOCK - * as this can lead to deadlocks - */ -}; +static INLINE IMG_UINT32 GetFilterFlags(IMG_BOOL bFilteringMode, IMG_UINT32 ui32TruncateMode) +{ + IMG_UINT32 ui32FilterFlags = 0; -static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = { - { "trace", 0}, - { "none", 0} -#if defined(SUPPORT_TBI_INTERFACE) - , { "tbi", 1} -#endif -}; + ui32FilterFlags |= bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; + if (ui32TruncateMode == 2) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; + } + else if (ui32TruncateMode == 3) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; + } -static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = { - { "mono", 0 }, - { "mono_raw", 1 }, - { "sched", 2 } -}; + return ui32FilterFlags; +} -static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP }; -/* - * Services AppHints initialisation - */ -#define X(a, b, c, d, e, f) SrvInitParamInit ## b(a, d, e) -APPHINT_LIST_ALL -#undef X -#endif /* defined(__linux__) */ +/*! +******************************************************************************* -/* - * Container for all the apphints used by this module - */ -typedef struct _RGX_SRVINIT_APPHINTS_ + @Function InitDeviceFlags + + @Description Initialise and return device flags + + @Input psDeviceNode : Pointer to device node + @Input pvAppHintState : Pointer to apphint state + @Input psHints : Apphints container + + @Return void + +******************************************************************************/ +static INLINE void InitDeviceFlags(PVRSRV_DEVICE_NODE *psDeviceNode, + void *pvAppHintState, + RGX_INIT_APPHINTS *psHints) { - IMG_UINT32 ui32DriverMode; - IMG_BOOL bEnableSignatureChecks; - IMG_UINT32 ui32SignatureChecksBufSize; - - IMG_BOOL bAssertOnOutOfMem; - IMG_BOOL bAssertOnHWRTrigger; -#if defined(SUPPORT_VALIDATION) - IMG_UINT32 ui32RenderKillingCtl; - IMG_UINT32 ui32CDMTDMKillingCtl; - IMG_BOOL bValidateIrq; - IMG_BOOL bValidateSOCUSCTimer; - IMG_UINT32 ui32AvailablePowUnitsMask; - IMG_UINT32 ui32AvailableRACMask; - IMG_BOOL bInjectPowUnitsStateMaskChange; - IMG_BOOL bEnablePowUnitsStateMaskChange; - IMG_UINT32 ui32FBCDCVersionOverride; - IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; - IMG_UINT32 aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; - IMG_UINT64 aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; -#endif - IMG_BOOL bCheckMlist; - IMG_BOOL bDisableClockGating; - IMG_BOOL bDisableDMOverlap; - IMG_BOOL bDisableFEDLogging; - IMG_BOOL bDisablePDP; - IMG_BOOL bEnableDMKillRand; - IMG_BOOL bEnableRandomCsw; - IMG_BOOL bEnableSoftResetCsw; - IMG_BOOL bFilteringMode; - IMG_BOOL bHWPerfDisableCounterFilter; - IMG_BOOL bZeroFreelist; - IMG_UINT32 ui32EnableFWContextSwitch; - IMG_UINT32 ui32FWContextSwitchProfile; - IMG_UINT32 ui32ISPSchedulingLatencyMode; - IMG_UINT32 ui32HWPerfFWBufSize; - IMG_UINT32 ui32HWPerfHostBufSize; - IMG_UINT32 ui32HWPerfFilter0; - IMG_UINT32 ui32HWPerfFilter1; - IMG_UINT32 ui32HWPerfHostFilter; - IMG_UINT32 ui32TimeCorrClock; - IMG_UINT32 ui32HWRDebugDumpLimit; - IMG_UINT32 ui32JonesDisableMask; - IMG_UINT32 ui32LogType; - IMG_UINT32 ui32TruncateMode; - IMG_UINT32 ui32KCCBSizeLog2; - IMG_UINT32 ui32CDMArbitrationMode; - FW_PERF_CONF eFirmwarePerf; - RGX_ACTIVEPM_CONF eRGXActivePMConf; - RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; - IMG_BOOL bSPUClockGating; - - IMG_BOOL 
bEnableTrustedDeviceAceConfig; - IMG_UINT32 ui32FWContextSwitchCrossDM; -#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) - IMG_UINT32 ui32PhysMemTestPasses; + IMG_UINT32 ui32DeviceFlags = 0; + IMG_BOOL bAppHintDefault; + IMG_BOOL bZeroFreelist; + IMG_BOOL bDisableFEDLogging; + + + bAppHintDefault = PVRSRV_APPHINT_ZEROFREELIST; + OSGetAppHintBOOL(psDeviceNode, pvAppHintState, ZeroFreelist, + &bAppHintDefault, &bZeroFreelist); + ui32DeviceFlags |= bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; + + bAppHintDefault = PVRSRV_APPHINT_DISABLEFEDLOGGING; + OSGetAppHintBOOL(psDeviceNode, pvAppHintState, DisableFEDLogging, + &bAppHintDefault, &bDisableFEDLogging); + ui32DeviceFlags |= bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; + + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); #endif -} RGX_SRVINIT_APPHINTS; + + psHints->ui32DeviceFlags = ui32DeviceFlags; +} /*! ******************************************************************************* @Function GetApphints - @Description Read init time apphints and initialise internal variables + @Description Read init time apphints and initialise apphints structure @Input psHints : Pointer to apphints container @Return void ******************************************************************************/ -static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) +static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_INIT_APPHINTS *psHints) { - void *pvParamState = SrvInitParamOpen(); + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + IMG_BOOL bAppHintDefault; IMG_UINT32 ui32ParamTemp; +#if defined(__linux__) + IMG_UINT64 ui64AppHintDefault; +#endif - /* - * NB AppHints initialised to a default value via SrvInitParamInit* macros above - */ - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, DriverMode, psHints->ui32DriverMode); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize); - - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, CheckMList, psHints->bCheckMlist); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, DisableClockGating, psHints->bDisableClockGating); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging); - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, EnableAPM, ui32ParamTemp); + OSCreateAppHintState(&pvAppHintState); + + bAppHintDefault = PVRSRV_APPHINT_ENABLESIGNATURECHECKS; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSignatureChecks, + &bAppHintDefault, &psHints->bEnableSignatureChecks); + ui32AppHintDefault = PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SignatureChecksBufSize, + &ui32AppHintDefault, &psHints->ui32SignatureChecksBufSize); + + bAppHintDefault = PVRSRV_APPHINT_ASSERTOUTOFMEMORY; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, AssertOutOfMemory, + &bAppHintDefault, 
&psHints->bAssertOnOutOfMem); + bAppHintDefault = PVRSRV_APPHINT_ASSERTONHWRTRIGGER; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, AssertOnHWRTrigger, + &bAppHintDefault, &psHints->bAssertOnHWRTrigger); + bAppHintDefault = PVRSRV_APPHINT_CHECKMLIST; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, CheckMList, + &bAppHintDefault, &psHints->bCheckMlist); + bAppHintDefault = PVRSRV_APPHINT_DISABLECLOCKGATING; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, DisableClockGating, + &bAppHintDefault, &psHints->bDisableClockGating); + bAppHintDefault = PVRSRV_APPHINT_DISABLEDMOVERLAP; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, DisableDMOverlap, + &bAppHintDefault, &psHints->bDisableDMOverlap); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEAPM; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableAPM, + &ui32AppHintDefault, &ui32ParamTemp); psHints->eRGXActivePMConf = ui32ParamTemp; - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableGenericDMKillingRandMode, psHints->bEnableDMKillRand); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableRandomContextSwitch, psHints->bEnableRandomCsw); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSoftResetContextSwitch, psHints->bEnableSoftResetCsw); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableRDPowerIsland, ui32ParamTemp); + bAppHintDefault = PVRSRV_APPHINT_ENABLEDMKILLINGRANDMODE; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableGenericDMKillingRandMode, + &bAppHintDefault, &psHints->bEnableDMKillRand); + bAppHintDefault = PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableRandomContextSwitch, + &bAppHintDefault, &psHints->bEnableRandomCsw); + bAppHintDefault = PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSoftResetContextSwitch, + &bAppHintDefault, &psHints->bEnableSoftResetCsw); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableFWContextSwitch, + &ui32AppHintDefault, &psHints->ui32EnableFWContextSwitch); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLERDPOWERISLAND; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableRDPowerIsland, + &ui32AppHintDefault, &ui32ParamTemp); psHints->eRGXRDPowerIslandConf = ui32ParamTemp; - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSPUClockGating, psHints->bSPUClockGating); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FirmwarePerf, ui32ParamTemp); +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + bAppHintDefault = PVRSRV_APPHINT_ENABLESPUCLOCKGATING; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSPUClockGating, + &bAppHintDefault, &psHints->bSPUClockGating); +#endif + ui32AppHintDefault = PVRSRV_APPHINT_FIRMWAREPERF; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FirmwarePerf, + &ui32AppHintDefault, &ui32ParamTemp); psHints->eFirmwarePerf = ui32ParamTemp; - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, - HWPerfDisableCounterFilter, psHints->bHWPerfDisableCounterFilter); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize); - 
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize); - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, KernelCCBSizeLog2, psHints->ui32KCCBSizeLog2); + ui32AppHintDefault = PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWContextSwitchProfile, + &ui32AppHintDefault, &psHints->ui32FWContextSwitchProfile); + + /* + * HWPerf apphints * + */ + bAppHintDefault = PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, HWPerfDisableCounterFilter, + &bAppHintDefault, &psHints->bHWPerfDisableCounterFilter); + ui32AppHintDefault = PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, + &ui32AppHintDefault, &psHints->ui32HWPerfHostBufSize); + ui32AppHintDefault = PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfFWBufSizeInKB, + &ui32AppHintDefault, &psHints->ui32HWPerfFWBufSize); + + ui32AppHintDefault = PVRSRV_APPHINT_KCCB_SIZE_LOG2; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, KernelCCBSizeLog2, + &ui32AppHintDefault, &psHints->ui32KCCBSizeLog2); if (psHints->ui32KCCBSizeLog2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE) { @@ -297,78 +327,127 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE; } -#if defined(SUPPORT_VALIDATION) - if (psHints->ui32KCCBSizeLog2 != PVRSRV_APPHINT_KCCB_SIZE_LOG2) - { - PVR_LOG(("KernelCCBSizeLog2 set to %u", psHints->ui32KCCBSizeLog2)); - } + +#if defined(PVR_ARCH_VOLCANIC) + ui32AppHintDefault = PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ISPSchedulingLatencyMode, + &ui32AppHintDefault, &psHints->ui32ISPSchedulingLatencyMode); #endif - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, - ISPSchedulingLatencyMode, psHints->ui32ISPSchedulingLatencyMode); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, CDMArbitrationOverride, psHints->ui32CDMArbitrationMode); #if defined(__linux__) /* name changes */ { IMG_UINT64 ui64Tmp; - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, DisablePDumpPanic, psHints->bDisablePDP); - SrvInitParamGetUINT64(psDevInfo->psDeviceNode, pvParamState, HWPerfFWFilter, ui64Tmp); + bAppHintDefault = PVRSRV_APPHINT_DISABLEPDUMPPANIC; + OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, DisablePDumpPanic, + &bAppHintDefault, &psHints->bDisablePDP); + ui64AppHintDefault = PVRSRV_APPHINT_HWPERFFWFILTER; + OSGetAppHintUINT64(psDevInfo->psDeviceNode, pvAppHintState, HWPerfFWFilter, + &ui64AppHintDefault, &ui64Tmp); psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu); psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); } -#else - SrvInitParamUnreferenced(DisablePDumpPanic); - SrvInitParamUnreferenced(HWPerfFWFilter); - SrvInitParamUnreferenced(RGXBVNC); #endif - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter); - SrvInitParamGetUINT32List(psDevInfo->psDeviceNode, pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock); - SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, HWRDebugDumpLimit, ui32ParamTemp); + + ui32AppHintDefault = PVRSRV_APPHINT_HWPERFHOSTFILTER; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, HWPerfHostFilter, + &ui32AppHintDefault, 
&psHints->ui32HWPerfHostFilter); + ui32AppHintDefault = PVRSRV_APPHINT_SECONDARYOSCLOCKSOURCE; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, SecondaryOSClockSource, + &ui32AppHintDefault, &psHints->ui32SecondaryOSClockSource); + ui32AppHintDefault = PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, HWRDebugDumpLimit, + &ui32AppHintDefault, &ui32ParamTemp); psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, JonesDisableMask, ui32ParamTemp); - psHints->ui32JonesDisableMask = ui32ParamTemp & RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK; - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, NewFilteringMode, psHints->bFilteringMode); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TruncateMode, psHints->ui32TruncateMode); + { + IMG_BOOL bFilteringMode = IMG_FALSE; + IMG_UINT32 ui32TruncateMode = 0U; + +#if defined(HW_ERN_42290_BIT_MASK) && defined(RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290) && RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL)) +#endif + { + bAppHintDefault = PVRSRV_APPHINT_NEWFILTERINGMODE; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, NewFilteringMode, + &bAppHintDefault, &bFilteringMode); + } + +#if defined(HW_ERN_42606_BIT_MASK) + if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606)) +#endif + { + ui32AppHintDefault = PVRSRV_APPHINT_TRUNCATEMODE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TruncateMode, + &ui32AppHintDefault, &ui32TruncateMode); + } + + psHints->ui32FilterFlags = GetFilterFlags(bFilteringMode, ui32TruncateMode); + } + +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) +#endif + { + bAppHintDefault = PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableTrustedDeviceAceConfig, + &bAppHintDefault, &psHints->bEnableTrustedDeviceAceConfig); + } - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, ZeroFreelist, psHints->bZeroFreelist); #if defined(__linux__) - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM); -#else - SrvInitParamUnreferenced(FWContextSwitchCrossDM); + ui32AppHintDefault = 0; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWContextSwitchCrossDM, + &ui32AppHintDefault, &psHints->ui32FWContextSwitchCrossDM); #endif #if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses); + ui32AppHintDefault = PVRSRV_APPHINT_PHYSMEMTESTPASSES; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, + &ui32AppHintDefault, &psHints->ui32PhysMemTestPasses); #endif -#if defined(SUPPORT_VALIDATION) - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, KillingCtl, psHints->ui32RenderKillingCtl); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, CDMTDMKillingCtl, psHints->ui32CDMTDMKillingCtl); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ValidateIrq, psHints->bValidateIrq); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ValidateSOCUSCTimer, psHints->bValidateSOCUSCTimer); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWValAvailableSPUMask, psHints->ui32AvailablePowUnitsMask); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, 
HWValAvailableRACMask, psHints->ui32AvailableRACMask); - SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, GPUUnitsPowerChange, psHints->bInjectPowUnitsStateMaskChange); - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, HWValEnableSPUPowerMaskChange, psHints->bEnablePowUnitsStateMaskChange); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride); - - /* Apphints for Unified Store virtual partitioning. */ - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, USRMNumRegionsVDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_VDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, USRMNumRegionsDDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_DDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, USRMNumRegionsCDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_CDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, USRMNumRegionsPDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_PDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, USRMNumRegionsTDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_TDM]); - - /* Apphints for UVB virtual partitioning. */ - SrvInitParamGetUINT64(INITPARAM_NO_DEVICE, pvParamState, UVBRMNumRegionsVDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_VDM]); - SrvInitParamGetUINT64(INITPARAM_NO_DEVICE, pvParamState, UVBRMNumRegionsDDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_DDM]); - - /* Apphints for TPU trilinear frac masking */ - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + psHints->ui32AvailablePowUnitsMask = AVAIL_POW_UNITS_MASK_DEFAULT; + psHints->ui32AvailableRACMask = AVAIL_RAC_MASK_DEFAULT; +#endif /* defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) */ + + +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + ui32AppHintDefault = PVRSRV_APPHINT_TFBCVERSION; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCVersionDowngrade, + &ui32AppHintDefault, &psHints->ui32TFBCVersion); + + if (ui32AppHintDefault != psHints->ui32TFBCVersion) + { + PVR_LOG(("TFBCVersionDowngrade set to %u", psHints->ui32TFBCVersion)); + } + + psHints->bTFBCCompressionControlLossyMinChannel = false; + psHints->bTFBCCompressionControlYUVFormat = false; + psHints->ui32TFBCCompressionControlScheme = + PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME; + psHints->ui32TFBCCompressionControlGroup = + PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; +#endif /* defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) */ + + ui32AppHintDefault = PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DebugDumpFWTLogType, + &ui32AppHintDefault, &psHints->eDebugDumpFWTLogType); + if ((IMG_UINT32)psHints->eDebugDumpFWTLogType > RGX_FWT_LOGTYPE_PARTIAL) + { + psHints->eDebugDumpFWTLogType = RGX_FWT_LOGTYPE_NONE; + PVR_DPF((PVR_DBG_WARNING, "Invalid value for DebugDumpFWTLogType. 
Setting to 0 (disabled).")); + } +#if defined(SUPPORT_ICS) + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEIDLECYCLESTEALING; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableIdleCycleStealing, + &ui32AppHintDefault, &psHints->ui32EnableIdleCycleStealing); + ui32AppHintDefault = PVRSRV_APPHINT_FAULTDETECTIONTIMEINTERVAL_USEC; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FaultDetectionTimeInterval, &ui32AppHintDefault, &psHints->ui32FDTI); + ui32AppHintDefault = PVRSRV_APPHINT_ICSTIMEINTERVAL_THRESHOLD; + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ICSTimeIntervalThreshold, &ui32AppHintDefault, &psHints->ui32ICSThreshold); + bAppHintDefault = IMG_FALSE; + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ICSTestModeOn, &bAppHintDefault, &psHints->bTestModeOn); #endif /* @@ -377,8 +456,10 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN { IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; - SrvInitParamGetUINT32BitField(psDevInfo->psDeviceNode, pvParamState, EnableLogGroup, ui32LogGroup); - SrvInitParamGetUINT32List(psDevInfo->psDeviceNode, pvParamState, FirmwareLogType, ui32TraceOrTBI); + ui32AppHintDefault = PVRSRV_APPHINT_ENABLELOGGROUP; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableLogGroup, &ui32AppHintDefault, &ui32LogGroup); + ui32AppHintDefault = PVRSRV_APPHINT_FIRMWARELOGTYPE; + OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, FirmwareLogType, &ui32AppHintDefault, &ui32TraceOrTBI); /* Defaulting to TRACE */ BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); @@ -394,13 +475,14 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); } #endif - psHints->ui32LogType = ui32LogGroup; } - SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig); + _ParseHTBAppHints(psDevInfo->psDeviceNode, pvAppHintState); - SrvInitParamClose(pvParamState); + InitDeviceFlags(psDevInfo->psDeviceNode, pvAppHintState, psHints); + + OSFreeAppHintState(pvAppHintState); } @@ -418,7 +500,7 @@ static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHIN ******************************************************************************/ static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_SRVINIT_APPHINTS *psHints, + RGX_INIT_APPHINTS *psHints, IMG_UINT32 *pui32FWConfigFlags, IMG_UINT32 *pui32FWConfigFlagsExt, IMG_UINT32 *pui32FwOsCfgFlags) @@ -426,51 +508,188 @@ static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32FWConfigFlags = 0; IMG_UINT32 ui32FWConfigFlagsExt = 0; IMG_UINT32 ui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | - (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); + (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK) +#if defined(SUPPORT_ICS) + | ((psHints->ui32EnableIdleCycleStealing << RGXFWIF_INICFG_OS_ICS_DM_ALL_SHIFT) & + ~RGXFWIF_INICFG_OS_ICS_CLRMSK) +#endif +; + + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + IMG_UINT32 ui32FWConfigFlagsSupValExt = 0; + IMG_UINT32 ui32TFBCVersion = 0U; +#endif - if (PVRSRV_VZ_MODE_IS(GUEST)) + ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; + ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; + ui32FWConfigFlags |= psHints->bCheckMlist ? 
RGXFWIF_INICFG_CHECK_MLIST_EN : 0; + ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; + +#if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PIPELINED_DATAMASTERS_VERSION) && + (RGX_GET_FEATURE_VALUE(psDevInfo, PIPELINED_DATAMASTERS_VERSION) > 0)) { - ui32FWConfigFlags = 0; - ui32FWConfigFlagsExt = 0; + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, ERYX_TOP_INFRASTRUCTURE)) + { + if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, DIVANO_TOP_INFRASTRUCTURE)) || + (RGX_GET_FEATURE_VALUE(psDevInfo, ECC_RAMS) == 0)) + { + /* Pipeline DM roadblocks are currently enabled pre-DXS. */ + ui32FWConfigFlags |= RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN; + } + } } - else - { - ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; - ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; - ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; - ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; - ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; - ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; - ui32FWConfigFlags |= psHints->bEnableDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; - ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; - ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; - ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; - ui32FWConfigFlags |= (psHints->ui32ISPSchedulingLatencyMode << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) & RGXFWIF_INICFG_ISPSCHEDMODE_MASK; -#if defined(SUPPORT_VALIDATION) -#if defined(NO_HARDWARE) && defined(PDUMP) - ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; #endif + + ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; + ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; + ui32FWConfigFlags |= psHints->bEnableDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; + ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; + ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; + ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; + ui32FWConfigFlags |= psHints->bHWPerfDisableCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; + ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; +#if defined(PVR_ARCH_VOLCANIC) + ui32FWConfigFlags |= (psHints->ui32ISPSchedulingLatencyMode << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) & RGXFWIF_INICFG_ISPSCHEDMODE_MASK; #endif - ui32FWConfigFlags |= psHints->bHWPerfDisableCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; - ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; -#if defined(SUPPORT_VALIDATION) - ui32FWConfigFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN : 0; - ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0; - ui32FWConfigFlags |= psHints->bSPUClockGating ? 
RGXFWIF_INICFG_SPU_CLOCK_GATE : 0; + { + ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; + } + - if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) && - ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0))) +#if defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) + + /* Determine if we need to present a TFBC v1.0, v1.1 or native + * behaviour. For V1.0 we need to set the following features: + * TFBCCompressionControlLossyMinChannel = 0x1 + * TFBCCompressionControlYUVFormat = 0x1 + * TFBCCompressionControlScheme = 0x2 + * TFBCCompressionControlGroup = 0x0 + * For V1.1 we need to set the following: + * TFBCCompressionControlLossyMinChannel = 0x1 + * TFBCCompressionControlYUVFormat = 0x0 + * TFBCCompressionControlScheme = 0x1 + * TFBCCompressionControlGroup = 0 / 1 (depends on LOSSY_37_PERCENT) + * The gating for these values depends on whether the GPU supports + * RGX_FEATURE_TFBC_VERSION = 20U + */ + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) + { + ui32TFBCVersion = RGX_GET_FEATURE_VALUE(psDevInfo, TFBC_VERSION); + + if (ui32TFBCVersion >= 20U) { - psHints->eRGXActivePMConf = 0; - psHints->eRGXRDPowerIslandConf = 0; - PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n" - "Overriding current value for both with new value 0.")); + switch (psHints->ui32TFBCVersion) { + case 10: /* TFBC Version 1.0 */ + psHints->bTFBCCompressionControlLossyMinChannel = true; + psHints->bTFBCCompressionControlYUVFormat = true; + psHints->ui32TFBCCompressionControlScheme = 2U; + psHints->ui32TFBCCompressionControlGroup = 0U; + +#if defined(DEBUG) + PVR_LOG(("%s: Setting TFBC Version 1.0, Native v%u", + __func__, ui32TFBCVersion)); +#endif + break; + + case 11: /* TFBC Version 1.1 */ + psHints->bTFBCCompressionControlLossyMinChannel = true; + psHints->bTFBCCompressionControlYUVFormat = false; + psHints->ui32TFBCCompressionControlScheme = 1U; + psHints->ui32TFBCCompressionControlGroup = + PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; + +#if defined(DEBUG) + PVR_LOG(("%s: Setting TFBC Version 1.1, Native v%u", + __func__, ui32TFBCVersion)); +#endif + break; + + case 0: /* Leave with whatever the ui32TFBCVersion is */ + break; + default: /* Unexpected / unsupported value */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Unexpected TFBC Version %u" + " Ignoring. Using value %u instead", + __func__, psHints->ui32TFBCVersion, + ui32TFBCVersion)); + break; + } + + ui32FWConfigFlagsExt |= + ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & + ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | + ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & + ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | + ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | + ((psHints->bTFBCCompressionControlLossyMinChannel) ? 
RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) + << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; + /* Save the CompressionControlGroup for later use by + * ->pfnGetTFBCLossyGroup() */ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; +#endif } + else if (ui32TFBCVersion == 11U) + { + switch (psHints->ui32TFBCVersion) { + case 10: /* TFBC Version 1.0 */ + psHints->bTFBCCompressionControlLossyMinChannel = true; + psHints->bTFBCCompressionControlYUVFormat = true; + psHints->ui32TFBCCompressionControlScheme = 2U; + psHints->ui32TFBCCompressionControlGroup = 0U; + +#if defined(DEBUG) + PVR_LOG(("%s: Setting TFBC Version 1.0, Native v%u", + __func__, ui32TFBCVersion)); #endif - ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; - ui32FWConfigFlags |= (psHints->ui32CDMArbitrationMode << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) & RGXFWIF_INICFG_CDM_ARBITRATION_MASK; + break; + + case 0: /* Leave with whatever the ui32TFBCVersion is */ + break; + + default: /* Unexpected / unsupported value */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Unexpected TFBC Version %u" + " Ignoring. Using value %u instead", + __func__, psHints->ui32TFBCVersion, + ui32TFBCVersion)); + break; + } + ui32FWConfigFlagsExt |= + ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & + ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | + ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & + ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | + ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | + ((psHints->bTFBCCompressionControlLossyMinChannel) ? RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) + << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; + /* Save the CompressionControlGroup for later use by + * ->pfnGetTFBCLossyGroup() */ +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; +#endif + } + else /* TFBC v1.0 */ + { + PVR_UNREFERENCED_PARAMETER(ui32FWConfigFlagsSupValExt); +#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) + psDevInfo->ui32TFBCLossyGroup = 0; +#endif + if ((psHints->ui32TFBCVersion != 0U) && + (psHints->ui32TFBCVersion != ui32TFBCVersion)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Cannot specify TFBC version %u" + " on a version %u GPU core", __func__, + psHints->ui32TFBCVersion, ui32TFBCVersion)); + } + } } +#endif /* defined(RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX) */ if ((ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN) && ((ui32FWConfigFlags & RGXFWIF_INICFG_ISPSCHEDMODE_MASK) == RGXFWIF_INICFG_ISPSCHEDMODE_NONE)) @@ -488,112 +707,133 @@ static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, } +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) /*! 
******************************************************************************* - @Function GetFilterFlags - - @Description Initialise and return filter flags + @Function RGXTDProcessFWImage - @Input psHints : Apphints container + @Description Fetch and send data used by the trusted device to complete + the FW image setup - @Return IMG_UINT32 : Filter flags + @Input psDeviceNode : Device node + @Input psFWParams : Firmware and parameters used by the FW + @Return PVRSRV_ERROR ******************************************************************************/ -static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) +static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_FW_PARAMS *psFWParams) { - IMG_UINT32 ui32FilterFlags = 0; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + PVRSRV_ERROR eError; - ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; - if (psHints->ui32TruncateMode == 2) + if (psDevConfig->pfnTDSendFWImage == NULL) { - ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; + PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; } - else if (psHints->ui32TruncateMode == 3) + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) { - ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; + if (psFWParams->uFWP.sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Number of page table pages %u greater " + "than what is allowed by the TD interface (%u), FW might " + "not work properly!", __func__, + psFWParams->uFWP.sMips.ui32FWPageTableNumPages, + TD_MAX_NUM_MIPS_PAGETABLE_PAGES)); + } } +#endif - return ui32FilterFlags; -} + eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, psFWParams); + return eError; +} +#endif +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! ******************************************************************************* - @Function InitDeviceFlags + @Function RGXAcquireMipsBootldrData - @Description Initialise and return device flags + @Description Acquire MIPS bootloader data parameters - @Input psHints : Apphints container - @Input pui32DeviceFlags : Pointer to device flags + @Input psDeviceNode : Device node + @Input puFWParams : FW boot parameters - @Return void + @Return PVRSRV_ERROR ******************************************************************************/ -static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, - IMG_UINT32 *pui32DeviceFlags) +static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_FW_BOOT_PARAMS *puFWParams) { - IMG_UINT32 ui32DeviceFlags = 0; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice; + MMU_DEVICEATTRIBS *psFWMMUDevAttrs = psDevInfo->psDeviceNode->psFirmwareMMUDevAttrs; + IMG_DEV_PHYADDR sAddr; + IMG_UINT32 ui32PTSize, i; + PVRSRV_ERROR eError; + IMG_BOOL bValid; -#if defined(SUPPORT_VALIDATION) - ui32DeviceFlags |= psHints->bInjectPowUnitsStateMaskChange? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; -#endif - ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; - ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; -#if defined(SUPPORT_VALIDATION) - ui32DeviceFlags |= psHints->bEnablePowUnitsStateMaskChange ? 
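RGXTDProcessFWImage above follows the usual optional-callback pattern: bail out with a clear error if the platform did not register a TD hook, warn if the MIPS page-table count exceeds what the interface can carry, then forward the parameters. A minimal sketch of the guard, with stand-in types and a hypothetical dummy callback, is shown here.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for PVRSRV_FW_PARAMS and the pfnTDSendFWImage hook */
struct td_fw_params { const void *fw; unsigned int size; };
typedef int (*send_fw_image_fn)(void *sysdata, const struct td_fw_params *params);

static int td_process_fw_image(void *sysdata, send_fw_image_fn pfnSendFWImage,
                               const struct td_fw_params *params)
{
    if (pfnSendFWImage == NULL)
    {
        /* Mirrors the PVRSRV_ERROR_NOT_IMPLEMENTED early-out above */
        fprintf(stderr, "TDSendFWImage not implemented!\n");
        return -1;
    }
    return pfnSendFWImage(sysdata, params);
}

/* Hypothetical platform callback used only to exercise the guard */
static int dummy_send(void *sysdata, const struct td_fw_params *params)
{
    (void)sysdata;
    return params->size != 0u ? 0 : -1;
}

int main(void)
{
    struct td_fw_params params = { "fw-blob", 7u };
    return td_process_fw_image(NULL, dummy_send, &params);
}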
RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN : 0; -#endif -#if defined(PVRSRV_ENABLE_CCCB_GROW) - BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); + /* Rogue Registers physical address */ +#if defined(SUPPORT_ALT_REGBASE) + puFWParams->sMips.sGPURegAddr = psDeviceNode->psDevConfig->sAltRegsGpuPBase; +#else + PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], + 1, + &puFWParams->sMips.sGPURegAddr, + &(psDeviceNode->psDevConfig->sRegsCpuPBase)); #endif - *pui32DeviceFlags = ui32DeviceFlags; -} - -#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -/*! -******************************************************************************* - - @Function RGXTDProcessFWImage + /* MIPS Page Table physical address */ + MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sAddr); - @Description Fetch and send data used by the trusted device to complete - the FW image setup + /* MIPS Page Table allocation is contiguous. Pass one or more addresses + * to the FW depending on the Page Table size and alignment. */ - @Input psDeviceNode : Device node - @Input psRGXFW : Firmware blob - @Input puFWParams : Parameters used by the FW at boot time + ui32PTSize = (psFWMMUDevAttrs->psTopLevelDevVAddrConfig->uiNumEntriesPT) + << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + ui32PTSize = PVR_ALIGN(ui32PTSize, 1U << psFWMMUDevAttrs->ui32BaseAlign); - @Return PVRSRV_ERROR -******************************************************************************/ -static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, - OS_FW_IMAGE *psRGXFW, - PVRSRV_FW_BOOT_PARAMS *puFWParams) -{ - PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_TD_FW_PARAMS sTDFWParams; - PVRSRV_ERROR eError; + puFWParams->sMips.ui32FWPageTableLog2PageSize = psFWMMUDevAttrs->ui32BaseAlign; + puFWParams->sMips.ui32FWPageTableNumPages = ui32PTSize >> psFWMMUDevAttrs->ui32BaseAlign; - if (psDevConfig->pfnTDSendFWImage == NULL) + if (puFWParams->sMips.ui32FWPageTableNumPages > 4U) { - PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); - return PVRSRV_ERROR_NOT_IMPLEMENTED; + PVR_DPF((PVR_DBG_ERROR, "%s: Page table cannot be mapped by the FW " + "(size 0x%x, log2 page size %u, %u pages)", + __func__, ui32PTSize, puFWParams->sMips.ui32FWPageTableLog2PageSize, + puFWParams->sMips.ui32FWPageTableNumPages)); + return PVRSRV_ERROR_INIT_FAILURE; } - sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); - sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); - - if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + /* Confirm page alignment fits in 64-bits */ + if (psFWMMUDevAttrs->ui32BaseAlign > 63) { - sTDFWParams.uFWP.sMeta = puFWParams->sMeta; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid page alignment " + "(psFWMMUDevAttrs->ui32BaseAlign = %u)", + __func__, psFWMMUDevAttrs->ui32BaseAlign)); + return PVRSRV_ERROR_INIT_FAILURE; } - else + + for (i = 0; i < puFWParams->sMips.ui32FWPageTableNumPages; i++) { - sTDFWParams.uFWP.sRISCV = puFWParams->sRISCV; + puFWParams->sMips.asFWPageTableAddr[i].uiAddr = + sAddr.uiAddr + i * (1ULL << psFWMMUDevAttrs->ui32BaseAlign); } - eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); + /* MIPS Stack Pointer Physical Address */ + eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, + &puFWParams->sMips.sFWStackAddr, + RGXGetFWImageSectionOffset(NULL, MIPS_STACK), + OSGetPageShift(), + 
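The page-table arithmetic in RGXAcquireMipsBootldrData is easy to check in isolation: the table size is the top-level entry count times the PTE size, rounded up to the FW MMU page size, and each page address is the contiguous base plus a page-sized stride. The sketch below uses example values for the entry size, entry count and alignment rather than the RGXMIPSFW_* constants.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(v, a)       (((v) + ((a) - 1u)) & ~((a) - 1u))
#define LOG2_PTE_ENTRY_SIZE  3u   /* 8-byte PTEs, illustrative */
#define MAX_FW_PT_PAGES      4u   /* limit the FW boot interface accepts */

int main(void)
{
    uint64_t base_addr   = 0x80000000ull; /* contiguous PT physical base (example) */
    uint32_t num_entries = 2048u;         /* top-level PT entry count (example)    */
    uint32_t base_align  = 12u;           /* log2 page size used by the FW MMU     */

    uint32_t pt_size   = ALIGN_UP(num_entries << LOG2_PTE_ENTRY_SIZE, 1u << base_align);
    uint32_t num_pages = pt_size >> base_align;

    if (num_pages > MAX_FW_PT_PAGES || base_align > 63u)
    {
        fprintf(stderr, "page table cannot be mapped by the FW\n");
        return 1;
    }

    /* The allocation is contiguous, so page i sits at base + i * page_size */
    for (uint32_t i = 0; i < num_pages; i++)
        printf("PT page %u @ 0x%llx\n", i,
               (unsigned long long)(base_addr + ((uint64_t)i << base_align)));

    return 0;
}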
1, + &bValid); return eError; } @@ -607,16 +847,14 @@ static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, @Description Allocate, initialise and pdump Firmware code and data memory @Input psDeviceNode : Device Node - @Input psHints : Apphints @Return PVRSRV_ERROR ******************************************************************************/ -static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, - RGX_SRVINIT_APPHINTS *psHints) +static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode) { OS_FW_IMAGE *psRGXFW = NULL; - const IMG_BYTE *pbRGXFirmware = NULL; + PVRSRV_FW_PARAMS sFWParams = {0}; /* FW code memory */ IMG_DEVMEM_SIZE_T uiFWCodeAllocSize; @@ -635,15 +873,23 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvFWCorememDataHostAddr = NULL; PVRSRV_FW_BOOT_PARAMS uFWParams; - RGX_LAYER_PARAMS sLayerParams; - PVRSRV_ERROR eError = PVRSRV_OK; - + PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + IMG_BOOL bUseSecureFWData = + RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && + RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32) || +#endif + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR); +#endif + /* * Get pointer to Firmware image */ - eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware); + eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW); if (eError != PVRSRV_OK) { @@ -651,19 +897,35 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, goto fw_load_fail; } - sLayerParams.psDevInfo = psDevInfo; + /* + * Get pointer and size + */ + sFWParams.pvFirmware = OSFirmwareData(psRGXFW); + sFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); + + /* + * Allow it to be pre-processed by the platform hook + */ + if (psDeviceNode->psDevConfig->pfnPrepareFWImage != NULL) + { + eError = psDeviceNode->psDevConfig->pfnPrepareFWImage( + psDeviceNode->psDevConfig->hSysData, &sFWParams); + if (eError != PVRSRV_OK) + goto cleanup_initfw; + } /* * Allocate Firmware memory */ - eError = RGXGetFWImageAllocSize(&sLayerParams, - pbRGXFirmware, - OSFirmwareSize(psRGXFW), + eError = RGXGetFWImageAllocSize(&psDevInfo->sLayerParams, + sFWParams.pvFirmware, + sFWParams.ui32FirmwareSize, &uiFWCodeAllocSize, &uiFWDataAllocSize, &uiFWCorememCodeAllocSize, - &uiFWCorememDataAllocSize); + &uiFWCorememDataAllocSize, + &psDevInfo->sFWInfoHeader); if (eError != PVRSRV_OK) { @@ -673,6 +935,22 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, goto cleanup_initfw; } + /* + * Initiate FW compatibility check for Native and Host. + * Guest compatibility check must be done after FW boot. 
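The new bUseSecureFWData flag condenses to a small predicate over the core's firmware processor and physical bus width. A standalone sketch, assuming a hypothetical feature-description struct in place of the RGX_IS_FEATURE_* macros, is given below.

#include <stdbool.h>

/* Hypothetical feature summary, standing in for the RGX_IS_FEATURE_* queries */
struct fw_features {
    bool has_meta;
    bool has_mips;
    bool has_riscv;
    unsigned int phys_bus_width;
};

static bool use_secure_fw_data(const struct fw_features *f)
{
    /* META and RISC-V cores always use TD-loaded FW data; MIPS only does
     * when the physical bus is wider than 32 bits. */
    return f->has_meta ||
           (f->has_mips && f->phys_bus_width > 32u) ||
           f->has_riscv;
}

int main(void)
{
    struct fw_features f = { .has_mips = true, .phys_bus_width = 36u };
    return use_secure_fw_data(&f) ? 0 : 1;
}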
+ */ + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed compatibility check for device %p (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + goto cleanup_initfw; + } + } + psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; #if defined(SUPPORT_TRUSTED_DEVICE) @@ -696,7 +974,7 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: PVRSRVRGXInitAllocFWImgMem failed (%d)", + "%s: RGXInitAllocFWImgMem failed (%d)", __func__, eError)); goto cleanup_initfw; @@ -706,60 +984,69 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, * Acquire pointers to Firmware allocations */ -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); - PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); - -#else - /* We can't get a pointer to a secure FW allocation from within the DDK */ - pvFWCodeHostAddr = NULL; +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCodeHostAddr = NULL; + } + else #endif + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); + } -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr); - PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); - -#else - /* We can't get a pointer to a secure FW allocation from within the DDK */ - pvFWDataHostAddr = NULL; +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWDataHostAddr = NULL; + } + else #endif + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); + } -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) + { + /* We can't get a pointer to a secure FW allocation from within the DDK */ + pvFWCorememCodeHostAddr = NULL; + } + else +#endif if (uiFWCorememCodeAllocSize != 0) { eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data); } - else + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (bUseSecureFWData) { - pvFWCorememCodeHostAddr = NULL; + pvFWCorememDataHostAddr = NULL; } -#else - /* We can't get a pointer to a secure FW allocation from within the DDK */ - pvFWCorememCodeHostAddr = NULL; + else #endif - -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) if (uiFWCorememDataAllocSize != 0) { eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, 
&pvFWCorememDataHostAddr); PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode); } - else -#endif - { - pvFWCorememDataHostAddr = NULL; - } /* * Prepare FW boot parameters */ + OSCachedMemSet(&uFWParams, 0, sizeof(PVRSRV_FW_BOOT_PARAMS)); if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) { - uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; - uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; - uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; + uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; + uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; + uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; @@ -770,8 +1057,25 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, uFWParams.sMeta.ui32NumThreads = 1; #endif } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXAcquireMipsBootldrData failed (%d)", + __func__, eError)); + goto release_fw_allocations; + } + } +#endif else { + uFWParams.sRISCV.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; + uFWParams.sRISCV.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; + uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; @@ -783,17 +1087,13 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, /* - * Process the Firmware image and setup code and data segments. - * - * When the trusted device is enabled and the FW code lives - * in secure memory we will only setup the data segments here, - * while the code segments will be loaded to secure memory - * by the trusted device. + * On Volcanic the TEE handles the loading of all Firmware sections. 
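Because the boot parameters are a union shared by META, MIPS and RISC-V, the patch zeroes the whole structure before filling only the fields of the active processor, which avoids passing stale data from a previously selected branch. A reduced sketch of that pattern, with illustrative field names and values, follows.

#include <stdint.h>
#include <string.h>

enum fw_cpu { FW_CPU_META, FW_CPU_MIPS, FW_CPU_RISCV };

/* Illustrative per-processor parameter blocks */
typedef struct { uint64_t code_va, data_va, coremem_code_va; } meta_params_t;
typedef struct { uint64_t gpu_reg_pa, pt_pa[4]; uint32_t num_pt_pages; } mips_params_t;
typedef struct { uint64_t code_va, data_va; } riscv_params_t;

typedef union {
    meta_params_t  meta;
    mips_params_t  mips;
    riscv_params_t riscv;
} fw_boot_params_t;

static void prepare_boot_params(fw_boot_params_t *p, enum fw_cpu cpu)
{
    memset(p, 0, sizeof(*p));   /* never pass stale fields from another branch */

    switch (cpu)
    {
        case FW_CPU_META:  p->meta.code_va  = 0x1000u; p->meta.data_va  = 0x2000u; break;
        case FW_CPU_MIPS:  p->mips.num_pt_pages = 1u;                              break;
        case FW_CPU_RISCV: p->riscv.code_va = 0x1000u; p->riscv.data_va = 0x2000u; break;
    }
}

int main(void)
{
    fw_boot_params_t params;
    prepare_boot_params(&params, FW_CPU_RISCV);
    return params.riscv.code_va == 0x1000u ? 0 : 1;
}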
*/ +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) if (!psDeviceNode->bAutoVzFwIsUp) { - eError = RGXProcessFWImage(&sLayerParams, - pbRGXFirmware, + eError = RGXProcessFWImage(&psDevInfo->sLayerParams, + sFWParams.pvFirmware, pvFWCodeHostAddr, pvFWDataHostAddr, pvFWCorememCodeHostAddr, @@ -802,18 +1102,33 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: RGXProcessFWImage failed (%d)", - __func__, - eError)); - goto release_corememdata; + "%s: RGXProcessFWImage failed (%d)", + __func__, eError)); + goto release_fw_allocations; + } + RGXFwSharedMemCacheOpExec(pvFWCodeHostAddr, + sizeof(psDevInfo->psRGXFWCodeMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + if (uiFWCorememCodeAllocSize) + { + RGXFwSharedMemCacheOpExec(pvFWCorememCodeHostAddr, + sizeof(psDevInfo->psRGXFWCorememCodeMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); } - } -#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) - if (psRGXFW) - { - RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams); + RGXFwSharedMemCacheOpExec(pvFWDataHostAddr, + sizeof(psDevInfo->psRGXFWDataMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + if (uiFWCorememDataAllocSize) + { + RGXFwSharedMemCacheOpExec(pvFWCorememDataHostAddr, + sizeof(psDevInfo->psRGXFWIfCorememDataStoreMemDesc->uiAllocSize), + PVRSRV_CACHE_OP_FLUSH); + } } +#else + sFWParams.uFWP = uFWParams; + RGXTDProcessFWImage(psDeviceNode, &sFWParams); #endif @@ -830,12 +1145,17 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, PDUMP_FLAGS_CONTINUOUS); #endif - PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "Dump firmware data image"); - DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc, - 0, - uiFWDataAllocSize, - PDUMP_FLAGS_CONTINUOUS); +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData) +#endif + { + PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, + "Dump firmware data image"); + DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc, + 0, + uiFWDataAllocSize, + PDUMP_FLAGS_CONTINUOUS); + } #if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) if (uiFWCorememCodeAllocSize != 0) @@ -849,8 +1169,11 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, } #endif -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData && uiFWCorememDataAllocSize) +#else if (uiFWCorememDataAllocSize != 0) +#endif { PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem data store image"); @@ -859,34 +1182,47 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, uiFWCorememDataAllocSize, PDUMP_FLAGS_CONTINUOUS); } -#endif +#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) || defined(RGX_FEATURE_MIPS_BIT_MASK) /* * Release Firmware allocations and clean up */ -release_corememdata: -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) - if (uiFWCorememDataAllocSize !=0) +release_fw_allocations: +#endif +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData && 
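The cache maintenance added after RGXProcessFWImage follows a simple rule: every region the host CPU wrote must be flushed before the non-coherent firmware processor reads it. The sketch below shows the write-then-flush ordering with a stand-in for RGXFwSharedMemCacheOpExec.

#include <stddef.h>
#include <string.h>

/* Stand-in for RGXFwSharedMemCacheOpExec(..., PVRSRV_CACHE_OP_FLUSH) */
static void cache_flush_range(const void *addr, size_t len)
{
    (void)addr; (void)len;   /* platform cache maintenance would go here */
}

static void load_fw_section(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);        /* populate the allocation from the image ... */
    cache_flush_range(dst, len);  /* ... then make it visible to the FW core    */
}

int main(void)
{
    char dst[8];
    static const char src[8] = "fwimage";
    load_fw_section(dst, src, sizeof(src));
    return dst[0] == 'f' ? 0 : 1;
}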
(uiFWCorememDataAllocSize != 0)) +#else + if (uiFWCorememDataAllocSize != 0) +#endif { DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); } release_corememcode: +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData && (uiFWCorememCodeAllocSize != 0)) +#else if (uiFWCorememCodeAllocSize != 0) +#endif { DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); } -#endif -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) release_data: - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData) #endif + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); + } -#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) release_code: - DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + if (!bUseSecureFWData) #endif + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + } cleanup_initfw: OSUnloadFirmware(psRGXFW); fw_load_fail: @@ -894,85 +1230,6 @@ static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, return eError; } -IMG_INTERNAL static inline IMG_UINT32 RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *); -IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); - -IMG_INTERNAL /*static inline*/ IMG_UINT32 -RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) -{ - *ppsModel = gasCntBlkTypeModel; - return ARRAY_SIZE(gasCntBlkTypeModel); -} - -/*! -******************************************************************************* - @Function RGXHWPerfMaxDefinedBlks - - @Description Return the number of valid block-IDs for the given device node - - @Input (PVRSRV_RGXDEV_INFO *) pvDevice device-node to query - - @Returns (IMG_UINT32) Number of block-IDs (RGX_CNTBLK_ID) - valid for this device. -******************************************************************************/ -IMG_INTERNAL static inline IMG_UINT32 -RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *psDevInfo) -{ - RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; - IMG_UINT32 uiRetVal; - const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psHWPBlkConfig; - IMG_UINT32 uiNumArrayEls, ui; - - uiRetVal = RGX_CNTBLK_ID_DIRECT_LAST; - - uiNumArrayEls = RGXGetHWPerfBlockConfig(&psHWPBlkConfig); - - if (psHWPBlkConfig == NULL) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL Config Block", __func__)); - return 0; - } - PVR_ASSERT(uiNumArrayEls > 0); - - /* Iterate over each block-ID and find the number of instances of each - * block which are present for this device type. We only query the - * Indirect blocks as their presence varies according to GPU. All direct - * blocks have an entry - but they may not be physically present. 
- */ - for (ui = RGX_CNTBLK_ID_DIRECT_LAST; ui < uiNumArrayEls; ui++) - { - if (rgx_hwperf_blk_present(&psHWPBlkConfig[ui], (void *)psDevInfo, &sRtInfo)) - { - uiRetVal += sRtInfo.uiNumUnits; - PVR_DPF((PVR_DBG_VERBOSE, "%s: Block %u, NumUnits %u, Total %u", - __func__, ui, sRtInfo.uiNumUnits, uiRetVal)); - } -#ifdef DEBUG - else - { - if (psHWPBlkConfig[ui].uiCntBlkIdBase == RGX_CNTBLK_ID_RAC0) - { - if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, - RAY_TRACING_ARCH) > 2U) - { - PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present", - __func__, ui)); - } - } - else - { - PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present", - __func__, ui)); - } - } -#endif - } - - PVR_DPF((PVR_DBG_VERBOSE, "%s: Num Units = %u", __func__, uiRetVal)); - - return uiRetVal; -} - /*! ******************************************************************************* @@ -1001,8 +1258,6 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc; IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx; RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; - IMG_UINT32 uiUnit; - IMG_BOOL bDirect; ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); @@ -1026,7 +1281,7 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, #endif /* defined(PDUMP) */ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, - "HWPerf Counter config starts here."); + "HWPerf Counter Config starts here."); /* Simply iterate over all the RGXFWIW_HWPERF_CTL blocks in order */ psHWPerfInitBlkData = &psHWPerfInitDataInt->sBlkCfg[0]; @@ -1034,6 +1289,8 @@ static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++, psHWPerfInitBlkData++) { + IMG_UINT32 uiUnit; + IMG_BOOL bDirect; IMG_BOOL bSingleton; /* Exit early if this core does not have any of these counter blocks @@ -1193,6 +1450,7 @@ static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt); InitialiseHWPerfCounters(psDeviceNode, psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); + RGXFwSharedMemCacheOpPtr(psHWPerfInitData, FLUSH); failHWPerfCountersMemDescAqCpuVirt: DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); @@ -1200,38 +1458,6 @@ static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) return eError; } -/* - * _ParseHTBAppHints: - * - * Generate necessary references to the globally visible AppHints which are - * declared in the above #include "km_apphint_defs.h" - * Without these local references some compiler tool-chains will treat - * unreferenced declarations as fatal errors. This function duplicates the - * HTB_specific apphint references which are made in htbserver.c:HTBInit() - * However, it makes absolutely *NO* use of these hints. - */ -static void -_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - void *pvParamState = NULL; - IMG_UINT32 ui32LogType; - IMG_BOOL bAnyLogGroupConfigured; - IMG_UINT32 ui32BufferSize; - IMG_UINT32 ui32OpMode; - - /* Services initialisation parameters */ - pvParamState = SrvInitParamOpen(); - if (pvParamState == NULL) - return; - - SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE, pvParamState, EnableHTBLogGroup, ui32LogType); - bAnyLogGroupConfigured = ui32LogType ? 
IMG_TRUE : IMG_FALSE; - SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE, pvParamState, HTBOperationMode, ui32OpMode); - SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HTBufferSizeInKB, ui32BufferSize); - - SrvInitParamClose(pvParamState); -} - #if defined(SUPPORT_TRUSTED_DEVICE) static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_PHYS_HEAP ePhysHeap, @@ -1240,7 +1466,7 @@ static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap]; PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap); PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE - | PHYS_HEAP_USAGE_GPU_SECURE); + | PHYS_HEAP_USAGE_GPU_SECURE | PHYS_HEAP_USAGE_FW_PRIVATE); PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0, PVRSRV_ERROR_NOT_SUPPORTED, @@ -1261,11 +1487,14 @@ static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; - eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); - eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); - PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); + } eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE); PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE"); @@ -1291,27 +1520,18 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_ERROR eError; /* Services initialisation parameters */ - RGX_SRVINIT_APPHINTS sApphints = {0}; + RGX_INIT_APPHINTS sApphints = {0}; IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags; - IMG_UINT32 ui32DeviceFlags; - IMG_UINT32 ui32AvailablePowUnitsMask, ui32AvailableRACMask; PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; - /* Number of HWPerf Block-IDs (RGX_CNTBLK_ID) which are available */ - IMG_UINT32 ui32NumHWPerfBlocks; - - /* Size of the RGXFWIF_HWPERF_CTL_BLK structure - varies by BVNC */ - IMG_UINT32 ui32HWPerfBlkSize; - RGX_LAYER_PARAMS sLayerParams; - PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 1"); PDUMPCOMMENT(psDeviceNode, "Device Name: %s", psDeviceNode->psDevConfig->pszName); PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)", psDeviceNode->sDevId.ui32InternalID, - psDeviceNode->sDevId.i32OsDeviceID); + psDeviceNode->sDevId.i32KernelDeviceID); if (psDeviceNode->psDevConfig->pszVersion) { @@ -1327,52 +1547,28 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); - RGXInitMultiCoreInfo(psDeviceNode); - -#if defined(PDUMP) - eError = DevmemIntAllocDefBackingPage(psDeviceNode, - &psDeviceNode->sDummyPage, - PVR_DUMMY_PAGE_INIT_VALUE, - DUMMY_PAGE, - IMG_TRUE); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__)); - goto cleanup; - } - eError = DevmemIntAllocDefBackingPage(psDeviceNode, - &psDeviceNode->sDevZeroPage, - PVR_ZERO_PAGE_INIT_VALUE, - 
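RGXValidateTDHeap boils down to two checks on the physheap usage flags: at least one required usage bit must be present, and no bit outside the allowed set may be set. A standalone sketch with placeholder flag values is shown here.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder usage bits, standing in for PHYS_HEAP_USAGE_* */
#define HEAP_USAGE_FW_CODE      (1u << 0)
#define HEAP_USAGE_FW_PRIV_DATA (1u << 1)
#define HEAP_USAGE_GPU_SECURE   (1u << 2)
#define HEAP_USAGE_FW_PRIVATE   (1u << 3)

static bool validate_td_heap(uint32_t heap_flags, uint32_t required_flags)
{
    const uint32_t allowed = HEAP_USAGE_FW_CODE | HEAP_USAGE_FW_PRIV_DATA |
                             HEAP_USAGE_GPU_SECURE | HEAP_USAGE_FW_PRIVATE;

    if ((heap_flags & required_flags) == 0u)
        return false;               /* required usage missing */

    if ((heap_flags & ~allowed) != 0u)
        return false;               /* carries a usage outside the allowed set */

    return true;
}

int main(void)
{
    return validate_td_heap(HEAP_USAGE_FW_CODE | HEAP_USAGE_GPU_SECURE,
                            HEAP_USAGE_FW_CODE) ? 0 : 1;
}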
DEV_ZERO_PAGE, - IMG_TRUE); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__)); - goto cleanup; - } -#endif - - sLayerParams.psDevInfo = psDevInfo; - #if defined(SUPPORT_TRUSTED_DEVICE) eError = RGXValidateTDHeaps(psDeviceNode); PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps"); #endif #if defined(SUPPORT_AUTOVZ) - if (PVRSRV_VZ_MODE_IS(HOST)) + if (PVRSRV_VZ_MODE_IS(HOST, DEVNODE, psDeviceNode)) { - /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation + /* The RGX_CR_MTS_DM4_INTERRUPT_ENABLE register is always set by the firmware during initialisation * and it provides a good method of determining if the firmware has been booted previously */ - psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0); + psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM4_INTERRUPT_ENABLE) != 0); PVR_LOG(("AutoVz startup check: firmware is %s;", (psDeviceNode->bAutoVzFwIsUp) ? "already running" : "powered down")); + PVR_LOG(("AutoVz allow GPU powerdown is %s:", + (psDeviceNode->bAutoVzAllowGPUPowerdown) ? "enabled" : "disabled")); } - else if (PVRSRV_VZ_MODE_IS(GUEST)) + else if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { /* Guest assumes the firmware is always available */ psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; + psDeviceNode->bAutoVzAllowGPUPowerdown = IMG_FALSE; } else #endif @@ -1381,7 +1577,7 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; } - if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) || (psDeviceNode->bAutoVzFwIsUp)) { /* set the device power state here as the regular power * callbacks will not be executed on this driver */ @@ -1390,9 +1586,11 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) /* Set which HW Safety Events will be handled by the driver */ psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ? - RGX_CR_EVENT_STATUS_WDT_TIMEOUT_EN : 0; - psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS) - && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ? + RGX_CR_EVENT_STATUS__AXT_IF_EQ2_AND_PIPEDM_EQ0__WDT_TIMEOUT_EN : 0; + psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&psDevInfo->sLayerParams, ECC_RAMS) + && (RGX_DEVICE_GET_FEATURE_VALUE(&psDevInfo->sLayerParams, ECC_RAMS) > 0)) ? + RGX_CR_EVENT_STATUS_FAULT_FW_EN : 0; + psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_DUAL_LOCKSTEP) ? 
RGX_CR_EVENT_STATUS_FAULT_FW_EN : 0; #if defined(PDUMP) @@ -1400,22 +1598,16 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) "Register defs revision: %d", RGX_CR_DEFS_KM_REVISION); #endif - ui32NumHWPerfBlocks = RGXHWPerfMaxDefinedBlks((void *)psDevInfo); - - ui32HWPerfBlkSize = sizeof(RGXFWIF_HWPERF_CTL) + - (ui32NumHWPerfBlocks - 1) * sizeof(RGXFWIF_HWPERF_CTL_BLK); - /* Services initialisation parameters */ - _ParseHTBAppHints(psDeviceNode); GetApphints(psDevInfo, &sApphints); - InitDeviceFlags(&sApphints, &ui32DeviceFlags); + #if defined(SUPPORT_GPUVIRT_VALIDATION) #if defined(EMULATOR) if ((sApphints.bEnableTrustedDeviceAceConfig) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE))) { - SetTrustedDeviceAceEnabled(); + SetTrustedDeviceAceEnabled(psDeviceNode->psDevConfig->hSysData); } #endif #endif @@ -1429,15 +1621,14 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) goto cleanup; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = InitFirmware(psDeviceNode, &sApphints); + eError = InitFirmware(psDeviceNode); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: InitFirmware failed (%d)", - __func__, - eError)); + "%s: InitFirmware failed (%d)", + __func__, eError)); goto cleanup; } } @@ -1448,62 +1639,28 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags); -#if defined(SUPPORT_VALIDATION) - ui32AvailablePowUnitsMask = sApphints.ui32AvailablePowUnitsMask; - ui32AvailableRACMask = sApphints.ui32AvailableRACMask; -#else - ui32AvailablePowUnitsMask = AVAIL_POW_UNITS_MASK_DEFAULT; - ui32AvailableRACMask = AVAIL_RAC_MASK_DEFAULT; -#endif - eError = RGXInitFirmware(psDeviceNode, - sApphints.bEnableSignatureChecks, - sApphints.ui32SignatureChecksBufSize, - sApphints.ui32HWPerfFWBufSize, - (IMG_UINT64)sApphints.ui32HWPerfFilter0 | - ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32), + &sApphints, ui32FWConfigFlags, - sApphints.ui32LogType, - GetFilterFlags(&sApphints), - sApphints.ui32JonesDisableMask, - sApphints.ui32HWRDebugDumpLimit, -#if defined(SUPPORT_VALIDATION) - sApphints.ui32RenderKillingCtl, - sApphints.ui32CDMTDMKillingCtl, - &sApphints.aui32TPUTrilinearFracMask[0], - &sApphints.aui32USRMNumRegions[0], - (IMG_PUINT64)&sApphints.aui64UVBRMNumRegions[0], -#else - 0, 0, - NULL, NULL, NULL, -#endif - ui32HWPerfBlkSize, - sApphints.eRGXRDPowerIslandConf, - sApphints.bSPUClockGating, - sApphints.eFirmwarePerf, - sApphints.ui32KCCBSizeLog2, ui32FWConfigFlagsExt, - ui32AvailablePowUnitsMask, - ui32AvailableRACMask, ui32FwOsCfgFlags); if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVRGXInitFirmware failed (%d)", - __func__, - eError)); + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXInitFirmware failed (%d)", + __func__, eError)); goto cleanup; } - if (!PVRSRV_VZ_MODE_IS(GUEST)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { eError = InitialiseAllCounters(psDeviceNode); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: InitialiseAllCounters failed (%d)", - __func__, - eError)); + "%s: InitialiseAllCounters failed (%d)", + __func__, eError)); goto cleanup; } } @@ -1512,23 +1669,15 @@ PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) * Perform second stage of RGX initialisation */ eError = RGXInitDevPart2(psDeviceNode, - ui32DeviceFlags, - sApphints.ui32HWPerfHostFilter, - sApphints.eRGXActivePMConf, - ui32AvailablePowUnitsMask, - ui32AvailableRACMask); + &sApphints); if (eError != 
PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: PVRSRVRGXInitDevPart2KM failed (%d)", - __func__, - eError)); + "%s: RGXInitDevPart2 failed (%d)", + __func__, eError)); goto cleanup; } -#if defined(SUPPORT_VALIDATION) - PVRSRVAppHintDumpState(psDeviceNode); -#endif eError = PVRSRV_OK; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxstartstop.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxstartstop.c index 8dcbf4f6d0ec..042138268df0 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxstartstop.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxstartstop.c @@ -48,7 +48,142 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * Any new code should be built on top of the existing abstraction layer, * which should be extended when necessary. */ #include "rgxstartstop.h" -#include "rgxfwutils.h" + + + +/* + RGXWriteMetaRegThroughSP +*/ +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32StateReg, ui32StateReadyFlag; + IMG_UINT32 ui32CtrlReg, ui32DataReg; + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_REGISTER_UNPACKED_ACCESSES)) + { + /* ensure the meta_registers_unpacked_accesses auto-increment feature is not used */ + BITMASK_UNSET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN); + + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA; + } + else + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + } + } + else +#endif + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1_READY_EN | + RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0; + ui32DataReg = RGX_CR_META_SP_MSLVDATAT; + } + + eError = RGXPollReg32(hPrivate, ui32StateReg, ui32StateReadyFlag, ui32StateReadyFlag); + + if (eError == PVRSRV_OK) + { + /* Issue a Write */ + RGXWriteReg32(hPrivate, ui32CtrlReg, ui32RegAddr); + (void) RGXReadReg32(hPrivate, ui32CtrlReg); /* Fence write */ + RGXWriteReg32(hPrivate, ui32DataReg, ui32RegValue); + (void) RGXReadReg32(hPrivate, ui32DataReg); /* Fence write */ + } + + return eError; +} + +/* + RGXReadMetaRegThroughSP +*/ +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32* ui32RegValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32StateReg, ui32StateReadyFlag; + IMG_UINT32 ui32CtrlReg, ui32DataReg; + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_REGISTER_UNPACKED_ACCESSES)) + { + /* ensure the meta_registers_unpacked_accesses auto-increment feature is not used */ + BITMASK_UNSET(ui32RegAddr, 
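The slave-port write in RGXWriteMetaRegThroughSP is a fixed sequence: poll the state register for READY and GBLPORT_IDLE, post the target META register address to the control register, post the value to the data register, and fence each post with a read-back. The sketch below models the registers as a plain array and uses stand-ins for RGXPollReg32/RGXWriteReg32/RGXReadReg32; the example META address is hypothetical.

#include <stdint.h>

static uint32_t regs[64];   /* toy register file */

static uint32_t reg_read32(uint32_t off)              { return regs[off]; }
static void     reg_write32(uint32_t off, uint32_t v) { regs[off] = v; }

static int poll_ready(uint32_t state_off, uint32_t ready_mask)
{
    /* RGXPollReg32 would retry with a timeout; a single check is enough here */
    return (reg_read32(state_off) & ready_mask) == ready_mask ? 0 : -1;
}

static int meta_reg_write(uint32_t state_off, uint32_t ready_mask,
                          uint32_t ctrl_off, uint32_t data_off,
                          uint32_t meta_addr, uint32_t value)
{
    if (poll_ready(state_off, ready_mask) != 0)
        return -1;

    reg_write32(ctrl_off, meta_addr);
    (void)reg_read32(ctrl_off);       /* fence the address write */
    reg_write32(data_off, value);
    (void)reg_read32(data_off);       /* fence the data write */
    return 0;
}

int main(void)
{
    regs[0] = 0x3u;   /* pretend READY and GBLPORT_IDLE are already set */
    return meta_reg_write(0u, 0x3u, 1u, 2u, 0x1234u /* example META reg */, 0x1u);
}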
RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN); + + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA; + BITMASK_SET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN); + } + else + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN | + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; + ui32DataReg = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA; + BITMASK_SET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN); + } + } + else +#endif + { + ui32StateReg = RGX_CR_META_SP_MSLVCTRL1; + ui32StateReadyFlag = RGX_CR_META_SP_MSLVCTRL1_READY_EN | + RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32CtrlReg = RGX_CR_META_SP_MSLVCTRL0; + ui32DataReg = RGX_CR_META_SP_MSLVDATAX; + BITMASK_SET(ui32RegAddr, RGX_CR_META_SP_MSLVCTRL0_RD_EN); + } + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + ui32StateReg, + ui32StateReadyFlag, + ui32StateReadyFlag); + if (eError == PVRSRV_OK) + { + /* Issue a Read */ + RGXWriteReg32(hPrivate, ui32CtrlReg, ui32RegAddr); + (void) RGXReadReg32(hPrivate, ui32CtrlReg); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + ui32StateReg, + ui32StateReadyFlag, + ui32StateReadyFlag); + if (eError != PVRSRV_OK) return eError; + } + +#if !defined(NO_HARDWARE) + *ui32RegValue = RGXReadReg32(hPrivate, ui32DataReg); +#else + PVR_UNREFERENCED_PARAMETER(ui32DataReg); + *ui32RegValue = 0xFFFFFFFF; +#endif + + return eError; +} /* * Specific fields for RGX_CR_IDLE must not be polled in pdumps @@ -127,93 +262,286 @@ static void RGXInitMetaProcWrapper(const void *hPrivate) /* Garten IDLE bit controlled by META */ ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; +#if defined(RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_SHIFT) + /* Set the Garten Wrapper BIF Fence address */ + if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) + { + /* Set PC = 0 for fences */ + ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_CLRMSK; + ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV + << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_SHIFT; + + } + else + { + /* Set PC = 0 for fences */ + ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK; + ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV + << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT; + + /* Set SLC DM=META */ + ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_BIFDM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; + } +#endif + RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); } - +#if defined(RGX_FEATURE_MIPS_BIT_MASK) /*! 
******************************************************************************* - @Function RGXInitRiscvProcWrapper + @Function RGXInitMipsProcWrapper - @Description Configures the hardware wrapper of the RISCV processor + @Description Configures the hardware wrapper of the MIPS processor @Input hPrivate : Implementation specific data @Return void ******************************************************************************/ -static void RGXInitRiscvProcWrapper(const void *hPrivate) +static void RGXInitMipsProcWrapper(const void *hPrivate) { - IMG_DEV_VIRTADDR sTmp; + IMG_DEV_PHYADDR sPhyAddr; + IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */ - RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); + RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper"); - RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); - RGXAcquireBootCodeAddr(hPrivate, &sTmp); - RGXWriteReg64(hPrivate, - RGXRISCVFW_BOOTLDR_CODE_REMAP, - sTmp.uiAddr | - (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) - << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | - (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | - RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); - - RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); - RGXAcquireBootDataAddr(hPrivate, &sTmp); - RGXWriteReg64(hPrivate, - RGXRISCVFW_BOOTLDR_DATA_REMAP, - sTmp.uiAddr | - (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) - << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | - (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | - RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); + /* + * MIPS wrapper (registers transaction ID and ISA mode) setup + */ - /* Garten IDLE bit controlled by RISCV */ - RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); + RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register"); + + if (RGXGetDevicePhysBusWidth(hPrivate) > 32) + { + RGXWriteReg32(hPrivate, + RGX_CR_MIPS_WRAPPER_CONFIG, + (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >> + RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) | + RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); + } + else + { + RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr); + + RGXMIPSWrapperConfig(hPrivate, + RGX_CR_MIPS_WRAPPER_CONFIG, + sPhyAddr.uiAddr, + RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN, + RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); + } + + /* + * Boot remap setup + */ + + RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr); + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Do not mark accesses to a FW code remap region as DRM accesses */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; +#endif + + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#else + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#endif + + RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers"); + RGXBootRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP1_CONFIG1, + RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP1_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + +#if defined(FIX_HW_BRN_63553_BIT_MASK) + if (RGX_DEVICE_HAS_BRN(hPrivate, 63553)) + { + 
IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32; + IMG_BOOL bDevicePA0IsValid = RGXDevicePA0IsValid(hPrivate); + + /* WA always required on 36 bit cores, to avoid continuous unmapped memory accesses to address 0x0 */ + if (bPhysBusAbove32Bit || !bDevicePA0IsValid) + { + RGXCodeRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP5_CONFIG1, + 0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP5_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + } + } +#endif + + /* + * Data remap setup + */ + + RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr); + +#if defined(SUPPORT_TRUSTED_DEVICE) + if (RGXGetDevicePhysBusWidth(hPrivate) > 32) + { + /* Remapped private data in secure memory */ + ui64RemapSettings |= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN; + } + else + { + /* Remapped data in non-secure memory */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; + } +#endif + + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; + + RGXCommentLog(hPrivate, "RGXStart: Write data remap registers"); + RGXDataRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP2_CONFIG1, + RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP2_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + + /* + * Code remap setup + */ + + RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr); + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Do not mark accesses to a FW code remap region as DRM accesses */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; +#endif + + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; +#if defined(MIPS_FW_CODE_OSID) + ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#else + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; +#endif + + RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers"); + RGXCodeRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP3_CONFIG1, + RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP3_CONFIG2, + sPhyAddr.uiAddr, + ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + + if (RGXGetDevicePhysBusWidth(hPrivate) == 32) + { + /* + * Trampoline remap setup + */ + + RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr); + ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE; + +#if defined(SUPPORT_TRUSTED_DEVICE) + /* Remapped data in non-secure memory */ + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; +#endif + + ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; + ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; + + RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers"); + RGXTrampolineRemapConfig(hPrivate, + RGX_CR_MIPS_ADDR_REMAP4_CONFIG1, + sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, + RGX_CR_MIPS_ADDR_REMAP4_CONFIG2, + RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, + ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK, + ui64RemapSettings); + } + + /* Garten IDLE bit controlled by MIPS */ + RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS"); RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, 
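The MIPS remap setup repeats one idiom for every window: clear a field with its CLRMSK, then OR in the wanted OSID and, on trusted-device builds, set or clear the TRUSTED bit. A minimal sketch with placeholder shift and mask values follows.

#include <stdint.h>

/* Placeholder field layout for the REMAP*_CONFIG2 OS_ID / TRUSTED bits */
#define REMAP_OS_ID_SHIFT   4u
#define REMAP_OS_ID_CLRMSK  (~(0x7ull << REMAP_OS_ID_SHIFT))
#define REMAP_TRUSTED_EN    (1ull << 7)

static uint64_t build_remap_settings(uint64_t base_settings,
                                     uint32_t os_id, int trusted)
{
    uint64_t settings = base_settings;

    settings &= REMAP_OS_ID_CLRMSK;                    /* drop any stale OSID */
    settings |= (uint64_t)os_id << REMAP_OS_ID_SHIFT;  /* select the FW OSID  */

    if (trusted)
        settings |= REMAP_TRUSTED_EN;     /* mark accesses as secure/DRM */
    else
        settings &= ~REMAP_TRUSTED_EN;    /* plain non-secure accesses   */

    return settings;
}

int main(void)
{
    return build_remap_settings(0ull, 3u, 1) != 0ull ? 0 : 1;
}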
RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); + + /* Turn on the EJTAG probe (only useful driver live) */ + RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0); } +#endif /*! ******************************************************************************* - @Function RGXInitBIF + @Function RGXInitRiscvProcWrapper - @Description Initialise RGX BIF + @Description Configures the hardware wrapper of the RISCV processor - @Input hPrivate : Implementation specific data + @Input hPrivate : Implementation specific data @Return void ******************************************************************************/ -static void RGXInitBIF(const void *hPrivate) +static void RGXInitRiscvProcWrapper(const void *hPrivate) { - IMG_DEV_PHYADDR sPCAddr; - IMG_UINT32 uiPCAddr; - IMG_UINT32 ui32CBaseMapCtxReg; - RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; - PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + IMG_UINT32 ui32BootCodeRemap = RGXRISCVFW_BOOTLDR_CODE_REMAP; + IMG_UINT32 ui32BootDataRemap = RGXRISCVFW_BOOTLDR_DATA_REMAP; + IMG_DEV_VIRTADDR sTmp; - /* - * Acquire the address of the Kernel Page Catalogue. - */ - RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); + RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); + +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) < 4) +#endif + { + RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); + RGXAcquireBootCodeAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + ui32BootCodeRemap, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); + + RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); + RGXAcquireBootDataAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + ui32BootDataRemap, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN) + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN | +#endif + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); + } + + /* Garten IDLE bit controlled by RISCV */ + RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); + RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); +} + + +static void RGXWriteKernelCatBase(const void *hPrivate, IMG_DEV_PHYADDR sPCAddr) +{ + IMG_UINT32 uiPCAddr; - if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) { + IMG_UINT32 ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; + uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT) << RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT) & ~RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK); - /* - * Write the kernel catalogue base. 
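The RISC-V boot remap value written above is the OR of four parts: the bootloader device-virtual address, the remap window size (the firmware heap size scaled by the align-shift), the MMU context used for the accesses, and the fetch or load-store enable bit. The sketch below recomputes such a value with illustrative shifts and heap size, not the RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 constants.

#include <stdint.h>

/* Illustrative layout, not the real register field positions */
#define FW_RAW_HEAP_SIZE   (32u << 20)   /* 32 MB firmware heap, example */
#define SIZE_ALIGNSHIFT    20u
#define SIZE_SHIFT         48u
#define MMU_CONTEXT_SHIFT  40u
#define FETCH_EN           (1ull << 62)

static uint64_t riscv_boot_remap(uint64_t bootldr_devvaddr, uint32_t mmu_ctx)
{
    return bootldr_devvaddr |
           ((uint64_t)(FW_RAW_HEAP_SIZE >> SIZE_ALIGNSHIFT) << SIZE_SHIFT) |
           ((uint64_t)mmu_ctx << MMU_CONTEXT_SHIFT) |
           FETCH_EN;
}

int main(void)
{
    return riscv_boot_remap(0x10000000ull, 1u) != 0ull ? 0 : 1;
}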
- */ - RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); - - ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4; - /* Set the mapping context */ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */ @@ -237,22 +565,50 @@ static void RGXInitBIF(const void *hPrivate) uiPCAddr); #endif } +#else /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ + if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) + { + /* Write the cat-base address */ + RGXWriteKernelMMUPC64(hPrivate, + BIF_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), + RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT, + RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT, + ((sPCAddr.uiAddr + >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) + << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) + & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + /* Keep catbase registers in sync */ + RGXWriteKernelMMUPC64(hPrivate, + FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), + RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, + RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, + ((sPCAddr.uiAddr + >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) + << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) + & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); + } + + /* + * Trusted Firmware boot + */ +#if defined(SUPPORT_TRUSTED_DEVICE) + RGXCommentLog(hPrivate, "RGXWriteKernelCatBase: Trusted Device enabled"); + RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); +#endif + } +#endif /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ else { uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); - /* - * Write the kernel catalogue base. - */ - RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); - - ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT; - /* Set the mapping context */ - RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); - (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ /* Write the cat-base address */ RGXWriteKernelMMUPC32(hPrivate, @@ -263,8 +619,8 @@ static void RGXInitBIF(const void *hPrivate) #if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) /* Set-up different MMU ID mapping to the same PC used above */ - RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWIF); - (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ RGXWriteKernelMMUPC32(hPrivate, RGX_CR_MMU_CBASE_MAPPING, @@ -275,7 +631,52 @@ static void RGXInitBIF(const void *hPrivate) } } +/*! 
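Writing the kernel page-catalogue base uses the usual align-shift/shift/clear-mask dance: scale the physical address down by the register alignment, move it to the field position, and mask to the field width. A standalone sketch with placeholder constants:

#include <stdint.h>

/* Placeholder register field description (ALIGNSHIFT / SHIFT / CLRMSK) */
#define CAT_BASE_ALIGNSHIFT  12u                  /* PC address is 4 KB aligned */
#define CAT_BASE_SHIFT       4u                   /* field position in the reg  */
#define CAT_BASE_CLRMSK      (~0xFFFFFFF0ull)     /* bits outside the field     */

static uint64_t pack_cat_base(uint64_t pc_phys_addr)
{
    return ((pc_phys_addr >> CAT_BASE_ALIGNSHIFT) << CAT_BASE_SHIFT)
           & ~CAT_BASE_CLRMSK;
}

int main(void)
{
    return pack_cat_base(0x12345000ull) == 0x123450ull ? 0 : 1;
}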
+******************************************************************************* + + @Function RGXInitBIF + + @Description Initialise RGX BIF + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitBIF(const void *hPrivate) +{ +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + /* + * Trusted Firmware boot + */ +#if defined(SUPPORT_TRUSTED_DEVICE) + RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); + RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); +#endif + } + else +#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ + { + IMG_DEV_PHYADDR sPCAddr; + + /* + * Acquire the address of the Kernel Page Catalogue. + */ + RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); + + /* + * Write the kernel catalogue base. + */ + RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); + + RGXWriteKernelCatBase(hPrivate, sPCAddr); + } +} + + +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) /**************************************************************************/ /*! @Function RGXInitMMURangeRegisters @Description Initialises MMU range registers for Non4K pages. @@ -284,30 +685,66 @@ static void RGXInitBIF(const void *hPrivate) */ /**************************************************************************/ static void RGXInitMMURangeRegisters(const void *hPrivate) { - RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; - PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; IMG_UINT32 ui32RegAddr = RGX_CR_MMU_PAGE_SIZE_RANGE_ONE; IMG_UINT32 i; - for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i, ui32RegAddr += sizeof(IMG_UINT64)) + for (i = 0; i < RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES; ++i, ui32RegAddr += sizeof(IMG_UINT64)) { - RGXWriteReg64(hPrivate, ui32RegAddr, psDevInfo->aui64MMUPageSizeRangeValue[i]); + RGXWriteReg64(hPrivate, ui32RegAddr, RGXMMUInitRangeValue(i)); } } +#endif -/**************************************************************************/ /*! -@Function RGXInitAXIACE -@Description Initialises AXI ACE registers -@Input hPrivate Implementation specific data -@Return void - */ /**************************************************************************/ +/*! +******************************************************************************* + + @Function RGXInitAXIACE + + @Description Initialise AXI-ACE interface + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ static void RGXInitAXIACE(const void *hPrivate) { IMG_UINT64 ui64RegVal; + IMG_UINT32 ui32RegAddr; + +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) + ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION; + + /* Setup AXI-ACE config. 
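RGXInitMMURangeRegisters walks a bank of consecutive 64-bit registers, one per Non4K page-size range, stepping the offset by sizeof(IMG_UINT64) each iteration. The sketch below reproduces the loop shape with an illustrative base offset, range count and value generator.

#include <stdint.h>
#include <stdio.h>

/* Illustrative base offset, range count and per-range value */
#define MMU_PAGE_SIZE_RANGE_ONE   0x1000u
#define MAX_MMU_PAGE_SIZE_RANGES  4u

static uint64_t range_value(uint32_t i) { return (uint64_t)i << 8; } /* placeholder */

int main(void)
{
    uint32_t reg = MMU_PAGE_SIZE_RANGE_ONE;
    uint32_t i;

    /* One 64-bit register per Non4K range, laid out back to back */
    for (i = 0; i < MAX_MMU_PAGE_SIZE_RANGES; i++, reg += sizeof(uint64_t))
        printf("write64(0x%04x, 0x%llx)\n", (unsigned)reg,
               (unsigned long long)range_value(i));

    return 0;
}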
Set everything to outer cache */ + ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) | + (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) | + (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT); + +#if defined(FIX_HW_BRN_42321_BIT_MASK) + if (RGX_DEVICE_HAS_BRN(hPrivate, 42321)) + { + ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT); + } +#endif + +#if defined(FIX_HW_BRN_68186_BIT_MASK) + if (RGX_DEVICE_HAS_BRN(hPrivate, 68186)) + { + /* default value for reg_enable_fence_out is zero. Force to 1 to allow core_clk < mem_clk */ + ui64RegVal |= (IMG_UINT64)1 << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT; + } +#endif +#else /* defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) */ + ui32RegAddr = RGX_CR_ACE_CTRL; /** - * The below configuration is only applicable for RGX core's supporting + * The below configuration is only applicable for RGX cores supporting * ACE/ACE-lite protocol and connected to ACE coherent interconnect. */ @@ -330,9 +767,10 @@ static void RGXInitAXIACE(const void *hPrivate) */ ui64RegVal |= (IMG_UINT64)(RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE | RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE); +#endif /* defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) */ RGXCommentLog(hPrivate, "Init AXI-ACE interface"); - RGXWriteReg64(hPrivate, RGX_CR_ACE_CTRL, ui64RegVal); + RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal); } static void RGXMercerSoftResetSet(const void *hPrivate, IMG_UINT64 ui32MercerFlags) @@ -383,6 +821,12 @@ static void RGXSPUSoftResetDeAssert(const void *hPrivate) static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCESSOR) { /* Set RGX in soft-reset */ + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + RGXCommentLog(hPrivate, "RGXStart: soft reset cpu core"); + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 0); + } + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); RGXSPUSoftResetAssert(hPrivate); @@ -414,7 +858,7 @@ static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCE static void DeassertMetaReset(const void *hPrivate) { - /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ + /* Need to wait for at least 32 cycles before taking the FW processor out of reset ... */ RGXWaitCycles(hPrivate, 32, 3); RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); @@ -444,8 +888,16 @@ static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate) } /* Clocks must be set to "on" during RAMs initialization. */ - RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_ON); + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, PIPELINED_DATAMASTERS_VERSION) > 0) + { + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0__PIPEDM_GT0__ALL_ON); + } + else + { + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_ON); + } RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_ON); + RGXWriteReg32(hPrivate, RGX_CR_CLK_CTRL2, RGX_CR_CLK_CTRL2_ALL_ON); if (bMetaFW) { @@ -457,6 +909,16 @@ static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate) ui32Mask = bMetaFW ? 
RGX_CR_JONES_RAM_INIT_KICK_MASKFULL : RGX_CR_JONES_RAM_INIT_KICK_MASKFULL & ~RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN; + + /* SLC component in CHEST either does not exist or not powered at this point */ + ui32Mask &= RGX_CR_JONES_RAM_INIT_KICK_SLC_CHEST_CLRMSK; + + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) >= 4) + { + /* FBCDC has components in CHEST which may not be powered at this point */ + ui32Mask &= RGX_CR_JONES_RAM_INIT_KICK_FBCDC_CLRMSK; + } + RGXWriteReg64(hPrivate, RGX_CR_JONES_RAM_INIT_KICK, ui32Mask); eError = RGXPollReg64(hPrivate, RGX_CR_JONES_RAM_STATUS, ui32Mask, ui32Mask); @@ -466,8 +928,16 @@ static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate) RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value); } - RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_AUTO); + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, PIPELINED_DATAMASTERS_VERSION) > 0) + { + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0__PIPEDM_GT0__ALL_AUTO); + } + else + { + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_AUTO); + } RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_AUTO); + RGXWriteReg32(hPrivate, RGX_CR_CLK_CTRL2, RGX_CR_CLK_CTRL2_ALL_AUTO); if (bMetaFW) { @@ -478,21 +948,24 @@ static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate) return eError; } + PVRSRV_ERROR RGXStart(const void *hPrivate) { - RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; - PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; PVRSRV_ERROR eError = PVRSRV_OK; - IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; IMG_CHAR *pcRGXFW_PROCESSOR; + IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; IMG_BOOL bMetaFW = IMG_FALSE; if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) { pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; - bMetaFW = IMG_FALSE; - bDoFWSlaveBoot = IMG_FALSE; } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + else if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; + } +#endif else { pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; @@ -500,46 +973,35 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); } - /* Disable the default sys_bus_secure protection to perform minimal setup */ - RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); - (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET)) + { + /* Disable the default sys_bus_secure protection to perform minimal setup */ + RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ + } +#if defined(RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK) /* Only bypass HMMU if the module is present */ - if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK)) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, HYPERVISOR_MMU)) { - if (PVRSRV_VZ_MODE_IS(NATIVE)) - { - /* Always set HMMU in bypass mode */ - RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); - (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); - } -#if defined(PVRSRV_VZ_BYPASS_HMMU) - if (PVRSRV_VZ_MODE_IS(HOST)) - { - /* Also set HMMU in bypass mode */ - RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); - (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); - } -#endif + /* Always set HMMU in bypass mode */ + RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); + (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); } - -#if 
defined(SUPPORT_VALIDATION) -#if !defined(RGX_CR_FIRMWARE_PROCESSOR_LS) -#define RGX_CR_FIRMWARE_PROCESSOR_LS (0x01A0U) -#define RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN (0x00000001U) #endif + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_DUAL_LOCKSTEP)) { - if (psDevInfo->ui32ValidationFlags & RGX_VAL_LS_EN) - { - /* Set the dual LS mode */ - RGXWriteReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS, RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN); - (void) RGXReadReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS); - } + /* Enable the dual LS mode */ + RGXCommentLog(hPrivate, "RGXStart: Enable Dual Core Lock Step"); + RGXWriteReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS, (0x3 << RGX_CR_FIRMWARE_PROCESSOR_LS__RDL__ENABLE_SHIFT)); + (void) RGXReadReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS); } -#endif + /*! - * Start series8 FW init sequence + * Start FW init sequence */ RGXResetSequence(hPrivate, pcRGXFW_PROCESSOR); @@ -552,7 +1014,7 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) return eError; } - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, GPU_MULTICORE_SUPPORT)) { /* Set OR reduce for ECC faults to ensure faults are not missed during early boot stages */ RGXWriteReg32(hPrivate, RGX_CR_MULTICORE_EVENT_REDUCE, RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_EN | RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_EN); @@ -562,22 +1024,6 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) RGXWriteReg32(hPrivate, RGX_CR_EVENT_ENABLE, RGX_CR_EVENT_ENABLE_FAULT_FW_EN); } - if (RGX_DEVICE_HAS_BRN(hPrivate, BRN_66927)) - { - IMG_UINT64 ui64ClockCtrl; - - ui64ClockCtrl = RGXReadReg64(hPrivate, RGX_CR_CLK_CTRL0); - CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_MCU_L0); - CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_PM); - CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_FBDC); - RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, ui64ClockCtrl); - - ui64ClockCtrl = RGXReadReg64(hPrivate, RGX_CR_CLK_CTRL1); - CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL1_PIXEL); - CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL1_GEO_VERTEX); - RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, ui64ClockCtrl); - } - if (bMetaFW) { if (bDoFWSlaveBoot) @@ -597,175 +1043,121 @@ PVRSRV_ERROR RGXStart(const void *hPrivate) /* * Initialise Firmware wrapper */ - if (bMetaFW) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + RGXInitRiscvProcWrapper(hPrivate); + } + else if (bMetaFW) { RGXInitMetaProcWrapper(hPrivate); } +#if defined(RGX_FEATURE_MIPS_BIT_MASK) else { - RGXInitRiscvProcWrapper(hPrivate); + RGXInitMipsProcWrapper(hPrivate); } +#endif - if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) +#if defined(RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, MMU_VERSION) >= 4) { - // initialise the MMU range based config registers for Non4K pages. + /* initialise the MMU range based config registers for Non4K pages */ RGXInitMMURangeRegisters(hPrivate); } +#endif + +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE)) +#endif + { + /* We must init the AXI-ACE interface before 1st BIF transaction */ + RGXInitAXIACE(hPrivate); + } - RGXInitAXIACE(hPrivate); /* * Initialise BIF. 
*/ RGXInitBIF(hPrivate); + RGXSetPoweredState(hPrivate, IMG_TRUE); + RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); DeassertMetaReset(hPrivate); - if (bMetaFW) + if (bMetaFW && bDoFWSlaveBoot) { - if (bDoFWSlaveBoot) - { - eError = RGXFabricCoherencyTest(hPrivate); - if (eError != PVRSRV_OK) return eError; + eError = RGXFabricCoherencyTest(hPrivate); + if (eError != PVRSRV_OK) return eError; - RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); - eError = RGXStartFirmware(hPrivate); - if (eError != PVRSRV_OK) return eError; - } - else - { - RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); - } + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); + eError = RGXStartFirmware(hPrivate); + if (eError != PVRSRV_OK) return eError; } else { - /* Bring Debug Module out of reset */ - RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); - - /* Boot the FW */ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); - RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1); - RGXWaitCycles(hPrivate, 32, 3); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + /* Bring Debug Module out of reset */ +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) >= 4) + { + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4, RGX_CR_FWCORE_DMI_DMCONTROL__HOST_SECURITY_GEQ4__DMACTIVE_EN); + } + else +#endif + { + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); + } + + /* Boot the FW */ + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1); + RGXWaitCycles(hPrivate, 32, 3); + } } #if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) - RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); - RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); - (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) == 1) +#endif + { + RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ + } #endif - - /*! 
- * End series8 FW init sequence - */ return eError; } PVRSRV_ERROR RGXStop(const void *hPrivate) { - RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; - PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; PVRSRV_ERROR eError = PVRSRV_OK; IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META); - IMG_UINT32 ui32JonesIdleMask = RGX_CR_JONES_IDLE_MASKFULL^RGX_CR_JONES_IDLE_AXI2IMG_EN; + IMG_UINT32 ui32JonesIdleMask = RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_AXI2IMG_EN|RGX_CR_JONES_IDLE_SI_AXI2IMG_EN); RGXDeviceAckIrq(hPrivate); -#if defined(SUPPORT_VALIDATION) && !defined(TC_MEMORY_CONFIG) -#if !defined(RGX_CR_POWER_EVENT) -#define RGX_CR_POWER_EVENT (0x0038U) -#define RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) -#define RGX_CR_POWER_EVENT_GPU_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF1F)) -#define RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT (9U) -#define RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT (8U) -#define RGX_CR_POWER_EVENT_DOMAIN_CLUSTER_CLUSTER0_SHIFT (32U) -#define RGX_CR_POWER_EVENT_TYPE_SHIFT (0U) -#define RGX_CR_POWER_EVENT_TYPE_POWER_DOWN (0x00000000U) -#define RGX_CR_POWER_EVENT_REQ_EN (0x00000002U) -#endif - - /* Power off any enabled SPUs */ - if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN)) - { - if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 3) - { - IMG_UINT64 ui64PowUnitOffMask; - IMG_UINT64 ui64RegVal; - - ui64PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_CLUSTERS)) -1; - ui64RegVal = (~RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK) | // GPU_MASK specifies all cores - (~RGX_CR_POWER_EVENT_GPU_ID_CLRMSK) | // GPU_ID all set means use the GPU_MASK - (ui64PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_CLUSTER_CLUSTER0_SHIFT) | - RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; - - RGXWriteReg64(hPrivate, - RGX_CR_POWER_EVENT, - ui64RegVal); - - RGXWriteReg64(hPrivate, - RGX_CR_POWER_EVENT, - ui64RegVal | RGX_CR_POWER_EVENT_REQ_EN); - } - else if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 2) - { - IMG_UINT64 ui64PowUnitOffMask; - IMG_UINT64 ui64RegVal; - - ui64PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_CLUSTERS)) -1; - ui64RegVal = (~RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK) | // GPU_MASK specifies all cores - (~RGX_CR_POWER_EVENT_GPU_ID_CLRMSK) | // GPU_ID all set means use the GPU_MASK - (ui64PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT) | - RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; - - RGXWriteReg64(hPrivate, - RGX_CR_POWER_EVENT, - ui64RegVal); - - RGXWriteReg64(hPrivate, - RGX_CR_POWER_EVENT, - ui64RegVal | RGX_CR_POWER_EVENT_REQ_EN); - } - else - { - IMG_UINT32 ui32PowUnitOffMask; - IMG_UINT32 ui32RegVal; - - ui32PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_SPU)) -1; - ui32RegVal = (ui32PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT) | - RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; - - RGXWriteReg32(hPrivate, - RGX_CR_POWER_EVENT, - ui32RegVal); - - RGXWriteReg32(hPrivate, - RGX_CR_POWER_EVENT, - ui32RegVal | RGX_CR_POWER_EVENT_REQ_EN); - } + /* Set FW power state OFF to disable LISR handler */ + RGXSetPoweredState(hPrivate, IMG_FALSE); - /* Poll on complete */ - eError = RGXPollReg32(hPrivate, - RGX_CR_EVENT_STATUS, - RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN, - RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN); - if (eError != PVRSRV_OK) return eError; - - /* Update the SPU_ENABLE mask */ - if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 1) - { - RGXWriteReg32(hPrivate, RGX_CR_SPU_ENABLE, 0); 
- } - RGXWriteReg32(hPrivate, 0xF020, 0); - } -#endif /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */ - if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) || - RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) < 2) + if (!RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, RAY_TRACING_ARCH) || + RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, RAY_TRACING_ARCH) < 2) { ui32JonesIdleMask ^= (RGX_CR_JONES_IDLE_ASC_EN|RGX_CR_JONES_IDLE_RCE_EN); } +#if defined(FIX_HW_BRN_74812_BIT_MASK) + if (RGX_DEVICE_HAS_BRN(hPrivate, 74812)) + { + /* BRN74812 means the IPP idle signal may not be valid and is therefore ignored... */ + ui32JonesIdleMask &= RGX_CR_JONES_IDLE_IPP_CLRMSK; + } +#endif + eError = RGXPollReg32(hPrivate, RGX_CR_JONES_IDLE, ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN), @@ -791,15 +1183,18 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC, RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); - RGXWriteReg32(hPrivate, - RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, - RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK - & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); - RGXWriteReg32(hPrivate, - RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, - RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK - & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); + if (bMetaFW) + { + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); + } #if defined(PDUMP) if (bMetaFW) @@ -818,44 +1213,42 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) ~META_CR_TXENABLE_ENABLE_BIT); if (eError != PVRSRV_OK) return eError; + /* Clear down any irq raised by META (done after disabling the FW + * threads to avoid a race condition). + * This is only really needed for PDumps but we do it anyway driver-live. + */ +#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) /* Wait for the Slave Port to finish all the transactions */ - if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_REGISTER_UNPACKED_ACCESSES)) { - /* Clear down any irq raised by META (done after disabling the FW - * threads to avoid a race condition). - * This is only really needed for PDumps but we do it anyway driver-live. 
- */ - if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) > 1) { - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, 0x0); - (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA); /* Fence write */ eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN); } else { - RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED, 0x0); - (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA); /* Fence write */ eError = RGXPollReg32(hPrivate, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN, - RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN - | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN); + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN); } } else +#endif { - /* Clear down any irq raised by META (done after disabling the FW - * threads to avoid a race condition). - * This is only really needed for PDumps but we do it anyway driver-live. 
- */ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ @@ -864,6 +1257,7 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); } + if (eError != PVRSRV_OK) return eError; } #endif @@ -901,10 +1295,8 @@ PVRSRV_ERROR RGXStop(const void *hPrivate) RGX_CR_JONES_IDLE, ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN), ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN)); - if (eError != PVRSRV_OK) return eError; - if (bMetaFW) { IMG_UINT32 ui32RegValue; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.c b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.c index 4127ca34784f..9ebf43429e19 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.c @@ -51,11 +51,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_debug.h" #include "rgxutils.h" #include "rgxfwutils.h" +#include "rgxfwcmnctx.h" #include "rgxta3d.h" #include "rgxmem.h" #include "allocmem.h" #include "devicemem.h" #include "devicemem_pdump.h" +#include "ri_server.h" #include "osfunc.h" #include "pvrsrv.h" #include "rgx_memallocflags.h" @@ -63,7 +65,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxhwperf.h" #include "ospvr_gputrace.h" #include "rgxsyncutils.h" -#include "htbuffer.h" +#include "htbserver.h" #include "rgxdefs_km.h" #include "rgx_fwif_km.h" @@ -73,7 +75,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "sync.h" #include "process_stats.h" +#if !defined(PM_INTERACTIVE_MODE) #include "rgxpmdefs.h" +#endif #include "rgxtimerquery.h" @@ -204,23 +208,17 @@ PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, psTAData->psServerCommonContext, RGXFWIF_DM_GEOM, PDUMP_FLAGS_CONTINUOUS); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); /* ... it has so we can free its resources */ FWCommonContextFree(psTAData->psServerCommonContext); DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); psTAData->psServerCommonContext = NULL; - return PVRSRV_OK; + + return eError; } static @@ -234,23 +232,17 @@ PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, ps3DData->psServerCommonContext, RGXFWIF_DM_3D, PDUMP_FLAGS_CONTINUOUS); - if (eError == PVRSRV_ERROR_RETRY) - { - return eError; - } - else if (eError != PVRSRV_OK) - { - PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", - __func__, - PVRSRVGetErrorString(eError))); - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDeviceNode, + eError, + RGXFWRequestCommonContextCleanUp); /* ... 
it has so we can free its resources */ FWCommonContextFree(ps3DData->psServerCommonContext); DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); ps3DData->psServerCommonContext = NULL; - return PVRSRV_OK; + + return eError; } static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) @@ -384,8 +376,9 @@ static void _CheckFreelist(RGX_FREELIST *psFreeList, if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum) { - PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016llx calculated 0x%016llx", - __func__, psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum)); + PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx, + __func__, psFreeList, + ui64ExpectedCheckSum, *pui64CalculatedCheckSum)); bFreelistBad = IMG_TRUE; } @@ -498,6 +491,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, { uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; } + eError = PhysmemNewRamBackedPMR(psFreeList->psConnection, psFreeList->psDevInfo->psDeviceNode, uiSize, @@ -505,7 +499,10 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, 1, &ui32MappingTable, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, - PVRSRV_MEMALLOCFLAG_GPU_READABLE, + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_OS_LINUX_DENY_MOVE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_PRIVATE) | + PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP, sizeof(szAllocName), szAllocName, psFreeList->ownerPid, @@ -554,13 +551,14 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, } /* Attach RI information */ - eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR, + eError = RIWriteMEMDESCEntryKM(psFreeList->psConnection, + psFreeList->psDevInfo->psDeviceNode, + psPMRNode->psPMR, OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), szAllocName, 0, uiSize, - IMG_FALSE, - IMG_FALSE, + 0, &psPMRNode->hRIHandle); PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM"); @@ -596,6 +594,12 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, PVRSRV_ERROR res; IMG_HANDLE hMapHandle; + /* Check for overflow. Validate size and offset. 
*/ + PVR_GOTO_IF_INVALID_PARAM(psFreeList->uiFreeListPMROffset + ui32MapSize > psFreeList->uiFreeListPMROffset, + eError, ErrorPopulateFreelist); + PVR_GOTO_IF_INVALID_PARAM(psFreeList->uiFreeListPMROffset + ui32MapSize <= PMR_LogicalSize(psFreeList->psFreeListPMR), + eError, ErrorPopulateFreelist); + /* Map both the FL and the shadow FL */ res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize, (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); @@ -605,6 +609,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, "%s: Failed to map freelist (ID=%d)", __func__, psFreeList->ui32FreelistID)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto ErrorPopulateFreelist; } @@ -640,6 +645,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, "%s: Failed to release freelist mapping (ID=%d)", __func__, psFreeList->ui32FreelistID)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto ErrorPopulateFreelist; } } @@ -693,7 +699,7 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, /* Error handling */ ErrorPopulateFreelist: - PMRUnrefPMR(psPMRNode->psPMR); + (void) PMRUnrefPMR(psPMRNode->psPMR); ErrorBlockAlloc: OSFreeMem(psPMRNode); @@ -759,7 +765,7 @@ static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); } -#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ /* Free PMR (We should be the only one that holds a ref on the PMR) */ eError = PMRUnrefPMR(psPMRNode->psPMR); @@ -855,7 +861,6 @@ void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID)); PVR_ASSERT(IMG_FALSE); - return; } @@ -863,7 +868,6 @@ void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; psFreeList->ui32ReadyFLPages = 0; - /* Try to grow the freelist */ eError = RGXGrowFreeList(psFreeList, psFreeList->ui32GrowFLPages, @@ -905,7 +909,6 @@ void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; - PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: Grow pages=%u, new pages=%u, ready pages=%u, counter=%d", psFreeList, ui32GrowValue, @@ -913,7 +916,7 @@ void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages, psFreeList->ui32NumGrowReqByFW)); - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, RGXFWIF_DM_3D, @@ -924,8 +927,8 @@ void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); - /* Kernel CCB should never fill up, as the FW is processing them right away */ + } END_LOOP_UNTIL_TIMEOUT_US(); + /* Kernel CCB should never fill up, as the FW is processing them right away */ PVR_ASSERT(eError == PVRSRV_OK); } @@ -954,14 +957,14 @@ static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) PMRUnwritePMPageList(psPMRNode->psPageList); psPMRNode->psPageList = NULL; eError = PMRWritePMPageList( - /* Target PMR, offset, and length */ - psFreeList->psFreeListPMR, - (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), - (psPMRNode->bFirstPageMissing ? 
uiLength - sizeof(IMG_UINT32) : uiLength), - /* Referenced PMR, and "page" granularity */ - psPMRNode->psPMR, - RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, - &psPMRNode->psPageList); + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); if (eError != PVRSRV_OK) { @@ -996,10 +999,8 @@ static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) { IMG_UINT32 ui32OriginalFLPages; DLLIST_NODE *psNode, *psNext; + RGXFWIF_FREELIST *psFWFreeList; PVRSRV_ERROR eError; -#if !defined(PM_INTERACTIVE_MODE) - IMG_DEV_VIRTADDR sFreeListBaseDevVAddr; -#endif //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID)); @@ -1027,30 +1028,36 @@ static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; } + /* Update firmware freelist structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + if (eError != PVRSRV_OK) { - RGXFWIF_FREELIST *psFWFreeList; + return eError; + } - /* Update firmware freelist structure */ - eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); - if (eError != PVRSRV_OK) - { - return eError; - } + psFWFreeList->ui32MaxPages = psFreeList->ui32MaxFLPages; + psFWFreeList->ui32GrowPages = psFreeList->ui32GrowFLPages; + psFWFreeList->ui32CurrentPages = psFreeList->ui32CurrentFLPages; + psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; + psFWFreeList->bGrowPending = IMG_FALSE; + psFWFreeList->ui32ReadyPages = psFreeList->ui32ReadyFLPages; #if defined(PM_INTERACTIVE_MODE) - psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; - psFWFreeList->ui32AllocatedPageCount = 0; - psFWFreeList->ui32AllocatedMMUPageCount = 0; + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui64CurrentDevVAddr = (psFWFreeList->psFreeListDevVAddr.uiAddr + + ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + psFWFreeList->ui32AllocatedPageCount = 0; + psFWFreeList->ui32AllocatedMMUPageCount = 0; #else - sFreeListBaseDevVAddr = psFWFreeList->sFreeListBaseDevVAddr; - psFWFreeList->bUpdatePending = IMG_FALSE; - psFWFreeList->ui32UpdateNewPages = 0; - psFWFreeList->ui32UpdateNewReadyPages = 0; - psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; + psFWFreeList->bUpdatePending = IMG_FALSE; + psFWFreeList->ui32UpdateNewPages = 0; + psFWFreeList->ui32UpdateNewReadyPages = 0; + psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; #endif + RGXFwSharedMemCacheOpPtr(psFWFreeList, FLUSH); - DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); - } + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); #if !defined(PM_INTERACTIVE_MODE) /* Reset freelist state buffer */ @@ -1068,7 +1075,7 @@ static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) PVR_ASSERT(uiNbBytes == sizeof(sFLState)); - sFLBaseAddr.uiAddr = (sFreeListBaseDevVAddr.uiAddr + + sFLBaseAddr.uiAddr = (psFreeList->sFreeListBaseDevVAddr.uiAddr + ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) & 
~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); /* Note: Freelist base address is packed shifted down. */ @@ -1113,8 +1120,8 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; RGXFWIF_HWRTDATA *psHWRTData; #endif - IMG_UINT32 ui32FinalFreelistsCount = 0; - IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ + IMG_UINT32 ui32PIDCount = 0; + IMG_UINT32 aui32PIDList[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; PVR_ASSERT(psDevInfo != NULL); PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); @@ -1136,69 +1143,53 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, { sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; - aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; } - ui32FinalFreelistsCount = ui32FreelistsCount; - /* - * The list of freelists we have been given for reconstruction will - * consist of local and global freelists (maybe MMU as well). Any - * local freelists should have their global list specified as well. - * There may be cases where the global freelist is not given (in - * cases of partial setups before a poll failure for example). To - * handle that we must first ensure every local freelist has a global - * freelist specified, otherwise we add that to the 'final' list. - * This final list of freelists is created in a first pass. - * - * Even with the global freelists listed, there may be other local - * freelists not listed, which are going to have their global freelist - * reconstructed. Therefore we have to find those freelists as well - * meaning we will have to iterate the entire list of freelists to - * find which must be reconstructed. This is the second pass. + * All freelists belonging to the same PID will be reconstructed. + * This simplifies tracking for AGP, since there is no longer a + * single global freelist per local freelist. The list of unique + * PIDs is calculated from the list of freelists in this first pass. */ OSLockAcquire(psDevInfo->hLockFreeList); dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) { RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); - IMG_BOOL bInList = IMG_FALSE; - IMG_BOOL bGlobalInList = IMG_FALSE; - /* Check if this local freelist is in the list and ensure its global is too. 
*/ - if (psFreeList->ui32FreelistGlobalID != 0) + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) { - for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + IMG_UINT32 ui32PIDLoop; + + if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) { - if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) + for (ui32PIDLoop = 0; ui32PIDLoop < ui32PIDCount; ui32PIDLoop++) { - bInList = IMG_TRUE; + if (aui32PIDList[ui32PIDLoop] == psFreeList->ownerPid) + { + break; + } } - if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + + if (ui32PIDLoop == ui32PIDCount) { - bGlobalInList = IMG_TRUE; + aui32PIDList[ui32PIDCount++] = psFreeList->ownerPid; } } - - if (bInList && !bGlobalInList) - { - aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; - ui32FinalFreelistsCount++; - } } } dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) { RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); IMG_BOOL bReconstruct = IMG_FALSE; + IMG_UINT32 ui32PIDLoop; /* - * Check if this freelist needs to be reconstructed (was it requested - * or is its global freelist going to be reconstructed)... + * Check if this freelist needs to be reconstructed (is it in the list + * of PIDs which will have every single one of their freelists reconstructed) */ - for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + for (ui32PIDLoop = 0; ui32PIDLoop < ui32PIDCount; ui32PIDLoop++) { - if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || - aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + if (aui32PIDList[ui32PIDLoop] == psFreeList->ownerPid) { bReconstruct = IMG_TRUE; break; @@ -1227,6 +1218,8 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; + RGXFwSharedMemCacheOpValue(psHWRTData->eState, FLUSH); + RGXFwSharedMemCacheOpValue(psHWRTData->ui32HWRTDataFlags, FLUSH); DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); } @@ -1262,7 +1255,7 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, } /* send feedback */ - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, RGXFWIF_DM_GEOM, @@ -1273,23 +1266,23 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); - /* Kernel CCB should never fill up, as the FW is processing them right away */ + /* Kernel CCB should never fill up, as the FW is processing them right away */ PVR_ASSERT(eError == PVRSRV_OK); } -/* Create HWRTDataSet */ -static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEV_VIRTADDR psVHeapTableDevVAddr, - IMG_DEV_VIRTADDR sPMDataDevVAddr, /* per-HWRTData */ - IMG_DEV_VIRTADDR sPMSecureDataDevVAddr, /* per-HWRTData */ - RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], - IMG_DEV_VIRTADDR sTailPtrsDevVAddr, - IMG_UINT16 ui16MaxRTs, - RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, - RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ +/* Create a single HWRTData instance */ +static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr, /* per-HWRTData */ + 
IMG_DEV_VIRTADDR sPMSecureDataDevVAddr, /* per-HWRTData */ + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT16 ui16MaxRTs, + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo; @@ -1306,6 +1299,14 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, /* local pointer for CPU-mapped [FW]HWRTData */ RGXFWIF_HWRTDATA *psHWRTData = NULL; + /* + * Fill in a local copy of RGXFWIF_HWRTDATA first to reduces reads and writes to + * device memory. Using a byte array buffer avoids the limitation that some OSs + * cannot align local variables to 64 bytes, usually needed by RGXFWIF_HWRTDATA + */ + IMG_BYTE aui8HWRTDataLocal[sizeof(RGXFWIF_HWRTDATA)] = {0}; + RGXFWIF_HWRTDATA *psHWRTDataLocal = (RGXFWIF_HWRTDATA *)aui8HWRTDataLocal; + PVR_UNREFERENCED_PARAMETER(psConnection); /* Prepare the HW RT DataSet struct */ @@ -1318,9 +1319,7 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, *ppsKMHWRTDataSet = psKMHWRTDataSet; psKMHWRTDataSet->psDeviceNode = psDeviceNode; - psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie; - psDevInfo = psDeviceNode->pvDevice; /* @@ -1329,11 +1328,11 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, * Therefore the GPU cache doesn't need coherency and write-combine will * suffice on the CPU side. (WC buffer will be flushed at the first TA-kick) */ - eError = DevmemFwAllocate( psDevInfo, - sizeof(RGXFWIF_HWRTDATA), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwHwRTData", - &psHWRTDataFwMemDesc ); + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_HWRTDATA), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwHwRTData", + &psHWRTDataFwMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -1354,39 +1353,21 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); #if defined(PM_INTERACTIVE_MODE) - psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; + psHWRTDataLocal->psVHeapTableDevVAddr = psVHeapTableDevVAddr; #endif - psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; - - psHWRTData->sPMSecureRenderStateDevVAddr = sPMSecureDataDevVAddr; + psHWRTDataLocal->sPMSecureRenderStateDevVAddr = sPMSecureDataDevVAddr; #if defined(PM_INTERACTIVE_MODE) - psHWRTData->sPMMListDevVAddr = sPMDataDevVAddr; + psHWRTDataLocal->psPMMListDevVAddr = sPMDataDevVAddr; #else - psHWRTData->sPMRenderStateDevVAddr = sPMDataDevVAddr; + psHWRTDataLocal->sPMRenderStateDevVAddr = sPMDataDevVAddr; #endif - psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; - OSLockAcquire(psDevInfo->hLockFreeList); - for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) - { - psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; - psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; - psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; - /* invalid initial snapshot value, the snapshot is always taken during first kick - * and hence the value get replaced during the first kick anyway. So it's safe to set it 0. 
- */ - psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; - psHWRTData->bRenderStateNeedsReset = IMG_FALSE; - } -#if !defined(SUPPORT_SHADOW_FREELISTS) - dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); -#endif - OSLockRelease(psDevInfo->hLockFreeList); + psHWRTDataLocal->sTailPtrsDevVAddr = sTailPtrsDevVAddr; { - RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; + RGXFWIF_RTA_CTL *psRTACtl = &psHWRTDataLocal->sRTACtl; psRTACtl->ui32RenderTargetIndex = 0; psRTACtl->ui32ActiveRenderTargets = 0; @@ -1396,7 +1377,6 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, if (ui16MaxRTs > 1) { - /* Allocate memory for the checks */ PDUMPCOMMENT(psDeviceNode, "Allocate memory for shadow render target cache"); eError = DevmemFwAllocate( psDevInfo, ui16MaxRTs * sizeof(IMG_UINT32), @@ -1426,7 +1406,6 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, RFW_FWADDR_FLAG_NONE ); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); - /* Allocate memory for the checks */ PDUMPCOMMENT(psDeviceNode, "Allocate memory for tracking renders accumulation"); eError = DevmemFwAllocate(psDevInfo, ui16MaxRTs * sizeof(IMG_UINT32), @@ -1458,11 +1437,31 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, } } + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; + psHWRTDataLocal->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; + /* Invalid initial snapshot value. The snapshot is always taken during the first + * kick and hence this value gets replaced, so it's safe to set it to zero. 
+ */ + psHWRTDataLocal->aui32FreeListHWRSnapshot[ui32Loop] = 0; + psHWRTDataLocal->bRenderStateNeedsReset = IMG_FALSE; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + OSCachedMemCopy(psHWRTData, psHWRTDataLocal, sizeof(*psHWRTDataLocal)); + #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); #endif + RGXFwSharedMemCacheOpPtr(psHWRTData, FLUSH); DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); return PVRSRV_OK; @@ -1473,13 +1472,6 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, FWAllocateRTArryFwAddrError: DevmemFwUnmapAndFree(psDevInfo, psRTArrayFwMemDesc); FWAllocateRTArryError: - OSLockAcquire(psDevInfo->hLockFreeList); - for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) - { - PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); - psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; - } - OSLockRelease(psDevInfo->hLockFreeList); DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); FWRTDataCpuMapError: RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); @@ -1493,13 +1485,15 @@ static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, return eError; } -/* Destroy HWRTDataSet */ -static PVRSRV_ERROR RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) +static void RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) { PVRSRV_RGXDEV_INFO *psDevInfo; IMG_UINT32 ui32Loop; - PVR_ASSERT(psKMHWRTDataSet); + if (psKMHWRTDataSet == NULL) + { + return; + } psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice; @@ -1533,113 +1527,177 @@ static PVRSRV_ERROR RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); OSFreeMem(psKMHWRTDataSet); - - return PVRSRV_OK; } /* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ -PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEV_VIRTADDR psVHeapTableDevVAddr, +PVRSRV_ERROR RGXCreateHWRTDataSet( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, IMG_DEV_VIRTADDR asPMDataDevVAddr[RGXMKIF_NUM_RTDATAS], - IMG_DEV_VIRTADDR asPMSecureDataDevVAddr[RGXMKIF_NUM_RTDATAS], - RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], - IMG_UINT32 ui32ScreenPixelMax, - IMG_UINT64 ui64PPPMultiSampleCtl, - IMG_UINT32 ui32TEStride, - IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], - IMG_UINT32 ui32TPCSize, - IMG_UINT32 ui32TEScreen, - IMG_UINT32 ui32TEAA, - IMG_UINT32 ui32TEMTILE1, - IMG_UINT32 ui32TEMTILE2, - IMG_UINT32 ui32RgnStride, - IMG_UINT32 ui32ISPMergeLowerX, - IMG_UINT32 ui32ISPMergeLowerY, - IMG_UINT32 ui32ISPMergeUpperX, - IMG_UINT32 ui32ISPMergeUpperY, - IMG_UINT32 ui32ISPMergeScaleX, - IMG_UINT32 ui32ISPMergeScaleY, - IMG_UINT16 ui16MaxRTs, - RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) + IMG_DEV_VIRTADDR asPMSecureDataDevVAddr[RGXMKIF_NUM_RTDATAS], + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) +{ + 
PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psVHeapTableDevVAddr); + PVR_UNREFERENCED_PARAMETER(asPMDataDevVAddr); + PVR_UNREFERENCED_PARAMETER(asPMSecureDataDevVAddr); + PVR_UNREFERENCED_PARAMETER(apsFreeLists); + PVR_UNREFERENCED_PARAMETER(asTailPtrsDevVAddr); + PVR_UNREFERENCED_PARAMETER(ui16MaxRTs); + PVR_UNREFERENCED_PARAMETER(pasKMHWRTDataSet); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +} + +static PVRSRV_ERROR +_WritePMStateBuffer(PMR* psRenderStatesPMR, + IMG_DEVMEM_OFFSET_T ui64Offset, + IMG_DEV_VIRTADDR sMListDevVAddr, + IMG_DEV_VIRTADDR sVHeapTableDevVAddr) { PVRSRV_ERROR eError; - IMG_UINT32 ui32RTDataID; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; - RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; - DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; - RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + size_t uiNbBytes; + IMG_UINT32 asRenderStateBuffer[RGX_PM_MAX_RSTATE_SIZE_DWORDS] = {0}; - /* Prepare KM cleanup object for HWRTDataCommon FW object */ - psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); - if (psHWRTDataCommonCookie == NULL) + PVR_ASSERT(PVRSRV_IS_FEATURE_SUPPORTED(PMR_DeviceNode(psRenderStatesPMR), PM_BYTE_ALIGNED_BASE_ADDRESSES)); + PVR_ASSERT((sMListDevVAddr.uiAddr & (RGX_PM_MLIST_BASE_ADDR_MAX_ALIGNSIZE - 1)) == 0); + PVR_ASSERT((sVHeapTableDevVAddr.uiAddr & (RGX_PM_VHEAP_BASE_ADDR_MAX_ALIGNSIZE - 1)) == 0); + + /* Initialise the Mlist base and Vheap base */ + _RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(asRenderStateBuffer, sMListDevVAddr.uiAddr); + _RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(asRenderStateBuffer, sVHeapTableDevVAddr.uiAddr); + + eError = PMR_WriteBytes(psRenderStatesPMR, + ui64Offset, + (IMG_UINT8*)&asRenderStateBuffer, + sizeof(asRenderStateBuffer), + &uiNbBytes); + if (eError != PVRSRV_OK) { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_HWRTDataCommonCookieAlloc; + PVR_DPF((PVR_DBG_ERROR, + "%s: Error from PMR_WriteBytes: %s", + __func__, PVRSRVGetErrorString(eError))); + goto ErrorExit; } - /* - * This FW common context is only mapped into kernel for initialisation. - * Otherwise this allocation is only used by the FW. 
- * Therefore the GPU cache doesn't need coherency, and write-combine will - * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) - */ - eError = DevmemFwAllocate(psDevInfo, - sizeof(RGXFWIF_HWRTDATA_COMMON), - RGX_FWCOMCTX_ALLOCFLAGS, - "FwHWRTDataCommon", - &psHWRTDataCommonFwMemDesc); + PVR_ASSERT(uiNbBytes == sizeof(asRenderStateBuffer)); - if (eError != PVRSRV_OK) + PDUMPCOMMENT(PMR_DeviceNode(psRenderStatesPMR), "PMState buffer"); + PMRPDumpLoadMem(psRenderStatesPMR, ui64Offset, sizeof(asRenderStateBuffer), PDUMP_FLAGS_CONTINUOUS, false); + + return PVRSRV_OK; +ErrorExit: + return eError; +} + +PVRSRV_ERROR RGXCreateHWRTDataSet2( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR sVHeapTableDevVAddr, + DEVMEMINT_RESERVATION *psPMMListsReservation, + DEVMEMINT_RESERVATION *psPMStateReservation, + DEVMEMINT_RESERVATION *psPMSecureStateReservation, + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32RTDataID; + IMG_UINT32 ui32GlobalFLMaxPages, ui32LocalFLMaxPages; + IMG_DEVMEM_SIZE_T ui64MListSize, ui64AlignedRenderStateSize; + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; + + PMR *psPMStatePMR=NULL, *psPMSecureStatePMR=NULL, *psMListsPMR=NULL; + IMG_DEV_VIRTADDR sPMStateBaseDevVAddr, sPMSecureStateDevVAddr, sMListsDevVAddr; + + PVR_LOG_RETURN_IF_INVALID_PARAM(0 < ui16MaxRTs && ui16MaxRTs <= RGX_MAX_RTA_INDICES, "Number of TA Render targets outside the range (0, RGX_MAX_RTA_INDICES) is unsupported"); + + ui64AlignedRenderStateSize = PVR_ALIGN(sizeof(RGX_PM_RENDERSTATE_BUFFER), + MAX(RGX_PM_RENDERSTATE_BASE_ADDR_ALIGNSIZE, + GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, SLC_CACHE_LINE_SIZE_BITS)))); + + /* Check if freelists have uniform sizes */ + eError = ValidateFreeListSizes(apsFreeLists, + &ui32LocalFLMaxPages, + &ui32GlobalFLMaxPages); + PVR_LOG_RETURN_IF_ERROR(eError, "Invalid freelist sizes"); + + ui64MListSize = RGXCalcMListSize(psDeviceNode, + ui32LocalFLMaxPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + ui32GlobalFLMaxPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE); + + eError = AcquireValidateRefCriticalBuffer(psDeviceNode, + psPMStateReservation, + ui64AlignedRenderStateSize * RGXMKIF_NUM_RTDATAS, + &psPMStatePMR, + &sPMStateBaseDevVAddr); + PVR_LOG_GOTO_IF_ERROR(eError, + "Validation failed for MListAndState reservations", err_validation_rs); + + eError = AcquireValidateRefCriticalBuffer(psDeviceNode, + psPMSecureStateReservation, + ui64AlignedRenderStateSize * RGXMKIF_NUM_RTDATAS, + &psPMSecureStatePMR, + &sPMSecureStateDevVAddr); + PVR_LOG_GOTO_IF_ERROR(eError, + "Validation failed for secure data reservations", err_validation_srs); + + eError = AcquireValidateRefCriticalBuffer(psDeviceNode, + psPMMListsReservation, + ui64MListSize * RGXMKIF_NUM_RTDATAS, + &psMListsPMR, + &sMListsDevVAddr); + PVR_LOG_GOTO_IF_ERROR(eError, + "Validation failed for secure data reservations", err_validation_mlist); + + /* Prepare KM cleanup object for common data */ + psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); + if (psHWRTDataCommonCookie == NULL) { - PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); - goto err_HWRTDataCommonAlloc; - } - eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE); - 
PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr); - - eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon); - PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA); - - psHWRTDataCommon->ui32ScreenPixelMax = ui32ScreenPixelMax; - psHWRTDataCommon->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl; - psHWRTDataCommon->ui32TEStride = ui32TEStride; - psHWRTDataCommon->ui32TPCSize = ui32TPCSize; - psHWRTDataCommon->ui32TEScreen = ui32TEScreen; - psHWRTDataCommon->ui32TEAA = ui32TEAA; - psHWRTDataCommon->ui32TEMTILE1 = ui32TEMTILE1; - psHWRTDataCommon->ui32TEMTILE2 = ui32TEMTILE2; - psHWRTDataCommon->ui32RgnStride = ui32RgnStride; /* Region stride in Bytes */ - psHWRTDataCommon->ui32ISPMergeLowerX = ui32ISPMergeLowerX; - psHWRTDataCommon->ui32ISPMergeLowerY = ui32ISPMergeLowerY; - psHWRTDataCommon->ui32ISPMergeUpperX = ui32ISPMergeUpperX; - psHWRTDataCommon->ui32ISPMergeUpperY = ui32ISPMergeUpperY; - psHWRTDataCommon->ui32ISPMergeScaleX = ui32ISPMergeScaleX; - psHWRTDataCommon->ui32ISPMergeScaleY = ui32ISPMergeScaleY; -#if defined(PDUMP) - PDUMPCOMMENT(psDeviceNode, "Dump HWRTDataCommon"); - DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS); -#endif - DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_common_cookie_alloc; + } - psHWRTDataCommonCookie->ui32RefCount = 0; - psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc; - psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr; + psHWRTDataCommonCookie->psPMStateReservation = psPMStateReservation; + psHWRTDataCommonCookie->psPMSecureStateReservation = psPMSecureStateReservation; + psHWRTDataCommonCookie->psPMMListsReservation = psPMMListsReservation; /* Here we are creating a set of HWRTData(s) the number of elements in the set equals RGXMKIF_NUM_RTDATAS. 
*/ - for (ui32RTDataID = 0; ui32RTDataID < RGXMKIF_NUM_RTDATAS; ui32RTDataID++) { + IMG_DEV_VIRTADDR sRenderStateDevVirtAddr; + IMG_DEV_VIRTADDR sSecureRenderStateDevVirtAddr; + IMG_DEV_VIRTADDR sMListDevVirtAddr; + + sRenderStateDevVirtAddr.uiAddr = + sPMStateBaseDevVAddr.uiAddr + ui64AlignedRenderStateSize * ui32RTDataID; + sSecureRenderStateDevVirtAddr.uiAddr = + sPMSecureStateDevVAddr.uiAddr + ui64AlignedRenderStateSize * ui32RTDataID; + sMListDevVirtAddr.uiAddr = + sMListsDevVAddr.uiAddr + ui64MListSize * ui32RTDataID; + + eError = _WritePMStateBuffer(psPMStatePMR, + ui64AlignedRenderStateSize * ui32RTDataID, + sMListDevVirtAddr, + sVHeapTableDevVAddr); + PVR_LOG_GOTO_IF_ERROR(eError, + "Failed to write PM state buffer", err_HWRTDataAlloc); + eError = RGXCreateHWRTData_aux( psConnection, psDeviceNode, - psVHeapTableDevVAddr, - asPMDataDevVAddr[ui32RTDataID], - asPMSecureDataDevVAddr[ui32RTDataID], + sVHeapTableDevVAddr, + sRenderStateDevVirtAddr, + sSecureRenderStateDevVirtAddr, &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS], asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], ui16MaxRTs, @@ -1661,11 +1719,10 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, return PVRSRV_OK; err_HWRTDataAlloc: - PVR_DPF((PVR_DBG_WARNING, "%s: err_HWRTDataAlloc %u", - __func__, psHWRTDataCommonCookie->ui32RefCount)); + PVR_DPF((PVR_DBG_WARNING, "%s: err_HWRTDataAlloc", __func__)); if (pasKMHWRTDataSet) { - for (ui32RTDataID = psHWRTDataCommonCookie->ui32RefCount; ui32RTDataID > 0; ui32RTDataID--) + for (; ui32RTDataID > 0; ui32RTDataID--) { if (pasKMHWRTDataSet[ui32RTDataID-1] != NULL) { @@ -1674,14 +1731,14 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, } } } -err_HWRTDataCommonVA: - RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc); -err_HWRTDataCommonFwAddr: - DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc); -err_HWRTDataCommonAlloc: OSFreeMem(psHWRTDataCommonCookie); -err_HWRTDataCommonCookieAlloc: - +err_common_cookie_alloc: + UnrefAndReleaseCriticalBuffer(psPMMListsReservation); +err_validation_mlist: + UnrefAndReleaseCriticalBuffer(psPMSecureStateReservation); +err_validation_srs: + UnrefAndReleaseCriticalBuffer(psPMStateReservation); +err_validation_rs: return eError; } @@ -1691,16 +1748,13 @@ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, */ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) { - PVRSRV_RGXDEV_INFO *psDevInfo; PVRSRV_DEVICE_NODE *psDevNode; PVRSRV_ERROR eError; PRGXFWIF_HWRTDATA psHWRTData; RGX_HWRTDATA_COMMON_COOKIE *psCommonCookie; - PVR_ASSERT(psKMHWRTDataSet); psDevNode = psKMHWRTDataSet->psDeviceNode; - psDevInfo = psDevNode->pvDevice; eError = RGXSetFirmwareAddress(&psHWRTData, psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, @@ -1709,10 +1763,10 @@ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) /* Cleanup HWRTData */ eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData); - if (eError != PVRSRV_OK) - { - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psDevNode, + eError, + RGXFWRequestHWRTDataCleanUp); psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie; @@ -1734,46 +1788,100 @@ PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) psCommonCookie->ui32RefCount--; /* When ref count for HWRTDataCommonCookie hits ZERO - * we have to destroy the HWRTDataCommon [FW object] and the cookie + * we have to destroy the cookie * [KM object] afterwards. 
*/ if (psCommonCookie->ui32RefCount == 0) { - RGXUnsetFirmwareAddress(psCommonCookie->psHWRTDataCommonFwMemDesc); + UnrefAndReleaseCriticalBuffer(psCommonCookie->psPMStateReservation); + UnrefAndReleaseCriticalBuffer(psCommonCookie->psPMSecureStateReservation); + UnrefAndReleaseCriticalBuffer(psCommonCookie->psPMMListsReservation); - /* We don't need to flush the SLC before freeing. - * FW RequestCleanUp has already done that for HWRTData, so we're fine - * now. */ - - DevmemFwUnmapAndFree(psDevNode->pvDevice, - psCommonCookie->psHWRTDataCommonFwMemDesc); OSFreeMem(psCommonCookie); } - return PVRSRV_OK; } -PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_HANDLE hMemCtxPrivData, - IMG_UINT32 ui32MaxFLPages, - IMG_UINT32 ui32InitFLPages, - IMG_UINT32 ui32GrowFLPages, - IMG_UINT32 ui32GrowParamThreshold, - RGX_FREELIST *psGlobalFreeList, - IMG_BOOL bCheckFreelist, - IMG_DEV_VIRTADDR sFreeListBaseDevVAddr, - IMG_DEV_VIRTADDR sFreeListStateDevVAddr, - PMR *psFreeListPMR, - IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, - PMR *psFreeListStatePMR, - IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset, - RGX_FREELIST **ppsFreeList) +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + DEVMEMINT_RESERVATION *psFreeListAndStateReservation, + RGX_FREELIST **ppsFreeList) { - PVRSRV_ERROR eError; - RGXFWIF_FREELIST *psFWFreeList; - DEVMEM_MEMDESC *psFWFreelistMemDesc; - RGX_FREELIST *psFreeList; - PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_FREELIST *psFWFreeList; + RGXFWIF_FREELIST sFWFreeList = {{0}}; + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGX_FREELIST *psFreeList; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr; + IMG_DEV_VIRTADDR sFreeListStateDevVAddr; + PMR* psFreeListAndStatePMR = NULL; + IMG_UINT32 ui32ReadyPages; + IMG_UINT32 uiFreeListOffset = PVR_ALIGN( + sizeof(RGX_PM_FREELISTSTATE_BUFFER), + GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, SLC_CACHE_LINE_SIZE_BITS))); + + /* Obtain reference to reservation object */ + if (!DevmemIntReservationAcquire(psFreeListAndStateReservation)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire reservation for freelist buffer", + __func__)); + eError = PVRSRV_ERROR_REFCOUNT_OVERFLOW; + goto ErrorReservationAcquire; + } + + eError = DevmemIntGetReservationData(psFreeListAndStateReservation, &psFreeListAndStatePMR, &sFreeListStateDevVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error from DevmemIntGetReservationData: %s", + __func__, PVRSRVGetErrorString(eError))); + + goto ErrorAllocHost; + } + + /* Check if client properly allocated PMMETA_PROTECT */ + if ((PMR_Flags(psFreeListAndStatePMR) & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)) == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Freelist PMR must have PMMETA_PROTECT set", + __func__)); + eError = PVRSRV_ERROR_INVALID_FLAGS; + goto ErrorAllocHost; + } + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + if (PVRSRV_CHECK_OS_LINUX_MOVABLE(PMR_Flags(psFreeListAndStatePMR))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Freelist PMR must not have OS_LINUX_MOVABLE set", + __func__)); + eError = PVRSRV_ERROR_INVALID_FLAGS; + goto ErrorAllocHost; + } +#endif + + /* Ref 
the PMR to prevent resource being destroyed before use */ + eError = PMRRefPMR(psFreeListAndStatePMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", ErrorAllocHost); + + if (PMR_IsSparse(psFreeListAndStatePMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Free list PMR cannot be sparse!", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto ErrorRefPMR; + } + + sFreeListBaseDevVAddr.uiAddr = sFreeListStateDevVAddr.uiAddr + uiFreeListOffset; if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) { @@ -1793,8 +1901,8 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; - PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", - __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", + __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); ui32InitFLPages = ui32NewInitFLPages; ui32GrowFLPages = ui32NewGrowFLPages; @@ -1806,10 +1914,10 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, if (psFreeList == NULL) { PVR_DPF((PVR_DBG_ERROR, - "%s: failed to allocate host data structure", - __func__)); + "%s: failed to allocate host data structure", + __func__)); eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto ErrorAllocHost; + goto ErrorRefPMR; } /* @@ -1820,36 +1928,42 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) */ eError = DevmemFwAllocate(psDevInfo, - sizeof(*psFWFreeList), - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | - PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | - PVRSRV_MEMALLOCFLAG_GPU_READABLE | - PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | - PVRSRV_MEMALLOCFLAG_CPU_READABLE | - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | - PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | - PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | - PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), - "FwFreeList", - &psFWFreelistMemDesc); + sizeof(*psFWFreeList), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_OS_LINUX_DENY_MOVE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwFreeList", + &psFWFreelistMemDesc); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: DevmemAllocate for RGXFWIF_FREELIST failed", - __func__)); + "%s: DevmemAllocate for RGXFWIF_FREELIST failed", + __func__)); goto FWFreeListAlloc; } /* Initialise host data structures */ psFreeList->psDevInfo = psDevInfo; psFreeList->psConnection = psConnection; - psFreeList->psFreeListPMR = psFreeListPMR; - psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; - psFreeList->psFreeListStatePMR = psFreeListStatePMR; - psFreeList->uiFreeListStatePMROffset = uiFreeListStatePMROffset; + + psFreeList->psFreeListStatePMR = psFreeListAndStatePMR; + psFreeList->uiFreeListStatePMROffset = 0; + + 
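[Editorial note, not part of the patch] The freelist-creation hunk above follows a strict acquire -> validate -> ref -> reverse-order-unwind idiom: the reservation is acquired first, the backing PMR and its flags are validated, an extra PMR reference is taken, and each failure jumps to a label that undoes exactly the steps completed so far. A minimal standalone sketch of that idiom is shown below; the types and helpers (reservation, acquire_reservation, release_reservation, validate_flags, create_object) are hypothetical stand-ins, not driver APIs.

/* Illustrative sketch only: acquire -> validate -> goto-unwind, with stub types. */
#include <stdbool.h>
#include <stdio.h>

struct reservation { int refs; };

static bool acquire_reservation(struct reservation *r) { r->refs++; return true; }
static void release_reservation(struct reservation *r) { r->refs--; }
static int  validate_flags(const struct reservation *r) { (void)r; return 0; }

static int create_object(struct reservation *r)
{
	int err;

	if (!acquire_reservation(r))      /* take the reference first */
		return -1;

	err = validate_flags(r);          /* validate before doing any further work */
	if (err != 0)
		goto err_release;             /* unwind in strict reverse order */

	/* ... further setup steps, each with its own errX: label ... */
	return 0;

err_release:
	release_reservation(r);
	return err;
}

int main(void)
{
	struct reservation r = { 0 };
	printf("create_object() -> %d, refs=%d\n", create_object(&r), r.refs);
	return 0;
}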
psFreeList->psFreeListPMR = psFreeListAndStatePMR; + psFreeList->uiFreeListPMROffset = uiFreeListOffset; + + psFreeList->psFreeListAndStateReservation = psFreeListAndStateReservation; + psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; + psFreeList->sFreeListBaseDevVAddr = sFreeListBaseDevVAddr; eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); @@ -1871,48 +1985,77 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, #endif psFreeList->ownerPid = OSGetCurrentClientProcessIDKM(); - /* Add to list of freelists */ OSLockAcquire(psDevInfo->hLockFreeList); psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++; dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode); OSLockRelease(psDevInfo->hLockFreeList); + ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); + + /* Write freelist state buffer */ + { + RGX_PM_FREELISTSTATE_BUFFER sFLState = {0}; + size_t uiNbBytes; + IMG_DEV_VIRTADDR sFLBaseAddr; + + sFLBaseAddr.uiAddr = (psFreeList->sFreeListBaseDevVAddr.uiAddr + + ((psFreeList->ui32MaxFLPages - ui32InitFLPages + ui32ReadyPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + + + RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(sFLState, (sFLBaseAddr.uiAddr >> RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT)); + + RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(sFLState, ui32InitFLPages - ui32ReadyPages - 1); + RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(sFLState, 0); + RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(sFLState, 0); + + eError = PMR_WriteBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error from PMR_WriteBytes: %s", + __func__, PVRSRVGetErrorString(eError))); + goto FWFreeListAlloc; + } + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + + PDUMPCOMMENT(psDeviceNode, "FreeListState buffer"); + PMRPDumpLoadMem(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, sizeof(sFLState), PDUMP_FLAGS_CONTINUOUS, false); + } /* Initialise FW data structure */ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap); { - const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); - - psFWFreeList->ui32MaxPages = ui32MaxFLPages; - psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; - psFWFreeList->ui32GrowPages = ui32GrowFLPages; - psFWFreeList->bUpdatePending = IMG_FALSE; - psFWFreeList->ui32UpdateNewPages = 0; - psFWFreeList->ui32UpdateNewReadyPages = 0; - psFWFreeList->sFreeListBaseDevVAddr = sFreeListBaseDevVAddr; + sFWFreeList.ui32MaxPages = ui32MaxFLPages; + sFWFreeList.ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; + sFWFreeList.ui32GrowPages = ui32GrowFLPages; + sFWFreeList.bUpdatePending = IMG_FALSE; + sFWFreeList.ui32UpdateNewPages = 0; + sFWFreeList.ui32UpdateNewReadyPages = 0; + sFWFreeList.sFreeListBaseDevVAddr = sFreeListBaseDevVAddr; #if defined(PM_INTERACTIVE_MODE) - psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; - psFWFreeList->ui64CurrentDevVAddr = (sFreeListBaseDevVAddr.uiAddr + - ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & - ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + 
sFWFreeList.ui32CurrentStackTop = sFWFreeList.ui32CurrentPages - 1; + sFWFreeList.ui64CurrentDevVAddr = (sFreeListBaseDevVAddr.uiAddr + + ((ui32MaxFLPages - sFWFreeList.ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); #endif - psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; - psFWFreeList->bGrowPending = IMG_FALSE; - psFWFreeList->ui32ReadyPages = ui32ReadyPages; + sFWFreeList.ui32FreeListID = psFreeList->ui32FreelistID; + sFWFreeList.bGrowPending = IMG_FALSE; + sFWFreeList.ui32ReadyPages = ui32ReadyPages; #if defined(SUPPORT_SHADOW_FREELISTS) /* Get the FW Memory Context address... */ - eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext, + eError = RGXSetFirmwareAddress(&sFWFreeList.psFWMemContext, RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData), 0, RFW_FWADDR_NOREF_FLAG); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", - __func__)); + "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", + __func__)); DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); goto FWFreeListCpuMap; } @@ -1927,20 +2070,26 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, * Access to the physical PMR will be used to update the contents of the * PM state buffer when PB grow occurs following OOM. */ - psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; - psFWFreeList->sFreeListStateDevVAddr = sFreeListStateDevVAddr; + sFWFreeList.sFreeListLastGrowDevVAddr.uiAddr = 0; + sFWFreeList.sFreeListStateDevVAddr = sFreeListStateDevVAddr; } + OSCachedMemCopy(psFWFreeList, &sFWFreeList, sizeof(sFWFreeList)); + RGXFwSharedMemCacheOpPtr(psFWFreeList, FLUSH); + PVR_DPF((PVR_DBG_MESSAGE, - "Freelist [%p]: Created: Max pages 0x%08x, Init pages 0x%08x, FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current pages %u", - psFreeList, - ui32MaxFLPages, - ui32InitFLPages, - sFreeListBaseDevVAddr.uiAddr, - (sFreeListBaseDevVAddr.uiAddr + - ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & - ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1), - psFWFreeList->ui32CurrentPages - 1)); + "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, " + "FL base address 0x%016" IMG_UINT64_FMTSPECx ", " + "Current FL base address 0x%016" IMG_UINT64_FMTSPECx ", " + "Current pages %u", + psFreeList, + ui32MaxFLPages, + ui32InitFLPages, + sFreeListBaseDevVAddr.uiAddr, + (sFreeListBaseDevVAddr.uiAddr + + ((ui32MaxFLPages - sFWFreeList.ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1), + sFWFreeList.ui32CurrentPages - 1)); #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump FW FreeList"); DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS); @@ -1952,14 +2101,14 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, */ PDUMPCOMMENT(psDeviceNode, "FreeList TotalPages"); DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, - offsetof(RGXFWIF_FREELIST, ui32CurrentPages), - psFWFreeList->ui32CurrentPages, - PDUMP_FLAGS_CONTINUOUS); + offsetof(RGXFWIF_FREELIST, ui32CurrentPages), + sFWFreeList.ui32CurrentPages, + PDUMP_FLAGS_CONTINUOUS); PDUMPCOMMENT(psDeviceNode, "FreeList StackPointer"); DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, - offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), - psFWFreeList->ui32CurrentStackTop, - PDUMP_FLAGS_CONTINUOUS); + 
offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), + sFWFreeList.ui32CurrentStackTop, + PDUMP_FLAGS_CONTINUOUS); #endif #endif DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); @@ -1972,10 +2121,10 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)", - __func__, - sFreeListBaseDevVAddr.uiAddr, - PVRSRVGetErrorString(eError))); + "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)", + __func__, + sFreeListBaseDevVAddr.uiAddr, + PVRSRVGetErrorString(eError))); goto FWFreeListCpuMap; } #if defined(PVRSRV_ENABLE_PROCESS_STATS) @@ -1995,9 +2144,8 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, return PVRSRV_OK; /* Error handling */ - FWFreeListCpuMap: - /* Remove freelists from list */ + /* Remove freelists from list */ OSLockAcquire(psDevInfo->hLockFreeList); dllist_remove_node(&psFreeList->sNode); OSLockRelease(psDevInfo->hLockFreeList); @@ -2009,7 +2157,13 @@ PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, FWFreeListAlloc: OSFreeMem(psFreeList); +ErrorRefPMR: + (void) PMRUnrefPMR(psFreeListAndStatePMR); + ErrorAllocHost: + DevmemIntReservationRelease(psFreeListAndStateReservation); + +ErrorReservationAcquire: PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -2038,12 +2192,14 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) /* Freelist is not in use => start firmware cleanup */ eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo, psFreeList->sFreeListFWDevVAddr); - if (eError != PVRSRV_OK) - { - /* Can happen if the firmware took too long to handle the cleanup request, - * or if SLC-flushes didn't went through (due to some GPU lockup) */ - return eError; - } + + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psFreeList->psDevInfo->psDeviceNode, + eError, + RGXFWRequestFreeListCleanUp); + + /* Device becomes unrecoverable if the firmware took too long to + * handle the cleanup request, or if SLC-flushes didn't go through + * (due to some GPU lockup) */ /* Remove FreeList from linked list before we destroy it... 
*/ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); @@ -2058,15 +2214,22 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) if (psFreeList->bCheckFreelist) { RGXFWIF_FREELIST *psFWFreeList; - IMG_UINT64 ui32CurrentStackTop; + IMG_UINT32 ui32CurrentStackTop; IMG_UINT64 ui64CheckSum; /* Get the current stack pointer for this free list */ DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + RGXFwSharedMemCacheOpValue(psFWFreeList->ui32CurrentStackTop, INVALIDATE); ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop; DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); - if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) + if (ui32CurrentStackTop > psFreeList->ui32MaxFLPages) + { + PVR_LOG(("%s: FW freelist corrupted (%d)", + __func__, + ui32CurrentStackTop)); + } + else if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) { /* Do consistency tests (as the list is fully populated) */ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); @@ -2098,6 +2261,12 @@ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead)); PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0); + /* Remove references from the PMR resources */ + eError = PMRUnrefPMR(psFreeList->psFreeListStatePMR); + PVR_ASSERT(eError == PVRSRV_OK); + + DevmemIntReservationRelease(psFreeList->psFreeListAndStateReservation); + /* free Freelist */ OSFreeMem(psFreeList); @@ -2118,10 +2287,13 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGXFWIF_PRBUFFER *psFWZSBuffer; + RGXFWIF_PRBUFFER sFWZSBuffer = {0}; RGX_ZSBUFFER_DATA *psZSBuffer; DEVMEM_MEMDESC *psFWZSBufferMemDesc; IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE; + PVR_UNREFERENCED_PARAMETER(psConnection); + /* Allocate host data structure */ psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer)); if (psZSBuffer == NULL) @@ -2136,20 +2308,25 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, /* Populate Host data */ psZSBuffer->psDevInfo = psDevInfo; psZSBuffer->psReservation = psReservation; + + /* Obtain reference to reservation object */ + if (!DevmemIntReservationAcquire(psZSBuffer->psReservation)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire reservation for ZS-Buffer", + __func__)); + eError = PVRSRV_ERROR_REFCOUNT_OVERFLOW; + goto ErrorReservationAcquire; + } + psZSBuffer->psPMR = psPMR; - psZSBuffer->uiMapFlags = uiMapFlags; + /* Obtain reference to PMR */ + eError = PMRRefPMR(psZSBuffer->psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", ErrorRefPMR); + psZSBuffer->ui32RefCount = 0; + psZSBuffer->bIsBacked = IMG_FALSE; psZSBuffer->bOnDemand = bOnDemand; - if (bOnDemand) - { - /* psZSBuffer->ui32ZSBufferID set below with lock... */ - psZSBuffer->psMapping = NULL; - - OSLockAcquire(psDevInfo->hLockZSBuffer); - psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; - dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); - OSLockRelease(psDevInfo->hLockZSBuffer); - } /* Allocate firmware memory for ZS-Buffer. 
*/ PDUMPCOMMENT(psDeviceNode, "Allocate firmware ZS-Buffer data structure"); @@ -2157,7 +2334,6 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, sizeof(*psFWZSBuffer), PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | - PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | @@ -2190,20 +2366,31 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, goto ErrorAcquireFWZSBuffer; } - /* Populate FW ZS-Buffer data structure */ - psFWZSBuffer->bOnDemand = bOnDemand; - psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; - psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID; - /* Get firmware address of ZS-Buffer. */ eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE); PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); + if (bOnDemand) + { + /* psZSBuffer->ui32ZSBufferID set below with lock... */ + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; + dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); + OSLockRelease(psDevInfo->hLockZSBuffer); + } + + /* Populate FW ZS-Buffer data structure */ + sFWZSBuffer.bOnDemand = bOnDemand; + sFWZSBuffer.eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; + sFWZSBuffer.ui32BufferID = psZSBuffer->ui32ZSBufferID; + + OSCachedMemCopy(psFWZSBuffer, &sFWZSBuffer, sizeof(sFWZSBuffer)); /* Dump the ZS-Buffer and the memory content */ #if defined(PDUMP) PDUMPCOMMENT(psDeviceNode, "Dump firmware ZS-Buffer"); DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS); #endif + RGXFwSharedMemCacheOpPtr(psFWZSBuffer, FLUSH); /* Release address acquired above. */ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); @@ -2228,6 +2415,10 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc); ErrorAllocFWZSBuffer: + (void) PMRUnrefPMR(psZSBuffer->psPMR); +ErrorRefPMR: + DevmemIntReservationRelease(psZSBuffer->psReservation); +ErrorReservationAcquire: OSFreeMem(psZSBuffer); ErrorAllocCleanup: @@ -2242,7 +2433,7 @@ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer) { POS_LOCK hLockZSBuffer; - PVRSRV_ERROR eError; + PVRSRV_ERROR eError, eError2; PVR_ASSERT(psZSBuffer); hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; @@ -2250,30 +2441,45 @@ PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer) /* Request ZS Buffer cleanup */ eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo, psZSBuffer->sZSBufferFWDevVAddr); - if (eError == PVRSRV_OK) - { - /* Free the firmware render context. 
*/ - RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); - DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); - /* Remove Deferred Allocation from list */ - if (psZSBuffer->bOnDemand) - { - OSLockAcquire(hLockZSBuffer); - PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); - dllist_remove_node(&psZSBuffer->sNode); - OSLockRelease(hLockZSBuffer); - } + RGX_RETURN_IF_ERROR_AND_DEVICE_RECOVERABLE(psZSBuffer->psDevInfo->psDeviceNode, + eError, + RGXFWRequestZSBufferCleanUp); - PVR_ASSERT(psZSBuffer->ui32RefCount == 0); + OSLockAcquire(hLockZSBuffer); - PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); + if (psZSBuffer->ui32RefCount != 0) + { + /* ZS-Buffer is still referenced */ + OSLockRelease(hLockZSBuffer); + PVR_DPF((PVR_DBG_WARNING, "Trying to destroy a ZS-Buffer [%p] that's still in use.", psZSBuffer)); + return PVRSRV_ERROR_RETRY; + } - /* Free ZS-Buffer host data structure */ - OSFreeMem(psZSBuffer); + /* Free the firmware render context. */ + RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); + DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); + /* Remove Deferred Allocation from list */ + if (psZSBuffer->bOnDemand) + { + PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); + dllist_remove_node(&psZSBuffer->sNode); } + PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); + + /* Release reference to reservation object and the PMR */ + eError2 = PMRUnrefPMR(psZSBuffer->psPMR); + PVR_LOG_IF_ERROR(eError2, "PMRUnrefPMR"); + + DevmemIntReservationRelease(psZSBuffer->psReservation); + + /* Free ZS-Buffer host data structure */ + OSFreeMem(psZSBuffer); + + OSLockRelease(hLockZSBuffer); + return eError; } @@ -2302,47 +2508,36 @@ RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) OSLockAcquire(hLockZSBuffer); - if (psZSBuffer->ui32RefCount == 0) + if (psZSBuffer->bIsBacked == IMG_FALSE) { - if (psZSBuffer->bOnDemand) - { - IMG_HANDLE hDevmemHeap = (IMG_HANDLE)NULL; + IMG_HANDLE hDevmemHeap; - PVR_ASSERT(psZSBuffer->psMapping == NULL); - - /* Get Heap */ - eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap); - PVR_ASSERT(psZSBuffer->psMapping == NULL); - if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL)) - { - OSLockRelease(hLockZSBuffer); - return PVRSRV_ERROR_INVALID_HEAP; - } + /* Get Heap */ + eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap); + if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL)) + { + OSLockRelease(hLockZSBuffer); + return PVRSRV_ERROR_INVALID_HEAP; + } - eError = DevmemIntMapPMR(hDevmemHeap, - psZSBuffer->psReservation, - psZSBuffer->psPMR, - psZSBuffer->uiMapFlags, - &psZSBuffer->psMapping); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)", - psZSBuffer, - psZSBuffer->ui32ZSBufferID, - PVRSRVGetErrorString(eError))); - OSLockRelease(hLockZSBuffer); - return eError; + eError = DevmemIntMapPMR(psZSBuffer->psReservation, psZSBuffer->psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)", + psZSBuffer, + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; - } - PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired", - psZSBuffer, - psZSBuffer->ui32ZSBufferID)); } - } + PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); - /* Increase refcount*/ - 
psZSBuffer->ui32RefCount++; + psZSBuffer->bIsBacked = IMG_TRUE; + } OSLockRelease(hLockZSBuffer); @@ -2357,6 +2552,10 @@ RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, RGX_POPULATION *psPopulation; PVRSRV_ERROR eError; + OSLockAcquire(psZSBuffer->psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount++; + OSLockRelease(psZSBuffer->psDevInfo->hLockZSBuffer); + psZSBuffer->ui32NumReqByApp++; #if defined(PVRSRV_ENABLE_PROCESS_STATS) @@ -2391,6 +2590,11 @@ RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, OnErrorBacking: PVR_ASSERT(eError != PVRSRV_OK); + + OSLockAcquire(psZSBuffer->psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount--; + OSLockRelease(psZSBuffer->psDevInfo->hLockZSBuffer); + return eError; } @@ -2405,8 +2609,6 @@ RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) return PVRSRV_ERROR_INVALID_PARAMS; } - PVR_ASSERT(psZSBuffer->ui32RefCount); - PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested", psZSBuffer, @@ -2416,33 +2618,27 @@ RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) OSLockAcquire(hLockZSBuffer); - if (psZSBuffer->bOnDemand) + if (psZSBuffer->bOnDemand && psZSBuffer->bIsBacked == IMG_TRUE) { - if (psZSBuffer->ui32RefCount == 1) + eError = DevmemIntUnmapPMR(psZSBuffer->psReservation); + if (eError != PVRSRV_OK) { - PVR_ASSERT(psZSBuffer->psMapping); + PVR_DPF((PVR_DBG_ERROR, + "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", + psZSBuffer, + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; + } - eError = DevmemIntUnmapPMR(psZSBuffer->psMapping); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", - psZSBuffer, - psZSBuffer->ui32ZSBufferID, - PVRSRVGetErrorString(eError))); - OSLockRelease(hLockZSBuffer); - return eError; - } + PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); - PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", - psZSBuffer, - psZSBuffer->ui32ZSBufferID)); - } + psZSBuffer->bIsBacked = IMG_FALSE; } - /* Decrease refcount*/ - psZSBuffer->ui32RefCount--; - OSLockRelease(hLockZSBuffer); return PVRSRV_OK; @@ -2452,24 +2648,31 @@ PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation) { PVRSRV_ERROR eError; + POS_LOCK hLockZSBuffer; if (!psPopulation) { return PVRSRV_ERROR_INVALID_PARAMS; } + hLockZSBuffer = psPopulation->psZSBuffer->psDevInfo->hLockZSBuffer; + eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer); if (eError != PVRSRV_OK) { return eError; } + OSLockAcquire(hLockZSBuffer); + psPopulation->psZSBuffer->ui32RefCount--; + OSLockRelease(hLockZSBuffer); + OSFreeMem(psPopulation); return PVRSRV_OK; } -static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) +static RGX_ZSBUFFER_DATA *FindAndRefZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) { DLLIST_NODE *psNode, *psNext; RGX_ZSBUFFER_DATA *psZSBuffer = NULL; @@ -2483,6 +2686,8 @@ static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID) { psZSBuffer = psThisZSBuffer; + + psZSBuffer->ui32RefCount++; break; } } @@ -2502,7 +2707,7 @@ void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_ASSERT(psDevInfo); /* scan all deferred allocations */ - psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + psZSBuffer = FindAndRefZSBuffer(psDevInfo, 
ui32ZSBufferID); if (psZSBuffer == NULL) { @@ -2518,8 +2723,9 @@ void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "Populating ZS-Buffer failed with error %u (ID = 0x%08x)", - eError, ui32ZSBufferID)); + "Populating ZS-Buffer (ID = 0x%08x) failed (%s)", + ui32ZSBufferID, + PVRSRVGetErrorString(eError))); bBackingDone = IMG_FALSE; } @@ -2528,20 +2734,20 @@ void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GEOM, - &sTACCBCmd, - PDUMP_FLAGS_NONE); + RGXFWIF_DM_GEOM, + &sTACCBCmd, + PDUMP_FLAGS_NONE); if (eError != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); - /* Kernel CCB should never fill up, as the FW is processing them right away */ + /* Kernel CCB should never fill up, as the FW is processing them right away */ PVR_ASSERT(eError == PVRSRV_OK); psZSBuffer->ui32NumReqByFW++; @@ -2550,6 +2756,11 @@ void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, PVRSRVStatsUpdateZSBufferStats(psDevInfo->psDeviceNode, 0, 1, psZSBuffer->owner); #endif + + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount--; + OSLockRelease(psDevInfo->hLockZSBuffer); + } void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, @@ -2562,7 +2773,7 @@ void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_ASSERT(psDevInfo); /* scan all deferred allocations */ - psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + psZSBuffer = FindAndRefZSBuffer(psDevInfo, ui32ZSBufferID); if (psZSBuffer == NULL) { @@ -2578,8 +2789,9 @@ void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "UnPopulating ZS-Buffer failed with error %u (ID = 0x%08x)", - eError, ui32ZSBufferID)); + "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)", + ui32ZSBufferID, + PVRSRVGetErrorString(eError))); PVR_ASSERT(IMG_FALSE); } @@ -2588,38 +2800,42 @@ void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { eError = RGXScheduleCommand(psDevInfo, - RGXFWIF_DM_GEOM, - &sTACCBCmd, - PDUMP_FLAGS_NONE); + RGXFWIF_DM_GEOM, + &sTACCBCmd, + PDUMP_FLAGS_NONE); if (eError != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); /* Kernel CCB should never fill up, as the FW is processing them right away */ PVR_ASSERT(eError == PVRSRV_OK); + + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32RefCount--; + OSLockRelease(psDevInfo->hLockZSBuffer); } static PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - SERVER_MMU_CONTEXT *psServerMMUContext, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - 
RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_RC_TA_DATA *psTAData, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags) + PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_INT32 i32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_TA_DATA *psTAData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; @@ -2645,22 +2861,22 @@ PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, } eError = FWCommonContextAllocate(psConnection, - psDeviceNode, - REQ_TYPE_TA, - RGXFWIF_DM_GEOM, - psServerMMUContext, - psAllocatedMemDesc, - ui32AllocatedOffset, - psFWMemContextMemDesc, - psTAData->psContextStateMemDesc, - ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, - ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, - ui32ContextFlags, - i32Priority, - ui32MaxDeadlineMS, - ui64RobustnessAddress, - psInfo, - &psTAData->psServerCommonContext); + psDeviceNode, + REQ_TYPE_TA, + RGXFWIF_DM_GEOM, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + psTAData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + i32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &psTAData->psServerCommonContext); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2694,23 +2910,23 @@ PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, static PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - SERVER_MMU_CONTEXT *psServerMMUContext, - DEVMEM_MEMDESC *psAllocatedMemDesc, - IMG_UINT32 ui32AllocatedOffset, - DEVMEM_MEMDESC *psFWMemContextMemDesc, - IMG_INT32 i32Priority, - IMG_UINT32 ui32MaxDeadlineMS, - IMG_UINT64 ui64RobustnessAddress, - RGX_COMMON_CONTEXT_INFO *psInfo, - RGX_SERVER_RC_3D_DATA *ps3DData, - IMG_UINT32 ui32CCBAllocSizeLog2, - IMG_UINT32 ui32CCBMaxAllocSizeLog2, - IMG_UINT32 ui32ContextFlags) + PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_INT32 i32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_3D_DATA *ps3DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) { PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; PVRSRV_ERROR eError; - IMG_UINT uiNumISPStoreRegs = RGXFWIF_IPP_RESUME_REG_COUNT; /* default 1 register for IPP_resume */ + IMG_UINT uiNumISPStoreRegs; IMG_UINT ui3DRegISPStateStoreSize = 0; /* @@ -2719,6 +2935,9 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, */ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware 3D context suspend state"); +#if defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) + uiNumISPStoreRegs = RGXFWIF_IPP_RESUME_REG_COUNT; /* default 1 register for IPP_resume */ + uiNumISPStoreRegs += (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU) * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_PER_SPU) * 
RGXFWIF_PIPE_COUNT_PER_ISP); @@ -2728,6 +2947,23 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, { return PVRSRV_ERROR_NOT_SUPPORTED; } +#else /* defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_RASTER_PIPES_IDX); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + uiNumISPStoreRegs *= (1U + psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_XPU_MAX_SLAVES_IDX)); + } + } + else + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); + } +#endif /* defined(RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX) */ /* Size of the CS buffer */ /* Calculate the size of the 3DCTX ISP state */ @@ -2749,22 +2985,22 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, } eError = FWCommonContextAllocate(psConnection, - psDeviceNode, - REQ_TYPE_3D, - RGXFWIF_DM_3D, - psServerMMUContext, - psAllocatedMemDesc, - ui32AllocatedOffset, - psFWMemContextMemDesc, - ps3DData->psContextStateMemDesc, - ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, - ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, - ui32ContextFlags, - i32Priority, - ui32MaxDeadlineMS, - ui64RobustnessAddress, - psInfo, - &ps3DData->psServerCommonContext); + psDeviceNode, + REQ_TYPE_3D, + RGXFWIF_DM_3D, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + ps3DData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + i32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &ps3DData->psServerCommonContext); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, @@ -2799,19 +3035,19 @@ PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, * PVRSRVRGXCreateRenderContextKM */ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_INT32 i32Priority, - IMG_UINT32 ui32FrameworkRegisterSize, - IMG_PBYTE pabyFrameworkRegisters, - IMG_HANDLE hMemCtxPrivData, - IMG_UINT32 ui32StaticRenderContextStateSize, - IMG_PBYTE pStaticRenderContextState, - IMG_UINT32 ui32PackedCCBSizeU8888, - IMG_UINT32 ui32ContextFlags, - IMG_UINT64 ui64RobustnessAddress, - IMG_UINT32 ui32MaxTADeadlineMS, - IMG_UINT32 ui32Max3DDeadlineMS, - RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_INT32 i32Priority, + IMG_UINT32 ui32FrameworkRegisterSize, + IMG_PBYTE pabyFrameworkRegisters, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticRenderContextStateSize, + IMG_PBYTE pStaticRenderContextState, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxTADeadlineMS, + IMG_UINT32 ui32Max3DDeadlineMS, + RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; @@ -2857,7 +3093,10 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + } #endif if (ui32FrameworkRegisterSize) @@ -2894,38 +3133,38 @@ 
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, } eError = _Create3DContext(psConnection, - psDeviceNode, - hMemCtxPrivData, - psRenderContext->psFWRenderContextMemDesc, - offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), - psFWMemContextMemDesc, - i32Priority, - ui32Max3DDeadlineMS, - ui64RobustnessAddress, - &sInfo, - &psRenderContext->s3DData, - U32toU8_Unpack3(ui32PackedCCBSizeU8888), - U32toU8_Unpack4(ui32PackedCCBSizeU8888), - ui32ContextFlags); + psDeviceNode, + hMemCtxPrivData, + psRenderContext->psFWRenderContextMemDesc, + offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), + psFWMemContextMemDesc, + i32Priority, + ui32Max3DDeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psRenderContext->s3DData, + U32toU8_Unpack3(ui32PackedCCBSizeU8888), + U32toU8_Unpack4(ui32PackedCCBSizeU8888), + ui32ContextFlags); if (eError != PVRSRV_OK) { goto fail_3dcontext; } eError = _CreateTAContext(psConnection, - psDeviceNode, - hMemCtxPrivData, - psRenderContext->psFWRenderContextMemDesc, - offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), - psFWMemContextMemDesc, - i32Priority, - ui32MaxTADeadlineMS, - ui64RobustnessAddress, - &sInfo, - &psRenderContext->sTAData, - U32toU8_Unpack1(ui32PackedCCBSizeU8888), - U32toU8_Unpack2(ui32PackedCCBSizeU8888), - ui32ContextFlags); + psDeviceNode, + hMemCtxPrivData, + psRenderContext->psFWRenderContextMemDesc, + offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), + psFWMemContextMemDesc, + i32Priority, + ui32MaxTADeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psRenderContext->sTAData, + U32toU8_Unpack1(ui32PackedCCBSizeU8888), + U32toU8_Unpack2(ui32PackedCCBSizeU8888), + ui32ContextFlags); if (eError != PVRSRV_OK) { goto fail_tacontext; @@ -2938,8 +3177,13 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, goto fail_acquire_cpu_mapping; } + /* Copy the static render context data */ OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize); +#if defined(SUPPORT_TRP) + psFWRenderContext->eTRPGeomCoreAffinity = RGXFWIF_DM_MAX; +#endif DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS); + RGXFwSharedMemCacheOpPtr(psFWRenderContext, FLUSH); DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); #if defined(SUPPORT_BUFFER_SYNC) @@ -2977,14 +3221,32 @@ PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, fail_buffer_sync_context_create: #endif fail_acquire_cpu_mapping: - _DestroyTAContext(&psRenderContext->sTAData, - psDeviceNode); + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + PVRSRV_ERROR eError2 = _DestroyTAContext(&psRenderContext->sTAData, + psDeviceNode); + if (!PVRSRVIsRetryError(eError2)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } + END_LOOP_UNTIL_TIMEOUT_US(); fail_tacontext: - _Destroy3DContext(&psRenderContext->s3DData, - psRenderContext->psDeviceNode); + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + PVRSRV_ERROR eError2 = _Destroy3DContext(&psRenderContext->s3DData, + psRenderContext->psDeviceNode); + if (!PVRSRVIsRetryError(eError2)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } + END_LOOP_UNTIL_TIMEOUT_US(); fail_3dcontext: fail_frameworkcopy: - if (psRenderContext->psFWFrameworkMemDesc != NULL) + if (psRenderContext->psFWFrameworkMemDesc) { DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); } @@ -3006,10 +3268,6 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT 
*psRender { PVRSRV_ERROR eError; PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice; -#if defined(SUPPORT_WORKLOAD_ESTIMATION) - RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; - IMG_UINT32 ui32WorkEstCCBSubmitted; -#endif /* remove node from list before calling destroy - as destroy, if successful * will invalidate the node @@ -3061,32 +3319,39 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, - (void **)&psFWRenderContext); - if (eError != PVRSRV_OK) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to map firmware render context (%s)", - __func__, - PVRSRVGetErrorString(eError))); - goto e0; - } + RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, + (void **)&psFWRenderContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto e0; + } + RGXFwSharedMemCacheOpValue(psFWRenderContext->ui32WorkEstCCBSubmitted, INVALIDATE); - ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted; + ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted; - DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); + DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); - /* Check if all of the workload estimation CCB commands for this workload are read */ - if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived) - { + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived) + { - PVR_DPF((PVR_DBG_WARNING, - "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", - __func__, ui32WorkEstCCBSubmitted, - psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)); + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)); - eError = PVRSRV_ERROR_RETRY; - goto e0; + eError = PVRSRV_ERROR_RETRY; + goto e0; + } } #endif @@ -3096,7 +3361,7 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender */ if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE)) { - if (psRenderContext->psFWFrameworkMemDesc != NULL) + if (psRenderContext->psFWFrameworkMemDesc) { /* Free the framework buffer */ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); @@ -3111,8 +3376,12 @@ PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRender SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); + } #endif + OSLockDestroy(psRenderContext->hLock); OSFreeMem(psRenderContext); @@ -3313,54 +3582,55 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = 
psRenderContext->as3DCmdHelperData; - IMG_UINT64 ui64FBSCEntryMask; + IMG_UINT64 ui64FBSCEntryMask; - IMG_UINT32 ui32TACmdCount=0; - IMG_UINT32 ui323DCmdCount=0; - IMG_UINT32 ui32TACmdOffset=0; - IMG_UINT32 ui323DCmdOffset=0; - RGXFWIF_UFO sPRUFO; - IMG_UINT32 i; - PVRSRV_ERROR eError = PVRSRV_OK; - PVRSRV_ERROR eError2 = PVRSRV_OK; + IMG_UINT32 ui32TACmdCount=0; + IMG_UINT32 ui323DCmdCount=0; + __maybe_unused IMG_UINT32 ui32TACmdOffset=0; + __maybe_unused IMG_UINT32 ui323DCmdOffset=0; - PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); - IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); - IMG_BOOL bCCBStateOpen = IMG_FALSE; + RGXFWIF_UFO sPRUFO; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2 = PVRSRV_OK; - IMG_UINT32 ui32ClientPRUpdateCount = 0; - PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; - IMG_UINT32 *paui32ClientPRUpdateValue = NULL; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + IMG_UINT32 ui32ClientPRUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; + IMG_UINT32 *paui32ClientPRUpdateValue = NULL; PRGXFWIF_TIMESTAMP_ADDR pPreAddr; PRGXFWIF_TIMESTAMP_ADDR pPostAddr; PRGXFWIF_UFO_ADDR pRMWUFOAddr; - PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; - PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; + PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; - IMG_UINT64 uiCheckTAFenceUID = 0; - IMG_UINT64 uiCheck3DFenceUID = 0; - IMG_UINT64 uiUpdateTAFenceUID = 0; - IMG_UINT64 uiUpdate3DFenceUID = 0; + IMG_UINT64 uiCheckTAFenceUID = 0; + IMG_UINT64 uiCheck3DFenceUID = 0; + IMG_UINT64 uiUpdateTAFenceUID = 0; + IMG_UINT64 uiUpdate3DFenceUID = 0; IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; - RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; - RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; - PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; - PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; - IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; + IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; @@ -3386,10 +3656,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update syncs for 3D */ IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE; -#if defined(SUPPORT_VALIDATION) - PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; - PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL; -#endif #if defined(SUPPORT_BUFFER_SYNC) PSYNC_CHECKPOINT 
*apsBufferFenceSyncCheckpoints = NULL; @@ -3458,6 +3724,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; + RGXFWIF_DEV_VIRTADDR sNullFWAddr = {0}; if (psKMHWRTDataSet == NULL) { @@ -3469,48 +3736,53 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, */ if (psGeomCmdShared != NULL) { - psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; - - if (psZSBuffer != NULL) + if (ui32TACmdSize < sizeof(*psGeomCmdShared)) { - psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; - } - if (psMSAAScratchBuffer != NULL) - { - psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid TACmd size", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; } + + psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = + psZSBuffer ? psZSBuffer->sZSBufferFWDevVAddr : sNullFWAddr; + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = + psMSAAScratchBuffer ? psMSAAScratchBuffer->sZSBufferFWDevVAddr : sNullFWAddr; } /* Write FW address for 3D CMD */ if (ps3DCmdShared != NULL) { - ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; - - if (psZSBuffer != NULL) - { - ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; - } - if (psMSAAScratchBuffer != NULL) + if (ui323DCmdSize < sizeof(*ps3DCmdShared)) { - ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid 3DCmd size", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; } + + ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = + psZSBuffer ? psZSBuffer->sZSBufferFWDevVAddr : sNullFWAddr; + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = + psMSAAScratchBuffer ? psMSAAScratchBuffer->sZSBufferFWDevVAddr : sNullFWAddr; } /* Write FW address for PR3D CMD */ if (psPR3DCmdShared != NULL) { - psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; - - if (psZSBuffer != NULL) - { - psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; - } - if (psMSAAScratchBuffer != NULL) + if (ui323DPRCmdSize < sizeof(*psPR3DCmdShared)) { - psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid 3DPRCmd size", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; } + + psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = + psZSBuffer ? psZSBuffer->sZSBufferFWDevVAddr : sNullFWAddr; + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = + psMSAAScratchBuffer ? 
psMSAAScratchBuffer->sZSBufferFWDevVAddr : sNullFWAddr; } } @@ -3597,6 +3869,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, __func__, eError)); } OSLockRelease(psRenderContext->hLock); + return eError; } @@ -3627,6 +3900,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, } } } +#else + PVR_UNREFERENCED_PARAMETER(paui32SyncPMRFlags); + PVR_UNREFERENCED_PARAMETER(ppsSyncPMRs); #endif /* defined(SUPPORT_BUFFER_SYNC) */ #if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 @@ -3648,8 +3924,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, &ui32FenceTASyncCheckpointCount, &apsFenceTASyncCheckpoints, &uiCheckTAFenceUID, - ui32PDumpFlags - ); + ui32PDumpFlags); if (unlikely(eError != PVRSRV_OK)) { CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", @@ -3686,8 +3961,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, &ui32Fence3DSyncCheckpointCount, &apsFence3DSyncCheckpoints, &uiCheck3DFenceUID, - ui32PDumpFlags - ); + ui32PDumpFlags); if (unlikely(eError != PVRSRV_OK)) { CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", @@ -3741,50 +4015,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, UPDATE_FENCE_CHECKPOINT_COUNT : 0; } -#if defined(SUPPORT_VALIDATION) - /* Check if TestingSLR is adding an extra sync checkpoint to the - * 3D fence check (which we won't signal) - */ - if ((psDevInfo->ui32TestSLRInterval > 0) && - (--psDevInfo->ui32TestSLRCount == 0)) - { - bTestSLRAdd3DCheck = IMG_TRUE; - psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; - } - - if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) - { - if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " - "to 3D fence but no update 3D timeline provided", __func__)); - } - else - { - SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, - iUpdate3DTimeline, - hTestSLRTmpFence, - "TestSLRCheck", - &psDummySyncCheckpoint); - PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " - "checkpoints (psDummySyncCheckpoint=<%p>)", - __func__, (void*)psDummySyncCheckpoint)); - SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, - 1, - &psDummySyncCheckpoint); - if (!pauiClient3DFenceUFOAddress) - { - pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; - } - - if (ui32Client3DFenceCount == 0) - { - b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; - } - ui323DFenceCount++; - } - } -#endif /* defined(SUPPORT_VALIDATION) */ /* * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, @@ -3818,7 +4048,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, __func__, ui32TAFenceCount, ui32TAUpdateCount)); RGXCmdHelperInitCmdCCB_CommandSize( - psDevInfo, + psDevInfo, ui64FBSCEntryMask, ui32TAFenceCount, ui32TAUpdateCount, @@ -3842,7 +4072,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui323DFenceCount)); RGXCmdHelperInitCmdCCB_CommandSize( - psDevInfo, + psDevInfo, 0, /* empty ui64FBSCEntryMask it is assumed that PRs should * not invalidate FBSC */ ui323DFenceCount, @@ -3862,7 +4092,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32PRUpdateCount)); RGXCmdHelperInitCmdCCB_CommandSize( - psDevInfo, + 
psDevInfo, 0, /* empty ui64FBSCEntryMask it is assumed that PRs should * not invalidate FBSC */ 0, @@ -3887,7 +4117,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, } RGXCmdHelperInitCmdCCB_CommandSize( - psDevInfo, + psDevInfo, ui64FBSCEntryMask, /* equals: [a] 0 if 3D is preceded by TA * [b] value from the MMU ctx otherwise */ bKickTA ? 0 : ui323DFenceCount, @@ -4004,9 +4234,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", __func__, (void*)pauiClient3DUpdateUFOAddress)); - eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, - ui32PRFenceSyncOffset, - &uiPRFenceUFOAddress); + eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress); if (unlikely(eError != PVRSRV_OK)) { @@ -4041,6 +4269,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, ui32BufferFenceSyncCheckpointCount, apsBufferFenceSyncCheckpoints); + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + apsBufferFenceSyncCheckpoints = NULL; + } if (!pauiClientTAFenceUFOAddress) { pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; @@ -4067,6 +4300,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, ui32BufferFenceSyncCheckpointCount, apsBufferFenceSyncCheckpoints); + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + apsBufferFenceSyncCheckpoints = NULL; + } if (!pauiClient3DFenceUFOAddress) { pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; @@ -4157,9 +4395,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) { - PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; - PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; - CHKPT_DBG((PVR_DBG_ERROR, "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", __func__, iCheckTAFence, iUpdateTATimeline)); @@ -4190,7 +4425,10 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32PDumpFlags); if (unlikely(eError != PVRSRV_OK)) { - PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[TA] failed (%d)", __func__, eError)); + PVR_DPF((PVR_DBG_ERROR, + "%s: SyncCheckpointCreateFence[TA] failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); goto fail_create_ta_fence; } @@ -4202,11 +4440,17 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue)); - /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ - pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); - CHKPT_DBG((PVR_DBG_ERROR, - "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", - __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); +#if defined(TA3D_CHECKPOINT_DEBUG) + { + PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; + + /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ + pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: 
pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", + __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); + } +#endif } /* Append the sync prim update for the TA timeline (if required) */ @@ -4256,7 +4500,10 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32PDumpFlags); if (unlikely(eError != PVRSRV_OK)) { - PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[3D] failed (%d)", __func__, eError)); + PVR_DPF((PVR_DBG_ERROR, + "%s: SyncCheckpointCreateFence[3D] failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); goto fail_create_3d_fence; } @@ -4268,11 +4515,17 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); - /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ - pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); - CHKPT_DBG((PVR_DBG_ERROR, - "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", - __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); +#if defined(TA3D_CHECKPOINT_DEBUG) + { + PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; + + /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ + pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", + __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); + } +#endif } /* Append the sync prim update for the 3D timeline (if required) */ @@ -4594,13 +4847,18 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, } #if defined(SUPPORT_WORKLOAD_ESTIMATION) - if (bKickTA || bKick3D || bAbort) + if ((!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) && (bKickTA || bKick3D || bAbort)) { sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices = ui32NumberOfIndices; sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs = ui32NumberOfMRTs; } +#else + PVR_UNREFERENCED_PARAMETER(ui32RenderTargetSize); + PVR_UNREFERENCED_PARAMETER(ui32NumberOfDrawCalls); + PVR_UNREFERENCED_PARAMETER(ui32NumberOfIndices); + PVR_UNREFERENCED_PARAMETER(ui32NumberOfMRTs); #endif /* Init and acquire to TA command if required */ @@ -4609,21 +4867,25 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Prepare workload estimation */ - WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, - &psRenderContext->sWorkEstData, - &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, - RGXFWIF_CCB_CMD_TYPE_GEOM, - &sWorkloadCharacteristics, - ui64DeadlineInus, - &sWorkloadKickDataTA); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psDevInfo)) + { + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, + RGXFWIF_CCB_CMD_TYPE_GEOM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTA); + } #endif /* Init the TA command helper */ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount)); - 
RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(psTAData->psServerCommonContext), ui32ClientTAFenceCount, pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, @@ -4649,9 +4911,12 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, pasTACmdHelperData); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following is used to determine the offset of the command header containing - the workload estimation data so that can be accessed when the KCCB is read */ - ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); + } #endif eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); @@ -4684,7 +4949,8 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", __func__, ui32Client3DFenceCount)); - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), pauiClient3DFenceUFOAddress, NULL, @@ -4724,12 +4990,14 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ui32ClientPRUpdateValueCount, (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue)); } + if (!bUseCombined3DAnd3DPR) { CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", __func__, ui32ClientPRUpdateCount)); - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), 0, NULL, NULL, @@ -4758,18 +5026,22 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D; #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Prepare workload estimation */ - WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, - &psRenderContext->sWorkEstData, - &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, - e3DCmdType, - &sWorkloadCharacteristics, - ui64DeadlineInus, - &sWorkloadKickData3D); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, + e3DCmdType, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickData3D); + } #endif /* Init the 3D command helper */ - RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + RGXCmdHelperInitCmdCCB_OtherData(psDevInfo, + FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ bKickTA ? 
NULL : pauiClient3DFenceUFOAddress, NULL, @@ -4795,10 +5067,13 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, &pas3DCmdHelperData[ui323DCmdCount++]); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* The following are used to determine the offset of the command header containing the workload estimation - data so that can be accessed when the KCCB is read */ - ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); - ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psRenderContext->psDeviceNode)) + { + /* The following are used to determine the offset of the command header containing the workload estimation + data so that can be accessed when the KCCB is read */ + ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); + ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); + } #endif } @@ -4825,6 +5100,40 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, } } + if (ui32TACmdCount) + { + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, + &sTACmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_GEOM, + bKickTA, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taattachcleanupctls; + } + } + + if (ui323DCmdCount) + { + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, + &s3DCmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_3D, + bKick3D, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dattachcleanupctls; + } + } + /* We should acquire the space in the kernel CCB here as after this point we release the commands which will take operations on server syncs @@ -4834,6 +5143,21 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, /* Everything is ready to go now, release the commands */ + + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + goto fail_acquirepowerlock; + } + if (ui32TACmdCount) { ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); @@ -4843,17 +5167,20 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); - - /* This checks if the command would wrap around at the end of the CCB and therefore would start at an - offset of 0 rather than the current command offset */ - if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) - { - ui32TACommandOffset = ui32TACmdOffset; - } - else + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - 
ui32TACommandOffset = 0; + ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and therefore would start at an + offset of 0 rather than the current command offset */ + if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) + { + ui32TACommandOffset = ui32TACmdOffset; + } + else + { + ui32TACommandOffset = 0; + } } #endif } @@ -4867,15 +5194,18 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); - - if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - ui323DCommandOffset = ui323DCmdOffset; - } - else - { - ui323DCommandOffset = 0; + ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + + if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) + { + ui323DCommandOffset = ui323DCmdOffset; + } + else + { + ui323DCommandOffset = 0; + } } #endif } @@ -4891,23 +5221,12 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Add the Workload data into the KCCB kick */ - sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; -#endif - - eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, - &sTACmdKickData.ui32NumCleanupCtl, - RGXFWIF_DM_GEOM, - bKickTA, - psKMHWRTDataSet, - psZSBuffer, - psMSAAScratchBuffer); - if (unlikely(eError != PVRSRV_OK)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", - __func__, eError)); - goto fail_taattachcleanupctls; + /* Add the Workload data into the KCCB kick */ + sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; } +#endif if (psGeomCmdShared) { @@ -4940,9 +5259,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + eError2 = RGXScheduleCommandWithoutPowerLock(psRenderContext->psDeviceNode->pvDevice, RGXFWIF_DM_GEOM, &sTAKCCBCmd, ui32PDumpFlags); @@ -4951,17 +5270,15 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); } if (eError2 != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. 
(0x%x)", eError2)); - if (eError == PVRSRV_OK) - { - eError = eError2; - } - goto fail_taacquirecmd; + /* Mark the error and bail out */ + eError = eError2; + goto fail_tasubmitcmd; } PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode, @@ -4982,23 +5299,12 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, /* Add the Workload data into the KCCB kick */ #if defined(SUPPORT_WORKLOAD_ESTIMATION) - /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ - s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; -#endif - - eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, - &s3DCmdKickData.ui32NumCleanupCtl, - RGXFWIF_DM_3D, - bKick3D, - psKMHWRTDataSet, - psZSBuffer, - psMSAAScratchBuffer); - if (unlikely(eError != PVRSRV_OK)) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevInfo->psDeviceNode)) { - CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", - __func__, eError)); - goto fail_3dattachcleanupctls; + /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; } +#endif if (ps3DCmdShared) { @@ -5038,9 +5344,9 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; } - LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { - eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + eError2 = RGXScheduleCommandWithoutPowerLock(psRenderContext->psDeviceNode->pvDevice, RGXFWIF_DM_3D, &s3DKCCBCmd, ui32PDumpFlags); @@ -5049,7 +5355,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); - } END_LOOP_UNTIL_TIMEOUT(); + } END_LOOP_UNTIL_TIMEOUT_US(); if (eError2 != PVRSRV_OK) { @@ -5058,7 +5364,7 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { eError = eError2; } - goto fail_3dacquirecmd; + goto fail_3dsubmitcmd; } PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode, @@ -5075,9 +5381,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); - goto fail_3dacquirecmd; + goto fail_3dsubmitcmd; } + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + #if defined(NO_HARDWARE) /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ if (psUpdateTASyncCheckpoint) @@ -5184,16 +5492,14 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, OSFreeMem(s3DSyncData.paui32ClientUpdateValue); } -#if defined(SUPPORT_VALIDATION) - if (bTestSLRAdd3DCheck) - { - SyncCheckpointFree(psDummySyncCheckpoint); - } -#endif OSLockRelease(psRenderContext->hLock); return PVRSRV_OK; +fail_3dsubmitcmd: +fail_tasubmitcmd: + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); +fail_acquirepowerlock: fail_3dattachcleanupctls: fail_taattachcleanupctls: fail_3dacquirecmd: @@ -5240,8 +5546,11 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, * NOTE: 3D fence is always submitted, either via 3D or TA(PR). 
*/ #if defined(SUPPORT_BUFFER_SYNC) - SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount, - apsBufferFenceSyncCheckpoints); + if (apsBufferFenceSyncCheckpoints) + { + SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + } #endif SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); fail_resolve_input_3d_fence: @@ -5267,12 +5576,6 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, { OSFreeMem(s3DSyncData.paui32ClientUpdateValue); } -#if defined(SUPPORT_VALIDATION) - if (bTestSLRAdd3DCheck) - { - SyncCheckpointFree(psDummySyncCheckpoint); - } -#endif #if defined(SUPPORT_BUFFER_SYNC) if (psBufferSyncData) { @@ -5288,6 +5591,56 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, return eError; } +PVRSRV_ERROR PVRSRVRGXSendZSStoreDisableKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_BOOL bDisableDepthStore, + IMG_BOOL bDisableStencilStore, + IMG_UINT32 ui32ExtJobRefToDisableZSStore) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sDisableZSStoreCmd = { 0 }; + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + sDisableZSStoreCmd.eCmdType = RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE; + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.bDisableZStore = bDisableDepthStore; + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.bDisableSStore = bDisableStencilStore; + sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.ui32ExtJobRefToDisableZSStore = ui32ExtJobRefToDisableZSStore; + + if (psRenderContext->ui32CleanupStatus & (RC_CLEANUP_TA_COMPLETE | RC_CLEANUP_3D_COMPLETE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: submit disable depth and stencil store command to render context that has been cleaned up (%u)", + __func__, PVRSRV_ERROR_INVALID_CONTEXT)); + return PVRSRV_ERROR_INVALID_CONTEXT; + } + + LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_3D, + &sDisableZSStoreCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT_US(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to submit disable depth and stencil store command (%u)", + __func__, + eError)); + } + + return eError; +} + PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE * psDeviceNode, RGX_SERVER_RENDER_CONTEXT *psRenderContext, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.h b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.h index 1ddd5aaeb3a3..c7cf0edfc108 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxta3d.h @@ -52,9 +52,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgx_fwif_resetframework.h" #include "sync_server.h" #include "connection_server.h" -#include "rgxdebug.h" +#include "rgxdebug_common.h" #include "pvr_notifier.h" -#include "ri_server.h" typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; typedef struct _RGX_FREELIST_ RGX_FREELIST; @@ -117,28 +116,28 @@ typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE; typedef struct _RGX_HWRTDATA_COMMON_COOKIE_ { - DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; - RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; - IMG_UINT32 ui32RefCount; - + DEVMEMINT_RESERVATION* psPMStateReservation; + DEVMEMINT_RESERVATION* psPMSecureStateReservation; + DEVMEMINT_RESERVATION* psPMMListsReservation; + IMG_UINT32 ui32RefCount; } RGX_HWRTDATA_COMMON_COOKIE; typedef struct _RGX_KM_HW_RT_DATASET_ { - RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; - /* RGX_RTDATA_CLEANUP_DATA */ /* RGXMKIF_NUM_RTDATAS */ - PVRSRV_DEVICE_NODE *psDeviceNode; + RGX_HWRTDATA_COMMON_COOKIE* psHWRTDataCommonCookie; + + PVRSRV_DEVICE_NODE *psDeviceNode; RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr; - DEVMEM_MEMDESC *psHWRTDataFwMemDesc; - DEVMEM_MEMDESC *psRTArrayFwMemDesc; - DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; + DEVMEM_MEMDESC *psHWRTDataFwMemDesc; + DEVMEM_MEMDESC *psRTArrayFwMemDesc; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; - RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; #if !defined(SUPPORT_SHADOW_FREELISTS) - DLLIST_NODE sNodeHWRTData; + DLLIST_NODE sNodeHWRTData; #endif } RGX_KM_HW_RT_DATASET; @@ -155,6 +154,8 @@ struct _RGX_FREELIST_ { PMR *psFreeListStatePMR; IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset; + DEVMEMINT_RESERVATION *psFreeListAndStateReservation; + /* Freelist config */ IMG_UINT32 ui32MaxFLPages; IMG_UINT32 ui32InitFLPages; @@ -186,6 +187,7 @@ struct _RGX_FREELIST_ { /* FW data structures */ DEVMEM_MEMDESC *psFWFreelistMemDesc; RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr; #if defined(SUPPORT_WORKLOAD_ESTIMATION) HASH_TABLE* psWorkloadHashTable; @@ -211,11 +213,10 @@ typedef struct { DEVMEMINT_RESERVATION *psReservation; PMR *psPMR; - DEVMEMINT_MAPPING *psMapping; - PVRSRV_MEMALLOCFLAGS_T uiMapFlags; IMG_UINT32 ui32ZSBufferID; IMG_UINT32 ui32RefCount; IMG_BOOL bOnDemand; + IMG_BOOL bIsBacked; IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */ IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */ @@ -232,46 +233,40 @@ typedef struct { /* Dump the physical pages of a freelist */ IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); - +PVRSRV_ERROR RGXCreateHWRTDataSet2(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + DEVMEMINT_RESERVATION *psPMMListsReservation, + DEVMEMINT_RESERVATION *psPMStateReservation, + DEVMEMINT_RESERVATION *psPMSecureStateReservation, + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); /* Create HWRTDataSet */ PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_DEV_VIRTADDR psVHeapTableDevVAddr, - IMG_DEV_VIRTADDR sPMDataDevVAddr[RGXMKIF_NUM_RTDATAS], - IMG_DEV_VIRTADDR sPMSecureDataDevVAddr[RGXMKIF_NUM_RTDATAS], - RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], - IMG_UINT32 ui32ScreenPixelMax, - IMG_UINT64 ui64PPPMultiSampleCtl, - IMG_UINT32 ui32TEStride, - 
IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], - IMG_UINT32 ui32TPCSize, - IMG_UINT32 ui32TEScreen, - IMG_UINT32 ui32TEAA, - IMG_UINT32 ui32TEMTILE1, - IMG_UINT32 ui32TEMTILE2, - IMG_UINT32 ui32RgnStride, - IMG_UINT32 ui32ISPMergeLowerX, - IMG_UINT32 ui32ISPMergeLowerY, - IMG_UINT32 ui32ISPMergeUpperX, - IMG_UINT32 ui32ISPMergeUpperY, - IMG_UINT32 ui32ISPMergeScaleX, - IMG_UINT32 ui32ISPMergeScaleY, - IMG_UINT16 ui16MaxRTs, - RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr[RGXMKIF_NUM_RTDATAS], + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr[RGXMKIF_NUM_RTDATAS], + RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], + IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); /* Destroy HWRTDataSet */ -PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKmHwRTDataSet); +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet); /* - RGXCreateZSBuffer + RGXCreateZSBufferKM */ PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, + PVRSRV_DEVICE_NODE *psDeviceNode, DEVMEMINT_RESERVATION *psReservation, PMR *psPMR, PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - RGX_ZSBUFFER_DATA **ppsZSBuffer); + RGX_ZSBUFFER_DATA **ppsZSBuffer); /* RGXDestroyZSBufferKM @@ -329,22 +324,17 @@ PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, PDLLIST_NODE pListHeader); /* Create free list */ -PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_HANDLE hMemCtxPrivData, - IMG_UINT32 ui32MaxFLPages, - IMG_UINT32 ui32InitFLPages, - IMG_UINT32 ui32GrowFLPages, - IMG_UINT32 ui32GrowParamThreshold, - RGX_FREELIST *psGlobalFreeList, - IMG_BOOL bCheckFreelist, - IMG_DEV_VIRTADDR sFreeListBaseDevVAddr, - IMG_DEV_VIRTADDR sFreeListStateDevVAddr, - PMR *psFreeListPMR, - IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, - PMR *psFreeListStatePMR, - IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset, - RGX_FREELIST **ppsFreeList); +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + DEVMEMINT_RESERVATION *psFreeListAndStateReservation, + RGX_FREELIST **ppsFreeList); /* Destroy free list */ PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); @@ -369,16 +359,21 @@ void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, @Description Server-side implementation of RGXCreateRenderContext - @Input pvDeviceNode - device node + @Input psConnection - + @Input psDeviceNode - device node @Input i32Priority - context priority + @Input ui32FrameworkCommandSize - framework command size + @Input pabyFrameworkCommand - ptr to framework command @Input hMemCtxPrivData - memory context private data + @Input ui32StaticRenderContextStateSize - size of fixed render state + @Input pStaticRenderContextState - ptr to fixed render state buffer @Input ui32PackedCCBSizeU8888 : ui8TACCBAllocSizeLog2 - TA CCB size ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow ui83DCCBAllocSizeLog2 - 3D CCB size ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow @Input ui32ContextFlags - flags which specify properties of the context - @Output ppsRenderContext - 
clean up data + @Output ppsRenderContext - @Return PVRSRV_ERROR @@ -479,6 +474,14 @@ PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, IMG_UINT64 ui64DeadlineInus); +PVRSRV_ERROR PVRSRVRGXSendZSStoreDisableKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_BOOL bDisableDepthStore, + IMG_BOOL bDisableStencilStore, + IMG_UINT32 ui32ExtJobRefToDisableZSStore); + + PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE * psDevNode, RGX_SERVER_RENDER_CONTEXT *psRenderContext, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Kbuild.mk index b0c853155ba9..a53db2eac5bb 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Kbuild.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Kbuild.mk @@ -39,6 +39,15 @@ # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### ########################################################################### +# Define used in code when no feature MASK or IDX flag can determine the +# PVR_ARCH build config of the KMD server. KMD Server either supports all +# Rogue cores, or all Volcanic cores, but not both. +ifeq ($(PVR_ARCH),rogue) + ccflags-y += -DPVR_ARCH_ROGUE +else ifeq ($(PVR_ARCH),volcanic) + ccflags-y += -DPVR_ARCH_VOLCANIC +endif + # Window system ccflags-y += -DWINDOW_SYSTEM=\"$(WINDOW_SYSTEM)\" @@ -68,7 +77,7 @@ SYSTEM_DIR := $(TOP)/services/system/$(PVR_SYSTEM) endif $(PVRSRV_MODNAME)-y += \ - services/server/env/linux/pvr_drm.o \ + services/server/env/linux/dkf_server.o \ services/server/env/linux/event.o \ services/server/env/linux/fwload.o \ services/server/env/linux/km_apphint.o \ @@ -79,13 +88,16 @@ $(PVRSRV_MODNAME)-y += \ services/server/env/linux/osconnection_server.o \ services/server/env/linux/physmem_osmem_linux.o \ services/server/env/linux/pmr_os.o \ + services/server/env/linux/pmr_env.o \ services/server/env/linux/pvr_bridge_k.o \ services/server/env/linux/pvr_debug.o \ + services/server/env/linux/pvr_drm.o \ services/server/env/linux/physmem_dmabuf.o \ services/server/common/devicemem_heapcfg.o \ services/shared/common/devicemem.o \ services/shared/common/devicemem_utils.o \ services/shared/common/hash.o \ + services/shared/common/hash_functions.o \ services/shared/common/ra.o \ services/shared/common/sync.o \ services/shared/common/mem_utils.o \ @@ -97,6 +109,8 @@ $(PVRSRV_MODNAME)-y += \ services/server/common/physheap.o \ services/server/common/physmem.o \ services/server/common/physmem_lma.o \ + services/server/common/physmem_ramem.o \ + services/server/common/physmem_osmem.o \ services/server/common/physmem_hostmem.o \ services/server/common/pmr.o \ services/server/common/power.o \ @@ -106,9 +120,6 @@ $(PVRSRV_MODNAME)-y += \ services/server/common/srvcore.o \ services/server/common/sync_checkpoint.o \ services/server/common/sync_server.o \ - services/shared/common/htbuffer.o \ - services/server/common/htbserver.o \ - services/server/common/htb_debug.o \ services/server/common/tlintern.o \ services/shared/common/tlclient.o \ services/server/common/tlserver.o \ @@ -120,7 +131,25 @@ $(PVRSRV_MODNAME)-y += \ services/server/common/info_page_km.o \ services/shared/common/pvrsrv_error.o \ services/server/common/debug_common.o \ - services/server/common/di_server.o + services/server/common/di_server.o \ + services/server/common/vmm_pvz_client.o \ + 
services/server/common/vmm_pvz_server.o \ + services/server/common/vz_vmm_pvz.o \ + services/server/common/vz_vmm_vm.o \ + services/system/$(PVR_ARCH)/common/vmm_type_$(VMM_TYPE).o + +ifeq ($(PVRSRV_ENABLE_DYNAMIC_PHYSHEAPS),1) + $(PVRSRV_MODNAME)-y += \ + services/server/common/physmem_dlm.o \ + services/server/common/physmem_ima.o +endif + +ifeq ($(PVRSRV_ENABLE_HTB),1) +$(PVRSRV_MODNAME)-y += \ + services/shared/common/htbuffer.o \ + services/server/common/htbserver.o \ + services/server/common/htb_debug.o +endif ifeq ($(SUPPORT_DMA_TRANSFER),1) $(PVRSRV_MODNAME)-y += \ @@ -165,19 +194,21 @@ endif ifeq ($(SUPPORT_RGX),1) $(PVRSRV_MODNAME)-y += \ services/server/devices/rgx_bridge_init.o \ - services/server/env/linux/pvr_gputrace.o \ services/server/devices/rgxfwdbg.o \ services/server/devices/rgxtimerquery.o \ services/server/devices/rgxccb.o \ - services/server/devices/$(PVR_ARCH_DEFS)/rgxdebug.o \ + services/server/devices/rgxdebug_common.o \ + services/server/devices/$(PVR_ARCH)/rgxdebug.o \ services/server/devices/rgxfwtrace_strings.o \ services/server/devices/$(PVR_ARCH)/rgxfwutils.o \ + services/server/devices/rgxfwcmnctx.o \ + services/server/devices/rgxfwriscv.o \ services/server/devices/$(PVR_ARCH)/rgxinit.o \ services/server/devices/rgxbvnc.o \ - services/server/devices/rgxkicksync.o \ + services/server/devices/rgxlayer_impl_common.o \ services/server/devices/$(PVR_ARCH)/rgxlayer_impl.o \ services/server/devices/rgxmem.o \ - services/server/devices/$(PVR_ARCH)/rgxmmuinit.o \ + services/server/devices/rgxmmuinit.o \ services/server/devices/rgxregconfig.o \ services/server/devices/$(PVR_ARCH)/rgxta3d.o \ services/server/devices/rgxsyncutils.o \ @@ -185,16 +216,26 @@ $(PVRSRV_MODNAME)-y += \ services/server/devices/rgxutils.o \ services/server/devices/rgxhwperf_common.o \ services/server/devices/$(PVR_ARCH)/rgxhwperf.o \ - services/server/devices/$(PVR_ARCH)/rgxpower.o \ + services/server/devices/rgxpower.o \ services/server/devices/$(PVR_ARCH)/rgxstartstop.o \ services/server/devices/rgxtimecorr.o \ services/server/devices/rgxcompute.o \ services/server/devices/$(PVR_ARCH)/rgxmulticore.o \ services/server/devices/rgxshader.o -ifeq ($(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD),1) +# add pvr_gputrace.o if any of these features are enabled +ifneq (, $(filter 1,$(PVRSRV_TRACE_ROGUE_EVENTS) $(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD))) +$(PVRSRV_MODNAME)-$(CONFIG_EVENT_TRACING) += services/server/env/linux/pvr_gputrace.o +endif + +ifeq ($(PVRSRV_ANDROID_TRACE_GPU_FREQ),1) $(PVRSRV_MODNAME)-y += \ - services/server/env/linux/pvr_gpuwork.o + services/server/env/linux/pvr_gpufreq.o +endif + +ifeq ($(SUPPORT_RGXKICKSYNC_BRIDGE),1) +$(PVRSRV_MODNAME)-y += \ + services/server/devices/rgxkicksync.o endif ifeq ($(SUPPORT_USC_BREAKPOINT),1) @@ -262,6 +303,10 @@ $(PVRSRV_MODNAME)-y += \ ifeq ($(SUPPORT_RGX),1) $(PVRSRV_MODNAME)-y += \ services/server/devices/$(PVR_ARCH)/rgxpdump.o +ifeq ($(PDUMP),1) +$(PVRSRV_MODNAME)-y += \ + services/server/devices/rgxpdump_common.o +endif endif endif @@ -312,9 +357,8 @@ $(PVRSRV_MODNAME)-y += \ services/server/env/linux/pvr_sync_file.o \ services/server/env/linux/pvr_counting_timeline.o \ services/server/env/linux/pvr_sw_fence.o \ + services/server/env/linux/pvr_export_fence.o \ services/server/env/linux/pvr_fence.o -else -$(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_sync2.o endif else ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1) @@ -326,7 +370,13 @@ endif ifeq ($(SUPPORT_LINUX_DVFS),1) $(PVRSRV_MODNAME)-y += \ - services/server/env/linux/pvr_dvfs_device.o + 
services/server/env/linux/pvr_dvfs_device.o \ + services/server/env/linux/pvr_dvfs_common.o +else + ifeq ($(SUPPORT_PDVFS),1) +$(PVRSRV_MODNAME)-y += \ + services/server/env/linux/pvr_dvfs_common.o + endif endif ifeq ($(PVRSRV_ENABLE_PVR_ION_STATS),1) @@ -370,7 +420,7 @@ $(PVRSRV_MODNAME)-y += \ services/server/devices/rgxfwimageutils.o ifeq ($(PVR_ARCH),rogue) $(PVRSRV_MODNAME)-y += \ - services/shared/devices/$(PVR_ARCH_DEFS)/rgx_hwperf_table.o + services/shared/devices/$(PVR_ARCH)/rgx_hwperf_table.o endif endif @@ -416,11 +466,15 @@ ccflags-y += \ -I$(bridge_base)/srvcore_bridge \ -I$(bridge_base)/sync_bridge \ -I$(bridge_base)/synctracking_bridge \ - -I$(bridge_base)/htbuffer_bridge \ -I$(bridge_base)/pvrtl_bridge \ -I$(bridge_base)/cache_bridge \ -I$(bridge_base)/dmabuf_bridge +ifeq ($(PVRSRV_ENABLE_HTB),1) +ccflags-y += \ + -I$(bridge_base)/htbuffer_bridge +endif + ifeq ($(SUPPORT_DMA_TRANSFER),1) ccflags-y += \ -I$(bridge_base)/dma_bridge @@ -428,9 +482,9 @@ endif ifeq ($(SUPPORT_RGX),1) ccflags-y += \ + -I$(bridge_base)/rgxtq2_bridge \ -I$(bridge_base)/rgxta3d_bridge \ -I$(bridge_base)/rgxhwperf_bridge \ - -I$(bridge_base)/rgxkicksync_bridge \ -I$(bridge_base)/rgxcmp_bridge \ -I$(bridge_base)/rgxregconfig_bridge \ -I$(bridge_base)/rgxtimerquery_bridge \ @@ -443,15 +497,14 @@ ifeq ($(PVR_ARCH),rogue) ccflags-y += \ -I$(bridge_base)/rgxtq_bridge endif -# Oceanic does not support TDM -ifneq ($(PVR_ARCH_DEFS),oceanic) -ccflags-y += \ - -I$(bridge_base)/rgxtq2_bridge -endif ifeq ($(SUPPORT_USC_BREAKPOINT),1) ccflags-y += \ -I$(bridge_base)/rgxbreakpoint_bridge endif +ifeq ($(SUPPORT_RGXKICKSYNC_BRIDGE),1) +ccflags-y += \ + -I$(bridge_base)/rgxkicksync_bridge +endif endif $(PVRSRV_MODNAME)-y += \ @@ -459,11 +512,15 @@ $(PVRSRV_MODNAME)-y += \ generated/$(PVR_ARCH)/cmm_bridge/server_cmm_bridge.o \ generated/$(PVR_ARCH)/srvcore_bridge/server_srvcore_bridge.o \ generated/$(PVR_ARCH)/sync_bridge/server_sync_bridge.o \ - generated/$(PVR_ARCH)/htbuffer_bridge/server_htbuffer_bridge.o \ generated/$(PVR_ARCH)/pvrtl_bridge/server_pvrtl_bridge.o \ generated/$(PVR_ARCH)/cache_bridge/server_cache_bridge.o \ generated/$(PVR_ARCH)/dmabuf_bridge/server_dmabuf_bridge.o +ifeq ($(PVRSRV_ENABLE_HTB),1) +$(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/htbuffer_bridge/server_htbuffer_bridge.o +endif + ifeq ($(SUPPORT_DMA_TRANSFER),1) $(PVRSRV_MODNAME)-y += \ generated/$(PVR_ARCH)/dma_bridge/server_dma_bridge.o @@ -471,9 +528,9 @@ endif ifeq ($(SUPPORT_RGX),1) $(PVRSRV_MODNAME)-y += \ + generated/$(PVR_ARCH)/rgxtq2_bridge/server_rgxtq2_bridge.o \ generated/$(PVR_ARCH)/rgxta3d_bridge/server_rgxta3d_bridge.o \ generated/$(PVR_ARCH)/rgxhwperf_bridge/server_rgxhwperf_bridge.o \ - generated/$(PVR_ARCH)/rgxkicksync_bridge/server_rgxkicksync_bridge.o \ generated/$(PVR_ARCH)/rgxcmp_bridge/server_rgxcmp_bridge.o \ generated/$(PVR_ARCH)/rgxregconfig_bridge/server_rgxregconfig_bridge.o \ generated/$(PVR_ARCH)/rgxtimerquery_bridge/server_rgxtimerquery_bridge.o \ @@ -486,15 +543,14 @@ ifeq ($(PVR_ARCH),rogue) $(PVRSRV_MODNAME)-y += \ generated/$(PVR_ARCH)/rgxtq_bridge/server_rgxtq_bridge.o endif -# Oceanic does not support TDM -ifneq ($(PVR_ARCH_DEFS),oceanic) -$(PVRSRV_MODNAME)-y += \ - generated/$(PVR_ARCH)/rgxtq2_bridge/server_rgxtq2_bridge.o -endif ifeq ($(SUPPORT_USC_BREAKPOINT),1) $(PVRSRV_MODNAME)-y += \ generated/$(PVR_ARCH)/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.o endif +ifeq ($(SUPPORT_RGXKICKSYNC_BRIDGE),1) +$(PVRSRV_MODNAME)-y += \ + 
generated/$(PVR_ARCH)/rgxkicksync_bridge/server_rgxkicksync_bridge.o +endif endif ifeq ($(SUPPORT_WRAP_EXTMEM),1) @@ -541,10 +597,8 @@ ifeq ($(SUPPORT_VALIDATION),1) ccflags-y += -I$(bridge_base)/validation_bridge $(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/validation_bridge/server_validation_bridge.o $(PVRSRV_MODNAME)-y += services/server/common/validation.o -ifeq ($(PVR_ARCH),volcanic) $(PVRSRV_MODNAME)-y += services/server/common/validation_soc.o endif -endif ifeq ($(PVR_TESTING_UTILS),1) ccflags-y += -I$(bridge_base)/tutils_bridge @@ -576,10 +630,14 @@ endif $(PVRSRV_MODNAME)-y += \ generated/$(PVR_ARCH)/mm_bridge/client_mm_direct_bridge.o \ generated/$(PVR_ARCH)/sync_bridge/client_sync_direct_bridge.o \ - generated/$(PVR_ARCH)/htbuffer_bridge/client_htbuffer_direct_bridge.o \ generated/$(PVR_ARCH)/cache_bridge/client_cache_direct_bridge.o \ generated/$(PVR_ARCH)/pvrtl_bridge/client_pvrtl_direct_bridge.o +ifeq ($(PVRSRV_ENABLE_HTB),1) +$(PVRSRV_MODNAME)-y +=\ + generated/$(PVR_ARCH)/htbuffer_bridge/client_htbuffer_direct_bridge.o +endif + ifeq ($(PDUMP),1) $(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/pdumpmm_bridge/client_pdumpmm_direct_bridge.o endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Linux.mk b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Linux.mk index f18461d91ea1..caf3a077a3a7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Linux.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/Linux.mk @@ -39,12 +39,8 @@ # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### ########################################################################### -ifneq ($(SERVICES_SC),1) - modules := srvkm srvkm_type := kernel_module srvkm_target := $(PVRSRV_MODNAME).ko srvkm_makefile := $(THIS_DIR)/Kbuild.mk - -endif # SERVICES_SC diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/allocmem.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/allocmem.c index 15e8a5a46344..817680376d79 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/allocmem.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/allocmem.c @@ -49,9 +49,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "allocmem.h" #include "pvr_debug.h" #include "process_stats.h" -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -#include "pvrsrv.h" -#endif #include "osfunc.h" @@ -85,10 +82,6 @@ static IMG_UINT32 g_ui32kmallocThreshold = PVR_LINUX_KMALLOC_ALLOCATION_THRESHOL /* Spinlock used so that the global variables above may not be modified by more than 1 thread at a time */ static DEFINE_SPINLOCK(kmalloc_lock); -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -static DEFINE_SPINLOCK(kmalloc_leak_lock); -static IMG_UINT32 g_ui32kmallocLeakCounter = 0; -#endif static inline void OSTryDecreaseKmallocThreshold(void) { @@ -181,12 +174,12 @@ static inline void *_pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, pvAddr, sCpuPAddr, - ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), + PVR_ALIGN(ui32Size, PAGE_SIZE), OSGetCurrentClientProcessIDKM() DEBUG_MEMSTATS_ARGS); #else PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, - ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), + PVR_ALIGN(ui32Size, PAGE_SIZE), (IMG_UINT64)(uintptr_t) pvAddr, OSGetCurrentClientProcessIDKM()); #endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ @@ -299,32 +292,6 @@ void *(OSAllocZMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) */ void (OSFreeMem)(void *pvMem) { -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) - unsigned long flags; - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - - if (psPVRSRVData) - { - IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; - - spin_lock_irqsave(&kmalloc_leak_lock, flags); - - g_ui32kmallocLeakCounter++; - if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) - { - g_ui32kmallocLeakCounter = 0; - spin_unlock_irqrestore(&kmalloc_leak_lock, flags); - - PVR_DPF((PVR_DBG_WARNING, - "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", - __func__, - pvMem)); - return; - } - - spin_unlock_irqrestore(&kmalloc_leak_lock, flags); - } -#endif if (pvMem != NULL) { pvMem = _pvr_alloc_stats_remove(pvMem); @@ -396,32 +363,6 @@ void *OSAllocZMemNoStats(IMG_UINT32 ui32Size) */ void (OSFreeMemNoStats)(void *pvMem) { -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) - unsigned long flags; - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - - if (psPVRSRVData) - { - IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; - - spin_lock_irqsave(&kmalloc_leak_lock, flags); - - g_ui32kmallocLeakCounter++; - if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) - { - g_ui32kmallocLeakCounter = 0; - spin_unlock_irqrestore(&kmalloc_leak_lock, flags); - - PVR_DPF((PVR_DBG_WARNING, - "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", - __func__, - pvMem)); - return; - } - - spin_unlock_irqrestore(&kmalloc_leak_lock, flags); - } -#endif if (pvMem != NULL) { if (!is_vmalloc_addr(pvMem)) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.c new file mode 100644 index 000000000000..6136b627d9ae --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.c @@ -0,0 +1,409 @@ +/*************************************************************************/ /*! +@File dkf_server.c +@Title DRM Key Framework support routines. +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "dkf_server.h" +#include "img_types.h" +#include "device.h" +#include "pvrsrv_error.h" +#include "dkp_impl.h" +#include "osfunc.h" +#include "pvr_debug.h" + +#if defined(SUPPORT_LINUX_FDINFO) +typedef struct DKF_REGISTERED_DKP_TAG +{ + IMG_HANDLE hPrivHandle; /*!< Private data - can be NULL */ + const IMG_CHAR *pszDKPName; /*!< DKP provider name */ + IMG_PID pid; /*!< Process-ID - can be 0 */ + IMG_UINT32 uiReserved1; /*!< Reserved field - padding */ + DKP_PFN_SHOW *psDKPShowPfn; /*!< DKP Callback function */ + DLLIST_NODE sDKFEntryNode; /*!< List of DKP entries */ + + DKP_CONNECTION_FLAGS ui32Filter; /*!< The types of connection to output to */ +} DKF_REGISTERED_DKP; + +/* Global sentinel to track all allocated DKP callback key/value pairs */ +typedef struct DKF_TAG +{ + POS_LOCK hDKFListLock; /*!< Lock for accessing sDKFListNode */ + IMG_UINT32 ui32NumEntries; /*!< Number of registered DKP entries */ + IMG_UINT32 uiReserved1; /*!< Reserved field - padding */ + DLLIST_NODE sDKFListNode; /*!< Head of the DKF_REGISTERED_DKP linked list */ + DKF_VPRINTF_FUNC *pfnPrint; /*!< Default printf-style output fn */ + void *pvPrintArg1; /*!< First arg for pfnPrint */ +} DKF; + +static DKF *gpsDKF; + +static_assert(DKF_CONNECTION_FLAG_INVALID == DKP_CONNECTION_FLAG_INVALID, "DKF and DKP INVALID connection flags do not match"); +static_assert(DKF_CONNECTION_FLAG_SYNC == DKP_CONNECTION_FLAG_SYNC, "DKF and DKP SYNC connection flags do not match"); +static_assert(DKF_CONNECTION_FLAG_SERVICES == DKP_CONNECTION_FLAG_SERVICES, "DKF and DKP SERVICES connection flags do not match"); + +PVRSRV_ERROR PVRDKFInit(void) +{ + DKF *psDKF; + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + + if (gpsDKF != NULL) + { + PVR_DPF((PVR_DBG_WARNING, "%s: gpsDKF = %p, NULL expected", + __func__, gpsDKF)); + + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s called", __func__)); + + psDKF = OSAllocZMemNoStats(sizeof(*psDKF)); + PVR_GOTO_IF_NOMEM(psDKF, eError, Error); + + dllist_init(&psDKF->sDKFListNode); + eError = OSLockCreate(&psDKF->hDKFListLock); + PVR_GOTO_IF_ERROR(eError, ErrorFree); + + gpsDKF = psDKF; + + return PVRSRV_OK; + +ErrorFree: + OSFreeMemNoStats(psDKF); + /* fallthrough */ + +Error: + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + return eError; +} + + +void PVRDKFDeInit(void) +{ + IMG_UINT32 uiNumFreed = 0U; + + PVR_DPF((PVR_DBG_MESSAGE, "%s called", __func__)); + + if (gpsDKF == NULL) + { + return; + } + + /* Ensure we leave no data allocated. The DKP instances should have + * been cleaned up by their module deInit processing. Handle badly + * behaved clients here. 
+ */ + if (gpsDKF->ui32NumEntries > 0) + { + DLLIST_NODE *psThis, *psNext; + + PVR_DPF((PVR_DBG_ERROR, + "%s: Have %u un-freed allocations remaining", __func__, + gpsDKF->ui32NumEntries)); + + OSLockAcquire(gpsDKF->hDKFListLock); + + dllist_foreach_node(&gpsDKF->sDKFListNode, psThis, psNext) + { + DKF_REGISTERED_DKP *psEntry = IMG_CONTAINER_OF(psThis, + DKF_REGISTERED_DKP, + sDKFEntryNode); + + dllist_remove_node(&psEntry->sDKFEntryNode); + + uiNumFreed++; + + OSFreeMemNoStats(psEntry); + } + + if (uiNumFreed != gpsDKF->ui32NumEntries) + { + PVR_DPF((PVR_DBG_ERROR, "Could only free %u out of %u", + uiNumFreed, gpsDKF->ui32NumEntries)); + } + + dllist_remove_node(&gpsDKF->sDKFListNode); + + OSLockRelease(gpsDKF->hDKFListLock); + } + + OSLockDestroy(gpsDKF->hDKFListLock); + + OSFreeMemNoStats(gpsDKF); + + gpsDKF = NULL; +} + +void PVRDKFTraverse(DKF_VPRINTF_FUNC *pfnPrint, + void *pvArg, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_PID pid, + DKF_CONNECTION_FLAGS ui32ConnectionType) +{ + PDLLIST_NODE pNext, pNode; + + PVR_ASSERT(gpsDKF != NULL); + PVR_ASSERT(pfnPrint != NULL); + PVR_ASSERT(ui32ConnectionType != DKF_CONNECTION_FLAG_INVALID); + + OSLockAcquire(gpsDKF->hDKFListLock); + + gpsDKF->pfnPrint = pfnPrint; + gpsDKF->pvPrintArg1 = pvArg; + + if (dllist_is_empty(&gpsDKF->sDKFListNode)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: No DKPs registered", __func__)); + } + else + { + dllist_foreach_node(&gpsDKF->sDKFListNode, pNode, pNext) + { + DKF_REGISTERED_DKP *psEntry = IMG_CONTAINER_OF(pNode, + DKF_REGISTERED_DKP, + sDKFEntryNode); + + if (psEntry->psDKPShowPfn != NULL && + BITMASK_ANY(psEntry->ui32Filter, ui32ConnectionType)) + { + psEntry->psDKPShowPfn(psDevNode, pid, + psEntry->hPrivHandle); + } + } + } + + OSLockRelease(gpsDKF->hDKFListLock); +} + +/* + * Wrapper function to display data using preconfigured DKF-specific output + * function. + */ +void PVRDKPOutput(IMG_HANDLE hPrivData, const char *fmt, ...) +{ + DKF_REGISTERED_DKP *psDKFEntry = (DKF_REGISTERED_DKP *)hPrivData; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list Arglist; + DLLIST_NODE *pNode, *pNext; + IMG_BOOL bFound = IMG_FALSE; + + if (psDKFEntry == NULL || gpsDKF == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "%s: NULL DKF entry found (%p, %p)", + __func__, psDKFEntry, gpsDKF)); + return; + } + + PVR_ASSERT(gpsDKF->pfnPrint != NULL); + + /* validate that this is a legitimate output function reference */ + + dllist_foreach_node(&gpsDKF->sDKFListNode, pNode, pNext) + { + DKF_REGISTERED_DKP *psEntry = IMG_CONTAINER_OF(pNode, + DKF_REGISTERED_DKP, + sDKFEntryNode); + + if (psEntry == psDKFEntry) + { + bFound = IMG_TRUE; + break; + } + } + + if (!bFound) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Handle %p not found.", __func__, + hPrivData)); + return; + } + + OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, "%s", fmt); + + va_start(Arglist, fmt); + gpsDKF->pfnPrint(gpsDKF->pvPrintArg1, szBuffer, &Arglist); + va_end(Arglist); +} + +PVRSRV_ERROR PVRSRVRegisterDKP(IMG_HANDLE hPrivData, + const char *pszDKPName, + DKP_PFN_SHOW *psShowPfn, + DKP_CONNECTION_FLAGS ui32Filter, + PPVRDKF_DKP_HANDLE phDkpHandle) +{ + DKF_REGISTERED_DKP *psDKFEntry; /* New entry for this DKP */ + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + + /* Check for a NULL argument. Nothing to allocate if we are not provided + * a location to store the DkpHandle reference. 
+ */ + if (phDkpHandle == NULL || ui32Filter == DKF_CONNECTION_FLAG_INVALID) + { + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_WARNING, "%s(%p, %s, %p, %p) Called", __func__, + hPrivData, pszDKPName, psShowPfn, + phDkpHandle)); +#endif + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDKFEntry = OSAllocZMemNoStats(sizeof(*psDKFEntry)); + PVR_GOTO_IF_NOMEM(psDKFEntry, eError, Error); + + OSLockAcquire(gpsDKF->hDKFListLock); + + psDKFEntry->hPrivHandle = hPrivData; + psDKFEntry->pszDKPName = pszDKPName; + psDKFEntry->psDKPShowPfn = psShowPfn; + psDKFEntry->ui32Filter = ui32Filter; + dllist_init(&psDKFEntry->sDKFEntryNode); + + /* + * Append the new entry to the end of the gpsDKF list-head. Return a + * reference to the new entry to the caller. + */ + dllist_add_to_tail(&gpsDKF->sDKFListNode, &psDKFEntry->sDKFEntryNode); + + gpsDKF->ui32NumEntries++; + + OSLockRelease(gpsDKF->hDKFListLock); + + *phDkpHandle = psDKFEntry; + + return PVRSRV_OK; + + +Error: + PVR_DPF((PVR_DBG_ERROR, "%s: Error: '%s'", __func__, + PVRSRVGetErrorString(eError))); + + return eError; +} + +PVRSRV_ERROR PVRSRVUnRegisterDKP(IMG_HANDLE hPrivData, PVRDKF_DKP_HANDLE hDkpHandle) +{ + DKF_REGISTERED_DKP *psDKFEntry = (DKF_REGISTERED_DKP *)hDkpHandle; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDKFEntry) + { +#if defined(DEBUG) + if (psDKFEntry->hPrivHandle == hPrivData) + { + PVR_DPF((PVR_DBG_VERBOSE, "%s: Matched %p private handle", + __func__, hDkpHandle)); + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, + "%s: Did not find match (%p. vs %p), freeing anyway", + __func__, hPrivData, psDKFEntry->hPrivHandle)); + } +#endif /* DEBUG */ + + OSLockAcquire(gpsDKF->hDKFListLock); + + dllist_remove_node(&psDKFEntry->sDKFEntryNode); + + gpsDKF->ui32NumEntries--; + + OSLockRelease(gpsDKF->hDKFListLock); + + OSFreeMemNoStats(psDKFEntry); + + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} +#else + +/* Stub routines for earlier kernel versions */ + +PVRSRV_ERROR PVRDKFInit(void) +{ + return PVRSRV_OK; +} + +void PVRDKFDeInit(void) +{ +} + +void PVRDKFTraverse(DKF_VPRINTF_FUNC *pfnPrint, + void *pvArg, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_PID pid, + IMG_UINT32 ui32ConnectionType) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(pid); +} + +void PVRDKPOutput(IMG_HANDLE hPrivData, const char *fmt, ...) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(fmt); +} + +PVRSRV_ERROR PVRSRVRegisterDKP(IMG_HANDLE hPrivData, const char *pszDKPName, + DKP_PFN_SHOW *psShowPfn, + DKP_CONNECTION_FLAGS ui32Filter, + PPVRDKF_DKP_HANDLE phDkpHandle) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(pszDKPName); + PVR_UNREFERENCED_PARAMETER(psShowPfn); + PVR_UNREFERENCED_PARAMETER(phDkpHandle); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVUnRegisterDKP(IMG_HANDLE hPrivData, PVRDKF_DKP_HANDLE hDkpHandle) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(hDkpHandle); + + return PVRSRV_OK; +} +#endif /* SUPPORT_LINUX_FDINFO */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.h new file mode 100644 index 000000000000..304d9bc71b60 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/dkf_server.h @@ -0,0 +1,103 @@ +/**************************************************************************/ /*! 
+@File dkf_server.h +@Title Functions for supporting the DRM Key Framework +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(DKF_SERVER_H) +#define DKF_SERVER_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +#if defined(SUPPORT_LINUX_FDINFO) + +#include +typedef void (DKF_VPRINTF_FUNC)(struct drm_printer *p, const char *fmt, va_list *va) __printf(2, 0); + +#else /* !defined(SUPPORT_LINUX_FDINFO) */ +typedef void (DKF_VPRINTF_FUNC)(void *p, const char *fmt, ...) __printf(2, 3); + +#endif /* defined(SUPPORT_LINUX_FDINFO) */ +struct _PVRSRV_DEVICE_NODE_; + +typedef IMG_UINT32 DKF_CONNECTION_FLAGS; + +#define DKF_CONNECTION_FLAG_SYNC BIT(0) +#define DKF_CONNECTION_FLAG_SERVICES BIT(1) + +#define DKF_CONNECTION_FLAG_INVALID IMG_UINT32_C(0) + +/*! @Function PVRDKFTraverse + * + * @Description + * Outputs the DKP data associated with the given device node's + * framework entries. + * + * @Input pfnPrint The print function callback to be used to output. + * @Input pvArg Print function first argument. + * @Input psDevNode Device node associated with fdinfo owner. + * @Input pid Process ID of the process owning the FD the fdinfo + * file relates to. + * @Input ui32ConnectionType A value indicating the PVR connection type + * (sync or services). 
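The DKF_VPRINTF_FUNC typedef above is shaped so that, when SUPPORT_LINUX_FDINFO is enabled, the output callback can forward straight to the DRM print helpers used by the fdinfo path. A minimal sketch of a compatible callback (illustrative, not part of this patch), assuming the drm_printer comes from the DRM core's show_fdinfo hook:

#include <drm/drm_print.h>

/* Matches the DKF_VPRINTF_FUNC shape: forward the pre-formatted message and
 * its va_list to the drm_printer handed to the driver's fdinfo show hook. */
static void dkf_drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
{
        drm_vprintf(p, fmt, va);
}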
+ */ +void PVRDKFTraverse(DKF_VPRINTF_FUNC *pfnPrint, + void *pvArg, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_PID pid, + DKF_CONNECTION_FLAGS ui32ConnectionType); + +/* @Function PVRDKFInit + * + * @Description + * Initialises the DKF infrastructure for subsequent usage by the PVR system. + * + * @Returns PVRSRV_ERROR. + */ +PVRSRV_ERROR PVRDKFInit(void); + +/* @Function PVRDKFDeInit + * + * @Description + * Removes and frees all associated system-specific DKF meta-data. + */ +void PVRDKFDeInit(void); + +#endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/env_connection.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/env_connection.h index 2a6c7d05c412..d2e4f9743370 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/env_connection.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/env_connection.h @@ -58,9 +58,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "allocmem.h" #endif +struct drm_file; + typedef struct _ENV_CONNECTION_PRIVATE_DATA_ { PVRSRV_DEVICE_NODE *psDevNode; + struct drm_file *psDRMFile; } ENV_CONNECTION_PRIVATE_DATA; #if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) @@ -79,6 +82,7 @@ typedef struct _ENV_CONNECTION_DATA_ pid_t owner; PVRSRV_DEVICE_NODE *psDevNode; + struct drm_file *psDRMFile; #if defined(SUPPORT_NATIVE_FENCE_SYNC) void *pvPvrSyncPrivateData; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/event.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/event.c index aec0fc8a02dd..98b029c926df 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/event.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/event.c @@ -372,9 +372,11 @@ PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, DEFINE_WAIT(sWait); - PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT*)hOSEventObject; PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; + PVR_ASSERT(psLinuxEventObjectList != NULL); + /* Check if the driver is good shape */ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) { @@ -460,7 +462,7 @@ PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, OSLockRelease(psLinuxEventObject->hLock); #endif - if (signal_pending(current)) + if (signal_pending(current) && test_tsk_thread_flag(current, TIF_SIGPENDING)) { return PVRSRV_ERROR_INTERRUPTED; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/fwload.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/fwload.c index 35e52af56a2e..8c597b3c2e28 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/fwload.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/fwload.c @@ -179,19 +179,23 @@ OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, IMG_INT32 res; PVRSRV_ERROR eError; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)) + res = firmware_request_nowarn(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice); +#else res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice); +#endif if (res != 0) { release_firmware(psFW); if (res == -ENOENT) { - PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not found (%d)", + PVR_DPF((PVR_DBG_WARNING, "%s: requested firmware('%s') not found (%d)", __func__, 
pszBVNCString, res)); eError = PVRSRV_ERROR_NOT_FOUND; } else { - PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not ready (%d)", + PVR_DPF((PVR_DBG_WARNING, "%s: requested firmware('%s') not ready (%d)", __func__, pszBVNCString, res)); eError = PVRSRV_ERROR_NOT_READY; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/handle_idr.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/handle_idr.c index c40e096bfaa5..48bb8f4410e3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/handle_idr.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/handle_idr.c @@ -41,11 +41,11 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /***************************************************************************/ -#include #include #include #include #include +#include #include "handle_impl.h" #include "allocmem.h" @@ -110,29 +110,11 @@ static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, PVR_ASSERT(phHandle != NULL); PVR_ASSERT(pvData != NULL); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) idr_preload(GFP_KERNEL); id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0); idr_preload_end(); result = id; -#else - do - { - if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0) - { - return PVRSRV_ERROR_OUT_OF_MEMORY; - } - - result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id); - } while (result == -EAGAIN); - - if ((IMG_UINT32)id > psBase->ui32MaxHandleValue) - { - idr_remove(&psBase->sIdr, id); - result = -ENOSPC; - } -#endif if (result < 0) { @@ -390,10 +372,6 @@ static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) { PVR_ASSERT(psBase); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) - idr_remove_all(&psBase->sIdr); -#endif - /* Finally destroy the idr */ idr_destroy(&psBase->sIdr); @@ -402,7 +380,6 @@ static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) return PVRSRV_OK; } - static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = { .pfnAcquireHandle = AcquireHandle, @@ -413,7 +390,7 @@ static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = .pfnEnableHandlePurging = EnableHandlePurging, .pfnPurgeHandles = PurgeHandles, .pfnCreateHandleBase = CreateHandleBase, - .pfnDestroyHandleBase = DestroyHandleBase + .pfnDestroyHandleBase = DestroyHandleBase, }; PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/km_apphint.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/km_apphint.c index 80fee023360f..6e9bfd6f07d4 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/km_apphint.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/km_apphint.c @@ -46,7 +46,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #include #include -//#include /* Common and SO layer */ #include "img_defs.h" @@ -94,8 +93,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
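The handle_idr.c hunk above drops the pre-3.9 idr_pre_get()/idr_get_new_above() retry loop in favour of the modern idr_preload()/idr_alloc() pair. A minimal sketch of that allocation pattern (the helper name is illustrative):

#include <linux/idr.h>

/* Allocate an ID in [id_min, id_max] that maps back to 'data'.  idr_preload()
 * pre-allocates the radix-tree node so idr_alloc() itself does not need to
 * sleep, which is why the old idr_pre_get()/idr_get_new_above() retry loop
 * is no longer required. */
static int handle_alloc(struct idr *idr, void *data, int id_min, int id_max)
{
        int id;

        idr_preload(GFP_KERNEL);
        id = idr_alloc(idr, data, id_min, id_max + 1, GFP_NOWAIT);
        idr_preload_end();

        return id;      /* >= id_min on success, -ENOMEM or -ENOSPC on failure */
}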
*/ #define APPHINT_BUFFER_SIZE 512 -#define APPHINT_DEVICES_MAX 16 - /* Apphint Debug output level */ #define APPHINT_DPF_LEVEL PVR_DBG_VERBOSE @@ -304,22 +301,22 @@ static const struct apphint_class_state class_state[] = { static struct apphint_state { struct workqueue_struct *workqueue; - DI_GROUP *debuginfo_device_rootdir[APPHINT_DEVICES_MAX]; - DI_ENTRY *debuginfo_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGINFO_DEVICE_ID_MAX]; + DI_GROUP *debuginfo_device_rootdir[PVRSRV_MAX_DEVICES]; + DI_ENTRY *debuginfo_device_entry[PVRSRV_MAX_DEVICES][APPHINT_DEBUGINFO_DEVICE_ID_MAX]; DI_GROUP *debuginfo_rootdir; DI_ENTRY *debuginfo_entry[APPHINT_DEBUGINFO_ID_MAX]; DI_GROUP *buildvar_rootdir; DI_ENTRY *buildvar_entry[APPHINT_BUILDVAR_ID_MAX]; unsigned int num_devices; - PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX]; + PVRSRV_DEVICE_NODE *devices[PVRSRV_MAX_DEVICES]; unsigned int initialized; /* Array contains value space for 1 copy of all apphint values defined * (for device 1) and N copies of device specific apphint values for * multi-device platforms. */ - struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGINFO_DEVICE_ID_MAX)]; + struct apphint_action val[APPHINT_ID_MAX + ((PVRSRV_MAX_DEVICES-1)*APPHINT_DEBUGINFO_DEVICE_ID_MAX)]; } apphint = { /* statically initialise default values to ensure that any module_params @@ -385,11 +382,11 @@ get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device, return; } - for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) { + for (i = 0; device && i < PVRSRV_MAX_DEVICES; i++) { if (apphint.devices[i] == device) break; } - if (APPHINT_DEVICES_MAX == i) { + if (PVRSRV_MAX_DEVICES == i) { PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__)); i = 0; } @@ -638,7 +635,7 @@ static int apphint_read(char *buffer, size_t count, APPHINT_ID ue, goto err_exit; } - OSStringLCopy(value->STRING, string, len); + OSStringSafeCopy(value->STRING, string, len); break; } default: @@ -685,6 +682,9 @@ static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * } } else { if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) { + if (psDevNode != action->device) { + return PVRSRV_ERROR_INVALID_DEVICE; + } psDevice = psDevNode; } else { psDevice = action->device; @@ -734,7 +734,7 @@ static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * return result; } -/** +/* * apphint_write - write the current AppHint data to a buffer * * Returns length written or -errno @@ -844,7 +844,7 @@ static int apphint_write(char *buffer, const size_t size, ******************************************************************************* Module parameters initialization - different from debuginfo ******************************************************************************/ -/** +/* * apphint_kparam_set - Handle an update of a module parameter * * Returns 0, or -errno. arg is in kp->arg. 
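The OSStringLCopy() to OSStringSafeCopy() conversions in this file rely on strscpy()-style semantics: a negative return on truncation, rather than strlcpy()'s "intended length", which can silently exceed the destination buffer. A small sketch of that contract using the kernel's strscpy() directly (buffer size and helper name are illustrative):

#include <linux/kernel.h>
#include <linux/string.h>

#define PARAM_BUF_SIZE 512

/* Copy a parameter string into a bounded scratch buffer.  strscpy() returns
 * the number of characters copied, or -E2BIG if the source did not fit, so
 * truncation can never be mistaken for success. */
static ssize_t copy_param(char *dst, const char *src)
{
        ssize_t len = strscpy(dst, src, PARAM_BUF_SIZE);

        if (len < 0)
                pr_err("parameter string too long (max %d)\n", PARAM_BUF_SIZE - 1);

        return len;
}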
@@ -854,28 +854,36 @@ static int apphint_kparam_set(const char *val, const struct kernel_param *kp) char val_copy[APPHINT_BUFFER_SIZE]; APPHINT_ID id; union apphint_value value; - int result; + ssize_t result = OSStringSafeCopy(val_copy, val, APPHINT_BUFFER_SIZE); - /* need to discard const in case of string comparison */ - result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE); + /* Document the assumption - we can safely store the result of + * apphint_read() in a ssize_t variable, no need for a separate one */ +#ifndef SSIZE_MAX +#define SSIZE_MAX ((~(size_t)0) >> 1) +#endif + BUILD_BUG_ON(INT_MAX > SSIZE_MAX); - get_apphint_id_from_action_addr(kp->arg, &id); - if (result < APPHINT_BUFFER_SIZE) { - result = apphint_read(val_copy, result, id, &value); - if (result >= 0) { - ((struct apphint_action *)kp->arg)->stored = value; - ((struct apphint_action *)kp->arg)->initialised = true; - if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { - ((struct apphint_action *)kp->arg)->free = true; - } - } - } else { + if (result < 0) { PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__)); + return (int)result; + } + + get_apphint_id_from_action_addr(kp->arg, &id); + result = apphint_read(val_copy, result, id, &value); + if (result < 0) { + return (int)result; + } + + ((struct apphint_action *)kp->arg)->stored = value; + ((struct apphint_action *)kp->arg)->initialised = true; + if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { + ((struct apphint_action *)kp->arg)->free = true; } - return (result > 0) ? 0 : result; + + return 0; } -/** +/* * apphint_kparam_get - handle a read of a module parameter * * Returns length written or -errno. Buffer is 4k (ie. be short!) @@ -932,7 +940,7 @@ static void *apphint_di_next(OSDI_IMPL_ENTRY *s, void *v, IMG_UINT64 *pos) { PVR_UNREFERENCED_PARAMETER(s); PVR_UNREFERENCED_PARAMETER(v); - PVR_UNREFERENCED_PARAMETER(pos); + (*pos)++; return NULL; } @@ -964,7 +972,7 @@ static int apphint_di_show(OSDI_IMPL_ENTRY *s, void *v) Debug Info supporting functions ******************************************************************************/ -/** +/* * apphint_set - Handle a DI value update */ static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count, @@ -1003,7 +1011,7 @@ static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count, return result; } -/** +/* * apphint_debuginfo_init - Create the specified debuginfo entries */ static int apphint_debuginfo_init(const char *sub_dir, @@ -1022,8 +1030,9 @@ static int apphint_debuginfo_init(const char *sub_dir, .pfnNext = apphint_di_next, .pfnShow = apphint_di_show, .pfnWrite = apphint_set, .ui32WriteLenMax = APPHINT_BUFFER_SIZE }; + /* Determine if we're booted as a GUEST VZ OS */ - IMG_BOOL bIsGUEST = PVRSRV_VZ_MODE_IS(GUEST); + IMG_BOOL bIsGUEST = PVRSRV_VZ_MODE_IS(GUEST, DEVID, device_num); if (*rootdir) { PVR_DPF((PVR_DBG_WARNING, @@ -1070,7 +1079,7 @@ static int apphint_debuginfo_init(const char *sub_dir, return result; } -/** +/* * apphint_debuginfo_deinit- destroy the debuginfo entries */ static void apphint_debuginfo_deinit(unsigned int num_entries, @@ -1184,7 +1193,7 @@ static void apphint_dump_values(const char *group_name, } } -/** +/* * Callback for debug dump */ static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, @@ -1211,7 +1220,7 @@ static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, init_data_debuginfo, ARRAY_SIZE(init_data_debuginfo), pfnDumpDebugPrintf, pvDumpDebugFile, false, device); - for (i = 0; i < APPHINT_DEVICES_MAX; i++) 
{ + for (i = 0; i < PVRSRV_MAX_DEVICES; i++) { if (!apphint.devices[i] || (device && device != apphint.devices[i])) continue; @@ -1246,7 +1255,7 @@ int pvr_apphint_init(void) goto err_out; } - for (i = 0; i < APPHINT_DEVICES_MAX; i++) + for (i = 0; i < PVRSRV_MAX_DEVICES; i++) apphint.devices[i] = NULL; /* create workqueue with strict execution ordering to ensure no @@ -1288,7 +1297,7 @@ int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device) goto err_out; } - if (apphint.num_devices+1 > APPHINT_DEVICES_MAX) { + if (apphint.num_devices+1 > PVRSRV_MAX_DEVICES) { result = -EMFILE; goto err_out; } @@ -1317,16 +1326,16 @@ int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device) } } - result = apphint_debuginfo_init("apphint", apphint.num_devices, + result = apphint_debuginfo_init("apphint", device->sDevId.ui32InternalID, ARRAY_SIZE(init_data_debuginfo_device), init_data_debuginfo_device, device->sDebugInfo.psGroup, - &apphint.debuginfo_device_rootdir[apphint.num_devices], - apphint.debuginfo_device_entry[apphint.num_devices]); + &apphint.debuginfo_device_rootdir[device->sDevId.ui32InternalID], + apphint.debuginfo_device_entry[device->sDevId.ui32InternalID]); if (0 != result) goto err_out; - apphint.devices[apphint.num_devices] = device; + apphint.devices[device->sDevId.ui32InternalID] = device; apphint.num_devices++; (void)SOPvrDbgRequestNotifyRegister( @@ -1348,12 +1357,12 @@ void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device) return; /* find the device */ - for (i = 0; i < APPHINT_DEVICES_MAX; i++) { + for (i = 0; i < PVRSRV_MAX_DEVICES; i++) { if (apphint.devices[i] == device) break; } - if (APPHINT_DEVICES_MAX == i) + if (PVRSRV_MAX_DEVICES == i) return; if (device->hAppHintDbgReqNotify) { @@ -1380,7 +1389,7 @@ void pvr_apphint_deinit(void) return; /* remove any remaining device data */ - for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) { + for (i = 0; apphint.num_devices && i < PVRSRV_MAX_DEVICES; i++) { if (apphint.devices[i]) pvr_apphint_device_unregister(apphint.devices[i]); } @@ -1492,13 +1501,13 @@ int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR * if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) { if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints { - if (OSStringLCopy(pBuffer, apphint.val[ue + device_offset].stored.STRING, size) < size) { + if (OSStringSafeCopy(pBuffer, apphint.val[ue + device_offset].stored.STRING, size) >= 0) { error = 0; } } else { - if (OSStringLCopy(pBuffer, apphint.val[ue].stored.STRING, size) < size) { + if (OSStringSafeCopy(pBuffer, apphint.val[ue].stored.STRING, size) >= 0) { error = 0; } } @@ -1585,10 +1594,8 @@ int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR * error = apphint.val[ue + device_offset].set.STRING(apphint.val[ue + device_offset].device, apphint.val[ue + device_offset].private_data, pBuffer); - } else { - if (strlcpy(apphint.val[ue + device_offset].stored.STRING, pBuffer, size) < size) { - error = 0; - } + } else if (OSStringSafeCopy(apphint.val[ue + device_offset].stored.STRING, pBuffer, size) >= 0) { + error = 0; } apphint.val[ue].device = device; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.c index f1442074b9c2..8df0593db286 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.c +++ 
b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.c @@ -80,6 +80,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_ion_stats.h" +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +#include "physmem_osmem_linux.h" +#endif +#endif + +#include "dkf_server.h" + #if defined(SUPPORT_DISPLAY_CLASS) /* Display class interface */ #include "kerneldisplay.h" @@ -104,7 +112,7 @@ EXPORT_SYMBOL(PVRSRVCheckStatus); * Required by LMA DC drivers, and some non-DC LMA display drivers. */ #include "physheap.h" -EXPORT_SYMBOL(PhysHeapAcquireByUsage); +EXPORT_SYMBOL(PhysHeapAcquireByID); EXPORT_SYMBOL(PhysHeapRelease); EXPORT_SYMBOL(PhysHeapGetType); EXPORT_SYMBOL(PhysHeapGetCpuPAddr); @@ -118,9 +126,6 @@ EXPORT_SYMBOL(PVRSRVGetDeviceInstance); #if defined(SUPPORT_RGX) #include "rgxapi_km.h" -#if defined(SUPPORT_SHARED_SLC) -EXPORT_SYMBOL(RGXInitSLC); -#endif EXPORT_SYMBOL(RGXHWPerfConnect); EXPORT_SYMBOL(RGXHWPerfDisconnect); EXPORT_SYMBOL(RGXHWPerfControl); @@ -149,8 +154,14 @@ CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile) { if (pFile) { - struct drm_file *psDRMFile = pFile->private_data; - PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; + struct drm_file *psDRMFile; + PVRSRV_CONNECTION_PRIV *psConnectionPriv; + + psDRMFile = pFile->private_data; + PVR_LOG_RETURN_IF_FALSE(psDRMFile != NULL, "psDRMFile is NULL", NULL); + + psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; + PVR_LOG_RETURN_IF_FALSE(psConnectionPriv != NULL, "psConnectionPriv is NULL", NULL); return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData; } @@ -191,6 +202,14 @@ int PVRSRVDriverInit(void) return -ENOMEM; } +#if defined(SUPPORT_LINUX_FDINFO) + error = PVRDKFInit(); + if (error != PVRSRV_OK) + { + return -ENODEV; + } +#endif /* SUPPORT_LINUX_FDINFO */ + error = PVRSRVCommonDriverInit(); if (error != PVRSRV_OK) { @@ -288,12 +307,16 @@ void PVRSRVDriverDeinit(void) pvr_sync_deinit(); #endif - PVRSRVCommonDriverDeInit(); - #if defined(SUPPORT_RGX) PVRGpuTraceSupportDeInit(); #endif + PVRSRVCommonDriverDeInit(); + +#if defined(SUPPORT_LINUX_FDINFO) + PVRDKFDeInit(); +#endif /* SUPPORT_LINUX_FDINFO */ + PVROSFuncDeInit(); } @@ -325,7 +348,7 @@ int PVRSRVDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode) { PVR_DPF((PVR_DBG_WARNING, "%s: failed to initialise PVR GPU Tracing on device%d (%d)", - __func__, psDeviceNode->sDevId.i32OsDeviceID, error)); + __func__, psDeviceNode->sDevId.i32KernelDeviceID, error)); } } #endif @@ -350,11 +373,9 @@ void PVRSRVDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) pvr_sync_device_deinit(psDeviceNode->psDevConfig->pvOSDevice); #endif -#if defined(SUPPORT_DMA_TRANSFER) - PVRSRVDeInitialiseDMA(psDeviceNode); -#endif - +#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) pvr_fence_cleanup(); +#endif } /**************************************************************************/ /*! @@ -410,7 +431,7 @@ int PVRSRVDeviceSuspend(struct drm_device *psDev) if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, PVRSRV_SYS_POWER_STATE_OFF, - PVRSRV_POWER_FLAGS_SUSPEND_REQ) != PVRSRV_OK) + PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ) != PVRSRV_OK) { /* Ignore return error as we're already returning an error here. 
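LinuxServicesConnectionFromFile() above now validates each link of the file -> drm_file -> driver_priv chain before dereferencing it. A minimal standalone sketch of the same defensive lookup (names are illustrative):

#include <linux/fs.h>
#include <drm/drm_file.h>

/* Walk file->private_data (the DRM file) to the driver-private connection
 * pointer, returning NULL instead of crashing if any link is missing. */
static void *connection_from_file(struct file *file)
{
        struct drm_file *drm_file;

        if (!file)
                return NULL;

        drm_file = file->private_data;
        if (!drm_file)
                return NULL;

        return drm_file->driver_priv;
}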
*/ (void) LinuxBridgeUnblockClientsAccess(psDevPriv); @@ -434,7 +455,7 @@ int PVRSRVDeviceResume(struct drm_device *psDev) if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, PVRSRV_SYS_POWER_STATE_ON, - PVRSRV_POWER_FLAGS_RESUME_REQ) != PVRSRV_OK) + PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ) != PVRSRV_OK) { return -EINVAL; } @@ -505,13 +526,14 @@ int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode, mutex_unlock(&sDeviceInitMutex); goto fail_alloc_connection_priv; } + psConnectionPriv->ui32Type = DKF_CONNECTION_FLAG_SERVICES; } else { psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; } - if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT) + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_CREATED) { eError = PVRSRVCommonDeviceInitialise(psDeviceNode); if (eError != PVRSRV_OK) @@ -530,6 +552,7 @@ int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode, mutex_unlock(&sDeviceInitMutex); sPrivData.psDevNode = psDeviceNode; + sPrivData.psDRMFile = psDRMFile; /* * Here we pass the file pointer which will passed through to our @@ -549,15 +572,6 @@ int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode, #endif psDRMFile->driver_priv = (void*)psConnectionPriv; -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) - eError = PVRSRVGpuTraceWorkPeriodEventStatsRegister( - &psConnectionPriv->pvGpuWorkPeriodEventStats); - if (eError != PVRSRV_OK) - { - iErr = -ENOMEM; - goto fail_connect; - } -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ goto out; fail_connect: @@ -580,6 +594,7 @@ int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode, static int PVRSRVDeviceSyncOpen(PVRSRV_DEVICE_NODE *psDeviceNode, struct drm_file *psDRMFile) { +#if defined(SUPPORT_NATIVE_FENCE_SYNC) PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); CONNECTION_DATA *psConnection = NULL; ENV_CONNECTION_PRIVATE_DATA sPrivData; @@ -624,7 +639,10 @@ static int PVRSRVDeviceSyncOpen(PVRSRV_DEVICE_NODE *psDeviceNode, psConnectionPriv->pvSyncConnectionData = (void*)psConnection; #endif + psConnectionPriv->ui32Type = DKF_CONNECTION_FLAG_SYNC; + sPrivData.psDevNode = psDeviceNode; + sPrivData.psDRMFile = psDRMFile; /* Call environment specific connection data init function */ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, &sPrivData); @@ -667,6 +685,11 @@ static int PVRSRVDeviceSyncOpen(PVRSRV_DEVICE_NODE *psDeviceNode, kfree(psConnectionPriv); out: return iErr; +#else /* if defined(SUPPORT_NATIVE_FENCE_SYNC) */ + /* Ioctl should not be being called (no DDK support for native sync) */ + PVR_DPF((PVR_DBG_ERROR, "%s: only supported when SUPPORT_NATIVE_FENCE_SYNC=1", __func__)); + return -ENOTTY; +#endif /* if defined(SUPPORT_NATIVE_FENCE_SYNC) */ } /**************************************************************************/ /*! 
@@ -688,15 +711,6 @@ void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode, if (psConnectionPriv->pvConnectionData) { -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) - if (psConnectionPriv->pvGpuWorkPeriodEventStats) - { - PVRSRVGpuTraceWorkPeriodEventStatsUnregister( - psConnectionPriv->pvGpuWorkPeriodEventStats); - psConnectionPriv->pvGpuWorkPeriodEventStats = NULL; - } -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ - #if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) if (psConnectionPriv->pfDeviceRelease) { @@ -713,6 +727,12 @@ void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode, #endif } +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + LinuxDeregisterMigrateCallbacks(psDRMFile->filp); +#endif +#endif + kfree(psDRMFile->driver_priv); psDRMFile->driver_priv = NULL; } @@ -735,8 +755,26 @@ drm_pvr_srvkm_init(struct drm_device *dev, void *arg, struct drm_file *psDRMFile case PVR_SRVKM_SERVICES_INIT: { iErr = PVRSRVDeviceServicesOpen(priv->dev_node, psDRMFile); + if (iErr) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDeviceServicesOpen() failed(%d)", + __func__, iErr)); + break; + } +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + iErr = LinuxRegisterMigrateCallbacks(psDRMFile->filp); +#endif +#endif break; } +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + case PVR_SRVKM_SERVICES_PAGE_MIGRATE_INIT: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + iErr = LinuxRegisterMigrateCallbacks(psDRMFile->filp); +#endif +#endif + break; default: { PVR_DPF((PVR_DBG_ERROR, "%s: invalid init_module (%d)", diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.h index 811cacdd98ac..35f33770e2e5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/module_common.h @@ -80,12 +80,10 @@ typedef struct */ void *pvSyncConnectionData; -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) - /* hGpuWorkPeriodEventStats is used to hold gpu work period event stats - * private data for each apps which have been working with GPU. + /* An integer representing the type of connection (indicated by the + * DKF_CONNECTION_FLAGS). 
*/ - void *pvGpuWorkPeriodEventStats; -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ + uint32_t ui32Type; } PVRSRV_CONNECTION_PRIV; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osconnection_server.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osconnection_server.c index 0c3bc2d672ee..543b4d8ddabf 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osconnection_server.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osconnection_server.c @@ -84,6 +84,7 @@ PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOS psEnvConnection->owner = current->tgid; psEnvConnection->psDevNode = psPrivData->psDevNode; + psEnvConnection->psDRMFile = psPrivData->psDRMFile; #if defined(SUPPORT_NATIVE_FENCE_SYNC) psEnvConnection->pvPvrSyncPrivateData = NULL; @@ -120,23 +121,23 @@ PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOS PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) { - ENV_CONNECTION_DATA *psEnvConnection; - if (hOsPrivateData == NULL) { return PVRSRV_OK; } - psEnvConnection = hOsPrivateData; - #if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) - PVR_ASSERT(psEnvConnection->psIonData != NULL); + { + ENV_CONNECTION_DATA *psEnvConnection = hOsPrivateData; - PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL); - ion_client_destroy(psEnvConnection->psIonData->psIonClient); + PVR_ASSERT(psEnvConnection->psIonData != NULL); - IonDevRelease(psEnvConnection->psIonData->psIonDev); - OSFreeMem(psEnvConnection->psIonData); + PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL); + ion_client_destroy(psEnvConnection->psIonData->psIonClient); + + IonDevRelease(psEnvConnection->psIonData->psIonDev); + OSFreeMem(psEnvConnection->psIonData); + } #endif OSFreeMem(hOsPrivateData); @@ -155,3 +156,13 @@ PVRSRV_DEVICE_NODE *OSGetDevNode(CONNECTION_DATA *psConnection) return psEnvConnection->psDevNode; } + +struct drm_file *OSGetDRMFile(CONNECTION_DATA *psConnection) +{ + ENV_CONNECTION_DATA *psEnvConnection; + + psEnvConnection = PVRSRVConnectionPrivateData(psConnection); + PVR_ASSERT(psEnvConnection); + + return psEnvConnection->psDRMFile; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc.c index 6d060ddc97d7..62390830ccd5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc.c @@ -64,13 +64,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #include #include +#include #include #include #include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) +#include #include #include -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ +#include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) #include #include @@ -87,6 +88,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif #include "log2.h" +#include "sysinfo.h" #include "osfunc.h" #include "cache_km.h" #include "img_defs.h" @@ -104,7 +106,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif #include "physmem_osmem_linux.h" #include "dma_support.h" -#include "kernel_compatibility.h" #include "pvrsrv_sync_server.h" @@ -113,16 +114,40 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
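The osfunc.c changes that follow wrap user-page pinning in pvr_pin_user_pages_for_dma()/pvr_unpin_user_page_for_dma(), so that kernels newer than v5.6 use the pin_user_pages*() API (which accounts the pages as DMA-pinned) while older kernels fall back to get_user_pages_fast()/put_page(). A minimal sketch of that compatibility shim, assuming a kernel recent enough (v5.2+) that get_user_pages_fast() takes gup_flags and accepts FOLL_LONGTERM:

#include <linux/mm.h>
#include <linux/version.h>

/* Pin 'nr_pages' of a user buffer for a long-lived DMA transfer.  FOLL_WRITE
 * is needed when the device will write to the memory; FOLL_LONGTERM keeps the
 * pages out of CMA/ZONE_MOVABLE for the duration of the pin. */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
                           bool device_writes, struct page **pages)
{
        unsigned int flags = (device_writes ? FOLL_WRITE : 0) | FOLL_LONGTERM;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 6, 0))
        return pin_user_pages_fast(uaddr, nr_pages, flags, pages);
#else
        return get_user_pages_fast(uaddr, nr_pages, flags, pages);
#endif
}

static void unpin_user_buffer_page(struct page *page)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 6, 0))
        unpin_user_page(page);
#else
        put_page(page);
#endif
}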
#include "pvr_ricommon.h" #endif -#if defined(VIRTUAL_PLATFORM) -#define EVENT_OBJECT_TIMEOUT_US (120000000ULL) +#include "kernel_compatibility.h" + +#if !defined(EVENT_OBJECT_TIMEOUT_US) +#error EVENT_OBJECT_TIMEOUT_US should be defined sysinfo.h +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) +#define PVR_FOLL_LONGTERM (0x0U) #else -#if defined(EMULATOR) || defined(TC_APOLLO_TCF5) -#define EVENT_OBJECT_TIMEOUT_US (2000000ULL) +#define PVR_FOLL_LONGTERM FOLL_LONGTERM +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 6, 0)) + +#define pvr_pin_user_pages_for_dma(puiAddress, num_pages, bWrite, pages) get_user_pages_fast( \ + (unsigned long)puiAddress, \ + (int)num_pages, \ + (int) (bWrite ? FOLL_WRITE : 0) | PVR_FOLL_LONGTERM, \ + pages) +#define pvr_unpin_user_page_for_dma(p) put_page(p) + #else -#define EVENT_OBJECT_TIMEOUT_US (100000ULL) -#endif /* EMULATOR */ + +#define pvr_pin_user_pages_for_dma(puiAddress, num_pages, bWrite, pages) pin_user_pages_fast( \ + (unsigned long)puiAddress, \ + (int) num_pages, \ + (int) (bWrite ? FOLL_WRITE : 0) | PVR_FOLL_LONGTERM, \ + pages) +#define pvr_unpin_user_page_for_dma(p) unpin_user_page(p) + #endif +#define _FREEZABLE IMG_TRUE +#define _NON_FREEZABLE IMG_FALSE typedef struct { struct task_struct *kthread; @@ -167,6 +192,8 @@ void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, void *pvDumpDebugFile) { PDLLIST_NODE psNodeCurr, psNodeNext; + LINUX_THREAD_ACTIVITY_STATS sThreadStats; + PVRSRV_ERROR eError = PVRSRV_OK; dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext) { @@ -182,6 +209,21 @@ void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile); } } + + eError = LinuxGetThreadActivityStats(&sThreadStats); + if (eError == PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Active Threads (UM/KM): %d / %d", + sThreadStats.i32DriverThreadCount, + sThreadStats.i32KernelThreadCount); + + PVR_DUMPDEBUG_LOG("Suspended Threads: %d", + sThreadStats.i32SuspendedThreadCount); + } + else + { + PVR_LOG_ERROR(eError, "LinuxGetThreadActivityStats"); + } } PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, @@ -265,13 +307,11 @@ PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle) { struct page *psPage = (struct page*) psMemHandle->u.pvHandle; - IMG_UINT32 uiSize, uiPageCount=0, ui32Order; + IMG_UINT32 ui32Order; PVR_UNREFERENCED_PARAMETER(psPhysHeap); ui32Order = psMemHandle->uiOrder; - uiPageCount = (1 << ui32Order); - uiSize = (uiPageCount * PAGE_SIZE); #if defined(PVRSRV_ENABLE_PROCESS_STATS) #if !defined(PVRSRV_ENABLE_MEMORY_STATS) @@ -556,6 +596,7 @@ IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, #if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) static struct workqueue_struct *gpFenceStatusWq; +static struct workqueue_struct *gpFenceCtxDestroyWq; static PVRSRV_ERROR _NativeSyncInit(void) { @@ -567,12 +608,21 @@ static PVRSRV_ERROR _NativeSyncInit(void) return PVRSRV_ERROR_INIT_FAILURE; } + gpFenceCtxDestroyWq = create_freezable_workqueue("pvr_fence_context_destroy"); + if (!gpFenceCtxDestroyWq) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create foreign fence context destruction workqueue", + __func__)); + return PVRSRV_ERROR_INIT_FAILURE; + } + return PVRSRV_OK; } static void _NativeSyncDeinit(void) { destroy_workqueue(gpFenceStatusWq); + destroy_workqueue(gpFenceCtxDestroyWq); } struct workqueue_struct 
*NativeSyncGetFenceStatusWq(void) @@ -587,13 +637,27 @@ struct workqueue_struct *NativeSyncGetFenceStatusWq(void) return gpFenceStatusWq; } + +struct workqueue_struct *NativeSyncGetFenceCtxDestroyWq(void) +{ + if (!gpFenceCtxDestroyWq) + { +#if defined(DEBUG) + PVR_ASSERT(gpFenceCtxDestroyWq); +#endif + return NULL; + } + + return gpFenceCtxDestroyWq; +} #endif PVRSRV_ERROR OSInitEnvData(void) { - PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError; - LinuxInitPhysmem(); + eError = LinuxInitPhysmem(); + PVR_GOTO_IF_ERROR(eError, error_out); _OSInitThreadList(); @@ -601,6 +665,7 @@ PVRSRV_ERROR OSInitEnvData(void) eError = _NativeSyncInit(); #endif +error_out: return eError; } @@ -811,6 +876,22 @@ IMG_CHAR *OSGetCurrentClientProcessNameKM(void) return OSGetCurrentProcessName(); } +uintptr_t OSAcquireCurrentPPIDResourceRefKM(void) +{ + struct pid *psPPIDResource = find_pid_ns(OSGetCurrentClientProcessIDKM(), &init_pid_ns); + + PVR_ASSERT(psPPIDResource != NULL); + /* Take ref on pPid */ + get_pid(psPPIDResource); + return (uintptr_t)psPPIDResource; +} + +void OSReleasePPIDResourceRefKM(uintptr_t psPPIDResource) +{ + /* Drop ref on uiProc */ + put_pid((struct pid*)psPPIDResource); +} + uintptr_t OSGetCurrentClientThreadIDKM(void) { return OSGetCurrentThreadID(); @@ -850,38 +931,37 @@ typedef struct PVRSRV_ERROR pvr_error; } error_map_t; -/* return -ve versions of POSIX errors as they are used in this form */ -static const error_map_t asErrorMap[] = -{ - {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT}, - {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL}, - {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM}, - {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE}, - {-EPERM, PVRSRV_ERROR_BRIDGE_EPERM}, - {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY}, - {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED}, - {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL}, - {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY}, - {-EACCES, PVRSRV_ERROR_PMR_NOT_PERMITTED}, - {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS}, - - {0, PVRSRV_OK} -}; +#define PVRSRV_ERROR_TO_OS_ERROR \ + X(PVRSRV_OK, 0) \ + X(PVRSRV_ERROR_BRIDGE_EFAULT, EFAULT) \ + X(PVRSRV_ERROR_BRIDGE_EINVAL, EINVAL) \ + X(PVRSRV_ERROR_BRIDGE_ENOMEM, ENOMEM) \ + X(PVRSRV_ERROR_BRIDGE_ERANGE, ERANGE) \ + X(PVRSRV_ERROR_BRIDGE_EPERM, EPERM) \ + X(PVRSRV_ERROR_BRIDGE_ENOTTY, ENOTTY) \ + X(PVRSRV_ERROR_BRIDGE_CALL_FAILED, ENOTTY) \ + X(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL, ERANGE) \ + X(PVRSRV_ERROR_OUT_OF_MEMORY, ENOMEM) \ + X(PVRSRV_ERROR_PMR_NOT_PERMITTED, EACCES) \ + X(PVRSRV_ERROR_INVALID_PARAMS, EINVAL) \ + X(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING, EPERM) \ + X(PVRSRV_ERROR_NOT_IMPLEMENTED, ENOSYS) \ + X(PVRSRV_ERROR_BAD_MAPPING, EINVAL) -int PVRSRVToNativeError(PVRSRV_ERROR e) +/* return -ve versions of POSIX errors as they are used in this form */ +int PVRSRVToNativeError(PVRSRV_ERROR eError) { - int os_error = -EFAULT; - int i; - - for (i = 0; i < ARRAY_SIZE(asErrorMap); i++) + switch (eError) { - if (e == asErrorMap[i].pvr_error) - { - os_error = asErrorMap[i].os_error; - break; - } +#define X(_PVRSRV_ERROR, _OS_ERROR) \ + case (_PVRSRV_ERROR): return -(_OS_ERROR); + + PVRSRV_ERROR_TO_OS_ERROR + +#undef X + default: + return -EFAULT; } - return os_error; } typedef struct _MISR_DATA_ { @@ -977,6 +1057,11 @@ PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData) #endif } +void OSSyncIRQ(IMG_UINT32 ui32IRQ) +{ + synchronize_irq(ui32IRQ); +} + /* OS specific values for thread priority */ static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] = { @@ -1048,6 +1133,7 @@ PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, 
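PVRSRVToNativeError() above replaces a lookup-table scan with a switch generated from a single X-macro list, so the mapping table and the code that applies it cannot drift apart. The pattern in isolation (the error names below are made up for illustration):

#include <linux/errno.h>

/* One list drives both the enum definition and the errno translation. */
#define MYDRV_ERROR_TABLE \
        X(MYDRV_OK,        0)      \
        X(MYDRV_ERR_NOMEM, ENOMEM) \
        X(MYDRV_ERR_INVAL, EINVAL)

enum mydrv_error {
#define X(err, os) err,
        MYDRV_ERROR_TABLE
#undef X
};

static int mydrv_to_errno(enum mydrv_error e)
{
        switch (e) {
#define X(err, os) case err: return -(os);
        MYDRV_ERROR_TABLE
#undef X
        default:
                return -EFAULT;
        }
}

Because every case returns directly, dropping the default branch would also let the compiler's -Wswitch warn about any enum value that is still unmapped.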
psOSThreadData->pfnThread = pfnThread; psOSThreadData->hData = hData; + psOSThreadData->pszThreadName = pszThreadName; psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName); if (IS_ERR(psOSThreadData->kthread)) @@ -1058,7 +1144,6 @@ PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, if (bIsSupportingThread) { - psOSThreadData->pszThreadName = pszThreadName; psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB; psOSThreadData->bIsThreadRunning = IMG_TRUE; psOSThreadData->bIsSupportingThread = IMG_TRUE; @@ -1121,8 +1206,6 @@ IMG_BOOL OSIsMapPhysNonContigSupported(void) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && !defined(CONFIG_VMAP_PFN) return IMG_FALSE; -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) - return IMG_FALSE; #else return IMG_TRUE; #endif @@ -1135,9 +1218,10 @@ void OSUnMapPhysArrayToLin(void *pvLinAddr, void *pvPrivData) #if defined(CONFIG_VMAP_PFN) PVR_UNREFERENCED_PARAMETER(pvPrivData); vunmap(pvLinAddr); -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) unmap_kernel_range((unsigned long) (uintptr_t) pvLinAddr, get_vm_area_size(pvPrivData)); + free_vm_area(pvPrivData); #else PVR_DPF((PVR_DBG_ERROR,"%s: Cannot map into kernel, no method supported.", __func__)); PVR_ASSERT(0); @@ -1180,7 +1264,7 @@ PVRSRV_ERROR OSMapPhysArrayToLin(IMG_CPU_PHYADDR pPagePA[], *ppvPrivData = NULL; return PVRSRV_OK; } -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) { pte_t *pte[32], **pte_array; struct vm_struct *psVMA; @@ -1243,31 +1327,30 @@ OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, if (uiMappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) { - PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu"); + PVR_DPF((PVR_DBG_ERROR, "Found non-cpu cache mode flag when mapping to the cpu")); return NULL; } - if (! PVRSRV_VZ_MODE_IS(NATIVE)) - { - /* - This is required to support DMA physheaps for GPU virtualization. - Unfortunately, if a region of kernel managed memory is turned into - a DMA buffer, conflicting mappings can come about easily on Linux - as the original memory is mapped by the kernel as normal cached - memory whilst DMA buffers are mapped mostly as uncached device or - cache-coherent device memory. In both cases the system will have - two conflicting mappings for the same memory region and will have - "undefined behaviour" for most processors notably ARMv6 onwards - and some x86 micro-architectures. As a result, perform ioremapping - manually for DMA physheap allocations by translating from CPU/VA - to BUS/PA thereby preventing the creation of conflicting mappings. - */ - pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes); - if (pvLinAddr != NULL) - { - return (void __force *) pvLinAddr; - } +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* + This is required to support DMA physheaps for GPU virtualization. + Unfortunately, if a region of kernel managed memory is turned into + a DMA buffer, conflicting mappings can come about easily on Linux + as the original memory is mapped by the kernel as normal cached + memory whilst DMA buffers are mapped mostly as uncached device or + cache-coherent device memory. 
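The OSMapPhysToLin() rework above and below selects the kernel mapping primitive from the requested CPU cache mode and reports unexpected flags instead of asserting. A sketch of one way to make that selection; the memremap() branch for a cached view of system RAM is an assumption about intended use, not something this patch adds:

#include <linux/io.h>

/* Map a physical range with an explicit caching policy.  Uncached and
 * write-combined views go through ioremap()/ioremap_wc(); a cached view of
 * ordinary RAM is better served by memremap(), which avoids creating an
 * alias of the linear map with conflicting attributes. */
static void __iomem *map_phys_range(phys_addr_t pa, size_t len,
                                    bool cached, bool write_combine)
{
        if (cached)
                return (__force void __iomem *)memremap(pa, len, MEMREMAP_WB);

        return write_combine ? ioremap_wc(pa, len) : ioremap(pa, len);
}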
In both cases the system will have + two conflicting mappings for the same memory region and will have + "undefined behaviour" for most processors notably ARMv6 onwards + and some x86 micro-architectures. As a result, perform ioremapping + manually for DMA physheap allocations by translating from CPU/VA + to BUS/PA thereby preventing the creation of conflicting mappings. + */ + pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes); + if (pvLinAddr != NULL) + { + return (void __force *) pvLinAddr; } +#endif switch (uiMappingFlags) { @@ -1290,11 +1373,11 @@ OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, break; case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: - PVR_ASSERT(!"Unexpected cpu cache mode"); + PVR_DPF((PVR_DBG_ERROR, "Unexpected cpu cache mode")); pvLinAddr = NULL; break; default: - PVR_ASSERT(!"Unsupported cpu cache mode"); + PVR_DPF((PVR_DBG_ERROR, "Unsupported cpu cache mode")); pvLinAddr = NULL; break; } @@ -1308,13 +1391,12 @@ OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes) { PVR_UNREFERENCED_PARAMETER(ui32Bytes); - if (!PVRSRV_VZ_MODE_IS(NATIVE)) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (SysDmaCpuVAddrToDevPAddr(pvLinAddr)) { - if (SysDmaCpuVAddrToDevPAddr(pvLinAddr)) - { - return IMG_TRUE; - } + return IMG_TRUE; } +#endif iounmap((void __iomem *) pvLinAddr); @@ -1335,7 +1417,7 @@ typedef struct TIMER_CALLBACK_DATA_TAG struct work_struct sWork; }TIMER_CALLBACK_DATA; -static struct workqueue_struct *psTimerWorkQueue; +static struct workqueue_struct *psTimerWorkQueue = NULL; static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; @@ -1378,7 +1460,7 @@ static void OSTimerCallbackWrapper(uintptr_t uData) res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork); if (res == 0) { - PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued")); + PVR_LOG(("OSTimerCallbackWrapper: work already queued")); } } @@ -1536,14 +1618,7 @@ PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject) return LinuxEventObjectListDestroy(hEventObject); } -#define _FREEZABLE IMG_TRUE -#define _NON_FREEZABLE IMG_FALSE - -/* - * EventObjectWaitTimeout() - */ -static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM, - IMG_UINT64 uiTimeoutus) +PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus) { PVRSRV_ERROR eError; @@ -1560,11 +1635,6 @@ static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM, return eError; } -PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus) -{ - return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus); -} - PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM) { return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US); @@ -1575,20 +1645,14 @@ PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, { PVRSRV_ERROR eError; -#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) - if (hOSEventKM) + if (hOSEventKM != NULL && uiTimeoutus > 0) { - if (uiTimeoutus > 0) - eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, - _FREEZABLE); - else - eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM); + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, _FREEZABLE); } -#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ - if (hOSEventKM && uiTimeoutus > 0) +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + else if (hOSEventKM != NULL) { - eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, - _FREEZABLE); + eError = 
LinuxEventObjectWaitUntilSignalled(hOSEventKM); } #endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ else @@ -1723,181 +1787,6 @@ void OSDumpStack(void) dump_stack(); } -PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, - IMG_UINT64 sCpuVAddrBase, - IMG_CPU_PHYADDR sCpuPAHeapBase, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices, - IMG_BOOL bIsLMA) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) - pfn_t sPFN; -#else - IMG_UINT64 uiPFN; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ - - PVRSRV_ERROR eError; - - struct mm_struct *psMM = current->mm; - struct vm_area_struct *psVMA = NULL; - struct address_space *psMapping = NULL; - struct page *psPage = NULL; - - IMG_UINT64 uiCPUVirtAddr = 0; - IMG_UINT32 ui32Loop = 0; - IMG_UINT32 ui32PageSize = OSGetPageSize(); - IMG_BOOL bMixedMap = IMG_FALSE; - - /* - * Acquire the lock before manipulating the VMA - * In this case only mmap_sem lock would suffice as the pages associated with this VMA - * are never meant to be swapped out. - * - * In the future, in case the pages are marked as swapped, page_table_lock needs - * to be acquired in conjunction with this to disable page swapping. - */ - - /* Find the Virtual Memory Area associated with the user base address */ - psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase); - if (NULL == psVMA) - { - eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND; - return eError; - } - - /* Acquire the memory sem */ - mmap_write_lock(psMM); - - psMapping = psVMA->vm_file->f_mapping; - - /* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */ - psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT); - - /* Delete the entries for the pages that got freed */ - if (ui32FreePageCount && (pai32FreeIndices != NULL)) - { - for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) - { - uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize)); - - unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); - -#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE - /* - * Still need to map pages in case remap flag is set. 
- * That is not done until the remap case succeeds - */ -#endif - } - eError = PVRSRV_OK; - } - - if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA) - { - vm_flags_set(psVMA, VM_MIXEDMAP); - bMixedMap = IMG_TRUE; - } - else - { - if (ui32AllocPageCount && (NULL != pai32AllocIndices)) - { - for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) - { - - psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) - sPFN = page_to_pfn_t(psPage); - - if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) -#else - uiPFN = page_to_pfn(psPage); - - if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0)) -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ - { - bMixedMap = IMG_TRUE; - vm_flags_set(psVMA, VM_MIXEDMAP); - break; - } - } - } - } - - /* Map the pages that got allocated */ - if (ui32AllocPageCount && (NULL != pai32AllocIndices)) - { - for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) - { - int err; - - uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize)); - unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); - - if (bIsLMA) - { - phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr + - ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) - sPFN = phys_to_pfn_t(uiAddr, 0); - psPage = pfn_t_to_page(sPFN); -#else - uiPFN = uiAddr >> PAGE_SHIFT; - psPage = pfn_to_page(uiPFN); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ - } - else - { - psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) - sPFN = page_to_pfn_t(psPage); -#else - uiPFN = page_to_pfn(psPage); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ - } - - if (bMixedMap) - { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) - vm_fault_t vmf; - - vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); - if (vmf & VM_FAULT_ERROR) - { - err = vm_fault_to_errno(vmf, 0); - } - else - { - err = 0; - } -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) - err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); -#else - err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */ - } - else - { - err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage); - } - - if (err) - { - PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err)); - eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; - goto eFailed; - } - } - } - - eError = PVRSRV_OK; -eFailed: - mmap_write_unlock(psMM); - - return eError; -} - /*************************************************************************/ /*! 
@Function OSDebugSignalPID @Description Sends a SIGTRAP signal to a specific PID in user mode for @@ -1974,7 +1863,7 @@ typedef struct _OS_CLEANUP_DATA_ PFN_SERVER_CLEANUP pfnServerCleanup; void* pvServerCleanupData; - enum dma_transfer_direction eDirection; + enum dma_data_direction eDirection; struct sg_table **ppsSg; struct page ***pages; IMG_UINT32* puiNumPages; @@ -2031,11 +1920,16 @@ static int cleanup_thread(void *pvData) /* Unpin pages */ for (j=0; jpuiNumPages[i]; j++) { - if (psOSCleanup->eDirection == DMA_DEV_TO_MEM) + /* + * using DMA_FROM_DEVICE from enum dma_data_direction instead of DMA_DEV_TO_MEM + * from enum dma_transfer_direction to avoid casting explicitly + */ + if (psOSCleanup->eDirection == DMA_FROM_DEVICE) { set_page_dirty_lock(psOSCleanup->pages[i][j]); } - put_page(psOSCleanup->pages[i][j]); + + pvr_unpin_user_page_for_dma(psOSCleanup->pages[i][j]); } } else @@ -2073,11 +1967,16 @@ static int cleanup_thread(void *pvData) */ if (psOSCleanup->pages[i][j]) { - if (psOSCleanup->eDirection == DMA_DEV_TO_MEM) + /* + * using DMA_FROM_DEVICE from enum dma_data_direction instead of DMA_DEV_TO_MEM + * from enum dma_transfer_direction to avoid casting explicitly + */ + if (psOSCleanup->eDirection == DMA_FROM_DEVICE) { set_page_dirty_lock(psOSCleanup->pages[i][j]); } - put_page(psOSCleanup->pages[i][j]); + + pvr_unpin_user_page_for_dma(psOSCleanup->pages[i][j]); } } } @@ -2098,12 +1997,14 @@ static int cleanup_thread(void *pvData) OSFreeMem(psOSCleanup->uiNumValidPages); OSFreeMem(psOSCleanup); - if (sync_completion && bSucceed) + if (bSucceed) { - complete(sync_completion); + kthread_complete_and_exit(sync_completion, 0); + } + else + { + kthread_complete_and_exit(NULL, 0); } - - do_exit(0); return 0; } @@ -2130,7 +2031,7 @@ static void dma_callback(void *pvOSCleanup) spin_unlock_irqrestore(&psOSCleanup->spinlock, flags); } -#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) +#if defined(PVRSRV_DEBUG_DMA) static void DMADumpPhysicalAddresses(struct page **ppsHostMemPages, IMG_UINT32 uiNumPages, @@ -2149,16 +2050,20 @@ DMADumpPhysicalAddresses(struct page **ppsHostMemPages, if (uiIdx == 0) { sPagePhysAddr.uiAddr += ui64Offset; - PVR_DPF((PVR_DBG_MESSAGE, "\tHost mem start at 0x%llX", sPagePhysAddr.uiAddr)); + PVR_DPF((PVR_DBG_MESSAGE, + "\tHost mem start at 0x%" IMG_UINT64_FMTSPECx, + (IMG_UINT64)sPagePhysAddr.uiAddr)); } else { - PVR_DPF((PVR_DBG_MESSAGE, "\tHost Mem Page %d at 0x%llX", uiIdx, - sPagePhysAddr.uiAddr)); + PVR_DPF((PVR_DBG_MESSAGE, + "\tHost Mem Page %d at 0x%" IMG_UINT64_FMTSPECx, + uiIdx, (IMG_UINT64)sPagePhysAddr.uiAddr)); } } - PVR_DPF((PVR_DBG_MESSAGE, "Devmem CPU phys address: 0x%llX", - sDmaAddr->uiAddr)); + PVR_DPF((PVR_DBG_MESSAGE, + "Devmem CPU phys address: 0x%" IMG_UINT64_FMTSPECx, + (IMG_UINT64)sDmaAddr->uiAddr)); } #endif @@ -2343,13 +2248,13 @@ PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; OS_CLEANUP_DATA* psOSCleanupData = pvOSData; + enum dma_data_direction eDataDirection = bMemToDev ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; struct dma_slave_config sConfig = {0}; struct dma_async_tx_descriptor *psDesc; unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1); unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; int num_pinned_pages = 0; - unsigned int gup_flags = 0; struct sg_table *psSg = OSAllocZMem(sizeof(struct sg_table)); PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e0); @@ -2357,32 +2262,30 @@ PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(num_pages * sizeof(struct page *)); PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e1); - gup_flags |= bMemToDev ? 0 : FOLL_WRITE; + num_pinned_pages = pvr_pin_user_pages_for_dma( + puiAddress, + num_pages, + !bMemToDev, + psOSCleanupData->pages[psOSCleanupData->uiCount]); - num_pinned_pages = get_user_pages_fast( - (unsigned long)puiAddress, - (int)num_pages, - gup_flags, - psOSCleanupData->pages[psOSCleanupData->uiCount]); + psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = num_pinned_pages; if (num_pinned_pages != num_pages) { PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast failed: (%d - %u)", num_pinned_pages, num_pages)); eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e2; + goto e2; /* Unpin what was pinned and return error */ } -#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) +#if defined(PVRSRV_DEBUG_DMA) DMADumpPhysicalAddresses(psOSCleanupData->pages[psOSCleanupData->uiCount], num_pages, psDmaAddr, offset); #endif - psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = num_pinned_pages; - if (sg_alloc_table_from_pages(psSg, psOSCleanupData->pages[psOSCleanupData->uiCount], num_pages, offset, uiSize, GFP_KERNEL) != 0) { eError = PVRSRV_ERROR_BAD_MAPPING; PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed")); - goto e3; + goto e2; } if (bMemToDev) @@ -2399,25 +2302,25 @@ PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, } dmaengine_slave_config(pvChan, &sConfig); - iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); if (!iRet) { PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); eError = PVRSRV_ERROR_INVALID_PARAMS; - goto e4; + goto e3; } - dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction); + dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, eDataDirection); psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0); if (!psDesc) { PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); eError = PVRSRV_ERROR_INVALID_PARAMS; - goto e5; + goto e4; } - psOSCleanupData->eDirection = sConfig.direction; + psOSCleanupData->eDirection = eDataDirection; psOSCleanupData->ppsSg[psOSCleanupData->uiCount] = psSg; psOSCleanupData->pfnServerCleanup = pfnServerCleanup; psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; @@ -2425,31 +2328,27 @@ PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, psDesc->callback_param = psOSCleanupData; psDesc->callback = dma_callback; - if (bFirst) - { - struct task_struct* t1; - t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); - } + if (bFirst) + kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); psOSCleanupData->ppsDescriptors[psOSCleanupData->uiCount] = psDesc; psOSCleanupData->uiCount++; return PVRSRV_OK; 
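OSDmaPrepareTransfer() above pins the user buffer, builds an sg_table, maps it for DMA and hands it to the dmaengine slave API. Stripped of the cleanup bookkeeping, the core submission sequence looks like the sketch below (memory-to-device direction only; the dma_slave_config deliberately omits the address-width and burst settings a real peripheral would also need):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map an sg_table and queue it as a slave transfer on 'chan'; 'done' fires
 * from the dmaengine completion path. */
static int submit_slave_sg(struct device *dev, struct dma_chan *chan,
                           struct sg_table *sgt, dma_addr_t dev_addr,
                           dma_async_tx_callback done, void *done_arg)
{
        struct dma_slave_config cfg = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = dev_addr,
        };
        struct dma_async_tx_descriptor *desc;
        int nents;

        if (dmaengine_slave_config(chan, &cfg))
                return -EINVAL;

        nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        if (!nents)
                return -EIO;

        desc = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!desc) {
                dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
                return -EIO;
        }

        desc->callback = done;
        desc->callback_param = done_arg;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        return 0;
}

Note how the dma_data_direction used for dma_map_sg() and the dma_transfer_direction used by the dmaengine API are kept as two distinct values, which is exactly the split the eDataDirection change above introduces.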
-e5: - dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); e4: - sg_free_table(psSg); + dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); e3: + sg_free_table(psSg); +e2: { IMG_UINT32 i; /* Unpin pages */ for (i=0; i<psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]; i++) { - put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); + pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); } } -e2: OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]); e1: OSFreeMem(psSg); @@ -2496,6 +2395,7 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32Idx; IMG_INT32 i32Rwd; + enum dma_data_direction eDataDirection = bMemToDev ? DMA_TO_DEVICE : DMA_FROM_DEVICE; struct dma_slave_config sConfig = {0}; struct dma_async_tx_descriptor *psDesc; @@ -2503,7 +2403,6 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned int num_valid_pages = CalculateValidPages(pbValid, ui32SizeInPages); unsigned int num_pinned_pages = 0; - unsigned int gup_flags = 0; unsigned int valid_idx; size_t transfer_size; struct page ** next_pages; @@ -2529,8 +2428,6 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct dma_async_tx_descriptor *)); PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount], eError, e11); - gup_flags |= bMemToDev ? 0 : FOLL_WRITE; - for (ui32Idx = 0, valid_idx = 0; ui32Idx < ui32SizeInPages; ui32Idx++) { if (valid_idx == num_valid_pages) @@ -2580,11 +2477,12 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, next_pages = psOSCleanupData->pages[psOSCleanupData->uiCount] + (valid_idx * 2); - num_pinned_pages = get_user_pages_fast( - (unsigned long)pvNextAddress, - (int)num_pages, - gup_flags, + num_pinned_pages = pvr_pin_user_pages_for_dma( + pvNextAddress, + num_pages, + !bMemToDev, next_pages); + if (num_pinned_pages != num_pages) { PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast for sparse failed: (%d - %u)", num_pinned_pages, num_pages)); @@ -2592,7 +2490,7 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, goto e2; } -#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) +#if defined(PVRSRV_DEBUG_DMA) DMADumpPhysicalAddresses(next_pages, num_pages, &psDmaAddr[ui32Idx], (unsigned long)pvNextAddress & (ui32PageSize - 1)); @@ -2627,14 +2525,14 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, } dmaengine_slave_config(pvChan, &sConfig); - iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); if (!iRet) { PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); eError = PVRSRV_ERROR_INVALID_PARAMS; goto e5; } - dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction); + dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, eDataDirection); psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0); if (!psDesc) @@ -2642,7 +2540,7 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); eError = PVRSRV_ERROR_INVALID_PARAMS; - goto e6; + goto e6; }
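Note: both prepare paths now carry two direction values side by side: sConfig.direction (enum dma_transfer_direction, consumed by dmaengine_slave_config() and dmaengine_prep_slave_sg()) and eDataDirection (enum dma_data_direction, consumed by dma_map_sg(), dma_sync_sg_for_device() and the cleanup thread). The snippet below only illustrates the typical pairing implied by the hunks, assuming bMemToDev selects the host-to-device case; the helper name pvr_example_directions is hypothetical and not part of the driver.

/* Illustration of the two direction enums used above - not part of this patch. */
#include <linux/dmaengine.h>      /* enum dma_transfer_direction */
#include <linux/dma-direction.h>  /* enum dma_data_direction */

static void pvr_example_directions(bool bMemToDev,
                                   enum dma_transfer_direction *peSlaveDir,
                                   enum dma_data_direction *peMapDir)
{
	/* dmaengine slave API: describes the transfer from the DMA engine's point of view. */
	*peSlaveDir = bMemToDev ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	/* DMA mapping API: describes the cache maintenance needed for the CPU buffer. */
	*peMapDir = bMemToDev ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}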
psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][valid_idx] = psSg; @@ -2656,13 +2554,11 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, if (bFirst) { - struct task_struct* t1; - - psOSCleanupData->eDirection = sConfig.direction; + psOSCleanupData->eDirection = eDataDirection; psOSCleanupData->pfnServerCleanup = pfnServerCleanup; psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; - t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); + kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); } psOSCleanupData->uiCount++; @@ -2673,17 +2569,18 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, return PVRSRV_OK; e6: - dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); e5: sg_free_table(psSg); e4: OSFreeMem(psSg); e3: /* Unpin last */ - put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx]); + pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx]); + if (psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]) { - put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]); + pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]); } e2: /* rewind */ @@ -2692,7 +2589,7 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 i; psSg = psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][i32Rwd]; - dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); sg_free_table(psSg); /* Unpin pages */ @@ -2700,7 +2597,7 @@ PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, { if (psOSCleanupData->pages[psOSCleanupData->uiCount][i]) { - put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); + pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); } } } @@ -2743,7 +2640,11 @@ OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, PVR_LOG_GOTO_IF_NOMEM(heap, eError, ErrorExit); buf = dma_heap_buffer_alloc(heap, uiSize, 0, 0); - PVR_LOG_GOTO_IF_NOMEM(buf, eError, ErrorBufPut); + if (IS_ERR_OR_NULL(buf)) + { + PVR_LOG_GOTO_WITH_ERROR("dma_heap_buffer_alloc", eError, + PVRSRV_ERROR_OUT_OF_MEMORY, ErrorHeapPut); + } if (buf->size < uiSize) { @@ -2754,13 +2655,18 @@ OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, } buf_attachment = dma_buf_attach(buf, dev); - PVR_LOG_GOTO_IF_NOMEM(buf_attachment, eError, ErrorBufFree); + if (IS_ERR_OR_NULL(buf_attachment)) + { + PVR_LOG_GOTO_WITH_ERROR("dma_buf_attach", eError, + PVRSRV_ERROR_OUT_OF_MEMORY, ErrorBufFree); + } eError = PhysmemCreateNewDmaBufBackedPMR(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL], buf_attachment, NULL, PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE, + OSGetCurrentClientProcessIDKM(), buf->size, 1, 1, @@ -2770,27 +2676,39 @@ OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, ppsPMR); PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateNewDmaBufBackedPMR", ErrorBufDetach); + PhysmemSetDmaHeap(psPMR, heap); + return PVRSRV_OK; ErrorBufDetach: dma_buf_detach(buf, buf_attachment); ErrorBufFree: dma_heap_buffer_free(buf); -ErrorBufPut: - dma_buf_put(buf); +ErrorHeapPut: + dma_heap_put(heap); ErrorExit: - return eError; } IMG_INTERNAL void OSFreeSecBuf(PMR *psPMR) { + PVRSRV_ERROR eError; + struct dma_buf *buf = PhysmemGetDmaBuf(psPMR); - 
dma_buf_put(buf); + struct dma_heap *heap = PhysmemGetDmaHeap(psPMR); + + /* calls dma_buf_put() */ dma_heap_buffer_free(buf); - PMRUnrefPMR(psPMR); + if (heap != NULL) + { + PhysmemSetDmaHeap(psPMR, NULL); + dma_heap_put(heap); + } + + eError = PMRUnrefPMR(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); } #else /* PVR_ANDROID_HAS_DMA_HEAP_FIND */ IMG_INTERNAL PVRSRV_ERROR @@ -2829,7 +2747,7 @@ OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ErrorUnrefPMR: - PMRUnrefPMR(*ppsPMR); + (void) PMRUnrefPMR(*ppsPMR); #endif ErrorExit: return eError; @@ -2838,7 +2756,8 @@ OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_INTERNAL void OSFreeSecBuf(PMR *psPMR) { - PMRUnrefPMR(psPMR); + PVRSRV_ERROR eError = PMRUnrefPMR(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); } #endif #endif /* SUPPORT_SECURE_ALLOC_KM */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm.c index 8873f1ffeb1c..d3d3ab9f83fa 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm.c @@ -46,9 +46,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #else #include #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) - #include -#endif #include #include "pvrsrv_error.h" @@ -69,18 +66,22 @@ void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, IMG_CPU_PHYADDR sCPUPhysStart, IMG_CPU_PHYADDR sCPUPhysEnd) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) + struct device *dev = psDevNode->psDevConfig->pvOSDevice; + if (dev) + { + dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_TO_DEVICE); + dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_FROM_DEVICE); + } +#else PVR_UNREFERENCED_PARAMETER(psDevNode); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); -#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ - /* Inner cache */ - dmac_flush_range(pvVirtStart, pvVirtEnd); - - /* Outer cache */ - outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +#endif } void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, @@ -89,17 +90,18 @@ void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, IMG_CPU_PHYADDR sCPUPhysStart, IMG_CPU_PHYADDR sCPUPhysEnd) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) + struct device *dev = psDevNode->psDevConfig->pvOSDevice; + if (dev) + { + dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_TO_DEVICE); + } +#else PVR_UNREFERENCED_PARAMETER(psDevNode); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); -#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ - /* Inner cache */ - dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE); - - /* Outer 
cache */ - outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +#endif } void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, @@ -108,26 +110,24 @@ void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, IMG_CPU_PHYADDR sCPUPhysStart, IMG_CPU_PHYADDR sCPUPhysEnd) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) + struct device *dev = psDevNode->psDevConfig->pvOSDevice; + if (dev) + { + dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_FROM_DEVICE); + } +#else PVR_UNREFERENCED_PARAMETER(psDevNode); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); -#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ - /* Inner cache */ - dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE); - - /* Outer cache */ - outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +#endif } -OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + PVR_UNREFERENCED_PARAMETER(psDevNode); return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; -#else - return OS_CACHE_OP_ADDR_TYPE_BOTH; -#endif } /* User Enable Register */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm64.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm64.c index 38422bf68863..5d0419a00d48 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm64.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_arm64.c @@ -101,30 +101,35 @@ static inline void FlushRange(void *pvRangeAddrStart, begin_user_mode_access(); pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize); - for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) - { - switch (eCacheOp) - { - case PVRSRV_CACHE_OP_CLEAN: - asm volatile ("dc cvac, %0" :: "r" (pbBase)); - break; - case PVRSRV_CACHE_OP_INVALIDATE: - asm volatile ("dc ivac, %0" :: "r" (pbBase)); - break; + /* Memory-barrier */ + asm volatile("dsb sy" : : : "memory"); - case PVRSRV_CACHE_OP_FLUSH: + switch (eCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) + { + asm volatile ("dc cvac, %0" :: "r" (pbBase)); + } + break; + case PVRSRV_CACHE_OP_INVALIDATE: + case PVRSRV_CACHE_OP_FLUSH: + for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) + { asm volatile ("dc civac, %0" :: "r" (pbBase)); - break; - - default: - PVR_DPF((PVR_DBG_ERROR, - "%s: Cache maintenance operation type %d is invalid", - __func__, eCacheOp)); - break; - } + } + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Cache maintenance operation type %d is invalid", + __func__, eCacheOp)); + break; } + /* Memory-barrier */ + asm volatile("dsb sy" : : : "memory"); + end_user_mode_access(); } @@ -134,43 +139,20 @@ void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, IMG_CPU_PHYADDR sCPUPhysStart, IMG_CPU_PHYADDR sCPUPhysEnd) { - struct device *dev; - - if (pvVirtStart) - { - FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH); - return; - } - - dev = psDevNode->psDevConfig->pvOSDevice; - - if (dev) - { - dma_sync_single_for_device(dev, 
sCPUPhysStart.uiAddr, - sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, - DMA_TO_DEVICE); - dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, - sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, - DMA_FROM_DEVICE); - } - else + if (pvVirtStart == NULL) { /* - * Allocations done prior to obtaining device pointer may - * affect in cache operations being scheduled. - * - * Ignore operations with null device pointer. - * This prevents crashes on newer kernels that don't return dummy ops - * when null pointer is passed to get_dma_ops. - * + * Converting the physical addresses to virtual addresses allows us to + * utilize the assembly instruction that makes the Flush + Invalidate + * cache operation atomic. + * There is no state in-between the flush and the invalidate operation. */ - - /* Don't spam on nohw */ -#if !defined(NO_HARDWARE) - PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); -#endif + pvVirtStart = phys_to_virt(sCPUPhysStart.uiAddr); + pvVirtEnd = phys_to_virt(sCPUPhysEnd.uiAddr); } + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH); + return; } void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, @@ -197,21 +179,7 @@ void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, } else { - /* - * Allocations done prior to obtaining device pointer may - * affect in cache operations being scheduled. - * - * Ignore operations with null device pointer. - * This prevents crashes on newer kernels that don't return dummy ops - * when null pointer is passed to get_dma_ops. - * - */ - - - /* Don't spam on nohw */ -#if !defined(NO_HARDWARE) - PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); -#endif + PVR_DPF((PVR_DBG_ERROR, "Cache operation cannot be completed!")); } } @@ -240,26 +208,21 @@ void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, } else { - /* - * Allocations done prior to obtaining device pointer may - * affect in cache operations being scheduled. - * - * Ignore operations with null device pointer. - * This prevents crashes on newer kernels that don't return dummy ops - * when null pointer is passed to get_dma_ops. - * - */ - - /* Don't spam on nohw */ -#if !defined(NO_HARDWARE) - PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); -#endif + PVR_DPF((PVR_DBG_ERROR, "Cache operation cannot be completed!")); } } -OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) { + if (!psDevNode->psDevConfig->pvOSDevice) + { + /* Host Mem device node doesn't have an associated Linux dev ptr. + Use virtual addr ops instead of asking kernel to do physical + maintenance */ + return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; + } + return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_riscv.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_riscv.c index 8b03cdd62d76..2d2ad2360433 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_riscv.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_riscv.c @@ -68,14 +68,14 @@ void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, sCPUPhysEnd); } -//#if !defined(NO_HARDWARE) -// else -// { -// PVR_DPF((PVR_DBG_WARNING, -// "%s: System doesn't implement cache maintenance. Skipping!", -// __func__)); -// } -//#endif +#if !defined(NO_HARDWARE) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. 
Skipping!", + __func__)); + } +#endif } void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, @@ -142,7 +142,7 @@ void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, #endif } -OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) { /* * Need to obtain psDevNode here and do the following: @@ -154,27 +154,20 @@ OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) * Return BOTH for now on. * */ + PVR_UNREFERENCED_PARAMETER(psDevNode); return OS_CACHE_OP_ADDR_TYPE_BOTH; } void OSUserModeAccessToPerfCountersEn(void) { -//#if !defined(NO_HARDWARE) -// PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__)); -// PVR_ASSERT(0); -//#endif - PVR_DPF((PVR_DBG_WARNING, "%s: 20240529 Not implemented!", __func__)); +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__)); + PVR_ASSERT(0); +#endif } IMG_BOOL OSIsWriteCombineUnalignedSafe(void) { -//#if !defined(NO_HARDWARE) -// PVR_DPF((PVR_DBG_WARNING, -// "%s: Not implemented (assuming false)!", -// __func__)); -// PVR_ASSERT(0); -// return IMG_FALSE; -//#else + // EIC770X support unaligned access. return IMG_TRUE; -//#endif } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_x86.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_x86.c index 2c271d8f4c6f..2c5c145e268f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_x86.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/osfunc_x86.c @@ -61,18 +61,14 @@ static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) mb(); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) __uaccess_begin(); -#endif for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) { clflush(pbBase); } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) __uaccess_end(); -#endif mb(); } @@ -118,8 +114,9 @@ void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, x86_flush_cache_range(pvVirtStart, pvVirtEnd); } -OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) { + PVR_UNREFERENCED_PARAMETER(psDevNode); return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/ossecure_export.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/ossecure_export.c index 1070067cacbe..c1b4a7083bf3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/ossecure_export.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/ossecure_export.c @@ -101,7 +101,7 @@ PVRSRV_ERROR OSSecureExport(const IMG_CHAR *pszName, psSecureFileData->pfnReleaseFunc = pfnReleaseFunc; /* Allocate a fd number */ - secure_fd = get_unused_fd(); + secure_fd = get_unused_fd_flags(0); if (secure_fd < 0) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pg_walk_through.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pg_walk_through.c deleted file mode 100644 index c44ce00993f3..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pg_walk_through.c +++ /dev/null @@ -1,213 +0,0 @@ -/*************************************************************************/ /*! -@File pg_walk_through.c -@Title Non Services allocated memory wrap functionality support -@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -@Description Server-side component to support wrapping non-services - allocated memory, particularly by browsing through the - corresponding allocation host CPU map page table entries. -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) - -#include "physmem_extmem_wrap.h" -#include "kernel_compatibility.h" - -/* Find the PFN associated with a given CPU virtual address, and return the - * associated page structure, if it exists. - * The page in question must be present (i.e. no fault handling required), - * and must be writable. A get_page is done on the returned page structure. 
- */ -static IMG_BOOL _CPUVAddrToPFN(struct vm_area_struct *psVMArea, - uintptr_t uCPUVAddr, - unsigned long *pui32PFN, - struct page **ppsPage) -{ - pgd_t *psPGD; - p4d_t *psP4D; - pud_t *psPUD; - pmd_t *psPMD; - pte_t *psPTE; - struct mm_struct *psMM = psVMArea->vm_mm; - spinlock_t *psPTLock; - IMG_BOOL bRet = IMG_FALSE; - - *pui32PFN = 0; - *ppsPage = NULL; - - /* Walk the page tables to find the PTE */ - psPGD = pgd_offset(psMM, uCPUVAddr); - if (pgd_none(*psPGD) || pgd_bad(*psPGD)) - return bRet; - - psP4D = p4d_offset(psPGD, uCPUVAddr); - if (p4d_none(*psP4D) || unlikely(p4d_bad(*psP4D))) - return bRet; - - psPUD = pud_offset(psP4D, uCPUVAddr); - if (pud_none(*psPUD) || pud_bad(*psPUD)) - return bRet; - - psPMD = pmd_offset(psPUD, uCPUVAddr); - if (pmd_none(*psPMD) || pmd_bad(*psPMD)) - return bRet; - - psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, uCPUVAddr, &psPTLock); - - /* Check if the returned PTE is actually valid and writable */ - if ((pte_none(*psPTE) == 0) && (pte_present(*psPTE) != 0) && (pte_write(*psPTE) != 0)) - { - *pui32PFN = pte_pfn(*psPTE); - bRet = IMG_TRUE; - - /* In case the pfn is valid, meaning it is a RAM page and not - * IO-remapped, we can get the actual page struct from it. - */ - if (pfn_valid(*pui32PFN)) - { - *ppsPage = pfn_to_page(*pui32PFN); - - get_page(*ppsPage); - } - } - - pte_unmap_unlock(psPTE, psPTLock); - - return bRet; -} - -/* Find the VMA to a given CPU virtual address and do a page table walk - * to find the corresponding pfns - */ -PVRSRV_ERROR _TryFindVMA(IMG_DEVMEM_SIZE_T uiSize, - uintptr_t pvCpuVAddr, - PMR_WRAP_DATA *psPrivData) -{ - struct vm_area_struct *psVMArea; - uintptr_t pvCpuVAddrEnd = pvCpuVAddr + uiSize; - IMG_UINT32 i; - uintptr_t uAddr; - PVRSRV_ERROR eError = PVRSRV_OK; - - /* Find the VMA */ - psVMArea = psPrivData->psVMArea; - if (psVMArea == NULL) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Couldn't find memory region containing start address %p", - __func__, - (void*) pvCpuVAddr)); - eError = PVRSRV_ERROR_INVALID_CPU_ADDR; - goto e0; - } - - mmap_read_lock(current->mm); - - /* Does the region represent memory mapped I/O? 
*/ - if (!(psVMArea->vm_flags & VM_IO)) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", - __func__, - psVMArea->vm_flags)); - eError = PVRSRV_ERROR_INVALID_FLAGS; - goto e0; - } - - /* We require read and write access */ - if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: No read/write access to memory region (VMA flags: 0x%lx)", - __func__, - psVMArea->vm_flags)); - eError = PVRSRV_ERROR_INVALID_FLAGS; - goto e0; - } - - /* Do the actual page table walk and fill the private data arrays - * for page structs and physical addresses */ - for (uAddr = pvCpuVAddr, i = 0; - uAddr < pvCpuVAddrEnd; - uAddr += PAGE_SIZE, i++) - { - unsigned long ui32PFN = 0; - - PVR_ASSERT(i < psPrivData->uiTotalNumPages); - - if (!_CPUVAddrToPFN(psVMArea, uAddr, &ui32PFN, &psPrivData->ppsPageArray[i])) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Invalid CPU virtual address", - __func__)); - eError = PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES; - goto e1; - } - - psPrivData->ppvPhysAddr[i].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(ui32PFN << PAGE_SHIFT); - psPrivData->uiNumBackedPages += 1; - - if ((((IMG_UINT64) psPrivData->ppvPhysAddr[i].uiAddr) >> PAGE_SHIFT) != ui32PFN) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Page frame number out of range (%lu)", - __func__, - ui32PFN)); - eError = PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES; - goto e1; - } - } - - /* Check to confirm we saw every page */ - PVR_ASSERT(i == psPrivData->uiTotalNumPages); - mmap_read_unlock(current->mm); - return eError; - -e1: - for (; i != 0; i--) - { - if (psPrivData->ppsPageArray[i-1] != NULL) - { - put_page(psPrivData->ppsPageArray[i-1]); - } - } -e0: - mmap_read_unlock(current->mm); - return eError; -} -#endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf.c index a90b3b5879d0..97f84d6095bc 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf.c @@ -45,120 +45,917 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #include "physmem_dmabuf.h" +#include "physmem_dmabuf_internal.h" +#include "physmem.h" #include "pvrsrv.h" #include "pmr.h" -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#else +#include +#endif +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +#include "allocmem.h" +#include "osfunc.h" +#include "pmr_impl.h" +#include "pmr_env.h" +#include "hash.h" +#include "private_data.h" +#include "module_common.h" +#include "pvr_ion_stats.h" +#include "cache_km.h" + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +#include "mmap_stats.h" +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#include "kernel_compatibility.h" + +typedef struct _PMR_DMA_BUF_WRAPPER_ +{ + PMR *psPMR; + + /* + * Set if the PMR has been exported via GEM. This field should only + * be accessed using smp_store_release and smp_load_acquire. 
+ * In general, the field is not safe to access, as the wrapper doesn't + * hold a reference on the GEM object. + */ + struct drm_gem_object *psObj; + + struct dma_resv sDmaResv; + + /* kernel mapping */ + IMG_UINT32 uiKernelMappingRefCnt; + IMG_HANDLE hKernelMappingHandle; + void *pvKernelMappingAddr; + PMR_SIZE_T uiKernelMappingLen; + POS_LOCK hKernelMappingLock; + + /* device mapping */ + IMG_UINT32 uiDeviceMappingRefCnt; + POS_LOCK hDeviceMappingLock; + struct sg_table *psTable; +} PMR_DMA_BUF_WRAPPER; + +typedef struct _PMR_DMA_BUF_GEM_OBJ { + struct drm_gem_object sBase; + PMR_DMA_BUF_WRAPPER *psPMRWrapper; +} PMR_DMA_BUF_GEM_OBJ; + +#define TO_PMR_DMA_BUF_GEM_OBJ(psObj) IMG_CONTAINER_OF((psObj), PMR_DMA_BUF_GEM_OBJ, sBase) + +static PVRSRV_ERROR _PMRKMap(PMR_DMA_BUF_WRAPPER *psPMRWrapper, + size_t uiSize, + void **pvAddr) +{ + IMG_HANDLE hKernelMapping; + void *pvAddrOut = NULL; + size_t uiLengthOut = 0; + PVRSRV_ERROR eError = PVRSRV_OK; + + OSLockAcquire(psPMRWrapper->hKernelMappingLock); + + if (psPMRWrapper->uiKernelMappingRefCnt++ == 0) + { + PVR_ASSERT(psPMRWrapper->hKernelMappingHandle == NULL); + PVR_ASSERT(psPMRWrapper->pvKernelMappingAddr == NULL); + + if (PMR_IsSparse(psPMRWrapper->psPMR)) + { + eError = PMRAcquireSparseKernelMappingData(psPMRWrapper->psPMR, + 0, + uiSize, + &pvAddrOut, + &uiLengthOut, + &hKernelMapping); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", + ErrReleaseLock); + } + else + { + eError = PMRAcquireKernelMappingData(psPMRWrapper->psPMR, + 0, + uiSize, + &pvAddrOut, + &uiLengthOut, + &hKernelMapping); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", + ErrReleaseLock); + } + + psPMRWrapper->hKernelMappingHandle = hKernelMapping; + psPMRWrapper->pvKernelMappingAddr = pvAddrOut; + psPMRWrapper->uiKernelMappingLen = uiLengthOut; + } + + *pvAddr = psPMRWrapper->pvKernelMappingAddr; + goto ExitUnlock; + +ErrReleaseLock: + psPMRWrapper->uiKernelMappingRefCnt--; + +ExitUnlock: + OSLockRelease(psPMRWrapper->hKernelMappingLock); + return eError; +} + +static void _PMRKUnmap(PMR_DMA_BUF_WRAPPER *psPMRWrapper) +{ + OSLockAcquire(psPMRWrapper->hKernelMappingLock); + + if (--psPMRWrapper->uiKernelMappingRefCnt == 0) + { + PVRSRV_ERROR eError; + + PVR_ASSERT(psPMRWrapper->hKernelMappingHandle != NULL); + PVR_ASSERT(psPMRWrapper->pvKernelMappingAddr != NULL); + + eError = PMRReleaseKernelMappingData(psPMRWrapper->psPMR, + psPMRWrapper->hKernelMappingHandle); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + psPMRWrapper->hKernelMappingHandle = NULL; + psPMRWrapper->pvKernelMappingAddr = NULL; + psPMRWrapper->uiKernelMappingLen = 0; + } + + OSLockRelease(psPMRWrapper->hKernelMappingLock); +} + +static PVRSRV_ERROR _PMRInvalidateCache(PMR_DMA_BUF_WRAPPER *psPMRWrapper) +{ + PVRSRV_ERROR eError; + + OSLockAcquire(psPMRWrapper->hKernelMappingLock); + + if (psPMRWrapper->uiKernelMappingRefCnt == 0) { + OSLockRelease(psPMRWrapper->hKernelMappingLock); + return PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + } + + eError = CacheOpValExec(psPMRWrapper->psPMR, + (IMG_UINT64) (uintptr_t) psPMRWrapper->pvKernelMappingAddr, + 0, + psPMRWrapper->uiKernelMappingLen, + PVRSRV_CACHE_OP_INVALIDATE); + PVR_LOG_IF_ERROR(eError, "CacheOpValExec"); + + OSLockRelease(psPMRWrapper->hKernelMappingLock); + + return eError; +} + +static PVRSRV_ERROR _PMRCleanCache(PMR_DMA_BUF_WRAPPER *psPMRWrapper) +{ + PVRSRV_ERROR eError; + + OSLockAcquire(psPMRWrapper->hKernelMappingLock); + + if (psPMRWrapper->uiKernelMappingRefCnt == 0) { + 
OSLockRelease(psPMRWrapper->hKernelMappingLock); + return PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + } + + eError = CacheOpValExec(psPMRWrapper->psPMR, + (IMG_UINT64) (uintptr_t) psPMRWrapper->pvKernelMappingAddr, + 0, + psPMRWrapper->uiKernelMappingLen, + PVRSRV_CACHE_OP_FLUSH); + PVR_LOG_IF_ERROR(eError, "CacheOpValExec"); + + OSLockRelease(psPMRWrapper->hKernelMappingLock); + + return eError; +} + +/* + * dma_buf_ops common code + * + * Implementation of below callbacks adds the ability to export DmaBufs to other + * drivers. + * The following common functions are used by both the dma_buf and GEM + * callbacks. + */ + +static int PVRDmaBufOpsAttachCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, + struct dma_buf_attachment *psAttachment) +{ + PMR *psPMR = psPMRWrapper->psPMR; + + if (PMR_GetType(psPMR) == PMR_TYPE_DMABUF) + { + // don't support exporting PMRs that are itself created from imported + // DmaBufs + PVR_DPF((PVR_DBG_ERROR, "exporting PMRs of type DMABUF not supported")); + + return -ENOTSUPP; + } + + return 0; +} + +static struct sg_table *PVRDmaBufOpsMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, + struct dma_buf_attachment *psAttachment, + enum dma_data_direction eDirection) +{ + PVRSRV_ERROR eError; + PMR *psPMR = psPMRWrapper->psPMR; + IMG_UINT uiNents; + IMG_DEVMEM_SIZE_T uiPhysSize, uiVirtSize; + IMG_UINT32 uiNumVirtPages; + void *pvPAddrData = NULL; + IMG_DEV_PHYADDR asPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC], *psPAddr = asPAddr; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC], *pbValid = abValid; + IMG_UINT32 i; + IMG_DEV_PHYADDR sPAddrPrev, sPAddrCurr; + struct sg_table *psTable; + struct scatterlist *psSg; + int iRet = 0; + IMG_UINT32 uiDevPageShift, uiDevPageSize; + + OSLockAcquire(psPMRWrapper->hDeviceMappingLock); + + psPMRWrapper->uiDeviceMappingRefCnt++; + + if (psPMRWrapper->uiDeviceMappingRefCnt > 1) + { + goto OkUnlock; + } + + PVR_ASSERT(psPMRWrapper->psTable == NULL); + + uiDevPageShift = PMR_GetLog2Contiguity(psPMR); + uiDevPageSize = 1u << uiDevPageShift; + uiVirtSize = PMR_LogicalSize(psPMR); + uiPhysSize = PMR_PhysicalSize(psPMR); + if (uiPhysSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, "invalid PMR size")); + iRet = PVRSRVToNativeError(PVRSRV_ERROR_BAD_MAPPING); + goto ErrUnlockMapping; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s(): mapping pmr: 0x%p@0x%" IMG_UINT64_FMTSPECx + " from heap: \"%s\" with page size: 0x%x", __func__, psPMR, + uiPhysSize, PhysHeapName(PMR_PhysHeap(psPMR)), uiDevPageSize)); + + uiNumVirtPages = uiVirtSize >> uiDevPageShift; + + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_LOG_IF_ERROR(eError, "PMRLockSysPhysAddresses"); + iRet = PVRSRVToNativeError(eError); + goto ErrUnlockMapping; + } + + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + + psTable = OSAllocZMem(sizeof(*psTable)); + if (psTable == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSAllocMem.1() failed")); + iRet = -ENOMEM; + goto ErrUnlockPhysAddresses; + } + + if (uiNumVirtPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pvPAddrData = OSAllocMem(uiNumVirtPages * (sizeof(*psPAddr) + sizeof(*pbValid))); + if (pvPAddrData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSAllocMem.2() failed")); + iRet = -ENOMEM; + goto ErrFreeTable; + } + + psPAddr = IMG_OFFSET_ADDR(pvPAddrData, 0); + pbValid = IMG_OFFSET_ADDR(pvPAddrData, uiNumVirtPages * sizeof(*psPAddr)); + } + + eError = PMR_DevPhysAddr(psPMR, + uiDevPageShift, + uiNumVirtPages, + 0, + psPAddr, + pbValid, + DEVICE_USE); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PMR_DevPhysAddr"); + iRet = 
PVRSRVToNativeError(eError); + goto ErrFreePAddrData; + } + + /* Calculate how many contiguous regions there are in the PMR. This + * value will be used to allocate one scatter list for every region. */ + + /* Find first valid physical address. */ + for (i = 0; i < uiNumVirtPages && !pbValid[i]; i++); + + sPAddrPrev = psPAddr[i]; + uiNents = 1; + + PVR_DPF((PVR_DBG_MESSAGE, "%s(): %03u paddr: 0x%" IMG_UINT64_FMTSPECx, + __func__, i, sPAddrPrev.uiAddr)); + + /* Find the rest of the addresses. */ + for (i = i + 1; i < uiNumVirtPages; i++) + { + if (!pbValid[i]) + { + continue; + } + + sPAddrCurr = psPAddr[i]; + + PVR_DPF((PVR_DBG_MESSAGE, "%s(): %03u paddr: 0x%" IMG_UINT64_FMTSPECx + ", pprev: 0x%" IMG_UINT64_FMTSPECx ", valid: %u", __func__, + i, sPAddrCurr.uiAddr, sPAddrPrev.uiAddr, pbValid[i])); + + if (sPAddrCurr.uiAddr != (sPAddrPrev.uiAddr + uiDevPageSize)) + { + uiNents++; + } + + sPAddrPrev = sPAddrCurr; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s(): found %u contiguous regions", __func__, + uiNents)); + + /* Allocate scatter lists from all of the contiguous regions. */ + + iRet = sg_alloc_table(psTable, uiNents, GFP_KERNEL); + if (iRet != 0) + { + PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table() failed with error %d", iRet)); + goto ErrFreePAddrData; + } + + /* Fill in all of the physical addresses and sizes for all of the + * contiguous regions that were calculated. */ + + for (i = 0; i < uiNumVirtPages && !pbValid[i]; i++); + + psSg = psTable->sgl; + sPAddrPrev = psPAddr[i]; + + sg_dma_address(psSg) = sPAddrPrev.uiAddr; + sg_dma_len(psSg) = uiDevPageSize; + + for (i = i + 1; i < uiNumVirtPages; i++) + { + if (!pbValid[i]) + { + continue; + } + + sPAddrCurr = psPAddr[i]; + + if (sPAddrCurr.uiAddr != (sPAddrPrev.uiAddr + uiDevPageSize)) + { + psSg = sg_next(psSg); + PVR_ASSERT(psSg != NULL); + + sg_dma_address(psSg) = sPAddrCurr.uiAddr; + sg_dma_len(psSg) = uiDevPageSize; + } + else + { + sg_dma_len(psSg) += uiDevPageSize; + } + + sPAddrPrev = sPAddrCurr; + } + + if (pvPAddrData != NULL) + { + OSFreeMem(pvPAddrData); + } + + psPMRWrapper->psTable = psTable; + +OkUnlock: + OSLockRelease(psPMRWrapper->hDeviceMappingLock); + + return psPMRWrapper->psTable; + +ErrFreePAddrData: + if (pvPAddrData != NULL) + { + OSFreeMem(pvPAddrData); + } +ErrFreeTable: + OSFreeMem(psTable); +ErrUnlockPhysAddresses: + { + PVRSRV_ERROR eError2 = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError2, "PMRUnlockSysPhysAddresses"); + } +ErrUnlockMapping: + psPMRWrapper->uiDeviceMappingRefCnt--; + OSLockRelease(psPMRWrapper->hDeviceMappingLock); + + return ERR_PTR(iRet); +} + +static void PVRDmaBufOpsUnmapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, + struct dma_buf_attachment *psAttachment, + struct sg_table *psTable, + enum dma_data_direction eDirection) +{ + PVRSRV_ERROR eError; + + OSLockAcquire(psPMRWrapper->hDeviceMappingLock); + + if (psPMRWrapper->uiDeviceMappingRefCnt == 0) + { + PVR_DPF((PVR_DBG_ERROR, "reference count on mapping already 0")); + goto ErrUnlock; + } + + psPMRWrapper->uiDeviceMappingRefCnt--; + + if (psPMRWrapper->uiDeviceMappingRefCnt > 0) + { + goto ErrUnlock; + } + + dma_unmap_sg(psAttachment->dev, psTable->sgl, psTable->nents, eDirection); + sg_free_table(psTable); + + eError = PMRUnlockSysPhysAddresses(psPMRWrapper->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + + OSFreeMem(psPMRWrapper->psTable); + psPMRWrapper->psTable = NULL; + +ErrUnlock: + OSLockRelease(psPMRWrapper->hDeviceMappingLock); +} + +static int PVRDmaBufOpsBeginCpuAccessCommon(PMR_DMA_BUF_WRAPPER 
*psPMRWrapper, + enum dma_data_direction eDirection) +{ + if (PVRSRV_CHECK_CPU_CACHED(PMR_Flags(psPMRWrapper->psPMR))) + { + PVRSRV_ERROR eError = _PMRInvalidateCache(psPMRWrapper); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "_PMRInvalidateCache"); + return OSPVRSRVToNativeError(eError); + } + } + + return 0; +} + +static int PVRDmaBufOpsEndCpuAccessCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, + enum dma_data_direction eDirection) +{ + if (PVRSRV_CHECK_CPU_CACHED(PMR_Flags(psPMRWrapper->psPMR))) + { + PVRSRV_ERROR eError = _PMRCleanCache(psPMRWrapper); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "_PMRCleanCache"); + return OSPVRSRVToNativeError(eError); + } + } + + return 0; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) +static void *PVRDmaBufOpsKMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, unsigned long uiPageNum) +{ + return NULL; +} + +static void PVRDmaBufOpsKUnMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, unsigned long uiPageNum, void *pvMem) +{ +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +static void *PVRDmaBufOpsVMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper) +{ + void *pvAddrOut = NULL; + PVRSRV_ERROR eError; + + eError = _PMRKMap(psPMRWrapper, 0, &pvAddrOut); + + return eError == PVRSRV_OK ? pvAddrOut : NULL; +} +#else +static int PVRDmaBufOpsVMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, struct iosys_map *psMap) +{ + void *pvAddrOut = NULL; + PVRSRV_ERROR eError; + + eError = _PMRKMap(psPMRWrapper, 0, &pvAddrOut); + + if (eError != PVRSRV_OK) + { + return OSPVRSRVToNativeError(eError); + } + + if (PhysHeapGetType(PMR_PhysHeap(psPMRWrapper->psPMR)) == PHYS_HEAP_TYPE_UMA) + { + iosys_map_set_vaddr(psMap, pvAddrOut); + } + else + { + iosys_map_set_vaddr_iomem(psMap, pvAddrOut); + } + + return 0; +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +static void PVRDmaBufOpsVUnMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, void *pvAddr) +{ + _PMRKUnmap(psPMRWrapper); +} +#else +static void PVRDmaBufOpsVUnMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, struct iosys_map *psMap) +{ + _PMRKUnmap(psPMRWrapper); + + iosys_map_clear(psMap); +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ + +static int PVRDmaBufOpsMMapCommon(PMR_DMA_BUF_WRAPPER *psPMRWrapper, struct vm_area_struct *psVMA) +{ + PMR *psPMR = psPMRWrapper->psPMR; + PVRSRV_MEMALLOCFLAGS_T uiProtFlags = + (BITMASK_HAS(psVMA->vm_flags, VM_READ) ? PVRSRV_MEMALLOCFLAG_CPU_READABLE : 0) | + (BITMASK_HAS(psVMA->vm_flags, VM_WRITE) ? PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0); + PVRSRV_ERROR eError; + + /* Forcibly clear the VM_MAYWRITE flag as this is inherited from the + * kernel mmap code and we do not want to produce a potentially writable + * mapping from a read-only mapping. + */ + if (!BITMASK_HAS(psVMA->vm_flags, VM_WRITE)) + { + pvr_vm_flags_clear(psVMA, VM_MAYWRITE); + } + + eError = PMRMMapPMR(psPMR, psVMA, uiProtFlags); + if (eError != PVRSRV_OK) + { + PVR_LOG_IF_ERROR(eError, "PMRMMapPMR"); + return OSPVRSRVToNativeError(eError); + } + + return 0; +} + +/* end of dma_buf_ops common code*/ + +/* + * dma_buf_ops (non-GEM) + * + * Implementation of below callbacks adds the ability to export DmaBufs to other + * drivers. 
+ */ + +static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ + !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) + struct device *psDev, +#endif + struct dma_buf_attachment *psAttachment) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + return PVRDmaBufOpsAttachCommon(psPMRWrapper, psAttachment); +} + +static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment, + enum dma_data_direction eDirection) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psAttachment->dmabuf->priv; + + return PVRDmaBufOpsMapCommon(psPMRWrapper, psAttachment, eDirection); +} + +static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment, + struct sg_table *psTable, + enum dma_data_direction eDirection) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psAttachment->dmabuf->priv; + + PVRDmaBufOpsUnmapCommon(psPMRWrapper, psAttachment, psTable, eDirection); +} + +static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + PMR *psPMR = psPMRWrapper->psPMR; + + PVRSRV_ERROR eError = PMRUnrefPMR(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); +} + +static int PVRDmaBufOpsBeginCpuAccess(struct dma_buf *psDmaBuf, + enum dma_data_direction eDirection) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + return PVRDmaBufOpsBeginCpuAccessCommon(psPMRWrapper, eDirection); +} + +static int PVRDmaBufOpsEndCpuAccess(struct dma_buf *psDmaBuf, + enum dma_data_direction eDirection) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + int iErr; + + iErr = PVRDmaBufOpsEndCpuAccessCommon(psPMRWrapper, eDirection); + + return iErr; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) +static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + return PVRDmaBufOpsKMapCommon(psPMRWrapper, uiPageNum); +} + +static void PVRDmaBufOpsKUnMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum, void *pvMem) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + return PVRDmaBufOpsKUnMapCommon(psPMRWrapper, uiPageNum, pvMem); +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +static void *PVRDmaBufOpsVMap(struct dma_buf *psDmaBuf) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + return PVRDmaBufOpsVMapCommon(psPMRWrapper); +} +#else +static int PVRDmaBufOpsVMap(struct dma_buf *psDmaBuf, struct iosys_map *psMap) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + return PVRDmaBufOpsVMapCommon(psPMRWrapper, psMap); +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +static void PVRDmaBufOpsVUnMap(struct dma_buf *psDmaBuf, void *pvAddr) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + PVRDmaBufOpsVUnMapCommon(psPMRWrapper, pvAddr); +} +#else +static void PVRDmaBufOpsVUnMap(struct dma_buf *psDmaBuf, struct iosys_map *psMap) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; -#include -#include -#include -#include + PVRDmaBufOpsVUnMapCommon(psPMRWrapper, psMap); +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -#include "img_types.h" -#include "img_defs.h" -#include "pvr_debug.h" -#include "pvrsrv_error.h" -#include "pvrsrv_memallocflags.h" +static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; -#include "allocmem.h" -#include "osfunc.h" -#include 
"pmr_impl.h" -#include "hash.h" -#include "private_data.h" -#include "module_common.h" -#include "pvr_ion_stats.h" + PVR_DPF((PVR_DBG_MESSAGE, "%s(): psDmaBuf = %px, psPMR = %px", __func__, + psDmaBuf, psPMRWrapper->psPMR)); -#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -#include "ri_server.h" -#endif + return PVRDmaBufOpsMMapCommon(psPMRWrapper, psVMA); +} -#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -#include "mmap_stats.h" +static const struct dma_buf_ops sPVRDmaBufOps = +{ + .attach = PVRDmaBufOpsAttach, + .map_dma_buf = PVRDmaBufOpsMap, + .unmap_dma_buf = PVRDmaBufOpsUnmap, + .release = PVRDmaBufOpsRelease, + .begin_cpu_access = PVRDmaBufOpsBeginCpuAccess, + .end_cpu_access = PVRDmaBufOpsEndCpuAccess, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ + !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) + .map_atomic = PVRDmaBufOpsKMap, #endif - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) -#include "process_stats.h" +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) + .map = PVRDmaBufOpsKMap, + .unmap = PVRDmaBufOpsKUnMap, #endif +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */ + .kmap_atomic = PVRDmaBufOpsKMap, + .kmap = PVRDmaBufOpsKMap, + .kunmap = PVRDmaBufOpsKUnMap, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */ + .mmap = PVRDmaBufOpsMMap, -#include "kernel_compatibility.h" + .vmap = PVRDmaBufOpsVMap, + .vunmap = PVRDmaBufOpsVUnMap, +}; + +/* end of dma_buf_ops (non-GEM) */ -#include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) -MODULE_IMPORT_NS(DMA_BUF); -#endif /* - * dma_buf_ops + * dma_buf_ops (GEM) * - * These are all returning errors if used. - * The point is to prevent anyone outside of our driver from importing - * and using our dmabuf. + * Implementation of below callbacks adds the ability to export DmaBufs to other + * drivers. 
*/ -static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, +static int PVRDmaBufOpsAttachGEM(struct dma_buf *psDmaBuf, #if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) struct device *psDev, #endif struct dma_buf_attachment *psAttachment) { - return -ENOSYS; + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsAttachCommon(psPMRWrapper, psAttachment); } -static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment, +static struct sg_table *PVRDmaBufOpsMapGEM(struct dma_buf_attachment *psAttachment, enum dma_data_direction eDirection) { - /* Attach hasn't been called yet */ - return ERR_PTR(-EINVAL); + struct drm_gem_object *psObj = psAttachment->dmabuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsMapCommon(psPMRWrapper, psAttachment, eDirection); } -static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment, +static void PVRDmaBufOpsUnmapGEM(struct dma_buf_attachment *psAttachment, struct sg_table *psTable, enum dma_data_direction eDirection) { + struct drm_gem_object *psObj = psAttachment->dmabuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + PVRDmaBufOpsUnmapCommon(psPMRWrapper, psAttachment, psTable, eDirection); } -static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf) +static int PVRDmaBufOpsBeginCpuAccessGEM(struct dma_buf *psDmaBuf, + enum dma_data_direction eDirection) +{ + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsBeginCpuAccessCommon(psPMRWrapper, eDirection); +} + +static int PVRDmaBufOpsEndCpuAccessGEM(struct dma_buf *psDmaBuf, + enum dma_data_direction eDirection) { - PMR *psPMR = (PMR *) psDmaBuf->priv; + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + int iErr; - PMRUnrefPMR(psPMR); + iErr = PVRDmaBufOpsEndCpuAccessCommon(psPMRWrapper, eDirection); + + return iErr; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) -static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum) +static void *PVRDmaBufOpsKMapGEM(struct dma_buf *psDmaBuf, unsigned long uiPageNum) { - return ERR_PTR(-ENOSYS); + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsKMapCommon(psPMRWrapper, uiPageNum); +} + +static void PVRDmaBufOpsKUnMapGEM(struct dma_buf *psDmaBuf, unsigned long uiPageNum, void *pvMem) +{ + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsKUnMapCommon(psPMRWrapper, uiPageNum, pvMem); } #endif -static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +static void *PVRDmaBufOpsVMapGEM(struct dma_buf *psDmaBuf) +{ + struct drm_gem_object *psObj = 
psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsVMapCommon(psPMRWrapper); +} +#else +static int PVRDmaBufOpsVMapGEM(struct dma_buf *psDmaBuf, struct iosys_map *psMap) { - return -ENOSYS; + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + return PVRDmaBufOpsVMapCommon(psPMRWrapper, psMap); } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -static const struct dma_buf_ops sPVRDmaBufOps = +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +static void PVRDmaBufOpsVUnMapGEM(struct dma_buf *psDmaBuf, void *pvAddr) { - .attach = PVRDmaBufOpsAttach, - .map_dma_buf = PVRDmaBufOpsMap, - .unmap_dma_buf = PVRDmaBufOpsUnmap, - .release = PVRDmaBufOpsRelease, + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + PVRDmaBufOpsVUnMapCommon(psPMRWrapper, pvAddr); +} +#else +static void PVRDmaBufOpsVUnMapGEM(struct dma_buf *psDmaBuf, struct iosys_map *psMap) +{ + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + PVRDmaBufOpsVUnMapCommon(psPMRWrapper, psMap); +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ + +static int PVRDmaBufOpsMMapGEM(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA) +{ + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + PVR_DPF((PVR_DBG_MESSAGE, "%s(): psDmaBuf = %px, psPMR = %px", __func__, + psDmaBuf, psPMRWrapper->psPMR)); + + return PVRDmaBufOpsMMapCommon(psPMRWrapper, psVMA); +} + +static const struct dma_buf_ops sPVRDmaBufOpsGEM = +{ + .attach = PVRDmaBufOpsAttachGEM, + .map_dma_buf = PVRDmaBufOpsMapGEM, + .unmap_dma_buf = PVRDmaBufOpsUnmapGEM, + .release = drm_gem_dmabuf_release, + .begin_cpu_access = PVRDmaBufOpsBeginCpuAccessGEM, + .end_cpu_access = PVRDmaBufOpsEndCpuAccessGEM, #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) #if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) - .map_atomic = PVRDmaBufOpsKMap, + .map_atomic = PVRDmaBufOpsKMapGEM, #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) - .map = PVRDmaBufOpsKMap, -#endif -#else - .kmap_atomic = PVRDmaBufOpsKMap, - .kmap = PVRDmaBufOpsKMap, + .map = PVRDmaBufOpsKMapGEM, + .unmap = PVRDmaBufOpsKUnMapGEM, #endif - .mmap = PVRDmaBufOpsMMap, +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */ + .kmap_atomic = PVRDmaBufOpsKMapGEM, + .kmap = PVRDmaBufOpsKMapGEM, + .kunmap = PVRDmaBufOpsKUnMapGEM, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */ + .mmap = PVRDmaBufOpsMMapGEM, + + .vmap = PVRDmaBufOpsVMapGEM, + .vunmap = PVRDmaBufOpsVUnMapGEM, }; -/* end of dma_buf_ops */ +/* end of dma_buf_ops (GEM) */ typedef struct _PMR_DMA_BUF_DATA_ @@ -166,8 +963,12 @@ typedef struct _PMR_DMA_BUF_DATA_ /* Filled in at PMR create time */ PHYS_HEAP *psPhysHeap; struct dma_buf_attachment *psAttachment; +#if defined(SUPPORT_SECURE_ALLOC_KM) && defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) + struct dma_heap *psDmaHeap; +#endif PFN_DESTROY_DMABUF_PMR pfnDestroy; IMG_BOOL 
bPoisonOnFree; + IMG_PID uiOriginPID; /* Mapping information. */ struct iosys_map sMap; @@ -177,12 +978,14 @@ typedef struct _PMR_DMA_BUF_DATA_ IMG_DEV_PHYADDR *pasDevPhysAddr; IMG_UINT32 ui32PhysPageCount; IMG_UINT32 ui32VirtPageCount; + + IMG_BOOL bZombie; } PMR_DMA_BUF_DATA; /* Start size of the g_psDmaBufHash hash table */ #define DMA_BUF_HASH_SIZE 20 -static DEFINE_MUTEX(g_HashLock); +static DEFINE_MUTEX(g_FactoryLock); static HASH_TABLE *g_psDmaBufHash; static IMG_UINT32 g_ui32HashRefCount; @@ -193,6 +996,106 @@ static IMG_UINT32 g_ui32HashRefCount; #define pvr_sg_length(sg) sg_dma_len(sg) #endif +/* + * _pvr_dma_resv_lock + * + * Locks a dma_buf's reservation object (dma_resv). This is required when calling + * certain functions from the dma_buf interface (for more info see: + * https://docs.kernel.org/driver-api/dma-buf.html#dma-buf-locking-convention). + * + * This is a wrapper for the kernel's own locking functions which have changed + * over a number of versions. + * + * The dma_resv uses a ww_mutex which provides additional behaviours to prevent + * deadlocks. However, this function expects that only a single lock will be + * locked in a function at a time making the error code returned is redundant. + * + * If this changes a ww_acquire_ctx should be used. + * For more information about ww_mutex and ww_acquire_ctx see: + * https://www.kernel.org/doc/Documentation/locking/ww-mutex-design.txt + * + * Unlock the dma_resv lock with _pvr_dma_resv_unlock + */ +static inline void _pvr_dma_resv_lock(const struct dma_buf *psDMABuf) +{ + int iErr = 0; + + PVR_ASSERT(psDMABuf != NULL); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + iErr = dma_resv_lock(psDMABuf->resv, NULL); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + iErr = reservation_object_lock(psDMABuf->resv, NULL); +#else + iErr = ww_mutex_lock(&psDMABuf->resv->lock, NULL); +#endif + + /* This is not expected to occur, but if it does we are notified. */ + PVR_LOG_IF_FALSE_VA(PVR_DBG_ERROR, + iErr == 0, + "Attempt to lock dma_resv resulted in %d", + iErr); +} + +/* + * _pvr_dma_resv_unlock + * + * Unlocks a dma_buf's dma_resv struct, previously locked with _pvr_dma_resv_lock. + */ +static inline void _pvr_dma_resv_unlock(const struct dma_buf *psDMABuf) +{ + PVR_ASSERT(psDMABuf != NULL); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + dma_resv_unlock(psDMABuf->resv); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + reservation_object_unlock(psDMABuf->resv); +#else + ww_mutex_unlock(&psDMABuf->resv->lock); +#endif +} + +static inline PVRSRV_ERROR _pvr_dma_buf_map_attachment(struct dma_buf_attachment *psAttachment, + enum dma_data_direction eDirection, + struct sg_table **ppsTable) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0)) + /* DMA buf reservation locks will be taken internally */ + *ppsTable = dma_buf_map_attachment_unlocked(psAttachment, eDirection); +#else + _pvr_dma_resv_lock(psAttachment->dmabuf); + *ppsTable = dma_buf_map_attachment(psAttachment, eDirection); + _pvr_dma_resv_unlock(psAttachment->dmabuf); +#endif + + if (IS_ERR_OR_NULL(*ppsTable)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map attachment. 
ppsTable = %p", + __func__, + *ppsTable)); + eError = PVRSRV_ERROR_DMABUF_ATTACHMENT_MAPPING; + } + + return eError; +} + +static inline void _pvr_dma_buf_unmap_attachment(struct dma_buf_attachment *psAttachment, + struct sg_table *psTable, + enum dma_data_direction eDirection) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0)) + /* DMA buf reservation locks will be taken internally */ + dma_buf_unmap_attachment_unlocked(psAttachment, psTable, eDirection); +#else + _pvr_dma_resv_lock(psAttachment->dmabuf); + dma_buf_unmap_attachment(psAttachment, psTable, eDirection); + _pvr_dma_resv_unlock(psAttachment->dmabuf); +#endif +} + static int DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc) { @@ -210,6 +1113,7 @@ DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc) goto err_out; } + _pvr_dma_resv_lock(psDmaBuf); err = dma_buf_vmap(psDmaBuf, &sMap); if (err) { @@ -249,6 +1153,8 @@ DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc) err = 0; exit_end_access: + _pvr_dma_resv_unlock(psDmaBuf); + do { err_end_access = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); } while (err_end_access == -EAGAIN || err_end_access == -EINTR); @@ -271,121 +1177,142 @@ DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc) * PMR callback functions * *****************************************************************************/ -static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +/* This function is protected by the pfn(Get/Release)PMRFactoryLock() lock + * acquired/released in _UnrefAndMaybeDestroy() in pmr.c. */ +static void PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv) { PMR_DMA_BUF_DATA *psPrivData = pvPriv; struct dma_buf_attachment *psAttachment = psPrivData->psAttachment; struct dma_buf *psDmaBuf = psAttachment->dmabuf; struct sg_table *psSgTable = psPrivData->psSgTable; - PMR *psPMR; - PVRSRV_ERROR eError = PVRSRV_OK; - if (psDmaBuf->ops != &sPVRDmaBufOps) + if (psDmaBuf->ops != &sPVRDmaBufOps && psDmaBuf->ops != &sPVRDmaBufOpsGEM) { if (g_psDmaBufHash) { /* We have a hash table so check if we've seen this dmabuf before */ - psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); - - if (psPMR) + if (HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf) != 0U) { - if (!PMRIsPMRLive(psPMR)) + g_ui32HashRefCount--; + + if (g_ui32HashRefCount == 0) { - HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf); - g_ui32HashRefCount--; - - if (g_ui32HashRefCount == 0) - { - HASH_Delete(g_psDmaBufHash); - g_psDmaBufHash = NULL; - } - } - else{ - eError = PVRSRV_ERROR_PMR_STILL_REFERENCED; + HASH_Delete(g_psDmaBufHash); + g_psDmaBufHash = NULL; } } + PVRSRVIonRemoveMemAllocRecord(psDmaBuf); } - }else - { - psPMR = (PMR *) psDmaBuf->priv; - if (PMRIsPMRLive(psPMR)) - { - eError = PVRSRV_ERROR_PMR_STILL_REFERENCED; - } - } - if (PVRSRV_OK != eError) +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(SUPPORT_PMR_DEFERRED_FREE) + if (psPrivData->bZombie) { - return eError; + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + psPrivData->uiOriginPID); + } + else +#endif + { + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + psPrivData->uiOriginPID); } - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, - psPrivData->ui32PhysPageCount << PAGE_SHIFT, - OSGetCurrentClientProcessIDKM()); #endif psPrivData->ui32PhysPageCount = 0; - 
dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL); - + _pvr_dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL); if (psPrivData->bPoisonOnFree) { - int err; + int err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE, + __func__); + PVR_LOG_IF_FALSE(err != 0, "Failed to poison allocation before free"); - err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE, __func__); - if (err) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before " - "free", __func__)); - PVR_ASSERT(IMG_FALSE); - } + PVR_ASSERT(err != 0); } if (psPrivData->pfnDestroy) { - eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment); - if (eError != PVRSRV_OK) - { - return eError; - } + psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment); } OSFreeMem(psPrivData->pasDevPhysAddr); OSFreeMem(psPrivData); +} + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +static PVRSRV_ERROR PMRZombifyDmaBufMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf_attachment *psAttachment = psPrivData->psAttachment; + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + + PVR_UNREFERENCED_PARAMETER(psPMR); + + psPrivData->bZombie = IMG_TRUE; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + psPrivData->uiOriginPID); + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + psPrivData->uiOriginPID); +#endif + + PVRSRVIonZombifyMemAllocRecord(psDmaBuf); return PVRSRV_OK; } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) { + /* The imported memory is assumed to be backed by a permanent allocation. + * The PMR does not need to be (un)locked */ PVR_UNREFERENCED_PARAMETER(pvPriv); return PVRSRV_OK; } +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages) +#else static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +#endif { + /* The imported memory is assumed to be backed by a permanent allocation. 
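/* Illustrative sketch (not part of the patch): how the version-gated
 * _pvr_dma_buf_map_attachment()/_pvr_dma_buf_unmap_attachment() helpers
 * introduced in this hunk are intended to be used. Error handling is
 * trimmed and the function name is hypothetical. */
static PVRSRV_ERROR ExampleMapImportedBuffer(struct dma_buf_attachment *psAttach)
{
	struct sg_table *psTable;
	PVRSRV_ERROR eError;

	/* On kernels >= 6.2 the *_unlocked dma-buf API is used; on older
	 * kernels the helper takes the dma_resv lock around the call itself. */
	eError = _pvr_dma_buf_map_attachment(psAttach, DMA_BIDIRECTIONAL, &psTable);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... walk psTable->sgl and record the device addresses ... */

	_pvr_dma_buf_unmap_attachment(psAttach, psTable, DMA_BIDIRECTIONAL);

	return PVRSRV_OK;
}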
+ * The PMR does not need to be (un)locked */ PVR_UNREFERENCED_PARAMETER(pvPriv); +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + *ppvZombiePages = NULL; +#endif return PVRSRV_OK; } -static void PMRGetFactoryLock(void) +static void PMRFactoryLock(void) { - mutex_lock(&g_HashLock); + mutex_lock(&g_FactoryLock); } -static void PMRReleaseFactoryLock(void) +static void PMRFactoryUnlock(void) { - mutex_unlock(&g_HashLock); + mutex_unlock(&g_FactoryLock); } static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 ui32Log2PageSize, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T *puiOffset, +#if defined(SUPPORT_STATIC_IPA) + IMG_UINT64 ui64IPAPolicyValue, + IMG_UINT64 ui64IPAClearMask, +#endif IMG_BOOL *pbValid, IMG_DEV_PHYADDR *psDevPAddr) { @@ -393,6 +1320,11 @@ static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 ui32PageIndex; IMG_UINT32 idx; +#if defined(SUPPORT_STATIC_IPA) + PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); + PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); +#endif + if (ui32Log2PageSize != PAGE_SHIFT) { return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; @@ -412,6 +1344,11 @@ static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv, PVR_ASSERT(ui32InPageOffset < PAGE_SIZE); psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset; +#if defined(SUPPORT_STATIC_IPA) + /* Modify the physical address with the associated IPA values */ + psDevPAddr[idx].uiAddr &= ~ui64IPAClearMask; + psDevPAddr[idx].uiAddr |= ui64IPAPolicyValue; +#endif } } return PVRSRV_OK; @@ -513,15 +1450,18 @@ static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv, static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab = { - .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf, - .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf, - .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf, - .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf, - .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf, - .pfnMMap = PMRMMapDmaBuf, - .pfnFinalize = PMRFinalizeDmaBuf, - .pfnGetPMRFactoryLock = PMRGetFactoryLock, - .pfnReleasePMRFactoryLock = PMRReleaseFactoryLock, + .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf, + .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf, + .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf, + .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf, + .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf, + .pfnMMap = PMRMMapDmaBuf, + .pfnFinalize = PMRFinalizeDmaBuf, + .pfnGetPMRFactoryLock = PMRFactoryLock, + .pfnReleasePMRFactoryLock = PMRFactoryUnlock, +#if defined(SUPPORT_PMR_DEFERRED_FREE) + .pfnZombify = PMRZombifyDmaBufMem, +#endif }; /***************************************************************************** @@ -533,6 +1473,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, struct dma_buf_attachment *psAttachment, PFN_DESTROY_DMABUF_PMR pfnDestroy, PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_PID uiPid, IMG_DEVMEM_SIZE_T uiChunkSize, IMG_UINT32 ui32NumPhysChunks, IMG_UINT32 ui32NumVirtChunks, @@ -555,6 +1496,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, struct sg_table *table; IMG_UINT32 uiSglOffset; IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; + IMG_UINT32 ui32ActualDmaBufPageCount; bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); @@ -570,7 +1512,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, goto errReturn; } - if (!PMRValidateSize((IMG_UINT64) ui32NumVirtChunks << uiChunkSize)) + if 
(!PMRValidateSize((IMG_UINT64) ui32NumVirtChunks * uiChunkSize)) { PVR_LOG_VA(PVR_DBG_ERROR, "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"", @@ -591,6 +1533,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, psPrivData->psAttachment = psAttachment; psPrivData->pfnDestroy = pfnDestroy; psPrivData->bPoisonOnFree = bPoisonOnFree; + psPrivData->uiOriginPID = uiPid; psPrivData->ui32VirtPageCount = (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT; @@ -623,12 +1566,8 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, } } - table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL); - if (IS_ERR_OR_NULL(table)) - { - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto errFreePhysAddr; - } + eError = _pvr_dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL, &table); + PVR_LOG_GOTO_IF_ERROR(eError, "_pvr_dma_buf_map_attachment", errFreePhysAddr); /* * We do a two pass process: first work out how many pages there @@ -647,16 +1586,19 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, goto errUnmap; } - if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk)) + /* Obtain actual page count of dma buf */ + ui32ActualDmaBufPageCount = psAttachment->dmabuf->size / PAGE_SIZE; + + if (WARN_ON(ui32ActualDmaBufPageCount < ui32NumPhysChunks * uiPagesPerChunk)) { - PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual " - "number of physical dma buf pages don't match", + PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks greater than " + "number of physical dma buf pages", __func__)); eError = PVRSRV_ERROR_INVALID_PARAMS; goto errUnmap; } - psPrivData->ui32PhysPageCount = ui32PageCount; + psPrivData->ui32PhysPageCount = ui32ActualDmaBufPageCount; psPrivData->psSgTable = table; ui32PageCount = 0; sg = table->sgl; @@ -696,7 +1638,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, #if defined(PVRSRV_ENABLE_PROCESS_STATS) PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, psPrivData->ui32PhysPageCount << PAGE_SHIFT, - OSGetCurrentClientProcessIDKM()); + psPrivData->uiOriginPID); #endif uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); @@ -739,7 +1681,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, return PVRSRV_OK; errUnmap: - dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL); + _pvr_dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL); errFreePhysAddr: OSFreeMem(psPrivData->pasDevPhysAddr); errFreePrivData: @@ -749,31 +1691,173 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, return eError; } -static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap, - struct dma_buf_attachment *psAttachment) +static void PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment) +{ + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + + PVR_UNREFERENCED_PARAMETER(psHeap); + + /* PMRUnlockSysPhysAddresses(psPMR) is redundant. 
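/* Illustrative sketch (the mask and policy values are made up): the
 * SUPPORT_STATIC_IPA path in the pfnDevPhysAddr callbacks rewrites each
 * returned device address by clearing the IPA policy bits and OR-ing in
 * the policy value. */
static inline IMG_UINT64 ApplyIPAPolicy(IMG_UINT64 ui64Addr,
                                        IMG_UINT64 ui64PolicyValue,
                                        IMG_UINT64 ui64ClearMask)
{
	return (ui64Addr & ~ui64ClearMask) | ui64PolicyValue;
}
/* e.g. with a clear mask of 0x0F00000000 and a policy value of
 * 0x0300000000, a page at 0x0080001000 becomes 0x0380001000. */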
+ * See PMRUnlockPhysAddressesDmaBuf */ + + dma_buf_detach(psDmaBuf, psAttachment); + dma_buf_put(psDmaBuf); +} + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR) +{ + PMR_DMA_BUF_DATA *psPrivData; + + psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); + if (psPrivData) + { + return psPrivData->psAttachment->dmabuf; + } + + return NULL; +} + +struct dma_resv * +PhysmemGetDmaResv(PMR *psPMR) +{ + struct dma_buf *psDmaBuf = PhysmemGetDmaBuf(psPMR); + + if (psDmaBuf) + { + return psDmaBuf->resv; + } + else + { + PMR_DMA_BUF_WRAPPER *psPMRWrapper = PMREnvDmaBufGetExportData(psPMR); + + if (psPMRWrapper) + { + return &psPMRWrapper->sDmaResv; + + } + + return NULL; + } +} + +#if defined(SUPPORT_SECURE_ALLOC_KM) && defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) +struct dma_heap * +PhysmemGetDmaHeap(PMR *psPMR) +{ + struct dma_buf *psDmaBuf = PhysmemGetDmaBuf(psPMR); + + psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); + if (psPrivData != NULL) + { + return psPrivData->psDmaHeap; + } + + return NULL; +} + +void +PhysmemSetDmaHeap(PMR *psPMR, struct dma_heap *psDmaHeap) +{ + struct dma_buf *psDmaBuf = PhysmemGetDmaBuf(psPMR); + + psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); + if (psPrivData != NULL) + { + psPrivData->psDmaHeap = psDmaHeap; + } +} +#endif /* #if defined(SUPPORT_SECURE_ALLOC_KM) && defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) */ + +static void +PhysmemDestroyPMRWrapper(PMR_DMA_BUF_WRAPPER *psPMRWrapper) +{ + dma_resv_fini(&psPMRWrapper->sDmaResv); + + OSLockDestroy(psPMRWrapper->hKernelMappingLock); + OSLockDestroy(psPMRWrapper->hDeviceMappingLock); + OSFreeMem(psPMRWrapper); +} + +void +PhysmemDmaBufExportFinalize(void *pvDmaBufExportData) { - struct dma_buf *psDmaBuf = psAttachment->dmabuf; + PMR_DMA_BUF_WRAPPER *psPMRWrapper = pvDmaBufExportData; - PVR_UNREFERENCED_PARAMETER(psHeap); + PhysmemDestroyPMRWrapper(psPMRWrapper); +} - dma_buf_detach(psDmaBuf, psAttachment); - dma_buf_put(psDmaBuf); +static PVRSRV_ERROR +PhysmemCreatePMRWrapper(PMR *psPMR, PMR_DMA_BUF_WRAPPER **ppsPMRWrapper) +{ + PMR_DMA_BUF_WRAPPER *psPMRWrapper; + PVRSRV_ERROR eError; + + psPMRWrapper = OSAllocZMem(sizeof(*psPMRWrapper)); + PVR_LOG_GOTO_IF_NOMEM(psPMRWrapper, eError, fail_alloc_mem); + + psPMRWrapper->psPMR = psPMR; + + eError = OSLockCreate(&psPMRWrapper->hKernelMappingLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.1", + fail_kernel_mapping_lock_create); + + eError = OSLockCreate(&psPMRWrapper->hDeviceMappingLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.2", + fail_device_mapping_lock_create); + + dma_resv_init(&psPMRWrapper->sDmaResv); + + *ppsPMRWrapper = psPMRWrapper; return PVRSRV_OK; +fail_device_mapping_lock_create: + OSLockDestroy(psPMRWrapper->hKernelMappingLock); +fail_kernel_mapping_lock_create: + OSFreeMem(psPMRWrapper); +fail_alloc_mem: + return eError; } -struct dma_buf * -PhysmemGetDmaBuf(PMR *psPMR) +static PVRSRV_ERROR +PhysmemGetOrCreatePMRWrapper(PMR *psPMR, PMR_DMA_BUF_WRAPPER **ppsPMRWrapper) { - PMR_DMA_BUF_DATA *psPrivData; + PMR_DMA_BUF_WRAPPER *psPMRWrapper; + PVRSRV_ERROR eError; - psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); - if (psPrivData) + psPMRWrapper = PMREnvDmaBufGetExportData(psPMR); + if (!psPMRWrapper) { - return psPrivData->psAttachment->dmabuf; + PMRFactoryLock(); + + /* Check again with the factory lock held */ + psPMRWrapper = PMREnvDmaBufGetExportData(psPMR); + if (!psPMRWrapper) + { + eError = PhysmemCreatePMRWrapper(psPMR, &psPMRWrapper); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreatePMRWrapper", 
fail_create_pmr_wrapper); + + /* + * A PMR memory layout can't change once exported. + * This makes sure the exported and imported parties + * see the same layout of the memory. + */ + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + + PMREnvDmaBufSetExportData(psPMR, psPMRWrapper); + } + + PMRFactoryUnlock(); } - return NULL; + *ppsPMRWrapper = psPMRWrapper; + + return PVRSRV_OK; + +fail_create_pmr_wrapper: + PMRFactoryUnlock(); + return eError; } PVRSRV_ERROR @@ -782,42 +1866,35 @@ PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, PMR *psPMR, IMG_INT *piFd) { + PMR_DMA_BUF_WRAPPER *psPMRWrapper; struct dma_buf *psDmaBuf; - IMG_DEVMEM_SIZE_T uiPMRSize; PVRSRV_ERROR eError; IMG_INT iFd; - mutex_lock(&g_HashLock); + eError = PhysmemGetOrCreatePMRWrapper(psPMR, &psPMRWrapper); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemExportDmaBuf", fail_get_pmr_wrapper); - PMRRefPMR(psPMR); + eError = PMRRefPMR(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", fail_ref_pmr); - PMR_LogicalSize(psPMR, &uiPMRSize); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) { DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo); - sDmaBufExportInfo.priv = psPMR; + sDmaBufExportInfo.priv = psPMRWrapper; sDmaBufExportInfo.ops = &sPVRDmaBufOps; - sDmaBufExportInfo.size = uiPMRSize; + sDmaBufExportInfo.size = PMR_LogicalSize(psPMR); sDmaBufExportInfo.flags = O_RDWR; + sDmaBufExportInfo.resv = &psPMRWrapper->sDmaResv; psDmaBuf = dma_buf_export(&sDmaBufExportInfo); } -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) - psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, - uiPMRSize, O_RDWR, NULL); -#else - psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, - uiPMRSize, O_RDWR); -#endif if (IS_ERR_OR_NULL(psDmaBuf)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)", __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM)); eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto fail_pmr_ref; + goto fail_export; } iFd = dma_buf_fd(psDmaBuf, O_RDWR); @@ -829,24 +1906,156 @@ PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, goto fail_dma_buf; } - mutex_unlock(&g_HashLock); *piFd = iFd; - /* A PMR memory lay out can't change once exported - * This makes sure the exported and imported parties see - * the same layout of the memory */ - PMR_SetLayoutFixed(psPMR, IMG_TRUE); - return PVRSRV_OK; fail_dma_buf: dma_buf_put(psDmaBuf); + return eError; -fail_pmr_ref: - mutex_unlock(&g_HashLock); - PMRUnrefPMR(psPMR); +fail_export: + (void) PMRUnrefPMR(psPMR); - PVR_ASSERT(eError != PVRSRV_OK); +fail_ref_pmr: + /* No need to destroy psPMRWrapper here. It will be (or already was) + * destroyed alongside the PMR. 
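/* Condensed sketch of the get-or-create pattern used by
 * PhysmemGetOrCreatePMRWrapper(): check without the lock, then re-check
 * under the factory lock before creating, so concurrent exporters of the
 * same PMR end up sharing a single wrapper. Layout fixing and error paths
 * are omitted here. */
psPMRWrapper = PMREnvDmaBufGetExportData(psPMR);
if (psPMRWrapper == NULL)
{
	PMRFactoryLock();
	psPMRWrapper = PMREnvDmaBufGetExportData(psPMR); /* re-check under lock */
	if (psPMRWrapper == NULL)
	{
		eError = PhysmemCreatePMRWrapper(psPMR, &psPMRWrapper);
		if (eError == PVRSRV_OK)
		{
			PMREnvDmaBufSetExportData(psPMR, psPMRWrapper);
		}
	}
	PMRFactoryUnlock();
}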
*/ +fail_get_pmr_wrapper: + return eError; +} + +struct dma_buf * +PhysmemGEMPrimeExport( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + struct drm_device *psDev, +#endif + + struct drm_gem_object *psObj, + int iFlags) +{ + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + struct dma_buf *psDmaBuf = NULL; + DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo); + + + sDmaBufExportInfo.priv = psObj; + sDmaBufExportInfo.ops = &sPVRDmaBufOpsGEM; + sDmaBufExportInfo.size = PMR_LogicalSize(psPMRWrapper->psPMR); + sDmaBufExportInfo.flags = iFlags; + sDmaBufExportInfo.resv = &psPMRWrapper->sDmaResv; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + psDmaBuf = drm_gem_dmabuf_export(psObj->dev, &sDmaBufExportInfo); +#else + psDmaBuf = drm_gem_dmabuf_export(psDev, &sDmaBufExportInfo); +#endif + if (IS_ERR(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)", + __func__, PTR_ERR(psDmaBuf))); + } + + return psDmaBuf; +} + +void +PhysmemGEMObjectFree(struct drm_gem_object *psObj) +{ + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + PVRSRV_ERROR eError; + + drm_gem_object_release(psObj); + + if (smp_load_acquire(&psPMRWrapper->psObj) == psObj) + { + smp_store_release(&psPMRWrapper->psObj, NULL); + } + + eError = PMRUnrefPMR(psPMRWrapper->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); + + OSFreeMem(psGEMObj); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) +static const struct drm_gem_object_funcs sPhysmemGEMObjFuncs = { + .export = PhysmemGEMPrimeExport, + .free = PhysmemGEMObjectFree, +}; +#endif + +PVRSRV_ERROR +PhysmemExportGemHandle(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_UINT32 *puHandle) +{ + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; + struct drm_device *psDRMDev = dev_get_drvdata(psDev); + struct drm_file *psDRMFile = OSGetDRMFile(psConnection); + PMR_DMA_BUF_WRAPPER *psPMRWrapper; + PMR_DMA_BUF_GEM_OBJ *psGEMObj; + bool bAlreadyExported; + PVRSRV_ERROR eError; + int iErr; + + eError = PhysmemGetOrCreatePMRWrapper(psPMR, &psPMRWrapper); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemExportGemHandle", fail_get_pmr_wrapper); + + psGEMObj = OSAllocZMem(sizeof(*psGEMObj)); + PVR_LOG_GOTO_IF_NOMEM(psGEMObj, eError, fail_alloc_mem); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) + psGEMObj->sBase.funcs = &sPhysmemGEMObjFuncs; +#endif + psGEMObj->psPMRWrapper = psPMRWrapper; + + eError = PMRRefPMR(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", fail_ref_pmr); + + drm_gem_private_object_init(psDRMDev, &psGEMObj->sBase, + PMR_LogicalSize(psPMR)); + + PMRFactoryLock(); + bAlreadyExported = smp_load_acquire(&psPMRWrapper->psObj) != NULL; + if (!bAlreadyExported) + { + smp_store_release(&psPMRWrapper->psObj, &psGEMObj->sBase); + } + PMRFactoryUnlock(); + + if (bAlreadyExported) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_STILL_REFERENCED, + fail_export_check); + } + + iErr = drm_gem_handle_create(psDRMFile, &psGEMObj->sBase, puHandle); + if (iErr) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, + fail_handle_create); + } + + /* The handle holds a reference on the object, so drop ours */ + drm_gem_object_put(&psGEMObj->sBase); + + return PVRSRV_OK; + +fail_export_check: +fail_handle_create: + /* Decrement reference on the GEM object and call .free() + * (PhysmemGEMObjectFree()). 
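/* Illustrative sketch: the "export at most once" guard used by
 * PhysmemExportGemHandle() above. The factory lock serialises the
 * check-and-set, while the acquire/release pair lets PhysmemGEMObjectFree()
 * clear the pointer without holding that lock. The function name is
 * hypothetical. */
static IMG_BOOL TryClaimGEMExport(PMR_DMA_BUF_WRAPPER *psWrapper,
                                  struct drm_gem_object *psObj)
{
	IMG_BOOL bAlreadyExported;

	PMRFactoryLock();
	bAlreadyExported = smp_load_acquire(&psWrapper->psObj) != NULL;
	if (!bAlreadyExported)
	{
		smp_store_release(&psWrapper->psObj, psObj);
	}
	PMRFactoryUnlock();

	return !bAlreadyExported;
}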
This will unreference the PMR and call + * OSMemFree() on psGEMObj. */ + drm_gem_object_put(&psGEMObj->sBase); + return eError; + +fail_ref_pmr: + OSFreeMem(psGEMObj); +fail_alloc_mem: +fail_get_pmr_wrapper: return eError; } @@ -897,46 +2106,6 @@ PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, return eError; } -PVRSRV_ERROR -PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_INT fd, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32NameSize, - const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], - PMR **ppsPMRPtr, - IMG_DEVMEM_SIZE_T *puiSize, - IMG_DEVMEM_ALIGN_T *puiAlign) -{ - PMR *psPMRPtr; - PVRSRV_ERROR eError; - - eError = PhysmemImportDmaBuf(psConnection, - psDevNode, - fd, - uiFlags, - ui32NameSize, - pszName, - &psPMRPtr, - puiSize, - puiAlign); - - if (eError == PVRSRV_OK) - { - eError = PMRLockSysPhysAddresses(psPMRPtr); - if (eError == PVRSRV_OK) - { - *ppsPMRPtr = psPMRPtr; - } - else - { - PMRUnrefPMR(psPMRPtr); - } - } - - return eError; -} - PVRSRV_ERROR PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, @@ -957,14 +2126,11 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, struct dma_buf *psDmaBuf; PVRSRV_ERROR eError; IMG_BOOL bHashTableCreated = IMG_FALSE; + IMG_PID uiPid = OSGetCurrentClientProcessIDKM(); PVR_UNREFERENCED_PARAMETER(psConnection); - if (!psDevNode) - { - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto errReturn; - } + PVR_GOTO_IF_INVALID_PARAM(psDevNode != NULL, eError, errReturn); /* Terminate string from bridge to prevent corrupt annotations in RI */ if (pszName != NULL) @@ -973,143 +2139,166 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, pszName0[ui32NameSize-1] = '\0'; } - mutex_lock(&g_HashLock); + PMRFactoryLock(); /* Get the buffer handle */ psDmaBuf = dma_buf_get(fd); if (IS_ERR_OR_NULL(psDmaBuf)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", - __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM)); - eError = PVRSRV_ERROR_BAD_MAPPING; - goto errUnlockReturn; + __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BAD_MAPPING, errUnlockReturn); } - if (psDmaBuf->ops == &sPVRDmaBufOps) + if (psDmaBuf->ops == &sPVRDmaBufOps || psDmaBuf->ops == &sPVRDmaBufOpsGEM) { - PVRSRV_DEVICE_NODE *psPMRDevNode; - - /* We exported this dma_buf, so we can just get its PMR */ - psPMR = (PMR *) psDmaBuf->priv; - - /* However, we can't import it if it belongs to a different device */ - psPMRDevNode = PMR_DeviceNode(psPMR); - if (psPMRDevNode != psDevNode) + /* We exported this dma_buf, so we can just get its PMR. */ + if (psDmaBuf->ops == &sPVRDmaBufOps) { - PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", - __func__)); - eError = PVRSRV_ERROR_PMR_NOT_PERMITTED; - goto err; + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psDmaBuf->priv; + + psPMR = psPMRWrapper->psPMR; } - } - else - { - if (g_psDmaBufHash) + else { - /* We have a hash table so check if we've seen this dmabuf before */ - psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); + struct drm_gem_object *psObj = psDmaBuf->priv; + PMR_DMA_BUF_GEM_OBJ *psGEMObj = TO_PMR_DMA_BUF_GEM_OBJ(psObj); + PMR_DMA_BUF_WRAPPER *psPMRWrapper = psGEMObj->psPMRWrapper; + + psPMR = psPMRWrapper->psPMR; } - else + + /* However, we can't import it if it belongs to a different device. 
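/* Illustrative sketch: how the import path above recognises dma-bufs that
 * this driver exported itself. Both export flavours are identified by their
 * dma_buf_ops table, and priv leads back to the PMR wrapper (directly for
 * the plain export, via the GEM object for the PRIME export). The function
 * name is hypothetical. */
static PMR *LookupOwnExport(struct dma_buf *psDmaBuf)
{
	if (psDmaBuf->ops == &sPVRDmaBufOps)
	{
		return ((PMR_DMA_BUF_WRAPPER *) psDmaBuf->priv)->psPMR;
	}

	if (psDmaBuf->ops == &sPVRDmaBufOpsGEM)
	{
		struct drm_gem_object *psObj = psDmaBuf->priv;
		return TO_PMR_DMA_BUF_GEM_OBJ(psObj)->psPMRWrapper->psPMR;
	}

	return NULL; /* foreign buffer: fall through to the hash-table lookup */
}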
*/ + if (PMR_DeviceNode(psPMR) != psDevNode) { - /* - * As different processes may import the same dmabuf we need to - * create a hash table so we don't generate a duplicate PMR but - * rather just take a reference on an existing one. - */ - g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE); - if (!g_psDmaBufHash) +#if defined(PVRSRV_ENABLE_XD_MEM) + /* and if it's not in a shareable physical address space. */ + if (PhysHeapSpasWithDevice(PMR_PhysHeap(psPMR), psDevNode) != PVRSRV_OK) +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err; + PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, + errUnlockAndDMAPut); } - bHashTableCreated = IMG_TRUE; } } - - if (psPMR) + else if (g_psDmaBufHash != NULL) + { + /* We have a hash table so check if we've seen this dmabuf + * before. */ + psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); + } + else { - /* Reuse the PMR we already created */ - PMRRefPMR(psPMR); + /* As different processes may import the same dmabuf we need to + * create a hash table so we don't generate a duplicate PMR but + * rather just take a reference on an existing one. */ + g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE); + PVR_GOTO_IF_NOMEM(g_psDmaBufHash, eError, errUnlockAndDMAPut); - *ppsPMRPtr = psPMR; - PMR_LogicalSize(psPMR, puiSize); - *puiAlign = PAGE_SIZE; + bHashTableCreated = IMG_TRUE; } - /* No errors so far */ - eError = PVRSRV_OK; -err: - if (psPMR || (PVRSRV_OK != eError)) + if (psPMR != NULL) { - mutex_unlock(&g_HashLock); +#if defined(SUPPORT_PMR_DEFERRED_FREE) + if (PMR_IsZombie(psPMR)) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PMR_DMA_BUF_DATA *psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); +#endif + + PMRReviveZombieAndRef(psPMR); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (psPrivData != NULL) + { + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + psPrivData->uiOriginPID); + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + psPrivData->uiOriginPID); + } +#endif + + PVRSRVIonReviveMemAllocRecord(psDmaBuf); + } + else +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + + { + /* Reuse the PMR we already created */ + eError = PMRRefPMR(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRRefPMR", errUnlockAndDMAPut); + } + + /* If an existing PMR is found, the table wasn't created by this func + * call, so the error path can be safely ignored allowing for + * the factory lock can be dropped. */ + PVR_ASSERT(bHashTableCreated == IMG_FALSE); dma_buf_put(psDmaBuf); + PMRFactoryUnlock(); - if (PVRSRV_OK == eError) +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) + /* The device import can only be registered on an alive & healthy PMR + * therefore we wait for the potential zombie to be dequeued first. */ + eError = PMR_RegisterDeviceImport(psPMR, psDevNode); + if (eError != PVRSRV_OK) { - /* - * We expect a PMR to be immutable at this point - * But its explicitly set here to cover a corner case - * where a PMR created through non-DMA interface could be - * imported back again through DMA interface */ - PMR_SetLayoutFixed(psPMR, IMG_TRUE); + /* The factory lock might be taken in PMRUnrefPMR. 
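/* Condensed sketch (error handling trimmed) of the dedup scheme used in this
 * function for foreign dma-bufs: the hash table is keyed on the dma_buf
 * pointer, so a second import of the same buffer takes another reference on
 * the PMR created by the first import instead of building a duplicate. */
psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
if (psPMR != NULL)
{
	eError = PMRRefPMR(psPMR);    /* reuse the existing import */
}
else
{
	/* ... attach and create a new PMR for this dma_buf, then publish it: */
	HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
	g_ui32HashRefCount++;
}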
*/ + (void) PMRUnrefPMR(psPMR); + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register PMR with device: %u", + __func__, psDevNode->sDevId.ui32InternalID)); + goto errReturn; } - return eError; +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) */ + + *ppsPMRPtr = psPMR; + *puiSize = PMR_LogicalSize(psPMR); + *puiAlign = PAGE_SIZE; + + /* We expect a PMR to be immutable at this point. + * But its explicitly set here to cover a corner case + * where a PMR created through non-DMA interface could be + * imported back again through DMA interface. */ + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + + return PVRSRV_OK; } /* Do we want this to be a sparse PMR? */ if (ui32NumVirtChunks > 1) { - IMG_UINT32 i; - /* Parameter validation */ - if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) || - uiChunkSize != PAGE_SIZE || - ui32NumPhysChunks > ui32NumVirtChunks) + if (psDmaBuf->size < (uiChunkSize * ui32NumPhysChunks) || + uiChunkSize != PAGE_SIZE) { PVR_DPF((PVR_DBG_ERROR, "%s: Requesting sparse buffer: " "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to " "OS page size (%lu). uiChunkSize * ui32NumPhysChunks " "("IMG_DEVMEM_SIZE_FMTSPEC") must" - " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). " - "ui32NumPhysChunks (%u) must be lesser or equal to " - "ui32NumVirtChunks (%u)", + " not be greater than the buffer size ("IMG_SIZE_FMTSPEC").", __func__, uiChunkSize, PAGE_SIZE, uiChunkSize * ui32NumPhysChunks, - psDmaBuf->size, - ui32NumPhysChunks, - ui32NumVirtChunks)); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto errUnlockAndDMAPut; - } - - /* Parameter validation - Mapping table entries*/ - for (i = 0; i < ui32NumPhysChunks; i++) - { - if (pui32MappingTable[i] > ui32NumVirtChunks) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Requesting sparse buffer: " - "Entry in mapping table (%u) is out of allocation " - "bounds (%u)", - __func__, - (IMG_UINT32) pui32MappingTable[i], - (IMG_UINT32) ui32NumVirtChunks)); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto errUnlockAndDMAPut; - } + psDmaBuf->size)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, + errUnlockAndDMAPut); } } else { - /* if ui32NumPhysChunks == 0 pui32MappingTable is NULL and because - * is ui32NumPhysChunks is set to 1 below we don't allow NULL array */ + /* if ui32NumPhysChunks == 0 then pui32MappingTable == NULL + * this is handled by the generated bridge code. + * Because ui32NumPhysChunks is set to 1 below, we don't allow NULL array */ if (pui32MappingTable == NULL) { - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto errUnlockAndDMAPut; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, + errUnlockAndDMAPut); } /* Make sure parameters are valid for non-sparse allocations as well */ @@ -1118,14 +2307,27 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, ui32NumVirtChunks = 1; } + { + IMG_DEVMEM_SIZE_T uiSize = ui32NumVirtChunks * uiChunkSize; + IMG_UINT32 uiLog2PageSize = PAGE_SHIFT; /* log2(uiChunkSize) */ + eError = PhysMemValidateParams(psDevNode, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiFlags, + uiPid, + &uiLog2PageSize, + &uiSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemValidateParams", errUnlockAndDMAPut); + } psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice); if (IS_ERR_OR_NULL(psAttachment)) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)", __func__, psAttachment? 
PTR_ERR(psAttachment) : -ENOMEM)); - eError = PVRSRV_ERROR_BAD_MAPPING; - goto errUnlockAndDMAPut; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DMABUF_ATTACH, + errUnlockAndDMAPut); } /* @@ -1137,6 +2339,7 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, psAttachment, PhysmemDestroyDmaBuf, uiFlags, + uiPid, uiChunkSize, ui32NumPhysChunks, ui32NumVirtChunks, @@ -1144,16 +2347,17 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, ui32NameSize, pszName, &psPMR); - if (eError != PVRSRV_OK) - { - goto errDMADetach; - } + PVR_GOTO_IF_ERROR(eError, errDMADetach); + + /* The imported memory is assumed to be backed by a permanent allocation. */ + /* PMRLockSysPhysAddresses(psPMR) is redundant. + * See PMRLockPhysAddressesDmaBuf */ /* First time we've seen this dmabuf so store it in the hash table */ HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR); g_ui32HashRefCount++; - mutex_unlock(&g_HashLock); + PMRFactoryUnlock(); PVRSRVIonAddMemAllocRecord(psDmaBuf); @@ -1171,7 +2375,7 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, dma_buf_detach(psDmaBuf, psAttachment); errUnlockAndDMAPut: - if (IMG_TRUE == bHashTableCreated) + if (bHashTableCreated) { HASH_Delete(g_psDmaBufHash); g_psDmaBufHash = NULL; @@ -1179,118 +2383,9 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, dma_buf_put(psDmaBuf); errUnlockReturn: - mutex_unlock(&g_HashLock); + PMRFactoryUnlock(); errReturn: PVR_ASSERT(eError != PVRSRV_OK); return eError; } - -#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */ - -PVRSRV_ERROR -PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, - struct dma_buf_attachment *psAttachment, - PFN_DESTROY_DMABUF_PMR pfnDestroy, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEVMEM_SIZE_T uiChunkSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - IMG_UINT32 ui32NameSize, - const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], - PMR **ppsPMRPtr) -{ - PVR_UNREFERENCED_PARAMETER(psHeap); - PVR_UNREFERENCED_PARAMETER(psAttachment); - PVR_UNREFERENCED_PARAMETER(pfnDestroy); - PVR_UNREFERENCED_PARAMETER(uiFlags); - PVR_UNREFERENCED_PARAMETER(uiChunkSize); - PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); - PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); - PVR_UNREFERENCED_PARAMETER(pui32MappingTable); - PVR_UNREFERENCED_PARAMETER(ui32NameSize); - PVR_UNREFERENCED_PARAMETER(pszName); - PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); - - return PVRSRV_ERROR_NOT_SUPPORTED; -} - -struct dma_buf * -PhysmemGetDmaBuf(PMR *psPMR) -{ - PVR_UNREFERENCED_PARAMETER(psPMR); - - return NULL; -} - -PVRSRV_ERROR -PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PMR *psPMR, - IMG_INT *piFd) -{ - PVR_UNREFERENCED_PARAMETER(psConnection); - PVR_UNREFERENCED_PARAMETER(psDevNode); - PVR_UNREFERENCED_PARAMETER(psPMR); - PVR_UNREFERENCED_PARAMETER(piFd); - - return PVRSRV_ERROR_NOT_SUPPORTED; -} - -PVRSRV_ERROR -PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_INT fd, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32NameSize, - const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], - PMR **ppsPMRPtr, - IMG_DEVMEM_SIZE_T *puiSize, - IMG_DEVMEM_ALIGN_T *puiAlign) -{ - PVR_UNREFERENCED_PARAMETER(psConnection); - PVR_UNREFERENCED_PARAMETER(psDevNode); - PVR_UNREFERENCED_PARAMETER(fd); - PVR_UNREFERENCED_PARAMETER(uiFlags); - PVR_UNREFERENCED_PARAMETER(ui32NameSize); - PVR_UNREFERENCED_PARAMETER(pszName); - PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); - 
PVR_UNREFERENCED_PARAMETER(puiSize); - PVR_UNREFERENCED_PARAMETER(puiAlign); - - return PVRSRV_ERROR_NOT_SUPPORTED; -} - -PVRSRV_ERROR -PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_INT fd, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEVMEM_SIZE_T uiChunkSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - IMG_UINT32 ui32NameSize, - const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], - PMR **ppsPMRPtr, - IMG_DEVMEM_SIZE_T *puiSize, - IMG_DEVMEM_ALIGN_T *puiAlign) -{ - PVR_UNREFERENCED_PARAMETER(psConnection); - PVR_UNREFERENCED_PARAMETER(psDevNode); - PVR_UNREFERENCED_PARAMETER(fd); - PVR_UNREFERENCED_PARAMETER(uiFlags); - PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); - PVR_UNREFERENCED_PARAMETER(puiSize); - PVR_UNREFERENCED_PARAMETER(puiAlign); - PVR_UNREFERENCED_PARAMETER(uiChunkSize); - PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); - PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); - PVR_UNREFERENCED_PARAMETER(pui32MappingTable); - PVR_UNREFERENCED_PARAMETER(ui32NameSize); - PVR_UNREFERENCED_PARAMETER(pszName); - - return PVRSRV_ERROR_NOT_SUPPORTED; -} -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf_internal.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf_internal.h new file mode 100644 index 000000000000..50a8ab37f442 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_dmabuf_internal.h @@ -0,0 +1,69 @@ +/**************************************************************************/ /*! +@File physmem_dmabuf_internal.h +@Title Internal header for dmabuf PMR factory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks importing Ion allocations +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(PHYSMEM_DMABUF_INTERNAL_H) +#define PHYSMEM_DMABUF_INTERNAL_H + +#include + +struct dma_buf; +struct drm_device; +struct drm_gem_object; + +struct dma_buf * +PhysmemGEMPrimeExport( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + struct drm_device *psDev, +#endif + + struct drm_gem_object *psObj, + int iFlags); + +void +PhysmemGEMObjectFree(struct drm_gem_object *psObj); + +void +PhysmemDmaBufExportFinalize(void *pvDmaBufExportData); + +#endif /* !defined(PHYSMEM_DMABUF_INTERNAL_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_linux.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_linux.c index fbcc72189e08..994f675329b8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_linux.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_linux.c @@ -71,6 +71,7 @@ static void _FreeWrapData(PMR_WRAP_DATA *psPrivData) { OSFreeMem(psPrivData->ppsPageArray); OSFreeMem(psPrivData->ppvPhysAddr); + psPrivData->psVMArea = NULL; OSFreeMem(psPrivData); } @@ -78,7 +79,7 @@ static void _FreeWrapData(PMR_WRAP_DATA *psPrivData) /* Allocate the PMR private data */ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, PVRSRV_DEVICE_NODE *psDevNode, - IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiSize, IMG_CPU_VIRTADDR pvCpuVAddr, PVRSRV_MEMALLOCFLAGS_T uiFlags) { @@ -87,6 +88,12 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, struct vm_area_struct *psVMArea; IMG_UINT32 ui32CPUCacheMode; + /* Obtain a reader lock on the process mmap lock while we are using + * the psVMArea. 
+ */ + + mmap_read_lock(current->mm); + /* Find the VMA */ psVMArea = find_vma(current->mm, (uintptr_t)pvCpuVAddr); if (psVMArea == NULL) @@ -95,7 +102,8 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, "%s: Couldn't find memory region containing start address %p", __func__, (void*) pvCpuVAddr)); - return PVRSRV_ERROR_INVALID_CPU_ADDR; + eError = PVRSRV_ERROR_INVALID_CPU_ADDR; + goto eUnlockReturn; } /* If requested size is larger than actual allocation @@ -108,7 +116,8 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, "%s: End address %p is outside of the region returned by find_vma", __func__, (void*) (uintptr_t)((uintptr_t)pvCpuVAddr + uiSize))); - return PVRSRV_ERROR_BAD_PARAM_SIZE; + eError = PVRSRV_ERROR_BAD_PARAM_SIZE; + goto eUnlockReturn; } /* Find_vma locates a region with an end point past a given @@ -119,27 +128,27 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, "%s: Start address %p is outside of the region returned by find_vma", __func__, (void*) pvCpuVAddr)); - return PVRSRV_ERROR_INVALID_CPU_ADDR; + eError = PVRSRV_ERROR_INVALID_CPU_ADDR; + goto eUnlockReturn; } - eError = DevmemCPUCacheMode(psDevNode, - uiFlags, - &ui32CPUCacheMode); + eError = DevmemCPUCacheMode(uiFlags, &ui32CPUCacheMode); if (eError != PVRSRV_OK) { - return eError; + goto eUnlockReturn; } /* Allocate and initialise private factory data */ psPrivData = OSAllocZMem(sizeof(*psPrivData)); if (psPrivData == NULL) { - return PVRSRV_ERROR_OUT_OF_MEMORY; + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto eUnlockReturn; } psPrivData->ui32CPUCacheFlags = ui32CPUCacheMode; - /* Assign the VMA area structure if needed later */ + /* Track the VMA area structure so that it can be checked later */ psPrivData->psVMArea = psVMArea; psPrivData->psDevNode = psDevNode; @@ -150,8 +159,8 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, if (psPrivData == NULL) { OSFreeMem(psPrivData); - return PVRSRV_ERROR_OUT_OF_MEMORY; - + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto eUnlockReturn; } psPrivData->ppvPhysAddr = OSAllocZMem(sizeof(*(psPrivData->ppvPhysAddr)) * psPrivData->uiTotalNumPages); @@ -159,7 +168,8 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, { OSFreeMem(psPrivData->ppsPageArray); OSFreeMem(psPrivData); - return PVRSRV_ERROR_OUT_OF_MEMORY; + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto eUnlockReturn; } if (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | @@ -170,7 +180,11 @@ static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, *ppsPrivData = psPrivData; - return PVRSRV_OK; + eError = PVRSRV_OK; +eUnlockReturn: + mmap_read_unlock(current->mm); + + return eError; } #if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) @@ -222,6 +236,7 @@ static PVRSRV_ERROR _TryGetUserPages(PVRSRV_DEVICE_NODE *psDevNode, PMR_WRAP_DATA *psPrivData) { IMG_INT32 iMappedPages, i; + IMG_UINT64 ui64DmaMask = dma_get_mask(psDevNode->psDevConfig->pvOSDevice); /* Do the actual call */ @@ -250,12 +265,13 @@ static PVRSRV_ERROR _TryGetUserPages(PVRSRV_DEVICE_NODE *psDevNode, psPrivData->ppvPhysAddr[i].uiAddr = 0; } - /* APOLLO test chips TCF5 or ES2 can only access 4G maximum memory from the card. - * This is due to the 32 bit PCI card interface to the host - * Hence pages with physical address beyond 4G range cannot be accessed by the device - * An error is reported in such a case + + /* Check the data transfer capability of the DMA. 
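/* Illustrative sketch (dev and the page address are placeholders): the check
 * added below only accepts pages the device can actually address, i.e. whose
 * physical address fits inside the device's DMA mask. A 32-bit mask such as
 * DMA_BIT_MASK(32) therefore rejects pages above the 4G boundary. */
u64 ui64Mask = dma_get_mask(dev);
if (ui64PagePhysAddr & ~ui64Mask)
{
	/* page not reachable by the device's DMA engine */
	return -ERANGE;
}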
* - * The same restriction may apply on to platforms as well*/ + * For instance: + * APOLLO test chips TCF5 or ES2 can only access 4G maximum memory from the card. + * Hence pages with a physical address beyond 4G range cannot be accessed by the + * device. An error is reported in such a case. */ if (psPrivData->ppvPhysAddr[i].uiAddr & ~ui64DmaMask) { PVR_DPF((PVR_DBG_ERROR, @@ -361,6 +377,10 @@ PMRSysPhysAddrExtMem(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 ui32Log2PageSize, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T *puiOffset, +#if defined(SUPPORT_STATIC_IPA) + IMG_UINT64 ui64IPAPolicyValue, + IMG_UINT64 ui64IPAClearMask, +#endif IMG_BOOL *pbValid, IMG_DEV_PHYADDR *psDevPAddr) { @@ -370,6 +390,11 @@ PMRSysPhysAddrExtMem(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 uiPageIndex; IMG_UINT32 uiIdx; +#if defined(SUPPORT_STATIC_IPA) + PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); + PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); +#endif + if (PAGE_SHIFT != ui32Log2PageSize) { @@ -392,39 +417,31 @@ PMRSysPhysAddrExtMem(PMR_IMPL_PRIVDATA pvPriv, PVR_ASSERT(uiInPageOffset < uiPageSize); - /* We always handle CPU physical addresses in this PMR factory - * but this callback expects device physical addresses so we have to translate. */ - PhysHeapCpuPAddrToDevPAddr(psWrapData->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL], - 1, - &psDevPAddr[uiIdx], - &psWrapData->ppvPhysAddr[uiPageIndex]); + /* The ExtMem is only enabled in UMA mode, in that mode, the device physical + * address translation will be handled by the PRM factory after this call. + * Here we just copy the physical address like other callback implementations. */ + psDevPAddr[uiIdx].uiAddr = psWrapData->ppvPhysAddr[uiPageIndex].uiAddr; pbValid[uiIdx] = (psDevPAddr[uiIdx].uiAddr)? IMG_TRUE:IMG_FALSE; psDevPAddr[uiIdx].uiAddr += uiInPageOffset; +#if defined(SUPPORT_STATIC_IPA) + psDevPAddr[uiIdx].uiAddr &= ~ui64IPAClearMask; + psDevPAddr[uiIdx].uiAddr |= ui64IPAPolicyValue; +#endif /* SUPPORT_STATIC_IPA */ } return PVRSRV_OK; } -static PVRSRV_ERROR +static void PMRFinalizeExtMem(PMR_IMPL_PRIVDATA pvPriv) { - PVRSRV_ERROR eError; PMR_WRAP_DATA *psWrapData = pvPriv; - eError = _WrapExtMemReleasePages(psWrapData); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Problem with _WrapExtMemReleasePages. " - "Not all resources could be cleaned up (%u).", - __func__, - eError)); - } - - return PVRSRV_OK; + PVRSRV_ERROR eError = _WrapExtMemReleasePages(psWrapData); + PVR_LOG_IF_ERROR(eError, "_WrapExtMemReleasePages"); } @@ -444,8 +461,8 @@ static void _UnmapPage(PMR_WRAP_DATA *psWrapData, PVR_DPF((PVR_DBG_ERROR, "%s: Unable to unmap wrapped extmem " "page - wrong cached mode flags passed. 
This may leak " "memory.", __func__)); - PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from " - "the cpu"); + PVR_DPF((PVR_DBG_ERROR, "Found non-cpu cache mode flag when unmapping from " + "the cpu")); } else { @@ -486,10 +503,7 @@ static void _MapPage(PMR_WRAP_DATA *psWrapData, break; } - puiLinCPUAddr = vmap(&psWrapData->ppsPageArray[uiPageIdx], - 1, - VM_READ | VM_WRITE, - prot); + puiLinCPUAddr = vmap(&psWrapData->ppsPageArray[uiPageIdx], 1, VM_MAP, prot); psMapData->bVMAP = IMG_TRUE; } @@ -632,6 +646,7 @@ PMRAcquireKernelMappingDataExtMem(PMR_IMPL_PRIVDATA pvPriv, PMR_WRAP_DATA *psWrapData = (PMR_WRAP_DATA*) pvPriv; WRAP_KERNEL_MAP_DATA *psKernMapData; IMG_UINT32 ui32PageIndex = uiOffset >> PAGE_SHIFT; + IMG_UINT32 ui32PageOffset = (IMG_UINT32)(uiOffset & ~PAGE_MASK); /* Offset was out of bounds */ if (ui32PageIndex > psWrapData->uiTotalNumPages) @@ -645,7 +660,7 @@ PMRAcquireKernelMappingDataExtMem(PMR_IMPL_PRIVDATA pvPriv, /* We can not map in more than one page with ioremap. * Only possible with physically contiguous pages */ - if (((uiOffset & PAGE_MASK) + uiSize) > PAGE_SIZE) + if ((ui32PageOffset + uiSize) > PAGE_SIZE) { PVR_DPF((PVR_DBG_ERROR, "%s: Error, cannot map more than one page for wrapped extmem.", @@ -677,7 +692,7 @@ PMRAcquireKernelMappingDataExtMem(PMR_IMPL_PRIVDATA pvPriv, goto e1; } - *ppvKernelAddressOut = ((IMG_CHAR *) psKernMapData->pvKernLinAddr) + (uiOffset & PAGE_MASK); + *ppvKernelAddressOut = ((IMG_CHAR *) psKernMapData->pvKernLinAddr) + ui32PageOffset; *phHandleOut = psKernMapData; return PVRSRV_OK; @@ -705,11 +720,11 @@ static void PMRReleaseKernelMappingDataExtMem(PMR_IMPL_PRIVDATA pvPriv, static inline void begin_user_mode_access(IMG_UINT *uiState) { #if defined(CONFIG_ARM) && defined(CONFIG_CPU_SW_DOMAIN_PAN) - *uiState = uaccess_save_enable(); + *uiState = uaccess_save_and_enable(); #elif defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) PVR_UNREFERENCED_PARAMETER(uiState); uaccess_enable_privileged(); -#elif defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) +#elif defined(CONFIG_X86) PVR_UNREFERENCED_PARAMETER(uiState); __uaccess_begin(); #else @@ -725,7 +740,7 @@ static inline void end_user_mode_access(IMG_UINT uiState) #elif defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) PVR_UNREFERENCED_PARAMETER(uiState); uaccess_disable_privileged(); -#elif defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) +#elif defined(CONFIG_X86) PVR_UNREFERENCED_PARAMETER(uiState); __uaccess_end(); #else @@ -744,8 +759,19 @@ static PVRSRV_ERROR _FlushUMVirtualRange(PVRSRV_DEVICE_NODE *psDevNode, mmap_read_lock(current->mm); - /* Check the addr space is not torn down in the mean time */ - psVMArea = psPrivData->psVMArea; + /* Check that the recorded psVMArea matches the one associated with + * this request. If not, fail the request. 
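/* Illustrative sketch of the vmap() fix earlier in this hunk: VM_MAP is the
 * flag vmap() expects for an ordinary kernel mapping of existing pages,
 * whereas VM_READ/VM_WRITE are userspace VMA flags and were previously
 * passed here by mistake. psPage and prot are placeholders. */
void *pvLinAddr = vmap(&psPage, 1, VM_MAP, prot);
if (pvLinAddr != NULL)
{
	/* ... use the single-page kernel mapping ... */
	vunmap(pvLinAddr);
}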
+ */ + psVMArea = find_vma(current->mm, (uintptr_t)pvCpuVAddr); + if ((psVMArea != psPrivData->psVMArea) || (psVMArea == NULL)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Couldn't find memory region containing start address %p", + __func__, (void *)pvCpuVAddr)); + eError = PVRSRV_ERROR_INVALID_CPU_ADDR; + goto UMFlushUnlockReturn; + } + /* * Latest kernels enable "Privileged access never" feature in the kernel @@ -760,16 +786,70 @@ static PVRSRV_ERROR _FlushUMVirtualRange(PVRSRV_DEVICE_NODE *psDevNode, * */ begin_user_mode_access(&uiUserAccessState); { -#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_METAG) + if (OSCPUCacheOpAddressType(psDevNode) == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + IMG_CPU_PHYADDR sCPUPhysStart = {0}; + + eError = CacheOpExec(psDevNode, + pvCpuVAddr, + ((IMG_UINT8 *)pvCpuVAddr + uiSize), + sCPUPhysStart, + sCPUPhysStart, + PVRSRV_CACHE_OP_FLUSH); + } + else if (OSCPUCacheOpAddressType(psDevNode) == OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + { + IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd; + IMG_UINT i = 0; - IMG_CPU_PHYADDR sCPUPhysStart = {0}; + for (i = 0; i < psPrivData->uiTotalNumPages; i++) + { + if (NULL != psPrivData->ppsPageArray[i]) + { + sCPUPhysStart.uiAddr = psPrivData->ppvPhysAddr[i].uiAddr; + sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE; + + eError = CacheOpExec(psDevNode, + NULL, + NULL, + sCPUPhysStart, + sCPUPhysEnd, + PVRSRV_CACHE_OP_FLUSH); + if (eError != PVRSRV_OK) + { + break; + } + } + } + } + else if (OSCPUCacheOpAddressType(psDevNode) == OS_CACHE_OP_ADDR_TYPE_BOTH) + { + IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd; + void *pvVirtStart, *pvVirtEnd; + IMG_UINT i = 0; - eError = CacheOpExec (psDevNode, - pvCpuVAddr, - ((IMG_UINT8 *)pvCpuVAddr + uiSize), - sCPUPhysStart, - sCPUPhysStart, - PVRSRV_CACHE_OP_CLEAN); + for (i = 0; i < psPrivData->uiTotalNumPages; i++) + { + if (NULL != psPrivData->ppsPageArray[i]) + { + pvVirtStart = pvCpuVAddr + (i * PAGE_SIZE); + pvVirtEnd = pvVirtStart + PAGE_SIZE; + sCPUPhysStart.uiAddr = psPrivData->ppvPhysAddr[i].uiAddr; + sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE; + + eError = CacheOpExec(psDevNode, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd, + PVRSRV_CACHE_OP_FLUSH); + if (eError != PVRSRV_OK) + { + break; + } + } + } + } if (PVRSRV_OK != eError) { @@ -779,44 +859,12 @@ static PVRSRV_ERROR _FlushUMVirtualRange(PVRSRV_DEVICE_NODE *psDevNode, pvCpuVAddr)); goto UMFlushFailed; } -#else - IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd; - void *pvVirtStart, *pvVirtEnd; - IMG_UINT i = 0; - - for (i = 0; i < psPrivData->uiTotalNumPages; i++) - { - if (NULL != psPrivData->ppsPageArray[i]) - { - pvVirtStart = pvCpuVAddr + (i * PAGE_SIZE); - pvVirtEnd = pvVirtStart + PAGE_SIZE; - - sCPUPhysStart.uiAddr = psPrivData->ppvPhysAddr[i].uiAddr; - sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE; - - eError = CacheOpExec (psDevNode, - pvVirtStart, - pvVirtEnd, - sCPUPhysStart, - sCPUPhysEnd, - PVRSRV_CACHE_OP_CLEAN); - - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to clean the virtual region cache %p", - __func__, - pvCpuVAddr)); - goto UMFlushFailed; - } - } - } -#endif } UMFlushFailed: end_user_mode_access(uiUserAccessState); +UMFlushUnlockReturn: mmap_read_unlock(current->mm); return eError; } @@ -830,7 +878,6 @@ static PMR_IMPL_FUNCTAB _sPMRWrapPFuncTab = { .pfnReadBytes = PMRReadBytesExtMem, .pfnWriteBytes = PMRWriteBytesExtMem, .pfnChangeSparseMem = NULL, - .pfnChangeSparseMemCPUMap = NULL, .pfnFinalize = &PMRFinalizeExtMem, }; @@ -916,8 +963,14 @@ 
PhysmemWrapExtMemOS(CONNECTION_DATA * psConnection, PMR_WRAP_DATA *psPrivData; PMR *psPMR; IMG_UINT uiTotalNumPages = (uiSize >> PAGE_SHIFT); + IMG_BOOL bIsPMRDestroyed = IMG_FALSE; IMG_UINT i = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + /* Ignore the most significant byte. */ + pvCpuVAddr = (IMG_CPU_VIRTADDR)untagged_addr((uintptr_t)pvCpuVAddr); +#endif + eError = PhysmemValidateParam(uiSize, pvCpuVAddr, uiFlags); @@ -961,11 +1014,11 @@ PhysmemWrapExtMemOS(CONNECTION_DATA * psConnection, pui32MappingTable[i] = i; } - /* Avoid creating dummy page or zero page when the entire + /* Avoid creating scratch page or zero page when the entire * allocation is backed and pinned */ if (psPrivData->uiNumBackedPages == psPrivData->uiTotalNumPages) { - uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING; + uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING; } /* Create a suitable PMR */ @@ -1010,11 +1063,15 @@ PhysmemWrapExtMemOS(CONNECTION_DATA * psConnection, return PVRSRV_OK; e3: - PMRUnrefPMR(psPMR); + (void) PMRUnrefPMR(psPMR); + bIsPMRDestroyed = IMG_TRUE; e2: OSFreeMem(pui32MappingTable); e1: - _WrapExtMemReleasePages(psPrivData); + if (!bIsPMRDestroyed) + { + (void)_WrapExtMemReleasePages(psPrivData); + } e0: return eError; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_wrap.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_wrap.h index 5e0480efef50..015360736d03 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_wrap.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_extmem_wrap.h @@ -86,7 +86,7 @@ typedef struct _PMR_WRAP_DATA_ * for pfn mappings this stays empty */ struct page **ppsPageArray; - /* VM Area structure */ + /* VM Area structure reference */ struct vm_area_struct *psVMArea; /* This should always be filled and hold the physical addresses */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.c index 3a664506de4f..e624878d6862 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.c @@ -54,6 +54,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #include #include +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +#include +#endif +#endif #if defined(CONFIG_X86) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) @@ -64,7 +70,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* include/ */ -#include "rgx_heaps.h" #include "img_types.h" #include "img_defs.h" #include "pvr_debug.h" @@ -77,6 +82,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pdump_km.h" #include "pmr.h" #include "pmr_impl.h" +#include "pmr_os.h" #include "cache_km.h" #include "devicemem_server_utils.h" #include "pvr_vmap.h" @@ -85,6 +91,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "physmem_cpumap_history.h" #endif +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +/* services/server/env/linux */ +#include "env_connection.h" +#endif +#endif + /* ourselves */ #include "physmem_osmem.h" #include "physmem_osmem_linux.h" @@ -98,22 +111,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "kernel_compatibility.h" -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM; -#else -/* split_page not available on older kernels */ -#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM -#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 -static IMG_UINT32 g_uiMaxOrder; -#endif -/* - These corresponds to the MMU min/max page sizes and associated PTE - alignment that can be used on the device for an allocation. It is - 4KB (min) and 2MB (max) respectively. -*/ -#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_4KB_PAGE_SHIFT -#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_2MB_PAGE_SHIFT /* Defines how many pages should be mapped at once to the kernel */ #define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */ @@ -123,12 +123,16 @@ static IMG_UINT32 g_uiMaxOrder; to provide side-band information associated with that address. These includes whether the address was obtained via alloc_page or dma_alloc and if address came allocated pre-aligned or an - adjustment was made manually to aligned it. + adjustment was made manually to aligned it. They can also indicate + a page entry in the array has been generated by us for ease of mapping + and is part of a higher order page, this is known as a CMA Ghost. */ +#define DMA_SET_CMA_GHOST(x) ((x) | ((dma_addr_t)0x03)) +#define DMA_IS_CMA_GHOST(x) (((x) & ((dma_addr_t)0x03)) == 0x03) #define DMA_SET_ADJUSTED_ADDR(x) ((x) | ((dma_addr_t)0x02)) -#define DMA_IS_ADDR_ADJUSTED(x) ((x) & ((dma_addr_t)0x02)) +#define DMA_IS_ADDR_ADJUSTED(x) (((x) & ((dma_addr_t)0x02)) == 0x02) #define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01)) -#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01)) +#define DMA_IS_ALLOCPG_ADDR(x) (((x) & ((dma_addr_t)0x01)) == 0x01) #define DMA_GET_ALIGN_ADJUSTMENT(x) ((x>>2) & ((dma_addr_t)0x3ff)) #define DMA_SET_ALIGN_ADJUSTMENT(x,y) ((x) | (((dma_addr_t)y)<<0x02)) #define DMA_GET_ADDR(x) (((dma_addr_t)x) & ((dma_addr_t)~0xfff)) @@ -139,6 +143,27 @@ static IMG_UINT32 g_uiMaxOrder; typedef struct _PMR_OSPAGEARRAY_DATA_ { /* Device for which this allocation has been made */ PVRSRV_DEVICE_NODE *psDevNode; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + + /* DRM Node associated with connection that made the allocation */ + struct drm_file *psDRMFile; +#endif + /* Protects migration in progress count and action of idling the device */ + POSWR_LOCK hPagesUnderMigrationRWLock; + /* How many pages in the PMR are currently in process of migration, + * Must be protected by above RW lock. + */ + IMG_INT32 iNumOSPagesUnderMigration; + + /* + * CPU mappings associated with PMR. + * Must be protected with PMR lock. 
+ */ + DLLIST_NODE sCpuMappingListHead; +#endif + /* The pid that made this allocation */ IMG_PID uiPid; @@ -157,14 +182,14 @@ typedef struct _PMR_OSPAGEARRAY_DATA_ { IMG_UINT32 uiTotalNumOSPages; /* - uiLog2AllocPageSize; + uiLog2DevPageSize; - size of each "page" -- this would normally be the same as + size of each requested device "page" -- this would normally be the same as PAGE_SHIFT, but we support the idea that we may allocate pages in larger chunks for better contiguity, using order>0 in the call to alloc_pages() */ - IMG_UINT32 uiLog2AllocPageSize; + IMG_UINT32 uiLog2DevPageSize; /* ui64DmaMask; @@ -187,9 +212,15 @@ typedef struct _PMR_OSPAGEARRAY_DATA_ { #define FLAG_POISON_ON_ALLOC (2U) #define FLAG_ONDEMAND (3U) -#define FLAG_IS_CMA (5U) +/* IS CMA ensures the allocation will go through the dma_alloc_coherent path */ +#define FLAG_DMA_CMA (5U) #define FLAG_UNSET_MEMORY_TYPE (6U) +#define FLAG_IS_ZOMBIE (7U) +/* Pref CMA appends a flag (if exists) to hint alloc_pages should source from CMA region */ +#define FLAG_PREF_CMA (8U) +#define FLAG_IS_MOVABLE (9U) + /* * Allocation flags related to the pages: * Zero - Should we Zero memory on alloc @@ -198,6 +229,7 @@ typedef struct _PMR_OSPAGEARRAY_DATA_ { * On demand - Is the allocation on Demand i.e Do we defer allocation to time of use. * CMA - Is CMA memory allocated via DMA framework * Unset Memory Type - Upon free do we need to revert the cache type before return to OS + * Zombie - Pages are part of a zombie PMR * */ IMG_UINT32 ui32AllocFlags; @@ -215,15 +247,22 @@ typedef struct _PMR_OSPAGEARRAY_DATA_ { * variable and are accounted for in the memory statistics */ IMG_UINT32 ui32CMAAdjustedPageCount; -#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) /* Handle on the parent PMR */ - void *hPMR; -#endif + PMR *hPMR; } PMR_OSPAGEARRAY_DATA; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +typedef struct _OSMEM_PAGE_PRIVDATA_ +{ + PMR_OSPAGEARRAY_DATA *psPMRData; + /* Store index of entry to speed up migration */ + IMG_UINT32 uiPMRArrIdx; +} OSMEM_PAGE_PRIVDATA; +#endif + /*********************************** * Page pooling for uncached pages * ***********************************/ @@ -239,6 +278,7 @@ _FreeOSPage_CMA(struct device *dev, static void _FreeOSPage(IMG_UINT32 uiOrder, IMG_BOOL bUnsetMemoryType, + PMR_OSPAGEARRAY_DATA *psPageArrayData, struct page *psPage); static PVRSRV_ERROR @@ -250,6 +290,13 @@ static PVRSRV_ERROR _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, IMG_UINT32 *puiPagesFreed); +static inline PVRSRV_ERROR +_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, + struct page **ppsPage, + IMG_UINT32 uiNumPages, + IMG_BOOL bFlush, + IMG_UINT32 ui32CPUCacheFlags); + /* A struct for our page pool holding an array of zeroed (!) pages. * We always put units of page arrays to the pool but are * able to take individual pages */ @@ -277,6 +324,9 @@ typedef struct /* Caches to hold page pool and page array structures */ static struct kmem_cache *g_psLinuxPagePoolCache; static struct kmem_cache *g_psLinuxPageArray; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +static struct kmem_cache *g_psLinuxPagePrivateData; +#endif /* Track what is live, all protected by pool lock. 
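/*
 * Illustrative sketch (not part of the patch): how a device page size that
 * is larger than the OS page size translates into an alloc_pages() order
 * and an OS-page count, mirroring the uiLog2DevPageSize handling above.
 * Names here are hypothetical.
 */
#include <linux/mm.h>

static unsigned int demo_dev_page_order(unsigned int log2_dev_page_size)
{
	/* e.g. a 64 KiB device page on a 4 KiB OS page system is order 4 */
	return log2_dev_page_size - PAGE_SHIFT;
}

static unsigned int demo_os_pages_per_dev_page(unsigned int log2_dev_page_size)
{
	return 1u << (log2_dev_page_size - PAGE_SHIFT);
}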
* x86 needs two page pools because we have to change the memory attributes @@ -333,14 +383,446 @@ static DEFINE_MUTEX(g_sPagePoolMutex); static LIST_HEAD(g_sPagePoolList_WC); static LIST_HEAD(g_sPagePoolList_UC); -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -/* Global structure to manage GPU memory leak */ -static DEFINE_MUTEX(g_sUMALeakMutex); -static IMG_UINT32 g_ui32UMALeakCounter = 0; -#endif static IMG_BOOL g_bInitialisedOnAlloc = IMG_FALSE; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +static bool OSMemPageIsolate(struct page *psPage, isolate_mode_t uIsolateMode); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +static int OSMemPageMigrate(struct address_space *psAddrSpace, + struct page *psDstPage, struct page *psSrcPage, enum migrate_mode eMigrateMode); +#else +static int OSMemPageMigrate(struct page *psDstPage, struct page *psSrcPage, enum migrate_mode eMigrateMode); +#endif +static void OSMemPagePutback(struct page *psPage); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION_DEBUG) +#define MIGRATE_DBG_LOG(x) PVR_DPF(x); +#else +#define MIGRATE_DBG_LOG(...) +#endif + +static INLINE OSMEM_PAGE_PRIVDATA* GetPrivateDataFromPage(struct page* psPage) +{ + return (OSMEM_PAGE_PRIVDATA*) page_private(psPage); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +static const struct address_space_operations movable_callbacks = +{ + .isolate_page = OSMemPageIsolate, + .migratepage = OSMemPageMigrate, + .putback_page = OSMemPagePutback + +}; +#else +static const struct movable_operations movable_callbacks = +{ + .isolate_page = OSMemPageIsolate, + .migrate_page = OSMemPageMigrate, + .putback_page = OSMemPagePutback +}; +#endif + +static INLINE void OSMemSetMovablePageAttr(struct page* psPage, + PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 uiPageIndex) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + struct drm_file *psDRMFile = psPageArrayData->psDRMFile; +#endif + OSMEM_PAGE_PRIVDATA *psPrivData; + + if (!BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_MOVABLE)) + { + return; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + /* Allocations marked with movable may occur before we have a valid connection + * and thus DRM file. In this case ignore the request. An example of this would be + * a kernel originated request which wouldn't have a connection associated. + */ + if (!psDRMFile) + { + PVR_ASSERT(!"Attempt to make allocation movable without connection associated, " + "possible server origin"); + return; + } +#endif + + psPrivData = kmem_cache_alloc(g_psLinuxPagePrivateData, GFP_KERNEL); + if (psPrivData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OS refused the memory allocation for the private page data. " + "Page will not be marked movable. 
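/*
 * Illustrative sketch (not part of the patch): the minimal sequence for
 * handing a driver-owned page to the kernel's movable-page machinery, as
 * OSMemSetMovablePageAttr() above does on kernels new enough to provide
 * struct movable_operations (the patch gates this on v6.0). The owner
 * pointer is stashed in page_private() so the isolate/migrate/putback
 * callbacks can find their metadata again; the helper name and mops table
 * are hypothetical.
 */
#include <linux/migrate.h>
#include <linux/mm.h>

static void demo_mark_movable(struct page *page, void *owner,
                              const struct movable_operations *mops)
{
	/* Stash owner data where the callbacks can retrieve it later
	 * via page_private(). */
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)owner);

	/* __SetPageMovable() expects the page lock to be held. */
	lock_page(page);
	__SetPageMovable(page, mops);
	unlock_page(page);
}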
PMR UID:%llu", + __func__, + PMRInternalGetUID(psPrivData->psPMRData->hPMR))); + return; + } + + psPrivData->psPMRData = psPageArrayData; + psPrivData->uiPMRArrIdx = uiPageIndex; + + SetPagePrivate(psPage); + set_page_private(psPage, (unsigned long)psPrivData); + + /* Take page lock here while we set movable as __SetPageMovable asserts it */ + lock_page(psPage); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + __SetPageMovable(psPage, psDRMFile->filp->f_inode->i_mapping); +#else + __SetPageMovable(psPage, &movable_callbacks); +#endif + unlock_page(psPage); +} + +/* Page should be locked before calling this function */ +static INLINE void OSMemSetPagePrivate(struct page* psPage) +{ + __ClearPageMovable(psPage); + set_page_private(psPage, 0); + ClearPagePrivate(psPage); +} + +static INLINE void OSMemUnsetMovablePageAttr(struct page* psPage, PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + OSMEM_PAGE_PRIVDATA *psPrivData; + + if (!BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_MOVABLE)) + { + return; + } + + /* Private data should only be set if page is movable */ + psPrivData = GetPrivateDataFromPage(psPage); + if (!psPrivData) + { + return; + } + + lock_page(psPage); + OSMemSetPagePrivate(psPage); + unlock_page(psPage); + + kmem_cache_free(g_psLinuxPagePrivateData, psPrivData); +} +static IMG_BOOL _MigrateGPUIdleGet(PMR_OSPAGEARRAY_DATA *psPMRData) +{ + PVRSRV_ERROR eError; + + /* Try take a ref on PMR, this ensures destruction will not occur while migrating */ + eError = PMRTryRefPMR(psPMRData->hPMR); + PVR_GOTO_IF_ERROR(eError, ErrorOut); + + OSWRLockAcquireWrite(psPMRData->hPagesUnderMigrationRWLock); + + /* If we just started migrating pages */ + if ((++psPMRData->iNumOSPagesUnderMigration) == 1) + { + eError = PVRSRVDeviceIdleLatchedGetKM(psPMRData->psDevNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleLatchedGetKM", ErrorOutDecr); + + OSWRLockReleaseWrite(psPMRData->hPagesUnderMigrationRWLock); + + PMRLockPMR(psPMRData->hPMR); + PMRNotifyMigrateInProgress(psPMRData->hPMR); + PMRUnlockPMR(psPMRData->hPMR); + } + else + { + OSWRLockReleaseWrite(psPMRData->hPagesUnderMigrationRWLock); + } + + return IMG_TRUE; + +ErrorOutDecr: + psPMRData->iNumOSPagesUnderMigration--; +#if defined(DEBUG) + PVR_DPF((PVR_DBG_WARNING, "%s: Failed to Idle GPU", __func__)); +#endif + OSWRLockReleaseWrite(psPMRData->hPagesUnderMigrationRWLock); + (void) PMRUnrefPMR(psPMRData->hPMR); +ErrorOut: + return IMG_FALSE; +} + +static void _MigrateGPUIdlePut(PMR_OSPAGEARRAY_DATA *psPMRData) +{ + PVRSRV_ERROR eError; + + OSWRLockAcquireWrite(psPMRData->hPagesUnderMigrationRWLock); + + /* If we just finished migrating pages */ + if ((--psPMRData->iNumOSPagesUnderMigration) == 0) + { + eError = PVRSRVDeviceIdleLatchedPutAsyncKM(psPMRData->psDevNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleLatchedPutAsyncKM", ErrorOut); + + OSWRLockReleaseWrite(psPMRData->hPagesUnderMigrationRWLock); + + PMRLockPMR(psPMRData->hPMR); + PMRNotifyMigrateComplete(psPMRData->hPMR); + PMRUnlockPMR(psPMRData->hPMR); + } + else + { + OSWRLockReleaseWrite(psPMRData->hPagesUnderMigrationRWLock); + } + + + /* Unref parent PMR to allow for subsequent destruction, + * if PMR resource no longer required and the bridge has + * dropped its handles to it, destruction will follow. 
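/*
 * Illustrative sketch (not part of the patch): the first-in / last-out
 * gating used by _MigrateGPUIdleGet()/_MigrateGPUIdlePut() above, reduced
 * to generic kernel primitives. The enter/exit hooks are hypothetical
 * stand-ins for the device idle request and release made by the driver.
 */
#include <linux/mutex.h>

struct demo_gate {
	struct mutex lock;
	int users;
	int (*enter)(void *ctx);   /* called when the count goes 0 -> 1 */
	void (*exit)(void *ctx);   /* called when the count goes 1 -> 0 */
	void *ctx;
};

static int demo_gate_get(struct demo_gate *g)
{
	int err = 0;

	mutex_lock(&g->lock);
	if (++g->users == 1) {
		err = g->enter(g->ctx);
		if (err)
			g->users--;
	}
	mutex_unlock(&g->lock);
	return err;
}

static void demo_gate_put(struct demo_gate *g)
{
	mutex_lock(&g->lock);
	if (--g->users == 0)
		g->exit(g->ctx);
	mutex_unlock(&g->lock);
}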
+ */ + (void) PMRUnrefPMR(psPMRData->hPMR); + + return; + +ErrorOut: + psPMRData->iNumOSPagesUnderMigration++; + OSWRLockReleaseWrite(psPMRData->hPagesUnderMigrationRWLock); + PVR_DPF((PVR_DBG_FATAL,"Request to Unidle GPU failed, unrecoverable state!")); +} + +static bool OSMemPageIsolate(struct page *psPage, isolate_mode_t uIsolateMode) +{ + OSMEM_PAGE_PRIVDATA *psPrivData = GetPrivateDataFromPage(psPage); + PMR_OSPAGEARRAY_DATA *psPMRData; + IMG_BOOL bSuccess; + + + PVR_GOTO_IF_FALSE(psPrivData != NULL, condition_failed); + psPMRData = psPrivData->psPMRData; + + /* Attempt to migrate pages before PMR wrapper has been setup, + * avoid migration at this time. + */ + PVR_GOTO_IF_FALSE(psPMRData->hPMR, condition_failed); + + bSuccess = _MigrateGPUIdleGet(psPMRData); + PVR_GOTO_IF_FALSE(bSuccess, condition_failed); + + PVR_GOTO_IF_FALSE(!PMR_IsMemLayoutFixed(psPMRData->hPMR), clear_movable); + PVR_GOTO_IF_FALSE(!PMR_IsGpuMultiMapped(psPMRData->hPMR), clear_movable); + PVR_GOTO_IF_FALSE(!PMR_IsKernelCpuMapped(psPMRData->hPMR), condition_failed_decr); +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PVR_GOTO_IF_FALSE(!PMR_IsZombie(psPMRData->hPMR), condition_failed_decr); +#endif + + + psPrivData->psPMRData->pagearray[psPrivData->uiPMRArrIdx] = NULL; + + MIGRATE_DBG_LOG((PVR_DBG_ERROR, "Isolated: PMR UID %llu, PMR Size 0x%llx, " + "OSPageSize %lu, PageAddr 0x%llx", + PMRInternalGetUID(psPMRData->hPMR), + (unsigned long long) PMR_LogicalSize(psPMRData->hPMR), + OSGetPageSize(), + (unsigned long long) psPage)); + + /* Movable */ + return true; + +clear_movable: + OSMemSetPagePrivate(psPage); + kmem_cache_free(g_psLinuxPagePrivateData, psPrivData); + +condition_failed_decr: + _MigrateGPUIdlePut(psPMRData); + +condition_failed: + /* Not movable */ + return false; +} + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +static int OSMemPageMigrate(struct address_space *psAddrSpace, + struct page *psDstPage, struct page *psSrcPage, enum migrate_mode eMigrateMode) +#else +static int OSMemPageMigrate(struct page *psDstPage, struct page *psSrcPage, enum migrate_mode eMigrateMode) +#endif +{ + PVRSRV_ERROR eError; + int iLinuxError; + void* vpDst; + void* vpSrc; + PMR_OSPAGEARRAY_DATA *psPMRData; + OSMEM_PAGE_PRIVDATA *psSrcPagePrivData = GetPrivateDataFromPage(psSrcPage); + /* Align phys to logical */ + IMG_UINT32 uiLogicalAlignedOffset; + + PVR_UNREFERENCED_PARAMETER(eMigrateMode); + + if (!psSrcPagePrivData) + { + /* Shouldn't hit this case */ + PVR_DPF((PVR_DBG_ERROR, "Migrate invalid, potentially leaked PMR")); + return -EINVAL; + } + psPMRData = psSrcPagePrivData->psPMRData; + + /* Calc logical aligned index offset, dev page aligned down. */ + uiLogicalAlignedOffset = ( + IMG_PAGES2BYTES64(psSrcPagePrivData->uiPMRArrIdx, PAGE_SHIFT) & + ~(psPMRData->uiLog2DevPageSize - 1) + ) >> PAGE_SHIFT; + + /* If layout has been fixed after isolation, cancel the migration */ + PVR_RETURN_IF_FALSE(!PMR_IsMemLayoutFixed(psPMRData->hPMR), -EPERM); + + /* Unmap the page from any CPU mappings on this PMR. The mapping will + * be repaired by the fault handler installed on the vma that + * represents the CPU mapping. + */ + OSLinuxPMRUnmapPageInPMR(psPMRData->hPMR, + &psPMRData->sCpuMappingListHead, + uiLogicalAlignedOffset); + + /* Set the new page in place, if any errors occur with migration the + * putback callback will reset the original page. 
+ */ + psPMRData->pagearray[psSrcPagePrivData->uiPMRArrIdx] = psDstPage; + + /* Apply caching reqs to page we receive from OS */ + eError = _ApplyOSPagesAttribute(psPMRData->psDevNode, + &psPMRData->pagearray[psSrcPagePrivData->uiPMRArrIdx], + 1, + IMG_FALSE, + psPMRData->ui32CPUCacheFlags); + PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "_ApplyOSPagesAttribute", -EPERM); + + /* Trigger remapping */ + eError = PMRRemapGPUPMR(psPMRData->hPMR, uiLogicalAlignedOffset); + if (eError != PVRSRV_OK) + { + iLinuxError = -EPERM; + if (eError != PVRSRV_ERROR_DEVICEMEM_REMAP_REJECTED) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMRRemapGPUPMR() failed to remap page.", __func__)); + } + goto error_reset_page_attrs; + } + + vpDst = kmap(psDstPage); + vpSrc = kmap(psSrcPage); + + + OSDeviceMemCopy(vpDst, vpSrc, PAGE_SIZE); + + kunmap(vpDst); + kunmap(vpSrc); + + MIGRATE_DBG_LOG((PVR_DBG_ERROR, "Migrated: PMR UID %llu, PMR Size 0x%llx, " + "OSPageSize %lu, SrcPageAddr 0x%llx, DstPageAddr 0x%llx", + PMRInternalGetUID(psSrcPagePrivData->psPMRData->hPMR), + (unsigned long long) PMR_LogicalSize(psSrcPagePrivData->psPMRData->hPMR), + OSGetPageSize(), + (unsigned long long) psSrcPage, + (unsigned long long) psDstPage)); + + get_page(psDstPage); + SetPagePrivate(psDstPage); + set_page_private(psDstPage, (unsigned long)psSrcPagePrivData); + + /* DstPage already locked prior to callback */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + __SetPageMovable(psDstPage, psPMRData->psDRMFile->filp->f_inode->i_mapping); +#else + __SetPageMovable(psDstPage, &movable_callbacks); +#endif + + /* Migration for this page no longer in progress */ + _MigrateGPUIdlePut(psSrcPagePrivData->psPMRData); + +#if defined(CONFIG_X86) + /* Reset caching attrs of page we give back to OS */ + if (!set_pages_array_wb(&psSrcPage, 1)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute for " + " page given back to kernel. PMR UID:%llu", + __func__, + (unsigned long long)PMRInternalGetUID(psSrcPagePrivData->psPMRData->hPMR))); + } +#endif + + set_page_private(psSrcPage, 0); + ClearPagePrivate(psSrcPage); + put_page(psSrcPage); + __ClearPageMovable(psSrcPage); + + return MIGRATEPAGE_SUCCESS; + +error_reset_page_attrs: +#if defined(CONFIG_X86) + /* Reset caching attrs of page we give back to OS */ + if (!set_pages_array_wb(&psDstPage, 1)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute for " + " page given from kernel. 
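/*
 * Illustrative sketch (not part of the patch): copying the contents of the
 * old page into the replacement page during migration, as OSMemPageMigrate()
 * does above. Note that kunmap() takes the struct page, not the mapped
 * address; kmap_local_page()/kunmap_local() would also work on recent
 * kernels, plain kmap()/kunmap() is shown to match the patch.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void demo_copy_page_contents(struct page *dst, struct page *src)
{
	void *vdst = kmap(dst);
	void *vsrc = kmap(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap(src);
	kunmap(dst);
}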
PMR UID:%llu", + __func__, + (unsigned long long)PMRInternalGetUID(psSrcPagePrivData->psPMRData->hPMR))); + } +#endif + return iLinuxError; +} + +static void OSMemPagePutback(struct page *psPage) +{ + OSMEM_PAGE_PRIVDATA *psPrivData = GetPrivateDataFromPage(psPage); + if (!psPrivData) + { + /* We shouldn't hit this case */ + PVR_DPF((PVR_DBG_ERROR, "Putback invalid, potentially leaked PMR")); + return; + } + + MIGRATE_DBG_LOG((PVR_DBG_ERROR, "Putback: PMR UID %llu, PMR Size 0x%llx, " + "OSPageSize %lu, PageAddr 0x%llx", + PMRInternalGetUID(psPrivData->psPMRData->hPMR), + (unsigned long long) PMR_LogicalSize(psPrivData->psPMRData->hPMR), + OSGetPageSize(), + (unsigned long long) psPage)); + + + /* Restore the page to our structures */ + psPrivData->psPMRData->pagearray[psPrivData->uiPMRArrIdx] = psPage; + + /* Migration for this page no longer in progress */ + _MigrateGPUIdlePut(psPrivData->psPMRData); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +int LinuxRegisterMigrateCallbacks(struct file* filp) +{ + if (filp == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "filp NULL, cannot set movable callbacks, PID %u:%s", + OSGetCurrentClientProcessIDKM(), + OSGetCurrentClientProcessNameKM())); + return -EINVAL; + } + + filp->f_inode->i_mapping->a_ops = &movable_callbacks; + + return 0; +} + +void LinuxDeregisterMigrateCallbacks(struct file* filp) +{ + if (filp == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "filp NULL, cannot unset movable callbacks")); + return; + } + + filp->f_inode->i_mapping->a_ops = NULL; +} +#endif +#endif /* defined(SUPPORT_LINUX_OSPAGE_MIGRATION) */ + static inline IMG_BOOL _ShouldInitMem(IMG_UINT32 ui32AllocFlags) { @@ -413,7 +895,12 @@ _GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, return IMG_TRUE; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)) static struct shrinker g_sShrinker; +static struct shrinker *g_psShrinker = &g_sShrinker; +#else +static struct shrinker *g_psShrinker; +#endif /* Returning the number of pages that still reside in the page pool. 
*/ static unsigned long @@ -429,7 +916,7 @@ _CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psSh { int remain; - PVR_ASSERT(psShrinker == &g_sShrinker); + PVR_ASSERT(psShrinker == g_psShrinker); (void)psShrinker; (void)psShrinkControl; @@ -449,7 +936,7 @@ _ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShr unsigned long uNumToScan = psShrinkControl->nr_to_scan; IMG_UINT32 uiPagesFreed; - PVR_ASSERT(psShrinker == &g_sShrinker); + PVR_ASSERT(psShrinker == g_psShrinker); (void)psShrinker; /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ @@ -460,41 +947,12 @@ _ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShr &uiPagesFreed); uNumToScan -= uiPagesFreed; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) - { - int remain; - remain = _GetNumberOfPagesInPoolUnlocked(); - _PagePoolUnlock(); - return remain; - } -#else /* Returning the number of pages freed during the scan */ _PagePoolUnlock(); return psShrinkControl->nr_to_scan - uNumToScan; -#endif -} - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) -static int -_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) -{ - if (psShrinkControl->nr_to_scan != 0) - { - return _ScanObjectsInPagePool(psShrinker, psShrinkControl); - } - else - { - /* No pages are being reclaimed so just return the page count */ - return _CountObjectsInPagePool(psShrinker, psShrinkControl); - } } -static struct shrinker g_sShrinker = -{ - .shrink = _ShrinkPagePool, - .seeks = DEFAULT_SEEKS -}; -#else +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)) static struct shrinker g_sShrinker = { .count_objects = _CountObjectsInPagePool, @@ -504,22 +962,52 @@ static struct shrinker g_sShrinker = #endif /* Register the shrinker so Linux can reclaim cached pages */ -void LinuxInitPhysmem(void) +PVRSRV_ERROR LinuxInitPhysmem(void) { + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + g_psLinuxPagePrivateData = kmem_cache_create("pvr-ppd", sizeof(OSMEM_PAGE_PRIVDATA), 0, 0, NULL); + if (!g_psLinuxPagePrivateData) + { + PVR_LOG_GOTO_WITH_ERROR("kmem_cache_create() g_psLinuxPagePrivateData", + eError, PVRSRV_ERROR_OUT_OF_MEMORY, e1); + } +#endif + g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL); + if (!g_psLinuxPageArray) + { + PVR_LOG_GOTO_WITH_ERROR("kmem_cache_create() g_psLinuxPageArray", + eError, PVRSRV_ERROR_OUT_OF_MEMORY, e2); + } g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL); if (g_psLinuxPagePoolCache) { /* Only create the shrinker if we created the cache OK */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) - register_shrinker(&g_sShrinker); -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)) register_shrinker(&g_sShrinker, "pvr-pp"); #else - shrinker_register(&g_sShrinker); + g_psShrinker = shrinker_alloc(0, "pvr-pp"); + if (g_psShrinker) + { + g_psShrinker->count_objects = _CountObjectsInPagePool; + g_psShrinker->scan_objects = _ScanObjectsInPagePool; + g_psShrinker->seeks = DEFAULT_SEEKS; + shrinker_register(g_psShrinker); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Unable to allocate a shrinker for page pool cache")); + } #endif } + else + { + PVR_LOG_GOTO_WITH_ERROR("kmem_cache_create() g_psLinuxPagePoolCache", + eError, PVRSRV_ERROR_OUT_OF_MEMORY, e3); + } OSAtomicWrite(&g_iPoolCleanTasks, 0); @@ -541,6 +1029,20 @@ void LinuxInitPhysmem(void) 
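/*
 * Illustrative sketch (not part of the patch): registering a shrinker on
 * both sides of the v6.7 API change, mirroring LinuxInitPhysmem() above.
 * Before 6.7 a statically defined struct shrinker is registered directly
 * (the two-argument register_shrinker() shown needs v6.0+, matching the
 * patch); from 6.7 the object must come from shrinker_alloc() and be
 * published with shrinker_register(). The callbacks are hypothetical.
 */
#include <linux/shrinker.h>
#include <linux/errno.h>
#include <linux/version.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;	/* nothing cached in this demo */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0))
static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects  = demo_scan,
	.seeks         = DEFAULT_SEEKS,
};

static int demo_register_shrinker(void)
{
	return register_shrinker(&demo_shrinker, "demo-pool");
}
#else
static struct shrinker *demo_shrinker;

static int demo_register_shrinker(void)
{
	demo_shrinker = shrinker_alloc(0, "demo-pool");
	if (!demo_shrinker)
		return -ENOMEM;

	demo_shrinker->count_objects = demo_count;
	demo_shrinker->scan_objects  = demo_scan;
	demo_shrinker->seeks         = DEFAULT_SEEKS;
	shrinker_register(demo_shrinker);
	return 0;
}
#endif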
g_bInitialisedOnAlloc = IMG_FALSE; #endif #endif + + return PVRSRV_OK; + +e3: + /* Free the page array */ + kmem_cache_destroy(g_psLinuxPageArray); +e2: +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /* Free the page private data */ + kmem_cache_destroy(g_psLinuxPagePrivateData); +e1: +#endif + + return eError; } /* Unregister the shrinker and remove all pages from the pool that are still left */ @@ -565,16 +1067,23 @@ void LinuxDeinitPhysmem(void) PVR_ASSERT(_PagesInPoolUnlocked() == 0); /* Free the page cache */ - kmem_cache_destroy(g_psLinuxPagePoolCache); + if (g_psLinuxPagePoolCache) + kmem_cache_destroy(g_psLinuxPagePoolCache); #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)) unregister_shrinker(&g_sShrinker); #else - shrinker_free(&g_sShrinker); + shrinker_free(g_psShrinker); #endif _PagePoolUnlock(); - kmem_cache_destroy(g_psLinuxPageArray); + if (g_psLinuxPageArray) + kmem_cache_destroy(g_psLinuxPageArray); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + if (g_psLinuxPagePrivateData) + kmem_cache_destroy(g_psLinuxPagePrivateData); +#endif } static void EnableOOMKiller(void) @@ -841,7 +1350,6 @@ _GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, #endif _DumpPoolStructure(); - return; } /* Same as _GetPagesFromPoolUnlocked but handles locking and @@ -849,17 +1357,15 @@ _GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, static inline void _GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32CPUCacheFlags, + IMG_UINT32 ui32AllocFlags, IMG_UINT32 uiPagesToAlloc, IMG_UINT32 uiOrder, - IMG_BOOL bZero, struct page **ppsPageArray, IMG_UINT32 *puiPagesFromPool) { -#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) - PVR_UNREFERENCED_PARAMETER(bZero); -#else - /* Don't get pages from pool if it doesn't provide zeroed pages */ - if (bZero) +#if !defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + /* Don't get pages from pool as it doesn't provide zeroed pages */ + if (BIT_ISSET(ui32AllocFlags, FLAG_ZERO)) { return; } @@ -867,9 +1373,13 @@ _GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, /* The page pool stores only order 0 pages. If we need zeroed memory we * directly allocate from the OS because it is faster than - * doing it within the driver. */ + * doing it within the driver. CMA is naturally more commonly used for higher + * order pages and so we reject that also. 
+ */ if (uiOrder == 0 && - !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags) && + !BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA) && + !BIT_ISSET(ui32AllocFlags, FLAG_IS_MOVABLE)) { _PagePoolLock(); @@ -879,8 +1389,6 @@ _GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, puiPagesFromPool); _PagePoolUnlock(); } - - return; } /* Takes a page array and maps it into the kernel to write zeros */ @@ -901,7 +1409,7 @@ _MemsetPageArray(IMG_UINT32 uiNumToClean, { IMG_UINT32 uiToClean = MIN(uiNumToClean, uiMaxPagesToMap); - pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot); + pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_MAP, pgprot); if (!pvAddr) { if (uiMaxPagesToMap <= 1) @@ -949,6 +1457,7 @@ _CleanupThread_CleanPages(void *pvData) LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry; struct list_head *psPoolHead = NULL; IMG_UINT32 *puiCounter = NULL; + #if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) PVRSRV_ERROR eError; pgprot_t pgprot; @@ -1037,7 +1546,7 @@ _CleanupThread_CleanPages(void *pvData) for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++) { - _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]); + _FreeOSPage(0, IMG_TRUE, NULL, psPagePoolEntry->ppsPageArray[i]); } OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); @@ -1049,119 +1558,154 @@ _CleanupThread_CleanPages(void *pvData) #endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ } +static bool _PagesHaveOtherRefs(struct page **ppsPageArray, IMG_UINT32 uiNumPages) +{ + IMG_UINT32 ui32pageIndex; + + PVR_DPF_ENTERED; + + if (!ppsPageArray) + { + PVR_DPF_RETURN_RC(false); + } + + /* Iterate pages in ppsPageArray and return + false if any are found to have page_count() > 1 + */ + for (ui32pageIndex = 0; ui32pageIndex < uiNumPages; ui32pageIndex++) + { + struct page *psPage = ppsPageArray[ui32pageIndex]; + + if (page_count(psPage) > 1) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: page %d in page array found with page_count() of %d", + __func__, ui32pageIndex, page_count(psPage))); + PVR_DPF_RETURN_RC(true); + } + } + + PVR_DPF_RETURN_RC(false); +} /* Put page array to the page pool. * Handles locking and checks whether the pages are * suitable to be stored in the pool. */ static inline IMG_BOOL -_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, - struct page **ppsPageArray, - IMG_UINT32 uiOrder, - IMG_UINT32 uiNumPages) +_PutPagesToPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32CPUCacheFlags, + IMG_UINT32 ui32AllocFlags, + struct page **ppsPageArray, + IMG_UINT32 uiOrder, + IMG_UINT32 uiNumPages) { LinuxCleanupData *psCleanupData; PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; + IMG_UINT32 uiEntries; + IMG_UINT32 *puiCounter; + struct list_head *psPoolHead; #if defined(SUPPORT_PHYSMEM_TEST) PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); #endif - if (uiOrder == 0 && - !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) - { - IMG_UINT32 uiEntries; - IMG_UINT32 *puiCounter; - struct list_head *psPoolHead; + PVR_DPF_ENTERED; + /* The page pool stores only order 0 pages. CMA is naturally more + * commonly used for higher order pages and so we reject that also. 
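/*
 * Illustrative sketch (not part of the patch): zeroing a batch of pages by
 * mapping them into the kernel in one go, the same idea _MemsetPageArray()
 * implements above via pvr_vmap(). VM_MAP (rather than VM_WRITE, which is a
 * VMA flag) is the flag to pass for a regular vmap() mapping. The helper
 * name is hypothetical.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>

static int demo_zero_pages(struct page **pages, unsigned int count)
{
	void *vaddr = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, (size_t)count * PAGE_SIZE);
	vunmap(vaddr);
	return 0;
}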
+ */ + if (uiOrder != 0 || + PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags) || + BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA) || + BIT_ISSET(ui32AllocFlags, FLAG_IS_MOVABLE)) + { + goto eExitFalse; + } - _PagePoolLock(); + if (_PagesHaveOtherRefs(ppsPageArray, uiNumPages)) + { + /* Pages still have other references, so + must not be put into our pool */ + goto eExitFalse; + } - uiEntries = _PagesInPoolUnlocked(); + _PagePoolLock(); - /* Check for number of current page pool entries and whether - * we have other asynchronous tasks in-flight */ - if ( (uiEntries < g_ui32PagePoolMaxEntries) && - ((uiEntries + uiNumPages) < - (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) - { - if (OSAtomicIncrement(&g_iPoolCleanTasks) <= - PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) - { -#if defined(SUPPORT_PHYSMEM_TEST) - if (!psPVRSRVData->hCleanupThread) - { - goto eDecrement; - } -#endif + uiEntries = _PagesInPoolUnlocked(); - psCleanupData = OSAllocMem(sizeof(*psCleanupData)); + /* Check for number of current page pool entries */ + if ( (uiEntries >= g_ui32PagePoolMaxEntries) || + ((uiEntries + uiNumPages) >= + (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) + { + goto eUnlock; + } - if (!psCleanupData) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to get memory for deferred page pool cleanup. " - "Trying to free pages immediately", - __func__)); - goto eDecrement; - } + /* Check for whether we have other asynchronous tasks in-flight */ + if (OSAtomicIncrement(&g_iPoolCleanTasks) > + PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) + { + goto eDecrement; + } - psCleanupThreadFn = &psCleanupData->sCleanupWork; - psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; - psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); +#if defined(SUPPORT_PHYSMEM_TEST) + if (!psPVRSRVData->hCleanupThread) + { + goto eDecrement; + } +#endif - if (!psCleanupData->psPoolEntry) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to get memory for deferred page pool cleanup. " - "Trying to free pages immediately", - __func__)); - goto eFreeCleanupData; - } + psCleanupData = OSAllocMem(sizeof(*psCleanupData)); - if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to get correct page pool", - __func__)); - goto eFreePoolEntry; - } + if (!psCleanupData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. " + "Trying to free pages immediately", + __func__)); + goto eDecrement; + } - /* Increase counter here to avoid deferred cleanup tasks piling up */ - *puiCounter = *puiCounter + uiNumPages; + psCleanupThreadFn = &psCleanupData->sCleanupWork; + psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; + psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); - psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; - psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; + if (!psCleanupData->psPoolEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. 
" + "Trying to free pages immediately", + __func__)); + goto eFreeCleanupData; + } - psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; - psCleanupThreadFn->pvData = psCleanupData; - psCleanupThreadFn->bDependsOnHW = IMG_FALSE; - CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn, - CLEANUP_THREAD_RETRY_COUNT_DEFAULT); + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get correct page pool", + __func__)); + goto eFreePoolEntry; + } - /* We must not hold the pool lock when calling AddWork because it might call us back to - * free pooled pages directly when unloading the driver */ - _PagePoolUnlock(); + /* Increase counter here to avoid deferred cleanup tasks piling up */ + *puiCounter = *puiCounter + uiNumPages; - PVRSRVCleanupThreadAddWork(psCleanupThreadFn); + psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; + psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; + psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; + psCleanupThreadFn->pvData = psCleanupData; + psCleanupThreadFn->bDependsOnHW = IMG_FALSE; + psCleanupThreadFn->eCleanupType = PVRSRV_CLEANUP_TYPE_OSMEM; + CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn, + CLEANUP_THREAD_RETRY_COUNT_DEFAULT); - } - else - { - goto eDecrement; - } + /* We must not hold the pool lock when calling AddWork because it might call us back to + * free pooled pages directly when unloading the driver */ + _PagePoolUnlock(); - } - else - { - goto eUnlock; - } - } - else - { - goto eExitFalse; - } + PVRSRVCleanupThreadAddWork(psDevNode, psCleanupThreadFn); - return IMG_TRUE; + PVR_DPF_RETURN_RC(IMG_TRUE); eFreePoolEntry: OSFreeMem(psCleanupData->psPoolEntry); @@ -1172,16 +1716,47 @@ _PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, eUnlock: _PagePoolUnlock(); eExitFalse: - return IMG_FALSE; + PVR_DPF_RETURN_RC(IMG_FALSE); } /* Get the GFP flags that we pass to the page allocator */ static inline gfp_t -_GetGFPFlags(IMG_BOOL bZero, - PVRSRV_DEVICE_NODE *psDevNode) +_GetGFPFlags(PMR_OSPAGEARRAY_DATA *psPageArrayData) { - struct device *psDev = psDevNode->psDevConfig->pvOSDevice; - gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC; + /* If higher order pages then potentially ask the kernel to zero the allocation + * for us. If zero order we can do it ourselves later in a batch operation + */ + IMG_UINT32 uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; + IMG_BOOL bZero = uiOrder ? _ShouldInitMem(psPageArrayData->ui32AllocFlags) : IMG_FALSE; + struct device *psDev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + + gfp_t gfp_flags = __GFP_NOWARN | __GFP_NOMEMALLOC; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_MOVABLE)) + { + gfp_flags |= GFP_HIGHUSER_MOVABLE; + } + else +#else + { + gfp_flags |= GFP_USER; + } +#endif + +#if defined(ANDROID) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) + /* Only Android has CMA suggestion flag, base Linux kernel uses heuristics based on memory + * pressure / availability to achieve similar goal. 
+ */ + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_PREF_CMA)) + { + gfp_flags |= __GFP_CMA; + } +#endif +#endif #if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) /* Force use of HIGHMEM */ @@ -1281,9 +1856,7 @@ _PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode, static PVRSRV_ERROR _AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, PMR_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 uiLog2AllocPageSize, + IMG_UINT32 uiLog2DevPageSize, IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32CPUCacheFlags, IMG_PID uiPid, @@ -1291,18 +1864,14 @@ _AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, { PVRSRV_ERROR eError; IMG_UINT32 uiNumOSPageSizeVirtPages; - IMG_UINT32 uiNumDevPageSizeVirtPages; PMR_OSPAGEARRAY_DATA *psPageArrayData; IMG_UINT64 ui64DmaMask = 0; - PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); /* Use of cast below is justified by the assertion that follows to * prove that no significant bits have been truncated */ uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1); PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize); - uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT); - /* Allocate the struct to hold the metadata */ psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL); if (psPageArrayData == NULL) @@ -1325,32 +1894,36 @@ _AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not * try to acquire the vmalloc hash table lock again. */ - psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages); + psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumOSPageSizeVirtPages); if (psPageArrayData->pagearray == NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto e_free_kmem_cache; } - else + + if (BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA)) { - if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */ + psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumOSPageSizeVirtPages); + if (psPageArrayData->dmavirtarray == NULL) { - /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */ - psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages); - if (psPageArrayData->dmavirtarray == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e_free_pagearray; - } + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_pagearray; + } - psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages); - if (psPageArrayData->dmaphysarray == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e_free_cpuvirtaddrarray; - } + psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumOSPageSizeVirtPages); + if (psPageArrayData->dmaphysarray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_cpuvirtaddrarray; } } + else + { + psPageArrayData->dmavirtarray = NULL; + psPageArrayData->dmaphysarray = NULL; + } + if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice) { @@ -1359,15 +1932,22 @@ _AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, } /* Init metadata */ + psPageArrayData->hPMR = NULL; psPageArrayData->psDevNode = psDevNode; psPageArrayData->uiPid = uiPid; psPageArrayData->iNumOSPagesAllocated = 0; psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages; - 
psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize; + psPageArrayData->uiLog2DevPageSize = uiLog2DevPageSize; psPageArrayData->ui64DmaMask = ui64DmaMask; psPageArrayData->ui32AllocFlags = ui32AllocFlags; psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags; psPageArrayData->ui32CMAAdjustedPageCount = 0; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + eError = OSWRLockCreate(&psPageArrayData->hPagesUnderMigrationRWLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSWRLockCreate", e_free_cpuvirtaddrarray); + psPageArrayData->iNumOSPagesUnderMigration = 0; + dllist_init(&psPageArrayData->sCpuMappingListHead); +#endif *ppsPageArrayDataPtr = psPageArrayData; return PVRSRV_OK; @@ -1398,7 +1978,7 @@ _ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, { void * pvAddr; - if (OSCPUCacheOpAddressType() == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + if (OSCPUCacheOpAddressType(psDevNode) == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) { pgprot_t pgprot = PAGE_KERNEL; @@ -1415,7 +1995,7 @@ _ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, IMG_CPU_PHYADDR sUnused = { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; - pvAddr = pvr_vmap(ppsCleanArray, uiToClean, -1, pgprot); + pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_MAP, pgprot); if (!pvAddr) { PVR_DPF((PVR_DBG_ERROR, @@ -1488,7 +2068,7 @@ _ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, if (bCPUUncached || bCPUWriteCombine) { /* On x86 if we already have a mapping (e.g. low memory) we need to change the mode of - current mapping before we map it ourselves */ + current mapping before we map it ourselves */ int ret = IMG_FALSE; switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) @@ -1558,7 +2138,6 @@ _AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData, IMG_UINT32 uiAllocIsMisaligned; size_t alloc_size = PAGE_SIZE << ui32AllocOrder; struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; - PVR_ASSERT(ui32AllocOrder == ui32MinOrder); do { @@ -1662,19 +2241,18 @@ _AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData, /* Store adjustments in PAGE_SIZE counts */ align_adjust = align_adjust >> PAGE_SHIFT; bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust); - } - /* Taint bus_addr due to over-allocation, allows us to free - * memory correctly */ - bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr); + /* Taint bus_addr due to over-allocation, allows us to free + * memory correctly */ + bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr); + } uiAllocIsMisaligned = 0; } } } while (uiAllocIsMisaligned); /* Convert OSPageSize-based index into DevicePageSize-based index */ - psPageArrayData->ui32CMAAdjustedPageCount += (alloc_size - (PAGE_SIZE << ui32AllocOrder )); - + psPageArrayData->ui32CMAAdjustedPageCount += (alloc_size - (PAGE_SIZE << ui32AllocOrder)); psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr; psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr; psPageArrayData->pagearray[uiPageIndex] = page; @@ -1712,7 +2290,6 @@ _AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData, return PVRSRV_ERROR_OUT_OF_MEMORY; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) /* In case we need to, split the higher order page; this should only be used for order-0 allocations as higher order allocations should use DMA/CMA */ @@ -1720,11 +2297,13 @@ _AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData, { split_page(psPage, uiAllocOrder); } -#endif /* Store the page (or multiple split pages) in the page array */ for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++) { +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + 
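/*
 * Illustrative sketch (not part of the patch): allocating a higher-order
 * block and breaking it into independently refcounted order-0 pages with
 * split_page(), which is what _AllocOSPage() above relies on so that
 * mixed-order allocations can still be stored and freed page by page.
 * The helper name is hypothetical.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int demo_alloc_split(struct page **out, unsigned int order, gfp_t gfp)
{
	unsigned int i;
	struct page *head = alloc_pages(gfp, order);

	if (!head)
		return -ENOMEM;

	/* After split_page() each constituent page can be freed
	 * individually (e.g. with __free_page()). */
	split_page(head, order);

	for (i = 0; i < (1u << order); i++)
		out[i] = head + i;

	return 0;
}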
OSMemSetMovablePageAttr(&psPage[ui32Count], psPageArrayData, uiPageIndex + ui32Count); +#endif psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]); } @@ -1735,24 +2314,47 @@ _AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData, #if defined(PVRSRV_ENABLE_MEMORY_STATS) static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, - struct page *psPage) + const struct page *psPage, + IMG_UINT32 uiOrder) { IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) }; + IMG_UINT32 uiPageSize = 1 << psPageArrayData->uiLog2DevPageSize; PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, NULL, sCPUPhysAddr, - 1 << psPageArrayData->uiLog2AllocPageSize, + uiPageSize << uiOrder, psPageArrayData->uiPid DEBUG_MEMSTATS_VALUES); } static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, - struct page *psPage) + const struct page *psPage) { - PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, - (IMG_UINT64) page_to_phys(psPage), +#if defined(SUPPORT_PMR_DEFERRED_FREE) + IMG_UINT32 uiStat = BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_ZOMBIE) + ? PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES + : PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; +#else + IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; +#endif + + PVRSRVStatsRemoveMemAllocRecord(uiStat, (IMG_UINT64) page_to_phys(psPage), psPageArrayData->uiPid); } +#if defined(SUPPORT_PMR_DEFERRED_FREE) +static inline void _TransferToMemZombieRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + const struct page *psPage) +{ + IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) }; + + PVRSRVStatsTransferMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES, + (IMG_UINT64) sCPUPhysAddr.uiAddr, + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); +} +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + #else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) @@ -1761,11 +2363,29 @@ static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) uiSize, uiPid); } -static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) +static inline void _DecrMemAllocStat_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + size_t uiSize, IMG_PID uiPid) +{ + IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; +#if defined(SUPPORT_PMR_DEFERRED_FREE) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_ZOMBIE)) + { + uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES; + } +#endif + + PVRSRVStatsDecrMemAllocStat(uiStat, uiSize, uiPid); +} + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +static inline void _ZombifyMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) { PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, uiSize, uiPid); + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES, + uiSize, uiPid); } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ #endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ #endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ @@ -1787,16 +2407,21 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) { PVRSRV_ERROR eError; IMG_UINT32 uiArrayIndex = 0; + /* Order of the allocation to request, can be upgraded for large allocations + * to aid efficient allocation. */ IMG_UINT32 ui32Order; - IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + /* Min order the requested allocation can be. 
We express this in terms of the device + * page size to the OS page size. The OS page size is the minimum page size we can request + * else we cannot map it. Requesters may request further size / contiguity constraints in form + * of the log2AllocPageSize */ + IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; IMG_BOOL bIncreaseMaxOrder = IMG_TRUE; IMG_UINT32 ui32NumPageReq; IMG_UINT32 uiOSPagesToAlloc; IMG_UINT32 uiDevPagesFromPool = 0; - gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? _ShouldInitMem(psPageArrayData->ui32AllocFlags) : IMG_FALSE, /* Zero all pages later as batch */ - psPageArrayData->psDevNode); + gfp_t gfp_flags = _GetGFPFlags(psPageArrayData); gfp_t ui32GfpFlags; gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY); @@ -1807,44 +2432,31 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) /* Try to get pages from the pool since it is faster; the page pool currently only supports zero-order pages - thus currently excludes all DMA/CMA allocated memory */ + thus currently excludes all DMA/CMA allocated memory. + _ShouldInitMem() must not be used for bZero argument since it only + applies to new pages allocated from the kernel. */ _GetPagesFromPoolLocked(psPageArrayData->psDevNode, psPageArrayData->ui32CPUCacheFlags, + psPageArrayData->ui32AllocFlags, uiOSPagesToAlloc, ui32MinOrder, - _ShouldInitMem(psPageArrayData->ui32AllocFlags), ppsPageArray, &uiDevPagesFromPool); uiArrayIndex = uiDevPagesFromPool; - if ((uiOSPagesToAlloc - uiDevPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD) + if ((uiOSPagesToAlloc - uiDevPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD && + !BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) { /* Small allocations: ask for one device page at a time */ ui32Order = ui32MinOrder; bIncreaseMaxOrder = IMG_FALSE; } else { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) /* Large zero-order or none zero-order allocations, ask for MAX(max-order, min-order) order pages at a time; alloc failures throttles this down to ZeroOrder allocations */ ui32Order = MAX(g_uiMaxOrder, ui32MinOrder); -#else - /* Because split_page() is not available on older kernels - we cannot mix-and-match any-order pages in the PMR; - only same-order pages must be present in page array. - So we unconditionally force it to use ui32MinOrder on - these older kernels */ - ui32Order = ui32MinOrder; -#if defined(DEBUG) - if (! BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) - { - /* Check that this is zero */ - PVR_ASSERT(! ui32Order); - } -#endif -#endif } /* Only if asking for more contiguity than we actually need, let it fail */ @@ -1857,32 +2469,24 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) while (ui32NumPageReq > ui32PageRemain) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) /* Pages to request is larger than that remaining so ask for less so never over allocate */ ui32Order = MAX(ui32Order >> 1, ui32MinOrder); -#else - /* Pages to request is larger than that remaining so - do nothing thus over allocate as we do not support - mix/match of any-order pages in PMR page-array in - older kernels (simplifies page free logic) */ - PVR_ASSERT(ui32Order == ui32MinOrder); -#endif ui32NumPageReq = (1 << ui32Order); ui32GfpFlags = (ui32Order > ui32MinOrder) ? 
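/*
 * Illustrative sketch (not part of the patch): the opportunistic high-order
 * allocation with graceful fallback used by _AllocOSPages_Fast() above.
 * Orders above the required minimum are requested with __GFP_NORETRY and
 * without direct reclaim so a failure is cheap, and the order is stepped
 * down towards the minimum instead of failing outright. The helper name is
 * hypothetical.
 */
#include <linux/gfp.h>

static struct page *demo_alloc_best_effort(gfp_t base_gfp,
                                           unsigned int max_order,
                                           unsigned int min_order,
                                           unsigned int *got_order)
{
	unsigned int order = max_order;

	for (;;) {
		gfp_t gfp = (order > min_order)
			? ((base_gfp & ~__GFP_RECLAIM) | __GFP_NORETRY)
			: base_gfp;
		struct page *page = alloc_pages(gfp, order);

		if (page) {
			*got_order = order;
			return page;
		}
		if (order == min_order)
			return NULL;	/* even the required order failed */
		order >>= 1;
		if (order < min_order)
			order = min_order;
	}
}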
ui32HighOrderGfpFlags : gfp_flags; } - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) { /* As the DMA/CMA framework rounds-up request to the next power-of-two, we request multiple uiMinOrder pages to satisfy allocation request in order to minimise wasting memory */ eError = _AllocOSPage_CMA(psPageArrayData, - ui32GfpFlags, + gfp_flags, ui32Order, ui32MinOrder, - uiArrayIndex >> ui32MinOrder); + uiArrayIndex); } else { @@ -1896,6 +2500,26 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) if (eError == PVRSRV_OK) { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) + { + /* We encode non Ghost entries for CMA so we can record in order chunks */ + _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[uiArrayIndex], ui32Order); + } + else + { + /* Higher order pages get split using split_page() meaning we can't create a higher + * order record in case the logic below reduces the order request part way through + */ + IMG_UINT32 i; + for (i = 0; i < (1 << ui32Order); i++) + { + _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[uiArrayIndex + i], 0); + } + } +#endif +#endif /* Successful request. Move onto next. */ uiArrayIndex += ui32NumPageReq; } @@ -1909,18 +2533,13 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) ui32NumPageReq = (1 << ui32Order); ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; g_uiMaxOrder = ui32Order; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) - /* We should not trigger this code path in older kernels, - this is enforced by ensuring ui32Order == ui32MinOrder */ - PVR_ASSERT(ui32Order == ui32MinOrder); -#endif } else { /* Failed to alloc pages at required contiguity. Failed allocation */ PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)", __func__, - BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA) ? "dma_alloc_coherent" : "alloc_pages", + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA) ? 
"dma_alloc_coherent" : "alloc_pages", uiArrayIndex, uiOSPagesToAlloc, ui32GfpFlags, @@ -1939,21 +2558,32 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) /* Construct table of page pointers to apply attributes */ ppsPageAttributeArray = &ppsPageArray[uiDevPagesFromPool]; - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) - { - IMG_UINT32 uiIdx, uiIdy, uiIdz; - ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiOSPagesToAlloc); - PVR_LOG_GOTO_IF_NOMEM(ppsPageAttributeArray, eError, e_free_pages); + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) + { + IMG_UINT32 i; + dma_addr_t *psBusAddrArray = &psPageArrayData->dmaphysarray[uiDevPagesFromPool]; - for (uiIdx = 0; uiIdx < uiOSPagesToAlloc; uiIdx += ui32NumPageReq) + /* Iterate over page array generating ghost CMA pages */ + for (i = 0; i < uiOSPagesToAlloc;) { - uiIdy = uiIdx >> ui32Order; - for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++) + IMG_UINT32 j; + IMG_UINT32 ui32NumPagesPerOrder = 1; + + /* Iterate and populate until we find the next real page */ + for (j = i + 1; + j < uiOSPagesToAlloc && + ppsPageAttributeArray[j] == NULL; + j++, ui32NumPagesPerOrder++) { - ppsPageAttributeArray[uiIdx+uiIdz] = ppsPageArray[uiIdy]; - ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz; + /* Generate ghost page */ + ppsPageAttributeArray[j] = ppsPageAttributeArray[i]; + ppsPageAttributeArray[j] += ui32NumPagesPerOrder; + /* Mark DMA addr as ghost */ + psBusAddrArray[j] = DMA_SET_CMA_GHOST(psBusAddrArray[i]); } + + i += ui32NumPagesPerOrder; } } @@ -2005,36 +2635,13 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); goto e_free_pages; } - else - { - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) - { - OSFreeMem(ppsPageAttributeArray); - } - } /* Update metadata */ psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages; #if defined(PVRSRV_ENABLE_PROCESS_STATS) { -#if defined(PVRSRV_ENABLE_MEMORY_STATS) - IMG_UINT32 ui32NumPages = - psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder; - IMG_UINT32 i; - - for (i = 0; i < ui32NumPages; i++) - { - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) - { - _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); - } - else - { - _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << ui32MinOrder]); - } - } -#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), psPageArrayData->uiPid); #endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ @@ -2048,25 +2655,23 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) { IMG_UINT32 ui32PageToFree; - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) { IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order; IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order; PVR_ASSERT(ui32Order == ui32MinOrder); - if (ppsPageAttributeArray) - { - OSFreeMem(ppsPageAttributeArray); - } - for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++) { - _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, - uiDevPageSize, - ui32MinOrder, - psPageArrayData->dmavirtarray[ui32PageToFree], - psPageArrayData->dmaphysarray[ui32PageToFree], - ppsPageArray[ui32PageToFree]); + if (!DMA_IS_CMA_GHOST(psPageArrayData->dmaphysarray[ui32PageToFree])) + { + 
_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + ui32MinOrder, + psPageArrayData->dmavirtarray[ui32PageToFree], + psPageArrayData->dmaphysarray[ui32PageToFree], + ppsPageArray[ui32PageToFree]); + } psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0; psPageArrayData->dmavirtarray[ui32PageToFree] = NULL; ppsPageArray[ui32PageToFree] = NULL; @@ -2077,15 +2682,18 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) /* Free the pages we got from the pool */ for (ui32PageToFree = 0; ui32PageToFree < uiDevPagesFromPool; ui32PageToFree++) { - _FreeOSPage(ui32MinOrder, + /* Pages added to pool are always order 0 */ + _FreeOSPage(0, BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), + psPageArrayData, ppsPageArray[ui32PageToFree]); ppsPageArray[ui32PageToFree] = NULL; } for (ui32PageToFree = uiDevPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++) { - _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]); + /* Higher order pages from _AllocOSPage are always split into order 0 using split_page */ + _FreeOSPage(0, IMG_FALSE, psPageArrayData, ppsPageArray[ui32PageToFree]); ppsPageArray[ui32PageToFree] = NULL; } } @@ -2108,13 +2716,16 @@ _CheckIfIndexInRange(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, IMG_UINT32 } static INLINE PVRSRV_ERROR -_CheckIfPageNotAllocated(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, struct page **ppsPageArray) +_CheckIfPageNotAllocated(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, IMG_UINT32 uiOrder, struct page **ppsPageArray) { - if (ppsPageArray[pui32Indices[ui32Index]] != NULL) + if (ppsPageArray[pui32Indices[ui32Index] << uiOrder] != NULL) { - PVR_DPF((PVR_DBG_ERROR, "%s: Mapping number %u at page array index %u already exists. " - "Page struct %p", __func__, pui32Indices[ui32Index], ui32Index, - ppsPageArray[pui32Indices[ui32Index]])); + PVR_DPF((PVR_DBG_ERROR, "%s: Mapping Idx Dev:%u|OS:%u at page array index %u already exists. " + "Page struct %p", __func__, + pui32Indices[ui32Index], + pui32Indices[ui32Index] << uiOrder, + ui32Index, + ppsPageArray[pui32Indices[ui32Index] << uiOrder])); return PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; } @@ -2127,19 +2738,18 @@ _CheckIfPageNotAllocated(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, struct * virtual range. */ static PVRSRV_ERROR _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, - IMG_UINT32 *puiAllocIndices, + IMG_UINT32 *pauiDevPageAllocIndices, IMG_UINT32 uiDevPagesToAlloc) { PVRSRV_ERROR eError; IMG_UINT32 i; struct page **ppsPageArray = psPageArrayData->pagearray; - IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; IMG_UINT32 uiDevPagesFromPool = 0; IMG_UINT32 uiOSPagesToAlloc = uiDevPagesToAlloc * (1 << uiOrder); IMG_UINT32 uiDevPagesAllocated = psPageArrayData->uiTotalNumOSPages >> uiOrder; const IMG_UINT32 ui32AllocFlags = psPageArrayData->ui32AllocFlags; - gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? 
_ShouldInitMem(ui32AllocFlags) : IMG_FALSE, /* Zero pages later as batch */ - psPageArrayData->psDevNode); + gfp_t ui32GfpFlags = _GetGFPFlags(psPageArrayData); /* We use this page array to receive pages from the pool and then reuse it afterwards to * store pages that need their cache attribute changed on x86 */ @@ -2172,12 +2782,15 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, * allocated only if: * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 1 && uiOrder == 0 * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 0 && uiOrder == 0 && - * !(BIT_ISSET(ui32AllocFlags, FLAG_ZERO) && g_bInitialisedOnAlloc) */ + * !(BIT_ISSET(ui32AllocFlags, FLAG_ZERO)) + * - !BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA) + * _ShouldInitMem() must not be used for bZero argument since it only + * applies to new pages allocated from the kernel. */ _GetPagesFromPoolLocked(psPageArrayData->psDevNode, psPageArrayData->ui32CPUCacheFlags, + psPageArrayData->ui32AllocFlags, uiDevPagesToAlloc, uiOrder, - _ShouldInitMem(ui32AllocFlags), ppsTempPageArray, &uiDevPagesFromPool); @@ -2189,25 +2802,36 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, /* Move pages we got from the pool to the array. */ for (i = 0; i < uiDevPagesFromPool; i++) { - eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated); + eError = _CheckIfIndexInRange(i, pauiDevPageAllocIndices, uiDevPagesAllocated); PVR_GOTO_IF_ERROR(eError, e_free_pool_pages); - eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray); + eError = _CheckIfPageNotAllocated(i, pauiDevPageAllocIndices, uiOrder, ppsPageArray); PVR_GOTO_IF_ERROR(eError, e_free_pool_pages); - ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[i]; + /* In this case the allocation order is the same for device and OS page size, we can use + * DevPageAllocIndices here without having to scale up from device to OS page indices. + */ + PVR_ASSERT(uiOrder == 0); + ppsPageArray[pauiDevPageAllocIndices[i]] = ppsTempPageArray[i]; } /* Allocate pages from the OS */ for (i = uiDevPagesFromPool; i < uiDevPagesToAlloc; i++) { - eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated); + IMG_UINT32 uiOSPageAllocIndex = pauiDevPageAllocIndices[i] << uiOrder; + /* We can perform range checks in terms of device pages, then we scale the indices + * to OS page size for the storage array. + */ + eError = _CheckIfIndexInRange(i, pauiDevPageAllocIndices, uiDevPagesAllocated); PVR_GOTO_IF_ERROR(eError, e_free_pages); - eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray); + eError = _CheckIfPageNotAllocated(i, pauiDevPageAllocIndices, uiOrder, ppsPageArray); PVR_GOTO_IF_ERROR(eError, e_free_pages); /* Allocated pages and assign them the array. 
*/ - if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA)) { + IMG_UINT32 uiSubPageInOrder; + dma_addr_t *psBusAddrArray = &psPageArrayData->dmaphysarray[uiDevPagesFromPool]; + /* As the DMA/CMA framework rounds-up request to the next power-of-two, we request multiple uiMinOrder pages to satisfy allocation request in order to @@ -2216,31 +2840,49 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, ui32GfpFlags, uiOrder, uiOrder, - puiAllocIndices[i]); + uiOSPageAllocIndex); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages")); goto e_free_pages; } + + for (uiSubPageInOrder = 1; + uiSubPageInOrder < (1 << uiOrder); + uiSubPageInOrder++) + { + /* Generate ghost page */ + ppsPageArray[uiOSPageAllocIndex + uiSubPageInOrder] = ppsPageArray[uiOSPageAllocIndex]; + ppsPageArray[uiOSPageAllocIndex + uiSubPageInOrder] += uiSubPageInOrder; + /* Mark DMA addr as ghost */ + psBusAddrArray[uiOSPageAllocIndex + uiSubPageInOrder] = DMA_SET_CMA_GHOST(psBusAddrArray[uiOSPageAllocIndex]); + } } else { - DisableOOMKiller(); - ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder); - EnableOOMKiller(); + eError = _AllocOSPage(psPageArrayData, + ui32GfpFlags, + uiOrder, + uiOrder, + uiOSPageAllocIndex); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages")); + goto e_free_pages; + } } - if (ppsPageArray[puiAllocIndices[i]] != NULL) + if (ppsPageArray[uiOSPageAllocIndex] != NULL) { /* Append pages to the temporary array so it's easier to process * them later on. */ - if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA)) { IMG_UINT32 idx; struct page *psPageAddr; - psPageAddr = ppsPageArray[puiAllocIndices[i]]; + psPageAddr = ppsPageArray[uiOSPageAllocIndex]; /* "divide" CMA pages into OS pages if they have higher order */ for (idx = 0; idx < (1 << uiOrder); idx++) @@ -2252,7 +2894,7 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, } else { - ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]]; + ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[uiOSPageAllocIndex]; uiTempPageArrayIndex++; } } @@ -2324,7 +2966,8 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, for (i = 0; i < uiDevPagesToAlloc; i++) { _AddMemAllocRecord_UmaPages(psPageArrayData, - ppsPageArray[puiAllocIndices[i]]); + ppsPageArray[pauiDevPageAllocIndices[i] << uiOrder], + uiOrder); } #else _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), @@ -2335,31 +2978,43 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, return PVRSRV_OK; e_free_pages: - if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(ui32AllocFlags, FLAG_DMA_CMA)) { IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; /* Free the pages we just allocated from the CMA */ for (; i > uiDevPagesFromPool; i--) { + IMG_UINT32 idx = (i - 1) << uiOrder; + IMG_UINT32 uiSubPageInOrder; _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, uiDevPageSize, uiOrder, - psPageArrayData->dmavirtarray[puiAllocIndices[i-1]], - psPageArrayData->dmaphysarray[puiAllocIndices[i-1]], - ppsPageArray[puiAllocIndices[i-1]]); - psPageArrayData->dmaphysarray[puiAllocIndices[i-1]]= (dma_addr_t) 0; - psPageArrayData->dmavirtarray[puiAllocIndices[i-1]] = NULL; - ppsPageArray[puiAllocIndices[i-1]] = NULL; + psPageArrayData->dmavirtarray[pauiDevPageAllocIndices[idx]], + 
psPageArrayData->dmaphysarray[pauiDevPageAllocIndices[idx]], + ppsPageArray[pauiDevPageAllocIndices[idx]]); + psPageArrayData->dmaphysarray[pauiDevPageAllocIndices[idx]]= (dma_addr_t) 0; + psPageArrayData->dmavirtarray[pauiDevPageAllocIndices[idx]] = NULL; + ppsPageArray[pauiDevPageAllocIndices[idx]] = NULL; + + for (uiSubPageInOrder = 1; + uiSubPageInOrder < (1 << uiOrder); + uiSubPageInOrder++) + { + ppsPageArray[idx + uiSubPageInOrder] = NULL; + psPageArrayData->dmaphysarray[idx + uiSubPageInOrder] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[idx + uiSubPageInOrder] = NULL; + } } + } else { /* Free the pages we just allocated from the OS */ for (; i > uiDevPagesFromPool; i--) { - _FreeOSPage(0, IMG_FALSE, ppsPageArray[puiAllocIndices[i-1]]); - ppsPageArray[puiAllocIndices[i-1]] = NULL; + _FreeOSPage(0, IMG_FALSE, psPageArrayData, ppsPageArray[pauiDevPageAllocIndices[i-1]]); + ppsPageArray[pauiDevPageAllocIndices[i-1]] = NULL; } } @@ -2368,12 +3023,13 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, for (i = 0; i < uiDevPagesFromPool; i++) { _FreeOSPage(0, BIT_ISSET(ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), + psPageArrayData, ppsTempPageArray[i]); /* not using _CheckIfIndexInRange() to not print error message */ - if (puiAllocIndices[i] < uiDevPagesAllocated) + if (pauiDevPageAllocIndices[i] < uiDevPagesAllocated) { - ppsPageArray[puiAllocIndices[i]] = NULL; + ppsPageArray[pauiDevPageAllocIndices[i]] = NULL; } } @@ -2398,7 +3054,7 @@ _AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, /* Parameter checks */ PVR_ASSERT(NULL != psPageArrayData); - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) { PVR_ASSERT(psPageArrayData->dmaphysarray != NULL); PVR_ASSERT(psPageArrayData->dmavirtarray != NULL); @@ -2427,7 +3083,7 @@ _AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, _DumpPageArray(ppsPageArray, psPageArrayData->uiTotalNumOSPages >> - (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); + (psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT) ); PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData)); return PVRSRV_OK; @@ -2483,6 +3139,15 @@ _FreeOSPage_CMA(struct device *dev, virt_addr -= align_adjust << PAGE_SHIFT; } +#if defined(DEBUG) + /* We should never attempt to free a CMA Ghost as it is handled in layer above */ + if (DMA_IS_CMA_GHOST(dev_addr)) + { + PVR_DPF((PVR_DBG_ERROR, "Attempt to free CMA Ghost entry!")); + return; + } +#endif + dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr)); } } @@ -2497,6 +3162,7 @@ _FreeOSPage_CMA(struct device *dev, static void _FreeOSPage(IMG_UINT32 uiOrder, IMG_BOOL bUnsetMemoryType, + PMR_OSPAGEARRAY_DATA *psPageArrayData, struct page *psPage) { @@ -2518,15 +3184,38 @@ _FreeOSPage(IMG_UINT32 uiOrder, #else PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType); #endif + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /* May call _FreeOSPage from clean pages for pool path, it has no + * association with PMR_OSPAGEARRAY_DATA at this point + */ + if (psPageArrayData) + { + OSMemUnsetMovablePageAttr(psPage, psPageArrayData); + } +#else + PVR_UNREFERENCED_PARAMETER(psPageArrayData); +#endif + __free_pages(psPage, uiOrder); } /* Free the struct holding the metadata */ -static PVRSRV_ERROR +static void _FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData) { PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData)); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + 
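/*
 * Illustrative sketch (editor's addition, not part of the diff): both
 * _AllocOSPages_Fast and _AllocOSPages_Sparse above now keep one page-array
 * entry per OS page. A device page index is scaled by
 * uiOrder = uiLog2DevPageSize - PAGE_SHIFT, the head slot gets the real page,
 * and the remaining (1 << uiOrder) - 1 slots get consecutive "ghost" page
 * pointers. The helper below shows only that bookkeeping; the types and
 * names are illustrative stand-ins, not DDK API.
 */
#include <stdint.h>

struct sketch_page { int dummy; };	/* stands in for struct page */

void sketch_fill_dev_page(struct sketch_page **os_array,
                          uint32_t dev_index,
                          uint32_t order,
                          struct sketch_page *head_page)
{
	uint32_t os_index = dev_index << order;	/* first OS-page slot of the span */
	uint32_t sub;

	/* Head entry: the page that actually backs the whole device page */
	os_array[os_index] = head_page;

	for (sub = 1; sub < (1u << order); sub++)
	{
		/* Ghost entries: consecutive struct page pointers into the same
		 * higher-order allocation; in the driver their dma_addr_t twins are
		 * additionally tagged with DMA_SET_CMA_GHOST(). */
		os_array[os_index + sub] = head_page + sub;
	}
}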
if (!dllist_is_empty(&psPageArrayData->sCpuMappingListHead)) + { + PVR_DPF((PVR_DBG_ERROR, "Attempt to free linux cpu mapping data while still in use.")); + OSWarnOn(1); + } +#endif + + psPageArrayData->hPMR = NULL; + /* Check if the page array actually still exists. * It might be the case that has been moved to the page pool */ if (psPageArrayData->pagearray != NULL) @@ -2534,211 +3223,207 @@ _FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData) OSFreeMemNoStats(psPageArrayData->pagearray); } - kmem_cache_free(g_psLinuxPageArray, psPageArrayData); + /* Check if we need to free additional DMA/CMA cpu kernel virtual address & device bus address arrays */ + if (psPageArrayData->dmaphysarray != NULL) + { + OSFreeMemNoStats(psPageArrayData->dmaphysarray); + } + if (psPageArrayData->dmavirtarray != NULL) + { + OSFreeMemNoStats(psPageArrayData->dmavirtarray); + } - return PVRSRV_OK; + kmem_cache_free(g_psLinuxPageArray, psPageArrayData); } /* Free all or some pages from a sparse page array */ static PVRSRV_ERROR _FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, - IMG_UINT32 *pai32FreeIndices, - IMG_UINT32 ui32FreePageCount) + IMG_UINT32 *paui32DevPageFreeIndices, + IMG_UINT32 uiDevPagesToFree) { - IMG_BOOL bSuccess; - IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; - IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; struct page **ppsPageArray = psPageArrayData->pagearray; - IMG_UINT32 uiNumPages; - - struct page **ppsTempPageArray; - IMG_UINT32 uiTempArraySize; + IMG_UINT32 uiNumDevPages; + struct page **ppsOSPagesToFree; + IMG_UINT32 *puiDevIndicesToFree; + IMG_UINT32 uiOSPageCount; + IMG_UINT32 uiTransferedDevPageCount; + IMG_UINT32 i; /* We really should have something to free before we call this */ PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); - if (pai32FreeIndices == NULL) + if (paui32DevPageFreeIndices == NULL) { - uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder; - uiTempArraySize = psPageArrayData->iNumOSPagesAllocated; + uiNumDevPages = psPageArrayData->uiTotalNumOSPages >> uiOrder; + uiOSPageCount = psPageArrayData->iNumOSPagesAllocated; } else { - uiNumPages = ui32FreePageCount; - uiTempArraySize = ui32FreePageCount << uiOrder; + uiNumDevPages = uiDevPagesToFree; + uiOSPageCount = uiNumDevPages << uiOrder; } -#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) - for (i = 0; i < uiNumPages; i++) + /* Construct the array of pages before any free operation so error recovery is possible. + * OSAllocMemNoStats required because this code may be run without the bridge lock held */ + ppsOSPagesToFree = OSAllocMemNoStats(uiOSPageCount * sizeof(*ppsOSPagesToFree)); + PVR_LOG_GOTO_IF_NOMEM(ppsOSPagesToFree, eError, err_array); + puiDevIndicesToFree = OSAllocMemNoStats(uiNumDevPages * sizeof(*puiDevIndicesToFree)); + PVR_LOG_GOTO_IF_NOMEM(puiDevIndicesToFree, eError, err_indices); + + /* Count how many allocated dev pages have been found. In case we need to unwind */ + uiTransferedDevPageCount = 0; + for (i = 0; i < uiNumDevPages; i++) { - IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; + IMG_UINT32 uiSubPageInOrder; + IMG_UINT32 idx = paui32DevPageFreeIndices ? 
paui32DevPageFreeIndices[i] : i; + /* Scale Device page index to OS page Idx */ + idx <<= uiOrder; + + if (ppsPageArray[idx] != NULL) + { + IMG_UINT32 uiOSFreeArrayIdx = uiTransferedDevPageCount << uiOrder; + puiDevIndicesToFree[uiTransferedDevPageCount] = idx; - if (NULL != ppsPageArray[idx]) + for (uiSubPageInOrder = 0; uiSubPageInOrder < (1 << uiOrder); uiSubPageInOrder++) + { + ppsOSPagesToFree[uiOSFreeArrayIdx + uiSubPageInOrder] = ppsPageArray[idx + uiSubPageInOrder]; + ppsPageArray[idx + uiSubPageInOrder] = NULL; + } + uiTransferedDevPageCount++; + } + else if (paui32DevPageFreeIndices != NULL) { - _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]); + IMG_UINT32 j; + PVR_DPF((PVR_DBG_ERROR, "Invalid free index (%u), possible duplicate", idx)); + /* Restore the ppsPageArray to its initial state. + * Reverse the loop and swap the ppsTempPages back into ppsPageArray. */ + for (j = 0; j < uiTransferedDevPageCount; j++) + { + IMG_UINT32 uiSubPageInOrder; + + idx = puiDevIndicesToFree[j]; + ppsPageArray[idx] = ppsOSPagesToFree[j]; + + for (uiSubPageInOrder = 0; uiSubPageInOrder < (1 << uiOrder); uiSubPageInOrder++) + { + ppsPageArray[idx + uiSubPageInOrder] = ppsOSPagesToFree[j + uiSubPageInOrder]; + } + } + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK, err_indices); } } -#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < uiTransferedDevPageCount; i++) + { + /* Scale Device page index to OS page Idx */ + IMG_UINT32 idx = i << uiOrder; + + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsOSPagesToFree[idx]); + } +#endif /* PVRSRV_ENABLE_PROCESS_STATS && PVRSRV_ENABLE_MEMORY_STATS */ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE)) { - for (i = 0; i < uiNumPages; i++) + for (i = 0; i < uiTransferedDevPageCount; i++) { - IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; + /* Scale Device page index to OS page Idx */ + IMG_UINT32 idx = i << uiOrder; - if (NULL != ppsPageArray[idx]) - { - _PoisonDevicePage(psPageArrayData->psDevNode, - ppsPageArray[idx], - uiOrder, - psPageArrayData->ui32CPUCacheFlags, - PVRSRV_POISON_ON_FREE_VALUE); - } - else if (pai32FreeIndices != NULL) - { - /* Attempt to poison an index not containing a valid page */ - return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; - } + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsOSPagesToFree[idx], + uiOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_FREE_VALUE); } } - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) { - IMG_UINT32 uiDevNumPages = uiNumPages; - IMG_UINT32 uiDevPageSize = 1<uiLog2AllocPageSize; - - for (i = 0; i < uiDevNumPages; i++) + IMG_UINT64 ui64DevPageSize = IMG_PAGE2BYTES64(psPageArrayData->uiLog2DevPageSize); + for (i = 0; i < uiTransferedDevPageCount; i++) { - IMG_UINT32 idx = pai32FreeIndices ? 
pai32FreeIndices[i] : i; - if (NULL != ppsPageArray[idx]) - { - _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, - uiDevPageSize, - uiOrder, - psPageArrayData->dmavirtarray[idx], - psPageArrayData->dmaphysarray[idx], - ppsPageArray[idx]); - psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0; - psPageArrayData->dmavirtarray[idx] = NULL; - ppsPageArray[idx] = NULL; - uiTempIdx++; - } - else if (pai32FreeIndices != NULL) + IMG_UINT32 uiSubPageInOrder; + /* Scale Device page index to OS page Idx */ + IMG_UINT32 uiFreeIdx = i << uiOrder; + /* Get the page's original index in ppsPageArray, already scaled to dev pages */ + IMG_UINT32 idx = puiDevIndicesToFree[i]; + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + ui64DevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[idx], + psPageArrayData->dmaphysarray[idx], + ppsOSPagesToFree[uiFreeIdx]); + for (uiSubPageInOrder = 0; + uiSubPageInOrder < (1 << uiOrder); + uiSubPageInOrder++) { -#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) - /* Attempt to keep memstats consistent in event of fail as we have - * freed some pages - */ - uiTempIdx <<= uiOrder; - _DecrMemAllocStat_UmaPages(uiTempIdx * PAGE_SIZE, - psPageArrayData->uiPid); -#endif - /* Attempt to free an already free index, could be duplicated free indices */ - return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; + psPageArrayData->dmaphysarray[idx + uiSubPageInOrder] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[idx + uiSubPageInOrder] = NULL; } } - uiTempIdx <<= uiOrder; } else { + IMG_BOOL bPagesInPool; - /* OSAllocMemNoStats required because this code may be run without the bridge lock held */ - ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize); - if (ppsTempPageArray == NULL) + /* Try to move the temp page array to the pool */ + bPagesInPool = _PutPagesToPoolLocked(psPageArrayData->psDevNode, + psPageArrayData->ui32CPUCacheFlags, + psPageArrayData->ui32AllocFlags, + ppsOSPagesToFree, + 0, + uiOSPageCount); + if (bPagesInPool) { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__)); - return PVRSRV_ERROR_OUT_OF_MEMORY; + /* Do NOT free ppsPagesToFree if it moved to the pool */ + ppsOSPagesToFree = NULL; } - - /* Put pages in a contiguous array so further processing is easier */ - for (i = 0; i < uiNumPages; i++) + else { - uiPageIndex = pai32FreeIndices ? 
pai32FreeIndices[i] : i; - if (NULL != ppsPageArray[uiPageIndex]) + /* Free pages and reset page caching attributes on x86 */ +#if defined(CONFIG_X86) + if (uiOSPageCount != 0 && BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) { - struct page *psPage = ppsPageArray[uiPageIndex]; + int iError; + iError = set_pages_array_wb(ppsOSPagesToFree, uiOSPageCount); - for (j = 0; j < (1<uiPid); #endif - OSFreeMemNoStats(ppsTempPageArray); - return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; - } - } - - /* Try to move the temp page array to the pool */ - bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags, - ppsTempPageArray, - 0, - uiTempIdx); - if (bSuccess) - { - goto exit_ok; - } - - /* Free pages and reset page caching attributes on x86 */ -#if defined(CONFIG_X86) - if (uiTempIdx != 0 && BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) - { - int iError; - iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx); - - if (iError) + /* Free the pages */ + for (i = 0; i < uiOSPageCount; i++) { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__)); + _FreeOSPage(0, IMG_FALSE, psPageArrayData, ppsOSPagesToFree[i]); } } -#endif - - /* Free the pages */ - for (i = 0; i < uiTempIdx; i++) - { - __free_pages(ppsTempPageArray[i], 0); - } - - /* Free the temp page array here if it did not move to the pool */ - OSFreeMemNoStats(ppsTempPageArray); } -exit_ok: - #if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) - _DecrMemAllocStat_UmaPages(((uiTempIdx * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)), + _DecrMemAllocStat_UmaPages(psPageArrayData, + (uiOSPageCount * PAGE_SIZE) - psPageArrayData->ui32CMAAdjustedPageCount, psPageArrayData->uiPid); #endif - if (pai32FreeIndices && ((uiTempIdx >> uiOrder) != ui32FreePageCount)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Probable sparse duplicate indices: ReqFreeCount: %d " - "ActualFreedCount: %d", __func__, ui32FreePageCount, (uiTempIdx >> uiOrder))); - } /* Update metadata */ - psPageArrayData->iNumOSPagesAllocated -= uiTempIdx; + psPageArrayData->iNumOSPagesAllocated -= uiOSPageCount; PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); - return PVRSRV_OK; + +err_indices: + OSFreeMemNoStats(puiDevIndicesToFree); +err_array: + if (ppsOSPagesToFree) + { + OSFreeMemNoStats(ppsOSPagesToFree); + } + return eError; } /* Free all the pages in a page array */ @@ -2747,9 +3432,9 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) { IMG_BOOL bSuccess; IMG_UINT32 i; - IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages; - IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; - IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder; + IMG_UINT32 uiOSNumPages = psPageArrayData->uiTotalNumOSPages; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; + IMG_UINT32 uiDevNumPages = uiOSNumPages >> uiOrder; IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; struct page **ppsPageArray = psPageArrayData->pagearray; @@ -2758,18 +3443,22 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) #if defined(PVRSRV_ENABLE_PROCESS_STATS) #if defined(PVRSRV_ENABLE_MEMORY_STATS) - for (i = 0; i < uiDevNumPages; i++) + for (i = 0; i < uiOSNumPages; i++) { - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) { - _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); - }else + if (!DMA_IS_CMA_GHOST(psPageArrayData->dmaphysarray[i])) + { + 
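/*
 * Illustrative sketch (editor's addition, not part of the diff): the reworked
 * _FreeOSPages_Sparse above first validates every index and moves the pages
 * into scratch arrays, and only then poisons/frees them, so an invalid or
 * duplicate index can be rolled back before anything has been released.
 * The helper below shows that gather-then-commit pattern in isolation with
 * illustrative names.
 */
#include <stddef.h>

struct sketch_page { int dummy; };	/* stands in for struct page */

int sketch_gather_pages(struct sketch_page **array, size_t total_slots,
                        const unsigned int *indices, size_t count,
                        struct sketch_page **scratch)
{
	size_t moved;

	for (moved = 0; moved < count; moved++)
	{
		unsigned int idx = indices[moved];

		if (idx >= total_slots || array[idx] == NULL)
		{
			/* Invalid or duplicate index: restore everything gathered so far */
			size_t j;

			for (j = 0; j < moved; j++)
				array[indices[j]] = scratch[j];
			return -1;
		}

		scratch[moved] = array[idx];	/* take ownership into the scratch list */
		array[idx] = NULL;
	}

	return 0;	/* caller may now free scratch[0..count-1] without needing to unwind */
}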
_RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } + } + else { - _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << uiOrder]); + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); } } #else - _DecrMemAllocStat_UmaPages(((uiNumPages * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)), + _DecrMemAllocStat_UmaPages(psPageArrayData, uiOSNumPages * PAGE_SIZE - psPageArrayData->ui32CMAAdjustedPageCount, psPageArrayData->uiPid); #endif #endif @@ -2778,8 +3467,11 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) { for (i = 0; i < uiDevNumPages; i++) { + /* Scale Device page index to OS page idx */ + IMG_UINT32 idx = i << uiOrder; + _PoisonDevicePage(psPageArrayData->psDevNode, - ppsPageArray[i], + ppsPageArray[idx], uiOrder, psPageArrayData->ui32CPUCacheFlags, PVRSRV_POISON_ON_FREE_VALUE); @@ -2787,59 +3479,234 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) } /* Try to move the page array to the pool */ - bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags, - ppsPageArray, - uiOrder, - uiNumPages); + bSuccess = _PutPagesToPoolLocked(psPageArrayData->psDevNode, + psPageArrayData->ui32CPUCacheFlags, + psPageArrayData->ui32AllocFlags, + ppsPageArray, + uiOrder, + uiOSNumPages); if (bSuccess) { psPageArrayData->pagearray = NULL; goto exit_ok; } - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) + { + for (i = 0; i < uiOSNumPages; i++) + { + if (!DMA_IS_CMA_GHOST(psPageArrayData->dmaphysarray[i])) + { + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[i], + psPageArrayData->dmaphysarray[i], + ppsPageArray[i]); + + } + psPageArrayData->dmaphysarray[i] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[i] = NULL; + ppsPageArray[i] = NULL; + } + } + else + { +#if defined(CONFIG_X86) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) + { + int ret; + + ret = set_pages_array_wb(ppsPageArray, uiOSNumPages); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", + __func__)); + } + } +#endif + + for (i = 0; i < uiOSNumPages; i++) + { + _FreeOSPage(uiOrder, IMG_FALSE, psPageArrayData, ppsPageArray[i]); + ppsPageArray[i] = NULL; + } + } + +exit_ok: + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated = 0; + return PVRSRV_OK; +} + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +static PVRSRV_ERROR PMRFreeZombiePagesOSMem(PMR_IMPL_ZOMBIEPAGES pvPriv) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psZombiePageArray = pvPriv; + + eError = _FreeOSPages(psZombiePageArray, + NULL, + 0 /* Unused */); + + if (eError != PVRSRV_OK) + { + goto e0; + } + + _FreeOSPagesArray(psZombiePageArray); + + return PVRSRV_OK; +e0: + return eError; +} + +/* Extracts ui32ExtractPageCount of pages referenced in pai32ExtractIndices from the psSrcPageArrayData + * Allocates a new PMR_OSPAGEARRAY_DATA object and fills it with the extracted pages information. 
+ */ +static PVRSRV_ERROR +_ExtractPages(PMR_OSPAGEARRAY_DATA *psSrcPageArrayData, + IMG_UINT32 *pai32ExtractIndices, + IMG_UINT32 ui32ExtractPageCount, + PMR_OSPAGEARRAY_DATA** psOutPageArrayData) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i, uiSubPageInOrder, uiOrder; + PMR_OSPAGEARRAY_DATA* psDstPageArrayData; + + /* Alloc PMR_OSPAGEARRAY_DATA for the extracted pages */ + eError = _AllocOSPageArray(psSrcPageArrayData->psDevNode, + (IMG_UINT64)ui32ExtractPageCount << psSrcPageArrayData->uiLog2DevPageSize, + psSrcPageArrayData->uiLog2DevPageSize, + psSrcPageArrayData->ui32AllocFlags, + psSrcPageArrayData->ui32CPUCacheFlags, + psSrcPageArrayData->uiPid, + &psDstPageArrayData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_AllocOSPageArray failed in _ExtractPages")); + return eError; + } + + uiOrder = psSrcPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; + + /* Transfer from src pagearray to dst pagearray */ + for (i = 0; i < ui32ExtractPageCount; i++) + { + IMG_UINT32 idxSrc = pai32ExtractIndices[i] << uiOrder; + IMG_UINT32 idxExtracted = i << uiOrder; + + if (psSrcPageArrayData->pagearray[idxSrc] != NULL) + { + for (uiSubPageInOrder = 0; uiSubPageInOrder < (1 << uiOrder); uiSubPageInOrder++) + { + psDstPageArrayData->pagearray[idxExtracted + uiSubPageInOrder] = + psSrcPageArrayData->pagearray[idxSrc + uiSubPageInOrder]; + + psSrcPageArrayData->pagearray[idxSrc + uiSubPageInOrder] = NULL; + } + } + } + + /* Do the same for dmaphysarray and dmavirtarray if allocated with CMA */ + if (BIT_ISSET(psSrcPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) + { + for (i = 0; i < ui32ExtractPageCount; i++) + { + IMG_UINT32 idxSrc = pai32ExtractIndices[i] << uiOrder; + IMG_UINT32 idxDst = i << uiOrder; + + if (psSrcPageArrayData->dmaphysarray[idxSrc] != (dma_addr_t)0 || + psSrcPageArrayData->dmavirtarray[idxSrc] != NULL) + { + for (uiSubPageInOrder = 0; uiSubPageInOrder < (1 << uiOrder); uiSubPageInOrder++) + { + psDstPageArrayData->dmaphysarray[idxDst + uiSubPageInOrder] = + psSrcPageArrayData->dmaphysarray[idxSrc + uiSubPageInOrder]; + psDstPageArrayData->dmavirtarray[idxDst + uiSubPageInOrder] = + psSrcPageArrayData->dmavirtarray[idxSrc + uiSubPageInOrder]; + + psSrcPageArrayData->dmaphysarray[idxSrc + uiSubPageInOrder] = (dma_addr_t)0; + psSrcPageArrayData->dmavirtarray[idxSrc + uiSubPageInOrder] = NULL; + } + } + } + } + + /* Update page counts */ + psSrcPageArrayData->iNumOSPagesAllocated -= ui32ExtractPageCount << uiOrder; + psDstPageArrayData->iNumOSPagesAllocated += ui32ExtractPageCount << uiOrder; + + *psOutPageArrayData = psDstPageArrayData; + return PVRSRV_OK; +} + +/* Extracts all allocated pages referenced psSrcPageArrayData + * Allocates a new PMR_OSPAGEARRAY_DATA object and fills it with the extracted + * pages information. 
+ */ +static PVRSRV_ERROR +_ExtractAllPages(PMR_OSPAGEARRAY_DATA *psSrcPageArrayData, + PMR_OSPAGEARRAY_DATA **psOutPageArrayData) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + PMR_OSPAGEARRAY_DATA* psDstPageArrayData; + IMG_UINT32 uiPagesCopied = 0; + + /* Alloc PMR_OSPAGEARRAY_DATA for the extracted pages */ + eError = _AllocOSPageArray(psSrcPageArrayData->psDevNode, + (IMG_UINT64)psSrcPageArrayData->iNumOSPagesAllocated << psSrcPageArrayData->uiLog2DevPageSize, + psSrcPageArrayData->uiLog2DevPageSize, + psSrcPageArrayData->ui32AllocFlags, + psSrcPageArrayData->ui32CPUCacheFlags, + psSrcPageArrayData->uiPid, + &psDstPageArrayData); + if (eError != PVRSRV_OK) { - for (i = 0; i < uiDevNumPages; i++) - { - _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, - uiDevPageSize, - uiOrder, - psPageArrayData->dmavirtarray[i], - psPageArrayData->dmaphysarray[i], - ppsPageArray[i]); - psPageArrayData->dmaphysarray[i] = (dma_addr_t)0; - psPageArrayData->dmavirtarray[i] = NULL; - ppsPageArray[i] = NULL; - } + PVR_DPF((PVR_DBG_ERROR, "_AllocOSPageArray failed in _ExtractPages")); + return eError; } - else + + /* Transfer from src pagearray to dst pagearray */ + /* Iterate through all pages in psSrcPageArrayData but stop once + * we have copied psSrcPageArrayData->iNumOSPagesAllocated pages to + * psDstPageArrayData. + */ + + for (i = 0; ((i < psSrcPageArrayData->uiTotalNumOSPages) && + (uiPagesCopied < psSrcPageArrayData->iNumOSPagesAllocated)); i++) { -#if defined(CONFIG_X86) - if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) + if (psSrcPageArrayData->pagearray[i] != NULL) { - int ret; + psDstPageArrayData->pagearray[uiPagesCopied] = + psSrcPageArrayData->pagearray[i]; - ret = set_pages_array_wb(ppsPageArray, uiNumPages); - if (ret) + psSrcPageArrayData->pagearray[i] = NULL; + + if (BIT_ISSET(psSrcPageArrayData->ui32AllocFlags, FLAG_DMA_CMA) && + (psSrcPageArrayData->dmaphysarray[i] != (dma_addr_t)0 || + psSrcPageArrayData->dmavirtarray[i] != NULL)) { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", - __func__)); - } - } -#endif + psDstPageArrayData->dmaphysarray[uiPagesCopied] = + psSrcPageArrayData->dmaphysarray[i]; + psDstPageArrayData->dmavirtarray[uiPagesCopied] = + psSrcPageArrayData->dmavirtarray[i]; - for (i = 0; i < uiNumPages; i++) - { - _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]); - ppsPageArray[i] = NULL; + psSrcPageArrayData->dmaphysarray[i] = (dma_addr_t)0; + psSrcPageArrayData->dmavirtarray[i] = NULL; + } + uiPagesCopied++; } } + /* Update page counts */ + psDstPageArrayData->iNumOSPagesAllocated = psSrcPageArrayData->iNumOSPagesAllocated; + psSrcPageArrayData->iNumOSPagesAllocated = 0; -exit_ok: - /* Update metadata */ - psPageArrayData->iNumOSPagesAllocated = 0; + *psOutPageArrayData = psDstPageArrayData; return PVRSRV_OK; } +#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */ /* Free pages from a page array. * Takes care of mem stats and chooses correct free path depending on parameters. */ @@ -2870,7 +3737,7 @@ _FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, _DumpPageArray(psPageArrayData->pagearray, psPageArrayData->uiTotalNumOSPages >> - (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); + (psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT) ); return eError; } @@ -2884,7 +3751,7 @@ _FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, /* Destruction function is called after last reference disappears, * but before PMR itself is freed. 
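/*
 * Illustrative sketch (editor's addition, not part of the diff):
 * _ExtractPages()/_ExtractAllPages() above support deferred ("zombie")
 * freeing by moving ownership of the allocated entries out of the live page
 * array into a freshly allocated container, so the PMR can drop them
 * immediately while PMRFreeZombiePagesOSMem() performs the real free later.
 * The helper below shows only that hand-over; names and types are
 * illustrative stand-ins.
 */
#include <stddef.h>

struct sketch_page { int dummy; };	/* stands in for struct page */

size_t sketch_extract_all(struct sketch_page **src, size_t src_len,
                          struct sketch_page **dst, size_t dst_len)
{
	size_t i, copied = 0;

	for (i = 0; i < src_len && copied < dst_len; i++)
	{
		if (src[i] != NULL)
		{
			/* Transfer ownership: the destination gains the page, the source
			 * forgets it, so a later free of src cannot touch it again. */
			dst[copied++] = src[i];
			src[i] = NULL;
		}
	}

	return copied;	/* number of entries now owned by dst */
}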
*/ -static PVRSRV_ERROR +static void PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv) { PVRSRV_ERROR eError; @@ -2893,35 +3760,134 @@ PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv) /* We can't free pages until now. */ if (psOSPageArrayData->iNumOSPagesAllocated != 0) { -#if defined(DEBUG) && defined(SUPPORT_VALIDATION) - PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); - IMG_UINT32 ui32UMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; - mutex_lock(&g_sUMALeakMutex); + eError = _FreeOSPages(psOSPageArrayData, NULL, 0); + PVR_LOG_IF_ERROR(eError, "_FreeOSPages"); + } + + _FreeOSPagesArray(psOSPageArrayData); +} + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +static PVRSRV_ERROR PMRZombifyOSMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PMR_OSPAGEARRAY_DATA *psPageArrayData = pvPriv; + IMG_UINT32 uiNumOSPages = psPageArrayData->uiTotalNumOSPages; + + BIT_SET(psPageArrayData->ui32AllocFlags, FLAG_IS_ZOMBIE); + + /* no need to check for free indices as it's always all the memory we're + * freeing */ + if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages) + { + /* _FreeOSPages_Sparse() path */ + + struct page *const *const ppsPageArray = psPageArrayData->pagearray; + const IMG_UINT32 uiFlags = psPageArrayData->ui32AllocFlags; + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_UINT32 i; - g_ui32UMALeakCounter++; - if (ui32UMALeakMax && g_ui32UMALeakCounter >= ui32UMALeakMax) + if (BIT_ISSET(uiFlags, FLAG_DMA_CMA)) + { + for (i = 0; i < uiNumOSPages; i++) + { + if (ppsPageArray[i] != NULL && !DMA_IS_CMA_GHOST(psPageArrayData->dmaphysarray[i])) + { + _TransferToMemZombieRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } + } + } + else { - g_ui32UMALeakCounter = 0; - mutex_unlock(&g_sUMALeakMutex); + for (i = 0; i < uiNumOSPages; i++) + { + if (ppsPageArray[i] != NULL) + { + _TransferToMemZombieRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } + } + } +#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + const IMG_UINT32 uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; + IMG_UINT32 uiNumDevPages = psPageArrayData->uiTotalNumOSPages >> uiOrder; + IMG_UINT32 i, j, uiAllocatedNumPages = 0; - PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); - return PVRSRV_OK; + if (BIT_ISSET(uiFlags, FLAG_DMA_CMA)) + { + for (i = 0; i < uiNumDevPages; i++) + { + IMG_UINT32 idx = i << uiOrder; + if (ppsPageArray[idx] != NULL) + { + uiAllocatedNumPages++; + } + } + uiAllocatedNumPages <<= uiOrder; + } + else + { + for (i = 0; i < uiNumDevPages; i++) + { + IMG_UINT32 idx = i << uiOrder; + if (ppsPageArray[idx] != NULL) + { + for (j = 0; j < (1<ui32CMAAdjustedPageCount, + psPageArrayData->uiPid + ); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + } + else + { + /* _FreeOSPages_Fast() path */ - eError = _FreeOSPages(psOSPageArrayData, - NULL, - 0); - PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? 
*/ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + struct page *const *const ppsPageArray = psPageArrayData->pagearray; + IMG_UINT32 i; + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_DMA_CMA)) + { + for (i = 0; i < uiNumOSPages; i++) + { + if (!DMA_IS_CMA_GHOST(psPageArrayData->dmaphysarray[i])) + { + _TransferToMemZombieRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } + } + } + else + { + for (i = 0; i < uiNumOSPages; i++) + { + _TransferToMemZombieRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } + } +#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + _ZombifyMemAllocStat_UmaPages( + uiNumOSPages * PAGE_SIZE - psPageArrayData->ui32CMAAdjustedPageCount, + psPageArrayData->uiPid + ); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ } +#else /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + + PVR_UNREFERENCED_PARAMETER(pvPriv); + PVR_UNREFERENCED_PARAMETER(psPMR); - eError = _FreeOSPagesArray(psOSPageArrayData); - PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */ return PVRSRV_OK; } +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ /* Callback function for locking the system physical page addresses. * This function must be called before the lookup address func. */ @@ -2945,15 +3911,45 @@ PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) return eError; } +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +static PVRSRV_ERROR +PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv, + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages) +#else static PVRSRV_ERROR PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) +#endif { /* Just drops the refcount. */ PVRSRV_ERROR eError = PVRSRV_OK; PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_OSPAGEARRAY_DATA *psExtractedPagesPageArray = NULL; + + *ppvZombiePages = NULL; +#endif if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) { +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + if (psOSPageArrayData->iNumOSPagesAllocated == 0) + { + *ppvZombiePages = NULL; + return PVRSRV_OK; + } + + eError = _ExtractAllPages(psOSPageArrayData, + &psExtractedPagesPageArray); + PVR_LOG_GOTO_IF_ERROR(eError, "_ExtractAllPages", e0); + + if (psExtractedPagesPageArray) + { + /* Zombify pages to get proper stats */ + eError = PMRZombifyOSMem(psExtractedPagesPageArray, NULL); + PVR_WARN_IF_ERROR(eError, "PMRZombifyOSMem"); + } + *ppvZombiePages = psExtractedPagesPageArray; +#else /* Free Memory for deferred allocation */ eError = _FreeOSPages(psOSPageArrayData, NULL, @@ -2962,16 +3958,22 @@ PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) { return eError; } +#endif } - PVR_ASSERT(eError == PVRSRV_OK); return eError; + +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +#endif } static INLINE IMG_BOOL IsOffsetValid(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, IMG_UINT32 ui32Offset) { - return (ui32Offset >> psOSPageArrayData->uiLog2AllocPageSize) < + return (ui32Offset >> psOSPageArrayData->uiLog2DevPageSize) < psOSPageArrayData->uiTotalNumOSPages; } @@ -2979,12 +3981,11 @@ static INLINE IMG_BOOL IsOffsetValid(const PMR_OSPAGEARRAY_DATA *psOSPageArrayDa static IMG_DEV_PHYADDR GetOffsetPA(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, IMG_UINT32 ui32Offset) { - IMG_UINT32 ui32Log2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; - IMG_UINT32 ui32PageIndex = ui32Offset >> ui32Log2AllocPageSize; - IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << ui32Log2AllocPageSize); + 
IMG_UINT32 ui32PageIndex = ui32Offset >> PAGE_SHIFT; + IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << PAGE_SHIFT); IMG_DEV_PHYADDR sPA; - PVR_ASSERT(ui32InPageOffset < (1U << ui32Log2AllocPageSize)); + PVR_ASSERT(ui32InPageOffset < (1U << PAGE_SHIFT)); sPA.uiAddr = page_to_phys(psOSPageArrayData->pagearray[ui32PageIndex]); sPA.uiAddr += ui32InPageOffset; @@ -2998,13 +3999,22 @@ PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 ui32Log2PageSize, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T *puiOffset, +#if defined(SUPPORT_STATIC_IPA) + IMG_UINT64 ui64IPAPolicyValue, + IMG_UINT64 ui64IPAClearMask, +#endif IMG_BOOL *pbValid, IMG_DEV_PHYADDR *psDevPAddr) { const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; IMG_UINT32 uiIdx; - if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize) +#if defined(SUPPORT_STATIC_IPA) + PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); + PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); +#endif + + if (psOSPageArrayData->uiLog2DevPageSize < ui32Log2PageSize) { PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical addresses from PMR " @@ -3039,6 +4049,12 @@ PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv, } } #endif +#if defined(SUPPORT_STATIC_IPA) + /* Modify the physical address with the associated IPA values */ + psDevPAddr[uiIdx].uiAddr &= ~ui64IPAClearMask; + psDevPAddr[uiIdx].uiAddr |= ui64IPAPolicyValue; +#endif + } } @@ -3069,25 +4085,23 @@ PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 ui32PageOffset=0; size_t uiMapOffset=0; IMG_UINT32 ui32PageCount = 0; - IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; - IMG_UINT32 uiOSPageShift = OSGetPageShift(); - IMG_UINT32 uiPageSizeDiff = 0; + IMG_UINT32 uiLog2DevPageSize = psOSPageArrayData->uiLog2DevPageSize; struct page **pagearray; PMR_OSPAGEARRAY_KERNMAP_DATA *psData; - /* For cases device page size greater than the OS page size, - * multiple physically contiguous OS pages constitute one device page. - * However only the first page address of such an ensemble is stored - * as part of the mapping table in the driver. Hence when mapping the PMR - * in part/full, all OS pages that constitute the device page - * must also be mapped to kernel. 
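/*
 * Illustrative sketch (editor's addition, not part of the diff): because the
 * page array now holds one (head or ghost) entry per OS page, the reworked
 * GetOffsetPA() above can decompose a byte offset with PAGE_SHIFT instead of
 * the device page size. The decomposition is plain shift-and-subtract;
 * PAGE_SHIFT is assumed to be 12 (4 KiB OS pages) for the example.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12u

void sketch_split_offset(uint64_t offset,
                         uint32_t *page_index, uint32_t *in_page_offset)
{
	*page_index     = (uint32_t)(offset >> SKETCH_PAGE_SHIFT);
	*in_page_offset = (uint32_t)(offset - ((uint64_t)*page_index << SKETCH_PAGE_SHIFT));
}

void sketch_split_offset_demo(void)
{
	uint32_t idx, in_page;

	sketch_split_offset(0x12345u, &idx, &in_page);
	assert(idx == 0x12u && in_page == 0x345u);
}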
- * - * For the case where device page size less than OS page size, - * treat it the same way as the page sizes are equal */ - if (uiLog2AllocPageSize > uiOSPageShift) +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + OSWRLockAcquireRead(psOSPageArrayData->hPagesUnderMigrationRWLock); + if (psOSPageArrayData->iNumOSPagesUnderMigration != 0) { - uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift; + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Requested kernel mapping from PMR " + "in process of page migration, retry.", + __func__)); + OSWRLockReleaseRead(psOSPageArrayData->hPagesUnderMigrationRWLock); + return PVRSRV_ERROR_RETRY; } + OSWRLockReleaseRead(psOSPageArrayData->hPagesUnderMigrationRWLock); +#endif /* Zero offset and size as a special meaning which means map in the @@ -3106,17 +4120,13 @@ PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, { size_t uiEndoffset; - ui32PageOffset = uiOffset >> uiLog2AllocPageSize; - uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize); + ui32PageOffset = uiOffset >> uiLog2DevPageSize; + uiMapOffset = uiOffset - (ui32PageOffset << uiLog2DevPageSize); uiEndoffset = uiOffset + uiSize - 1; /* Add one as we want the count, not the offset */ - /* Page count = amount of device pages (note uiLog2AllocPageSize being used) */ - ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1; + /* Page count = amount of device pages (note uiLog2DevPageSize being used) */ + ui32PageCount = (uiEndoffset >> uiLog2DevPageSize) + 1; ui32PageCount -= ui32PageOffset; - - /* The OS page count to be mapped might be different if the - * OS page size is lesser than the device page size */ - ui32PageCount <<= uiPageSizeDiff; } switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags)) @@ -3137,55 +4147,20 @@ PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, goto e0; } - if (uiPageSizeDiff) - { - /* Each device page can be broken down into ui32SubPageCount OS pages */ - IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff; - IMG_UINT32 i; - struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset]; - - /* Allocate enough memory for the OS page pointers for this mapping */ - pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0])); - - if (pagearray == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e0; - } - - /* construct array that holds the page pointers that constitute the requested - * mapping */ - for (i = 0; i < ui32PageCount; i++) - { - IMG_UINT32 ui32OSPageArrayIndex = i / ui32SubPageCount; - IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount; - - /* - * The driver only stores OS page pointers for the first OS page - * within each device page (psPage[ui32OSPageArrayIndex]). - * Get the next OS page structure at device page granularity, - * then calculate OS page pointers for all the other pages. 
- */ - pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset; - } - } - else - { - pagearray = &psOSPageArrayData->pagearray[ui32PageOffset]; - } + pagearray = &psOSPageArrayData->pagearray[ui32PageOffset]; psData = OSAllocMem(sizeof(*psData)); if (psData == NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e1; + goto e0; } - pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot); + pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_MAP, prot); if (pvAddress == NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e2; + goto e1; } *ppvKernelAddressOut = pvAddress + uiMapOffset; @@ -3194,11 +4169,6 @@ PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, psData->PageProps = prot; *phHandleOut = psData; - if (uiPageSizeDiff) - { - OSFreeMem(pagearray); - } - #if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) { IMG_CPU_PHYADDR pvAddrPhy; @@ -3219,13 +4189,8 @@ PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, /* error exit paths follow */ -e2: - OSFreeMem(psData); e1: - if (uiPageSizeDiff) - { - OSFreeMem(pagearray); - } + OSFreeMem(psData); e0: PVR_ASSERT(eError != PVRSRV_OK); return eError; @@ -3265,6 +4230,9 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, IMG_UINT32 *pai32AllocIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 *pai32FreeIndices, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages, +#endif IMG_UINT32 uiFlags) { PVRSRV_ERROR eError; @@ -3274,11 +4242,6 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, struct page **psPageArray = psPMRPageArrayData->pagearray; void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray; dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray; - - struct page *psPage; - dma_addr_t psDMAPAddr; - void *pvDMAVAddr; - IMG_UINT32 ui32AdtnlAllocPages = 0; /*uiLog2AllocPageSize - PAGE_SHIFT; - IMG_BOOL bCMA = BIT_ISSET(psPMRPageArrayData->ui32AllocFlags, FLAG_IS_CMA); + IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; + IMG_BOOL bCMA = BIT_ISSET(psPMRPageArrayData->ui32AllocFlags, FLAG_DMA_CMA); /* Check SPARSE flags and calculate pages to allocate and free */ @@ -3317,6 +4280,10 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, ui32FreePageCount = 0; } +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + *ppvZombiePages = NULL; +#endif + if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages)) { eError = PVRSRV_ERROR_INVALID_PARAMS; @@ -3352,13 +4319,13 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, { uiFreepgidx = pai32FreeIndices[ui32Loop]; - if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) + if (uiFreepgidx >= (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) { eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; goto e0; } - if (NULL == psPageArray[uiFreepgidx]) + if (NULL == (psPageArray[uiFreepgidx << uiOrder])) { eError = PVRSRV_ERROR_INVALID_PARAMS; PVR_DPF((PVR_DBG_ERROR, @@ -3383,35 +4350,20 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, { uiAllocpgidx = pai32AllocIndices[ui32Loop]; - if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) + if (uiAllocpgidx >= (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) { eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; goto e0; } - if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) - { - if ((NULL != psPageArray[uiAllocpgidx]) || - (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) - { - eError = PVRSRV_ERROR_INVALID_PARAMS; - PVR_DPF((PVR_DBG_ERROR, - "%s: Trying to allocate already allocated page again", - 
__func__)); - goto e0; - } - } - else + if ((NULL != psPageArray[uiAllocpgidx << uiOrder]) || + (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) { - if ((NULL == psPageArray[uiAllocpgidx]) || - (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) ) - { - eError = PVRSRV_ERROR_INVALID_PARAMS; - PVR_DPF((PVR_DBG_ERROR, - "%s: Unable to remap memory due to missing page", - __func__)); - goto e0; - } + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to allocate already allocated page again", + __func__)); + goto e0; } } @@ -3420,21 +4372,21 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, /* Allocate new pages from the OS */ if (0 != ui32AdtnlAllocPages) { - eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages); - if (PVRSRV_OK != eError) - { - PVR_DPF((PVR_DBG_MESSAGE, - "%s: New Addtl Allocation of pages failed", - __func__)); - goto e0; - } + eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: New Addtl Allocation of pages failed", + __func__)); + goto e0; + } - psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; - /*Mark the corresponding pages of translation table as valid */ - for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) - { - psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; - } + psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; + /*Mark the corresponding pages of translation table as valid */ + for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; + } } @@ -3443,50 +4395,48 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, /* Move the corresponding free pages to alloc request */ for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++) { - uiAllocpgidx = pai32AllocIndices[ui32Index]; - uiFreepgidx = pai32FreeIndices[ui32Loop]; - - psPage = psPageArray[uiAllocpgidx]; - psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; + IMG_UINT32 uiOrderIdx; + uiAllocpgidx = pai32AllocIndices[ui32Index] << uiOrder; + uiFreepgidx = pai32FreeIndices[ui32Loop] << uiOrder; - if (bCMA) - { - pvDMAVAddr = psDMAVirtArray[uiAllocpgidx]; - psDMAPAddr = psDMAPhysArray[uiAllocpgidx]; - psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx]; - psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx]; - } - - /* Is remap mem used in real world scenario? Should it be turned to a - * debug feature? 
The condition check needs to be out of loop, will be - * done at later point though after some analysis */ - if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) - { - psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; - psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; - psPageArray[uiFreepgidx] = NULL; - if (bCMA) - { - psDMAVirtArray[uiFreepgidx] = NULL; - psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0; - } - } - else + for (uiOrderIdx = 0; uiOrderIdx < (1 << uiOrder); uiOrderIdx++) { - psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; - psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; - psPageArray[uiFreepgidx] = psPage; + psPageArray[uiAllocpgidx + uiOrderIdx] = psPageArray[uiFreepgidx + uiOrderIdx]; if (bCMA) { - psDMAVirtArray[uiFreepgidx] = pvDMAVAddr; - psDMAPhysArray[uiFreepgidx] = psDMAPAddr; + psDMAVirtArray[uiAllocpgidx + uiOrderIdx] = psDMAVirtArray[uiFreepgidx + uiOrderIdx]; + psDMAPhysArray[uiAllocpgidx + uiOrderIdx] = psDMAPhysArray[uiFreepgidx + uiOrderIdx]; + psDMAVirtArray[uiFreepgidx + uiOrderIdx] = NULL; + psDMAPhysArray[uiFreepgidx + uiOrderIdx] = (dma_addr_t)0; } + psPageArray[uiFreepgidx + uiOrderIdx] = NULL; } + + psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; } - /* Free the additional free pages */ + /* Free or zombie the additional free pages */ if (0 != ui32AdtnlFreePages) { +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_OSPAGEARRAY_DATA *psExtractedPagesPageArray = NULL; + + eError = _ExtractPages(psPMRPageArrayData, + &pai32FreeIndices[ui32Loop], + ui32AdtnlFreePages, + &psExtractedPagesPageArray); + if (eError != PVRSRV_OK) + { + goto e0; + } + + /* Zombify pages to get proper stats */ + eError = PMRZombifyOSMem(psExtractedPagesPageArray, NULL); + PVR_LOG_IF_ERROR(eError, "psExtractedPagesPageArray"); + + *ppvZombiePages = psExtractedPagesPageArray; +#else eError = _FreeOSPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); @@ -3494,6 +4444,7 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, { goto e0; } +#endif /* SUPPORT_PMR_PAGES_DEFERRED_FREE */ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; while (ui32Loop < ui32FreePageCount) { @@ -3508,37 +4459,6 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, return eError; } -/*************************************************************************/ /*! 
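/*
 * Illustrative sketch (editor's addition, not part of the diff): when
 * PMRChangeSparseMemOSMem above recycles freed chunks into newly requested
 * slots, it now moves whole device pages as spans of (1 << uiOrder) OS-page
 * entries, clearing the source slots as it goes. The helper below shows that
 * span move on a bare pointer array with illustrative names.
 */
struct sketch_page { int dummy; };	/* stands in for struct page */

void sketch_move_dev_page(struct sketch_page **os_array,
                          unsigned int dst_dev_idx,
                          unsigned int src_dev_idx,
                          unsigned int order)
{
	unsigned int dst = dst_dev_idx << order;
	unsigned int src = src_dev_idx << order;
	unsigned int sub;

	for (sub = 0; sub < (1u << order); sub++)
	{
		os_array[dst + sub] = os_array[src + sub];
		os_array[src + sub] = NULL;	/* the source slot is now unbacked */
	}
}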
-@Function PMRChangeSparseMemCPUMapOSMem -@Description This function Changes CPU maps accordingly -@Return PVRSRV_ERROR failure code -*/ /**************************************************************************/ -static -PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv, - const PMR *psPMR, - IMG_UINT64 sCpuVAddrBase, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices) -{ - struct page **psPageArray; - PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; - IMG_CPU_PHYADDR sCPUPAddr; - - sCPUPAddr.uiAddr = 0; - psPageArray = psPMRPageArrayData->pagearray; - - return OSChangeSparseMemCPUAddrMap((void **)psPageArray, - sCpuVAddrBase, - sCPUPAddr, - ui32AllocPageCount, - pai32AllocIndices, - ui32FreePageCount, - pai32FreeIndices, - IMG_FALSE); -} - static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = { .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem, .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem, @@ -3548,22 +4468,38 @@ static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = { .pfnReadBytes = NULL, .pfnWriteBytes = NULL, .pfnChangeSparseMem = &PMRChangeSparseMemOSMem, - .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem, .pfnFinalize = &PMRFinalizeOSMem, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + .pfnFreeZombiePages = &PMRFreeZombiePagesOSMem, +#endif +#if defined(SUPPORT_PMR_DEFERRED_FREE) + .pfnZombify = &PMRZombifyOSMem, +#endif }; +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +DLLIST_NODE * +LinuxOSGetCPUMappingPrivateDataList(PMR *psPMR) +{ + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = PMRGetPrivateData(psPMR, &_sPMROSPFuncTab); + PVR_ASSERT(psOSPageArrayData); + + return &psOSPageArrayData->sCpuMappingListHead; +} +#endif + /* Wrapper around OS page allocation. */ static PVRSRV_ERROR DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData, IMG_UINT32 *puiAllocIndices, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 ui32Log2AllocPageSize) { PVRSRV_ERROR eError = PVRSRV_OK; /* Do we fill the whole page array or just parts (sparse)? */ - if (ui32NumPhysChunks == ui32NumVirtChunks) + if (ui32NumPhysChunks == ui32NumLogicalChunks) { /* Allocate the physical pages */ eError = _AllocOSPages(psPrivData, @@ -3581,20 +4517,30 @@ DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData, return eError; } -static void _EncodeAllocationFlags(IMG_UINT32 uiLog2AllocPageSize, +static void _EncodeAllocationFlags(IMG_UINT32 uiLog2DevPageSize, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32* ui32AllocFlags) { + if (PVRSRV_CHECK_OS_LINUX_PREFER_CMA(uiFlags)) + { + BIT_SET(*ui32AllocFlags, FLAG_PREF_CMA); + } /* * Use CMA framework if order is greater than OS page size; please note * that OSMMapPMRGeneric() has the same expectation as well. */ - /* IsCMA? */ - if (uiLog2AllocPageSize > PAGE_SHIFT) + else if (uiLog2DevPageSize > PAGE_SHIFT) + { + BIT_SET(*ui32AllocFlags, FLAG_DMA_CMA); + } + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + if (PVRSRV_CHECK_OS_LINUX_MOVABLE(uiFlags)) { - BIT_SET(*ui32AllocFlags, FLAG_IS_CMA); + BIT_SET(*ui32AllocFlags, FLAG_IS_MOVABLE); } +#endif /* OnDemand? 
*/ if (PVRSRV_CHECK_ON_DEMAND(uiFlags)) @@ -3632,8 +4578,8 @@ static void _EncodeAllocationFlags(IMG_UINT32 uiLog2AllocPageSize, } void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData, - IMG_UINT64 *pui64TotalSize, - IMG_UINT64 *pui64FreeSize) + IMG_UINT64 *pui64TotalSize, + IMG_UINT64 *pui64FreeSize) { struct sysinfo sMeminfo; si_meminfo(&sMeminfo); @@ -3650,9 +4596,9 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, CONNECTION_DATA *psConnection, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *puiAllocIndices, - IMG_UINT32 uiLog2AllocPageSize, + IMG_UINT32 uiLog2DevPageSize, PVRSRV_MEMALLOCFLAGS_T uiFlags, const IMG_CHAR *pszAnnotation, IMG_PID uiPid, @@ -3667,14 +4613,18 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, IMG_UINT32 ui32CPUCacheFlags; IMG_UINT32 ui32AllocFlags = 0; PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); + IMG_UINT32 ui32ValidPageSizeMask = psDevNode->psMMUDevAttrs->ui32ValidPageSizeMask; + IMG_UINT32 ui32PageSize = IMG_PAGE2BYTES32(uiLog2DevPageSize); PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_LOG_GOTO_IF_INVALID_PARAM(ui32PageSize & ui32ValidPageSizeMask, eError, errorOnParam); + /* * The host driver (but not guest) can still use this factory for firmware * allocations */ - if (PVRSRV_VZ_MODE_IS(GUEST) && PVRSRV_CHECK_FW_MAIN(uiFlags)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDevNode) && PVRSRV_CHECK_FW_MAIN(uiFlags)) { PVR_ASSERT(0); eError = PVRSRV_ERROR_INVALID_PARAMS; @@ -3682,7 +4632,7 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, } /* Select correct caching mode */ - eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags); + eError = DevmemCPUCacheMode(uiFlags, &ui32CPUCacheFlags); if (eError != PVRSRV_OK) { goto errorOnParam; @@ -3693,7 +4643,18 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN; } - _EncodeAllocationFlags(uiLog2AllocPageSize, uiFlags, &ui32AllocFlags); + _EncodeAllocationFlags(uiLog2DevPageSize, uiFlags, &ui32AllocFlags); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + /* Reject sparse and non4k PMR marked for migrate */ + if (BIT_ISSET(ui32AllocFlags, FLAG_IS_MOVABLE) && + ((ui32NumPhysChunks != ui32NumLogicalChunks|| ui32NumLogicalChunks > 1) || + uiLog2DevPageSize != PAGE_SHIFT) + ) + { + PVR_LOG_GOTO_WITH_ERROR("PhysmemNewOSRamBackedPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); + } +#endif #if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) @@ -3706,26 +4667,32 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, very restrictive conditions, also there is a maximum alignment value which must not exceed the largest device page-size. 
If these are not met then fail the aligned-requested allocation */ - if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + if (uiLog2DevPageSize > PAGE_SHIFT) { - IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize; - if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ)) + IMG_UINT64 ui64InvalidSizeMask = ui32PageSize - 1; + IMG_UINT32 ui32PageSizeMask = psDevNode->psMMUDevAttrs->ui32ValidPageSizeMask; + IMG_UINT32 ui32MaxPageSize = 1; + + while (ui32PageSizeMask > 1) + { + ui32PageSizeMask >>= 1; + ui32MaxPageSize <<= 1; + } + + if ((uiSize & ui64InvalidSizeMask) || (ui32PageSize > ui32MaxPageSize)) { PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PA alignment: size 0x%llx, align 0x%x", - __func__, uiSize, uiAlign)); + __func__, uiSize, ui32PageSize)); eError = PVRSRV_ERROR_INVALID_ALIGNMENT; goto errorOnParam; } - PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ); } /* Create Array structure that hold the physical pages */ eError = _AllocOSPageArray(psDevNode, uiSize, - ui32NumPhysChunks, - ui32NumVirtChunks, - uiLog2AllocPageSize, + uiLog2DevPageSize, ui32AllocFlags, ui32CPUCacheFlags, uiPid, @@ -3735,10 +4702,23 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, goto errorOnAllocPageArray; } +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + if (psConnection) + { + psPrivData->psDRMFile = ((ENV_CONNECTION_DATA*)psConnection->hOsPrivateData)->psDRMFile; + } + else + { + psPrivData->psDRMFile = NULL; + } +#endif +#endif + if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) { eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks, - ui32NumVirtChunks, uiLog2AllocPageSize); + ui32NumLogicalChunks, uiLog2DevPageSize); if (eError != PVRSRV_OK) { goto errorOnAllocPages; @@ -3769,9 +4749,9 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, eError = PMRCreatePMR(psPhysHeap, uiSize, ui32NumPhysChunks, - ui32NumVirtChunks, + ui32NumLogicalChunks, puiAllocIndices, - uiLog2AllocPageSize, + uiLog2DevPageSize, uiPMRFlags, pszAnnotation, &_sPMROSPFuncTab, @@ -3784,10 +4764,7 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, goto errorOnCreate; } -#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) psPrivData->hPMR = psPMR; -#endif - *ppsPMRPtr = psPMR; return PVRSRV_OK; @@ -3800,8 +4777,7 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, } errorOnAllocPages: - eError2 = _FreeOSPagesArray(psPrivData); - PVR_ASSERT(eError2 == PVRSRV_OK); + _FreeOSPagesArray(psPrivData); errorOnAllocPageArray: errorOnParam: diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.h index 89706fffdc5c..2581cceeb219 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_osmem_linux.h @@ -43,7 +43,18 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
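For readers following the new validation in PhysmemNewOSRamBackedPMR above: the loop walks the MMU's valid-page-size bitmask to find the largest supported page size, then rejects requests whose size is not a multiple of the chosen device page size. Below is a minimal, self-contained sketch of that same check, assuming each set bit in the mask stands for one supported power-of-two page size (all names are illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>

/* Largest page size advertised by a valid-page-size bitmask: mirror the
 * driver's loop, doubling a running size while shifting the mask down so
 * the result is 2^(index of the highest set bit). */
static uint32_t max_page_size_from_mask(uint32_t valid_mask)
{
    uint32_t max_size = 1;

    while (valid_mask > 1) {
        valid_mask >>= 1;
        max_size <<= 1;
    }
    return max_size;
}

/* A request is acceptable when the size is a whole number of device pages
 * and the requested page size does not exceed the largest supported one. */
static bool alloc_size_is_valid(uint64_t size, uint32_t page_size, uint32_t valid_mask)
{
    return (size & (uint64_t)(page_size - 1)) == 0 &&
           page_size <= max_page_size_from_mask(valid_mask);
}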
#ifndef PHYSMEM_OSMEM_LINUX_H #define PHYSMEM_OSMEM_LINUX_H -void LinuxInitPhysmem(void); +PVRSRV_ERROR LinuxInitPhysmem(void); void LinuxDeinitPhysmem(void); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +DLLIST_NODE * +LinuxOSGetCPUMappingPrivateDataList(PMR *psPMR); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) +int LinuxRegisterMigrateCallbacks(struct file* filp); +void LinuxDeregisterMigrateCallbacks(struct file* filp); +#endif +#endif + #endif /* PHYSMEM_OSMEM_LINUX_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.c index b4553b476785..00c02f16d3e2 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.c @@ -139,44 +139,319 @@ static const IMG_UINT8 gui8Patterns[] = { 0x6cU, }; - -/* Following function does minimal required initialisation for mem test using dummy device node */ static PVRSRV_ERROR -PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) +PMRContiguousSparseMappingTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) { - PVRSRV_DEVICE_NODE *psDeviceNode; - PVRSRV_ERROR eError; + PVRSRV_ERROR eError, eError1; + PHYS_HEAP *psHeap; + PHYS_HEAP_POLICY psHeapPolicy; + + PMR *psPMR = NULL; + PMR *psSpacingPMR = NULL, *psSecondSpacingPMR = NULL; + IMG_UINT32 aui32MappingTableFirstAlloc[4] = {0,1,2,3}; + IMG_UINT32 aui32MappingTableSecondAlloc[8] = {4,5,6,7,8,9,10,11}; + IMG_UINT32 aui32MappingTableThirdAlloc[4] = {12,13,14,15}; + IMG_UINT32 ui32NoMappingTable = 0; + IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; + IMG_BOOL *pbValid; + IMG_DEV_PHYADDR *apsDevPAddr; + IMG_UINT32 ui32NumOfPages = 16; + size_t uiMappedSize, uiPageSize; + IMG_UINT32 i, uiAttempts; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; - /* Dummy device node */ - psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); - PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); + eError = PhysHeapAcquireByID(PVRSRV_GET_PHYS_HEAP_HINT(uiFlags), + psDeviceNode, + &psHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorReturn); - psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; - psDeviceNode->psDevConfig = psDevConfig; - psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; + psHeapPolicy = PhysHeapGetPolicy(psHeap); - /* Initialise Phys mem heaps */ - eError = PhysHeapInitDeviceHeaps(psDeviceNode, psDevConfig); - PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapInitDeviceHeaps", ErrorSysDevDeInit); + PhysHeapRelease(psHeap); - *ppsDeviceNode = psDeviceNode; + /* If this is the case then it's not supported and so don't attempt the test */ + if (psHeapPolicy != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) + { + return PVRSRV_OK; + } - return PVRSRV_OK; + uiPageSize = OSGetPageSize(); -ErrorSysDevDeInit: - psDevConfig->psDevNode = NULL; - OSFreeMem(psDeviceNode); - return eError; -} + /* Allocate OS memory for PMR page list */ + apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); -/* Undo initialisation done for mem test */ -static void -PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode) -{ - /* Deinitialise Phys mem heaps */ - PhysHeapDeInitDeviceHeaps(psDeviceNode); + /* Allocate OS memory for PMR page state */ + pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); + + /* Allocate 
OS memory for write buffer */ + pcWriteBuffer = OSAllocMem(uiPageSize * ui32NumOfPages); + PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem); + OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize); + + /* Allocate OS memory for read buffer */ + pcReadBuffer = OSAllocMem(uiPageSize * ui32NumOfPages); + PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); + + /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */ + uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; + + /* + * Construct a sparse PMR attempting to ensure the allocations + * are physically non contiguous but sequentially placed in the mapping + * table. + */ + for (uiAttempts = 3; uiAttempts > 0; uiAttempts--) + { + /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + ui32NumOfPages * uiPageSize, + 4, + ui32NumOfPages, + aui32MappingTableFirstAlloc, + OSGetPageShift(), + uiFlags, + sizeof("PMRContiguousSparseMappingTest"), + "PMRContiguousSparseMappingTest", + OSGetCurrentClientProcessIDKM(), + &psPMR, + PDUMP_NONE, + NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); + goto ErrorFreeReadBuffer; + } + + /* Allocate some memory from the same physheap so that we can ensure + * the allocations aren't linear + */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + ui32NumOfPages * uiPageSize, + 1, + 1, + &ui32NoMappingTable, + OSGetPageShift(), + uiFlags, + sizeof("PMRContiguousSparseMappingTest"), + "PMRContiguousSparseMappingTest", + OSGetCurrentClientProcessIDKM(), + &psSpacingPMR, + PDUMP_NONE, + NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); + goto ErrorUnrefPMR; + } + + /* Allocate 8 more physical pages on the Sparse PMR */ + eError = PMR_ChangeSparseMem(psPMR, + 8, + aui32MappingTableSecondAlloc, + 0, + NULL, + uiFlags | SPARSE_RESIZE_ALLOC); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_ChangeSparseMem", ErrorUnrefSpacingPMR); + + /* Allocate some more memory from the same physheap so that we can ensure + * the allocations aren't linear + */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + ui32NumOfPages * uiPageSize, + 1, + 1, + &ui32NoMappingTable, + OSGetPageShift(), + uiFlags, + sizeof("PMRContiguousSparseMappingTest"), + "PMRContiguousSparseMappingTest", + OSGetCurrentClientProcessIDKM(), + &psSecondSpacingPMR, + PDUMP_NONE, + NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); + goto ErrorUnrefSpacingPMR; + } + + /* Allocate final 4 physical pages on the Sparse PMR */ + eError = PMR_ChangeSparseMem(psPMR, + 4, + aui32MappingTableThirdAlloc, + 0, + NULL, + uiFlags | SPARSE_RESIZE_ALLOC); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_ChangeSparseMem", ErrorUnrefSecondSpacingPMR); + + /* + * Check we have in fact managed to obtain a PMR with non contiguous + * physical pages. 
+ */ + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR")); + goto ErrorUnrefSecondSpacingPMR; + } + + /* Get the Device physical addresses of the pages */ + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid, CPU_USE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); + goto ErrorUnlockPhysAddresses; + } - OSFreeMem(psDeviceNode); + { + IMG_BOOL bPhysicallyContiguous = IMG_TRUE; + IMG_DEV_PHYADDR sPrevDevPAddr = apsDevPAddr[0]; + for (i = 1; i < ui32NumOfPages && bPhysicallyContiguous; i++) + { + if (apsDevPAddr[i].uiAddr != sPrevDevPAddr.uiAddr + uiPageSize) + { + bPhysicallyContiguous = IMG_FALSE; + } + sPrevDevPAddr = apsDevPAddr[i]; + } + + if (bPhysicallyContiguous) + { + /* We haven't yet managed to create the mapping scenario we + * require: unwind and attempt again. + */ + eError1 = PMRUnlockSysPhysAddresses(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); + } + eError1 = PMRUnrefPMR(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); + } + eError1 = PMRUnrefPMR(psSpacingPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free Spacing PMR")); + } + eError1 = PMRUnrefPMR(psSecondSpacingPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free Second Spacing PMR")); + } + } else { + /* We have the scenario, break out of the attempt loop */ + break; + } + } + } + + if (uiAttempts == 0) + { + /* We can't create the scenario, very unlikely this would happen */ + PVR_LOG_GOTO_IF_ERROR(PVRSRV_ERROR_MEMORY_TEST_FAILED, + "Unable to create Non Contiguous PMR scenario", + ErrorFreeReadBuffer); + } + + /* We have the PMR scenario to test, now attempt to map the whole PMR, + * write and then read from it + */ + eError = PMRAcquireSparseKernelMappingData(psPMR, 0, ui32NumOfPages * uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); + goto ErrorUnlockPhysAddresses; + } + + OSCachedMemCopyWMB(pvKernAddr, pcWriteBuffer, ui32NumOfPages * uiPageSize); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivData); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + /* + * Release and reacquire the mapping to exercise the mapping paths + */ + eError = PMRAcquireSparseKernelMappingData(psPMR, 0, ui32NumOfPages * uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); + goto ErrorUnlockPhysAddresses; + } + + OSCachedMemSetWMB(pcReadBuffer, 0x0, ui32NumOfPages * uiPageSize); + OSCachedMemCopyWMB(pcReadBuffer, pvKernAddr, ui32NumOfPages * uiPageSize); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivData); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + for (i = 0; i < ui32NumOfPages * uiPageSize; i++) + { + if (pcReadBuffer[i] != pcWriteBuffer[i]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%hhx), expected (0x%hhx)! 
@ %u", + __func__, pcReadBuffer[i], pcWriteBuffer[i], i)); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorUnlockPhysAddresses; + } + } + +ErrorUnlockPhysAddresses: + /* Unlock and Unref the PMR to destroy it */ + eError1 = PMRUnlockSysPhysAddresses(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); + } + +ErrorUnrefSecondSpacingPMR: + eError1 = PMRUnrefPMR(psSecondSpacingPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free Second Spacing PMR")); + } +ErrorUnrefSpacingPMR: + eError1 = PMRUnrefPMR(psSpacingPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free Spacing PMR")); + } +ErrorUnrefPMR: + eError1 = PMRUnrefPMR(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); + } + +ErrorFreeReadBuffer: + OSFreeMem(pcReadBuffer); +ErrorFreeWriteBuffer: + OSFreeMem(pcWriteBuffer); +ErrorFreePMRPageStateMem: + OSFreeMem(pbValid); +ErrorFreePMRPageListMem: + OSFreeMem(apsDevPAddr); +ErrorReturn: + return eError; } /* Test for PMR factory validation */ @@ -202,9 +477,8 @@ PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFla PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); /* Allocate OS memory for PMR page state */ - pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL)); + pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); - OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL)); /* Allocate OS memory for write buffer */ pcWriteBuffer = OSAllocMem(uiPageSize); @@ -226,9 +500,9 @@ PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFla } /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */ - uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ - PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ @@ -263,7 +537,7 @@ PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFla } /* Get the Device physical addresses of the pages */ - eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid); + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid, CPU_USE); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); @@ -317,8 +591,8 @@ PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFla PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); goto ErrorUnlockPhysAddresses; } - OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize); - OSCachedMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize); + OSCachedMemSetWMB(pcReadBuffer, 0x0, uiPageSize); + OSCachedMemCopyWMB(pcReadBuffer, pvKernAddr, uiMappedSize); eError = PMRReleaseKernelMappingData(psPMR, hPrivData); PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); @@ -555,8 +829,8 @@ MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags uiPageSize = OSGetPageSize(); /* Allocate PMR with READ | 
WRITE | WRITE_COMBINE attributes */ - uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ - PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; /*Allocate a PMR from given physical heap */ @@ -641,11 +915,23 @@ PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, - "%s: PMR validation test failed!", - __func__)); + "%s: PMR Contiguous PhysHeap self test failed! %"PVRSRV_MEMALLOCFLAGS_FMTSPEC, + __func__, + uiFlags)); + return eError; + } + + eError = PMRContiguousSparseMappingTest(psDeviceNode, uiFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR Non-contiguous PhysHeap self test failed! %"PVRSRV_MEMALLOCFLAGS_FMTSPEC, + __func__, + uiFlags)); return eError; } + for (i = 0; i < ui32Passes; i++) { /* Mem test */ @@ -663,46 +949,44 @@ PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, } PVRSRV_ERROR -PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses) +PhysMemTest(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvDevConfig, IMG_UINT32 ui32MemTestPasses) { - PVRSRV_DEVICE_NODE *psDeviceNode; PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig; - PVRSRV_ERROR eError; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; /* validate memtest passes requested */ ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses; - /* Do minimal initialisation before test */ - eError = PhysMemTestInit(&psDeviceNode, psDevConfig); - if (eError != PVRSRV_OK) + for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) { - PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__)); - return eError; + PHYS_HEAP_CONFIG *psHeapConfig = &psDevConfig->pasPhysHeaps[i]; + + if (psHeapConfig->ui32UsageFlags & PHYS_HEAP_USAGE_GPU_LOCAL) + { + /* GPU local mem (should be only up to 1 heap) */ + eError = PhysMemTestRun(psDeviceNode, PHYS_HEAP_USAGE_GPU_LOCAL, ui32MemTestPasses); + PVR_LOG_GOTO_IF_ERROR(eError, "GPU local memory test failed!", ErrorPhysMemTestEnd); + } + + if (psHeapConfig->ui32UsageFlags & PHYS_HEAP_USAGE_CPU_LOCAL) + { + /* CPU local mem (should be only up to 1 heap) */ + eError = PhysMemTestRun(psDeviceNode, PHYS_HEAP_USAGE_CPU_LOCAL, ui32MemTestPasses); + PVR_LOG_GOTO_IF_ERROR(eError, "CPU local memory test failed!", ErrorPhysMemTestEnd); + } } - /* GPU local mem */ - eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses); + +ErrorPhysMemTestEnd: if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!")); - goto ErrorPhysMemTestDeinit; + PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed.")); } - - /* CPU local mem */ - eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL), ui32MemTestPasses); - if (eError != PVRSRV_OK) + else { - PVR_DPF((PVR_DBG_ERROR, "CPU local memory test failed!")); - goto ErrorPhysMemTestDeinit; + PVR_LOG(("PhysMemTest: Passed.")); } - PVR_LOG(("PhysMemTest: Passed.")); - goto PhysMemTestPassed; - -ErrorPhysMemTestDeinit: - PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed.")); -PhysMemTestPassed: - PhysMemTestDeInit(psDeviceNode); - return eError; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.h index 684c729d0e51..75795afe9cab 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.h +++ 
b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/physmem_test.h @@ -42,10 +42,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef SRVSRV_PHYSMEM_TEST_H #define SRVSRV_PHYSMEM_TEST_H + +#include "device.h" + /* * PhysMemTest */ PVRSRV_ERROR -PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses); +PhysMemTest(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvDevConfig, IMG_UINT32 ui32MemTestPasses); #endif /* SRVSRV_PHYSMEM_TEST_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.c new file mode 100644 index 000000000000..e0c74b4b508d --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.c @@ -0,0 +1,83 @@ +/*************************************************************************/ /*! +@File +@Title Linux environment PMR functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "img_defs.h" + +#include "pmr_env.h" +#include "physmem_dmabuf_internal.h" + +void +PMREnvInitialize(PMR_ENV *psPMREnv) +{ + memset(psPMREnv, 0, sizeof(*psPMREnv)); +} + +void +PMREnvFinalize(PMR_ENV *psPMREnv) +{ + void *pvDmaBufExportData; + + pvDmaBufExportData = smp_load_acquire(&psPMREnv->pvDmaBufExportData); + if (pvDmaBufExportData) + PhysmemDmaBufExportFinalize(pvDmaBufExportData); +} + +void +PMREnvDmaBufSetExportData(PMR *psPMR, void *pvData) +{ + PMR_ENV *psPMREnv = PMREnvGetData(psPMR); + + smp_store_release(&psPMREnv->pvDmaBufExportData, pvData); +} + +void * +PMREnvDmaBufGetExportData(PMR *psPMR) +{ + PMR_ENV *psPMREnv = PMREnvGetData(psPMR); + void *pvDmaBufExportData; + + pvDmaBufExportData = smp_load_acquire(&psPMREnv->pvDmaBufExportData); + + return pvDmaBufExportData; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.h new file mode 100644 index 000000000000..52a172e80b82 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_env.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File +@Title Environment PMR functions and data +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Environment specific PMR functions and data +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(PMR_ENV_H) +#define PMR_ENV_H + +#include "pvrsrv_memallocflags.h" +#include "powervr/mem_types.h" +#include "pmr_impl.h" + +/* Structures, functions and definitions shared with the generic PMR code */ +struct _PMR_ENV_ +{ + /* + * The smp_store_release and smp_load_acquire functions are used + * to access this field, in order to avoid taking locks, where + * possible. + */ + void *pvDmaBufExportData; +}; + +typedef struct _PMR_ENV_ PMR_ENV; + +#define DECLARE_PMR_ENV_DATA(name) PMR_ENV name; + +/* The generic PMR code calls these functions, via the macros that follow */ +void PMREnvInitialize(PMR_ENV *psPMREnv); +void PMREnvFinalize(PMR_ENV *psPMREnv); + +#define PMR_ENV_INITIALIZE(psPMR, sEnvData) PMREnvInitialize(&psPMR->sEnvData) +#define PMR_ENV_FINALIZE(psPMR, sEnvData) PMREnvFinalize(&psPMR->sEnvData) + +/* The generic PMR code provides this function, using the macro that follows */ +PMR_ENV *PMREnvGetData(PMR *psPMR); + +#define DEFINE_PMR_ENV_GET_DATA(psPMR, sEnvData) \ + PMR_ENV *PMREnvGetData(PMR *psPMR) { return &psPMR->sEnvData; } + +/* + * Structures, functions and definitions used by the environment specific + * PMR code. + */ + +void +PMREnvDmaBufSetExportData(PMR *psPMR, void *pvData); + +void * +PMREnvDmaBufGetExportData(PMR *psPMR); + +#endif /* !defined(PMR_ENV_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_os.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_os.c index 7cf4df444089..63645c930dba 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_os.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pmr_os.c @@ -45,10 +45,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #include #include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) #include #include -#endif #include "img_defs.h" #include "pvr_debug.h" @@ -56,6 +54,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "devicemem_server_utils.h" #include "pmr.h" #include "pmr_os.h" +#include "physmem_osmem_linux.h" #if defined(PVRSRV_ENABLE_PROCESS_STATS) #include "process_stats.h" @@ -67,6 +66,81 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "kernel_compatibility.h" + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION_DEBUG) +#define MIGRATE_DBG_LOG(x) PVR_DPF(x); +#else +#define MIGRATE_DBG_LOG(...) 
+#endif + +typedef struct _PMR_OS_CPU_MAPPING_ +{ + DLLIST_NODE sListNode; + struct vm_area_struct *ps_vma; +} PMR_OS_CPU_MAPPING; + + +static PVRSRV_ERROR +PMROSLinkCPUMapping(PMR *psPMR, struct vm_area_struct *psVMArea) +{ + PMR_OS_CPU_MAPPING *psCPUMapping; + DLLIST_NODE *psListHead; + + PMRLockHeldAssert(psPMR); + + psCPUMapping = OSAllocMem(sizeof(*psCPUMapping)); + PVR_RETURN_IF_NOMEM(psCPUMapping); + + psCPUMapping->ps_vma = psVMArea; + + psListHead = LinuxOSGetCPUMappingPrivateDataList(psPMR); + PVR_ASSERT(psListHead); + + dllist_add_to_head(psListHead, &psCPUMapping->sListNode); + + return PVRSRV_OK; +} + +static void +PMROSUnlinkCPUMapping(PMR *psPMR, struct vm_area_struct *psTargetVMArea) +{ + PDLLIST_NODE pNext, pNode; + DLLIST_NODE *psListHead; + PMR_OS_CPU_MAPPING *psExtractedMapping = NULL; + + PMRLockHeldAssert(psPMR); + + psListHead = LinuxOSGetCPUMappingPrivateDataList(psPMR); + PVR_ASSERT(psListHead); + + dllist_foreach_node(psListHead, pNode, pNext) + { + PMR_OS_CPU_MAPPING *psCheckMapping = IMG_CONTAINER_OF(pNode, + PMR_OS_CPU_MAPPING, + sListNode); + if (psTargetVMArea == psCheckMapping->ps_vma) + { + psExtractedMapping = psCheckMapping; + break; + } + } + + PVR_ASSERT(psExtractedMapping != NULL); + + if (psExtractedMapping) + { + dllist_remove_node(&psExtractedMapping->sListNode); + OSFreeMem(psExtractedMapping); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unable to find vm_area_struct", __func__)); + } +} +#endif + /* * x86_32: * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM @@ -94,8 +168,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. static void MMapPMROpen(struct vm_area_struct *ps_vma) { PMR *psPMR = ps_vma->vm_private_data; + PVRSRV_ERROR eError; - /* Our VM flags should ensure this function never gets called */ + /* VM_DONTCOPY should ensure this function never gets called */ PVR_DPF((PVR_DBG_WARNING, "%s: Unexpected mmap open call, this is probably an application bug.", __func__)); @@ -107,20 +182,42 @@ static void MMapPMROpen(struct vm_area_struct *ps_vma) ps_vma->vm_end - ps_vma->vm_start, psPMR)); - /* In case we get called anyway let's do things right by increasing the refcount and - * locking down the physical addresses. */ - PMRRefPMR(psPMR); + /* Should this entry-point be called for one of our PMRs we must increase + * the refcount and lock down the physical addresses. + */ + eError = PMRRefPMR(psPMR); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PMRRefPMR"); - if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__)); - PMRUnrefPMR(psPMR); - } + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrUnref); + + /* MMapPMROpen() is called when a process is forked, but only if + * mappings are to be inherited - so increment mapping count of the + * PMR to prevent its layout being changed (if sparse). 
+ */ + PMRClientCpuMapCountIncr(psPMR); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRLockPMR(psPMR); + eError = PMROSLinkCPUMapping(psPMR, ps_vma); + PMRUnlockPMR(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMROSLinkCPUMapping", ErrUnlock); +#endif + + return; + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +ErrUnlock: + PMRUnlockSysPhysAddresses(psPMR); +#endif +ErrUnref: + (void) PMRUnrefPMR(psPMR); } static void MMapPMRClose(struct vm_area_struct *ps_vma) { PMR *psPMR = ps_vma->vm_private_data; + PVRSRV_ERROR eError; #if defined(PVRSRV_ENABLE_PROCESS_STATS) #if defined(PVRSRV_ENABLE_MEMORY_STATS) @@ -143,8 +240,17 @@ static void MMapPMRClose(struct vm_area_struct *ps_vma) #endif #endif +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + PMRLockPMR(psPMR); + PMROSUnlinkCPUMapping(psPMR, ps_vma); + PMRUnlockPMR(psPMR); +#endif + PMRUnlockSysPhysAddresses(psPMR); - PMRUnrefPMR(psPMR); + /* Decrement the mapping count before Unref of PMR (as Unref could destroy the PMR) */ + PMRClientCpuMapCountDecr(psPMR); + eError = PMRUnrefPMR(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnrefPMR"); } /* @@ -193,13 +299,6 @@ static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr, return iRetVal; } -static const struct vm_operations_struct gsMMapOps = -{ - .open = &MMapPMROpen, - .close = &MMapPMRClose, - .access = MMapVAccess, -}; - static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, struct vm_area_struct *ps_vma, IMG_DEVMEM_OFFSET_T uiOffset, @@ -209,18 +308,9 @@ static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, IMG_BOOL bUseMixedMap) { IMG_INT32 iStatus; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) pfn_t sPFN; -#else - unsigned long uiPFN; -#endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0); -#else - uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT; - PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr); -#endif /* * vm_insert_page() allows insertion of individual pages into user @@ -251,11 +341,7 @@ static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, #else iStatus = vm_insert_mixed(ps_vma, ps_vma->vm_start + uiOffset, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) sPFN); -#else - uiPFN); -#endif #endif } else @@ -263,11 +349,7 @@ static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, /* Since kernel 3.7 this sets VM_MIXEDMAP internally */ iStatus = vm_insert_page(ps_vma, ps_vma->vm_start + uiOffset, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) pfn_t_to_page(sPFN)); -#else - pfn_to_page(uiPFN)); -#endif } } else @@ -312,11 +394,7 @@ static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, iStatus = remap_pfn_range(ps_vma, ps_vma->vm_start + uiOffset, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) pfn_t_to_pfn(sPFN), -#else - uiPFN, -#endif uiNumContiguousBytes, ps_vma->vm_page_prot); } @@ -324,6 +402,92 @@ static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, return iStatus; } +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +static vm_fault_t MMapPMRFault(struct vm_fault *ps_vmf) +{ + PVRSRV_ERROR eError; + vm_fault_t iFault; + PMR *psPMR = ps_vmf->vma->vm_private_data; + + IMG_CPU_PHYADDR sCpuPAddr; + IMG_BOOL bValid; + IMG_UINT64 uiAddrOffsetBytes = ps_vmf->address - ps_vmf->vma->vm_start; + unsigned long ulPFN; + + PVR_ASSERT(psPMR != NULL); + + if (!PVRSRV_CHECK_OS_LINUX_MOVABLE(PMR_Flags(psPMR))) + { + PVR_DPF((PVR_DBG_WARNING, "Attempt to fault non movable PMR.")); + return VM_FAULT_SIGBUS; + } + + MIGRATE_DBG_LOG((PVR_DBG_ERROR, "Fault offset: %llu, addr 0x%llx, 
vm_start 0x%llx, addr offset b 0x%llx, PMR 0x%llx", + (unsigned long long) ps_vmf->pgoff, + (unsigned long long) ps_vmf->address, + (unsigned long long) ps_vmf->address - ps_vmf->vma->vm_start, + (unsigned long long) uiAddrOffsetBytes, + (unsigned long long) psPMR + )); + + PMRLockPMR(psPMR); + + /* Obtain map range pfns */ + eError = PMR_CpuPhysAddr(psPMR, + PAGE_SHIFT, + 1, + uiAddrOffsetBytes, + &sCpuPAddr, + &bValid, + MAPPING_USE | CPU_USE); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", ErrSigFaultReturn); + PVR_LOG_GOTO_IF_FALSE(bValid, "Attempted remap with invalid PMR offset", ErrSigFaultReturn); + + ulPFN = pfn_t_to_pfn(phys_to_pfn_t(sCpuPAddr.uiAddr, 0)); + + iFault = vmf_insert_pfn_prot(ps_vmf->vma, + ps_vmf->address, + ulPFN, + ps_vmf->vma->vm_page_prot); + + if (iFault != VM_FAULT_NOPAGE) + { + PVR_DPF((PVR_DBG_ERROR, "vmf_insert_pfn_prot failed in MMapPMRFault")); + goto ErrReturn; + } + + + PMRUnlockPMR(psPMR); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + (void*)(uintptr_t)(ps_vmf->vma->vm_start + uiAddrOffsetBytes), + sCpuPAddr, + PAGE_SIZE, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_VALUES); +#endif + + return iFault; + +ErrSigFaultReturn: + iFault = VM_FAULT_SIGBUS; +ErrReturn: + PMRUnlockPMR(psPMR); + return iFault; +} +#endif + +static const struct vm_operations_struct gsMMapOps = +{ + .open = &MMapPMROpen, + .close = &MMapPMRClose, + .access = &MMapVAccess, +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + .fault = &MMapPMRFault +#endif +}; + PVRSRV_ERROR OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) { @@ -344,25 +508,56 @@ OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) IMG_BOOL *pbValid; IMG_BOOL bUseMixedMap = IMG_FALSE; IMG_BOOL bUseVMInsertPage = IMG_FALSE; + vm_flags_t uVMFlags = ps_vma->vm_flags; /* if writeable but not shared mapping is requested then fail */ - PVR_RETURN_IF_INVALID_PARAM(((ps_vma->vm_flags & VM_WRITE) == 0) || - ((ps_vma->vm_flags & VM_SHARED) != 0)); + PVR_RETURN_IF_INVALID_PARAM(((uVMFlags & VM_WRITE) == 0) || + ((uVMFlags & VM_SHARED) != 0)); - eError = PMRLockSysPhysAddresses(psPMR); - if (eError != PVRSRV_OK) + uiLength = ps_vma->vm_end - ps_vma->vm_start; + + /* Check early if the requested mapping size doesn't exceed the virtual + * PMR size. */ + if (PMR_LogicalSize(psPMR) < uiLength) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BAD_MAPPING, ErrReturn); + } + + uiLog2PageSize = PMR_GetLog2Contiguity(psPMR); + + /* Check the number of PFNs to be mapped is valid. */ + uiNumOfPFNs = uiLength >> uiLog2PageSize; + if (uiNumOfPFNs == 0) { - goto e0; + /* print as 64-bit value to avoid Smatch warning */ + PVR_LOG_VA(PVR_DBG_ERROR, + "uiLength is invalid. Must be >= %" IMG_UINT64_FMTSPEC ".", + IMG_UINT64_C(1) << uiLog2PageSize); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BAD_MAPPING, ErrReturn); } - sPageProt = vm_get_page_prot(ps_vma->vm_flags); + /* + * Take a reference on the PMR so that it can't be freed while mapped + * into the user process. + */ + eError = PMRRefPMR(psPMR); + PVR_GOTO_IF_ERROR(eError, ErrReturn); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, ErrUnrefPMR); + + /* Increment mapping count of the PMR so that its layout cannot be + * changed (if sparse). 
+ */ + PMRLockPMR(psPMR); + + PMRClientCpuMapCountIncr(psPMR); + sPageProt = vm_get_page_prot(uVMFlags); - eError = DevmemCPUCacheMode(psDevNode, - PMR_Flags(psPMR), - &ui32CPUCacheFlags); + eError = DevmemCPUCacheMode(PMR_Flags(psPMR), &ui32CPUCacheFlags); if (eError != PVRSRV_OK) { - goto e0; + goto ErrUnlockPhysAddr; } switch (ui32CPUCacheFlags) @@ -389,54 +584,39 @@ OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) default: eError = PVRSRV_ERROR_INVALID_PARAMS; - goto e1; + goto ErrUnlockPhysAddr; } ps_vma->vm_page_prot = sPageProt; - vm_flags_set(ps_vma, VM_IO); + uVMFlags |= VM_IO; /* Don't include the mapping in core dumps */ - vm_flags_set(ps_vma, VM_DONTDUMP); + uVMFlags |= VM_DONTDUMP; /* * Disable mremap because our nopage handler assumes all * page requests have already been validated. */ - vm_flags_set(ps_vma, VM_DONTEXPAND); - + uVMFlags |= VM_DONTEXPAND; /* Don't allow mapping to be inherited across a process fork */ - vm_flags_set(ps_vma, VM_DONTCOPY); - - uiLength = ps_vma->vm_end - ps_vma->vm_start; + uVMFlags |= VM_DONTCOPY; +#if defined(PMR_OS_USE_VM_INSERT_PAGE) /* Is this mmap targeting non order-zero pages or does it use pfn mappings? * If yes, don't use vm_insert_page */ - uiLog2PageSize = PMR_GetLog2Contiguity(psPMR); - -#if defined(PMR_OS_USE_VM_INSERT_PAGE) bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM); #endif /* Can we use stack allocations */ - uiNumOfPFNs = uiLength >> uiLog2PageSize; if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC) { psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr)); - if (psCpuPAddr == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e1; - } + PVR_LOG_GOTO_IF_NOMEM(psCpuPAddr, eError, ErrUnlockPhysAddr); /* Should allocation fail, clean-up here before exiting */ pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid)); - if (pbValid == NULL) - { - eError = PVRSRV_ERROR_OUT_OF_MEMORY; - OSFreeMem(psCpuPAddr); - goto e2; - } + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrFreeCpuPAddr); } else { @@ -450,10 +630,11 @@ OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) uiNumOfPFNs, 0, psCpuPAddr, - pbValid); + pbValid, + CPU_USE | MAPPING_USE); if (eError != PVRSRV_OK) { - goto e3; + goto ErrFreeValid; } /* @@ -464,26 +645,15 @@ OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) */ if (bUseVMInsertPage) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) pfn_t sPFN; -#else - unsigned long uiPFN; -#endif for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx) { if (pbValid[uiOffsetIdx]) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0); if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) -#else - uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT; - PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr); - - if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0) -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ { bUseMixedMap = IMG_TRUE; break; @@ -493,14 +663,17 @@ OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) if (bUseMixedMap) { - vm_flags_set(ps_vma, VM_MIXEDMAP); + uVMFlags |= VM_MIXEDMAP; } } else { - vm_flags_set(ps_vma, VM_PFNMAP); + uVMFlags |= VM_PFNMAP; } + /* Actually initialise the flags */ + pvr_vm_flags_init(ps_vma, uVMFlags); + /* For each PMR page-size contiguous bytes, map page(s) into user VMA */ for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<vm_ops = &gsMMapOps; - /* - * Take a reference on the PMR so that it 
can't be freed while mapped - * into the user process. - */ - PMRRefPMR(psPMR); - #if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) /* record the stats */ MMapStatsAddOrUpdatePMR(psPMR, uiLength); #endif +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + eError = PMROSLinkCPUMapping(psPMR, ps_vma); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLinkCpuMapping", ErrUnlockPhysAddr); +#endif + + PMRUnlockPMR(psPMR); return PVRSRV_OK; /* Error exit paths follow */ -e3: +ErrFreeValid: if (pbValid != abValid) { OSFreeMem(pbValid); } -e2: +ErrFreeCpuPAddr: if (psCpuPAddr != asCpuPAddr) { OSFreeMem(psCpuPAddr); } -e1: +ErrUnlockPhysAddr: + PMRClientCpuMapCountDecr(psPMR); + PMRUnlockPMR(psPMR); PMRUnlockSysPhysAddresses(psPMR); -e0: +ErrUnrefPMR: + (void) PMRUnrefPMR(psPMR); +ErrReturn: return eError; } + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +static void +OSLinuxUnmapPageInCPUMapping(PMR *psPMR, struct vm_area_struct *ps_vma, IMG_UINT32 ui32LogicalPgOffset) +{ + IMG_UINT64 ui64OffsetBytes = IMG_PAGES2BYTES64(ui32LogicalPgOffset, PAGE_SHIFT); + + MIGRATE_DBG_LOG((PVR_DBG_ERROR, "CPU PTE Zap, PMR 0x%llx, Offset %u, OffsetB 0x%llx, vm_start 0x%llx, vm_addr 0x%llx", + (unsigned long long) psPMR, + ui32LogicalPgOffset, + (unsigned long long) ui64OffsetBytes, + (unsigned long long) ps_vma->vm_start, + (unsigned long long) ps_vma->vm_start + ui64OffsetBytes)); + + zap_vma_ptes(ps_vma, ps_vma->vm_start + ui64OffsetBytes, PAGE_SIZE); + + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + (IMG_UINT64)ps_vma->vm_start + ui64OffsetBytes, + OSGetCurrentClientProcessIDKM()); +#endif +} + +void +OSLinuxPMRUnmapPageInPMR(PMR *psPMR, DLLIST_NODE *psMappingListHead, IMG_UINT32 ui32LogicalPgOffset) +{ + DLLIST_NODE *psNext, *psNode; + + PMRLockPMR(psPMR); + + dllist_foreach_node(psMappingListHead, psNode, psNext) + { + PMR_OS_CPU_MAPPING *psCPUMapping = IMG_CONTAINER_OF(psNode, + PMR_OS_CPU_MAPPING, + sListNode); + + OSLinuxUnmapPageInCPUMapping(psPMR, psCPUMapping->ps_vma, ui32LogicalPgOffset); + } + + PMRUnlockPMR(psPMR); + +} +#endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.c index f7af259fc52d..77739189b0aa 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.c @@ -79,6 +79,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
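Before the pvr_bridge_k.c hunks below, a short aside on the migration plumbing added to pmr_os.c above: each PMR keeps a list of the user VMAs that currently map it, so that when a backing page moves the stale CPU PTEs can be zapped and repopulated through the new .fault handler. A reduced sketch of that bookkeeping, using the stock list_head API rather than the driver's DLLIST (names are illustrative; callers are assumed to hold the buffer's lock):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct buf_cpu_mapping {
    struct list_head node;
    struct vm_area_struct *vma;
};

/* Record a new user mapping of the buffer (e.g. from mmap or fork). */
static int buf_track_mapping(struct list_head *mappings, struct vm_area_struct *vma)
{
    struct buf_cpu_mapping *m = kmalloc(sizeof(*m), GFP_KERNEL);

    if (!m)
        return -ENOMEM;
    m->vma = vma;
    list_add(&m->node, mappings);
    return 0;
}

/* After a logical page has been migrated, zap the corresponding PTE in
 * every tracked VMA so the next CPU access re-faults onto the new page. */
static void buf_zap_page(struct list_head *mappings, unsigned long page_index)
{
    struct buf_cpu_mapping *m;

    list_for_each_entry(m, mappings, node)
        zap_vma_ptes(m->vma, m->vma->vm_start + (page_index << PAGE_SHIFT),
                     PAGE_SIZE);
}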
#include "srvcore.h" #include "common_srvcore_bridge.h" +#include "kernel_compatibility.h" PVRSRV_ERROR InitDMABUFBridge(void); void DeinitDMABUFBridge(void); @@ -106,6 +107,7 @@ static ATOMIC_T g_iNumActiveDriverThreads; static ATOMIC_T g_iNumActiveKernelThreads; static IMG_HANDLE g_hDriverThreadEventObject; + #if defined(DEBUG_BRIDGE_KM) static DI_ENTRY *gpsDIBridgeStatsEntry; @@ -235,6 +237,17 @@ static IMG_INT64 BridgeStatsWrite(const IMG_CHAR *pcBuffer, #endif /* defined(DEBUG_BRIDGE_KM) */ +PVRSRV_ERROR LinuxGetThreadActivityStats(LINUX_THREAD_ACTIVITY_STATS *psThreadStats) +{ + PVR_RETURN_IF_FALSE(psThreadStats != NULL, PVRSRV_ERROR_INVALID_PARAMS); + + psThreadStats->i32KernelThreadCount = OSAtomicRead(&g_iNumActiveKernelThreads); + psThreadStats->i32DriverThreadCount = OSAtomicRead(&g_iNumActiveDriverThreads); + psThreadStats->i32SuspendedThreadCount = OSAtomicRead(&g_iDriverSuspendCount); + + return PVRSRV_OK; +} + PVRSRV_ERROR OSPlatformBridgeInit(void) { PVRSRV_ERROR eError; @@ -248,7 +261,7 @@ PVRSRV_ERROR OSPlatformBridgeInit(void) eError = OSEventObjectCreate("Global driver thread event object", &g_hDriverThreadEventObject); - PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", EventCreateError); #if defined(DEBUG_BRIDGE_KM) { @@ -267,17 +280,18 @@ PVRSRV_ERROR OSPlatformBridgeInit(void) &g_BridgeDispatchTable[0], DI_ENTRY_TYPE_GENERIC, &gpsDIBridgeStatsEntry); - PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error_); + PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", DIEntryCreateError); } #endif return PVRSRV_OK; -error_: - if (g_hDriverThreadEventObject) { - OSEventObjectDestroy(g_hDriverThreadEventObject); - g_hDriverThreadEventObject = NULL; - } +#if defined(DEBUG_BRIDGE_KM) +DIEntryCreateError: +#endif +EventCreateError: + OSEventObjectDestroy(g_hDriverThreadEventObject); + g_hDriverThreadEventObject = NULL; return eError; } @@ -285,18 +299,13 @@ PVRSRV_ERROR OSPlatformBridgeInit(void) void OSPlatformBridgeDeInit(void) { #if defined(DEBUG_BRIDGE_KM) - if (gpsDIBridgeStatsEntry != NULL) - { - DIDestroyEntry(gpsDIBridgeStatsEntry); - } + DIDestroyEntry(gpsDIBridgeStatsEntry); #endif - DeinitDMABUFBridge(); + OSEventObjectDestroy(g_hDriverThreadEventObject); + g_hDriverThreadEventObject = NULL; - if (g_hDriverThreadEventObject != NULL) { - OSEventObjectDestroy(g_hDriverThreadEventObject); - g_hDriverThreadEventObject = NULL; - } + DeinitDMABUFBridge(); } PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv, @@ -304,7 +313,7 @@ PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv, { PVRSRV_ERROR eError; IMG_HANDLE hEvent; - IMG_INT iSuspendCount; + __maybe_unused IMG_INT iSuspendCount; eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); if (eError != PVRSRV_OK) @@ -358,7 +367,7 @@ PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv, PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(struct pvr_drm_private *psDevPriv) { PVRSRV_ERROR eError; - IMG_INT iSuspendCount; + __maybe_unused IMG_INT iSuspendCount; /* resume the driver and then signal so any waiting threads wake up */ if (OSAtomicCompareExchange(&psDevPriv->suspended, _SUSPENDED, @@ -441,9 +450,21 @@ static PVRSRV_ERROR _WaitForDriverUnsuspend(void) return PVRSRV_OK; } -PVRSRV_ERROR PVRSRVDriverThreadEnter(void) +PVRSRV_ERROR PVRSRVDriverThreadEnter(void *pvData) { PVRSRV_ERROR eError; + CONNECTION_DATA *psConnection = (CONNECTION_DATA *)pvData; + + /* Block if the associated 
device has been placed into a FROZEN state. + * In this case we must await a PVRSRVDeviceThaw() completion request. + * Device is obtained from the incoming psConnection if pvData is non-NULL. + */ + if (likely(pvData != NULL)) + { + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); + PVRSRVBlockIfFrozen(psDevNode); + OSAtomicIncrement(&psDevNode->iThreadsActive); + } /* increment first so there is no race between this value and * g_iDriverSuspendCount in LinuxBridgeBlockClientsAccess() */ @@ -484,9 +505,19 @@ PVRSRV_ERROR PVRSRVDriverThreadEnter(void) return PVRSRV_OK; } -void PVRSRVDriverThreadExit(void) +void PVRSRVDriverThreadExit(void *pvData) { + CONNECTION_DATA *psConnection = (CONNECTION_DATA *)pvData; OSAtomicDecrement(&g_iNumActiveDriverThreads); + + /* Decrement the number of threads active on this device if the + * connection is known. + */ + if (psConnection != NULL) + { + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); + OSAtomicDecrement(&psDevNode->iThreadsActive); + } /* if the driver is being suspended then we need to signal the * event object as the thread suspending the driver is waiting * for active threads to exit @@ -517,7 +548,7 @@ PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct psSrvkmCmd->bridge_id, psSrvkmCmd->bridge_func_id); - error = PVRSRVDriverThreadEnter(); + error = PVRSRVDriverThreadEnter(psConnection); PVR_LOG_GOTO_IF_ERROR(error, "PVRSRVDriverThreadEnter", e0); sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id; @@ -531,7 +562,7 @@ PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct error = BridgedDispatchKM(psConnection, &sBridgePackageKM); e0: - PVRSRVDriverThreadExit(); + PVRSRVDriverThreadExit(psConnection); return OSPVRSRVToNativeError(error); } @@ -553,7 +584,7 @@ PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma) return -ENOENT; } - eError = PVRSRVDriverThreadEnter(); + eError = PVRSRVDriverThreadEnter(psConnection); PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDriverThreadEnter", e0); /* @@ -574,6 +605,16 @@ PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma) } mutex_lock(&g_sMMapMutex); + + /* Forcibly clear the VM_MAYWRITE flag as this is inherited from the + * kernel mmap code and we do not want to produce a potentially writable + * mapping from a read-only mapping. + */ + if (!BITMASK_HAS(ps_vma->vm_flags, VM_WRITE)) + { + pvr_vm_flags_clear(ps_vma, VM_MAYWRITE); + } + /* Note: PMRMMapPMR will take a reference on the PMR. * Unref the handle immediately, because we have now done * the required operation on the PMR (whether it succeeded or not) @@ -588,12 +629,12 @@ PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma) goto e0; } - PVRSRVDriverThreadExit(); + PVRSRVDriverThreadExit(psConnection); return 0; e0: - PVRSRVDriverThreadExit(); + PVRSRVDriverThreadExit(psConnection); PVR_DPF((PVR_DBG_ERROR, "Failed with error: %s", PVRSRVGetErrorString(eError))); PVR_ASSERT(eError != PVRSRV_OK); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.h index 8022c2f83431..2885dd465eaf 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_bridge_k.h @@ -48,6 +48,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
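A note on the thread accounting introduced above: PVRSRVDriverThreadEnter/Exit now take the connection so a per-device active-thread counter can be maintained alongside the global one, which is what allows the freeze and suspend paths to wait for a device to drain. The pattern reduces to a few atomic operations; a hedged sketch with illustrative names only (not the driver's API):

#include <linux/atomic.h>
#include <linux/types.h>

struct dev_thread_accounting {
    atomic_t active;    /* threads currently executing inside the driver */
};

static void dev_thread_enter(struct dev_thread_accounting *acct)
{
    /* Increment before any suspend-state check so a concurrent suspend
     * either observes the raised count or the blocked thread, never
     * neither (same ordering concern as the comment in the hunk above). */
    atomic_inc(&acct->active);
}

static void dev_thread_exit(struct dev_thread_accounting *acct)
{
    atomic_dec(&acct->active);
}

static bool dev_has_active_threads(struct dev_thread_accounting *acct)
{
    return atomic_read(&acct->active) != 0;
}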
#include "pvrsrv_error.h" #include "pvr_drv.h" +typedef struct LINUX_THREAD_ACTIVITY_STATS +{ + IMG_INT32 i32KernelThreadCount; + IMG_INT32 i32DriverThreadCount; + IMG_INT32 i32SuspendedThreadCount; +} LINUX_THREAD_ACTIVITY_STATS; + /*! ****************************************************************************** @Function LinuxBridgeBlockClientsAccess @@ -79,6 +86,17 @@ PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(struct pvr_drm_private *psDevPriv); void LinuxBridgeNumActiveKernelThreadsIncrement(void); void LinuxBridgeNumActiveKernelThreadsDecrement(void); +/*************************************************************************/ /*! + @Function LinuxGetThreadActivityStats + @Description Getter for active and suspended thread stats. + + @Output psThreadStats Struct to be populated with thread activity + stats. + + @Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR LinuxGetThreadActivityStats(LINUX_THREAD_ACTIVITY_STATS *psThreadStats); + /*! ****************************************************************************** @Function PVRSRVDriverThreadEnter @@ -88,9 +106,11 @@ void LinuxBridgeNumActiveKernelThreadsDecrement(void); will call try_to_freeze() on behalf of the client thread. When the driver is resumed the function will exit and allow the thread into the driver. + @Input Reference to Connection data. NULL if no associated + connection / device. @Return PVRSRV_ERROR ******************************************************************************/ -PVRSRV_ERROR PVRSRVDriverThreadEnter(void); +PVRSRV_ERROR PVRSRVDriverThreadEnter(void *pvData); /*! ****************************************************************************** @@ -101,7 +121,9 @@ PVRSRV_ERROR PVRSRVDriverThreadEnter(void); The function also signals the driver that a thread left the driver context so if it's waiting to suspend it knows that the number of threads decreased. + @Input Reference to Connection data. NULL if no associated + connection / device. 
******************************************************************************/ -void PVRSRVDriverThreadExit(void); +void PVRSRVDriverThreadExit(void *pvData); #endif /* PVR_BRIDGE_K_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c index 5d9ec73376d2..529cafd2bcf3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c @@ -77,13 +77,7 @@ struct pvr_buffer_sync_append_data { static struct dma_resv * pmr_reservation_object_get(struct _PMR_ *pmr) { - struct dma_buf *dmabuf; - - dmabuf = PhysmemGetDmaBuf(pmr); - if (dmabuf) - return dmabuf->resv; - - return NULL; + return PhysmemGetDmaResv(pmr); } static int @@ -300,7 +294,7 @@ pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx, } } - WARN_ON((i != nr_pmrs)); + WARN_ON(i != nr_pmrs); return data; @@ -677,8 +671,8 @@ pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data) data->update_fence->name, resv); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) dma_resv_add_fence(resv, - &data->update_fence->base, - DMA_RESV_USAGE_WRITE); + &data->update_fence->base, + DMA_RESV_USAGE_WRITE); #else dma_resv_add_excl_fence(resv, &data->update_fence->base); @@ -689,8 +683,8 @@ pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data) data->update_fence->name, resv); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) dma_resv_add_fence(resv, - &data->update_fence->base, - DMA_RESV_USAGE_READ); + &data->update_fence->base, + DMA_RESV_USAGE_READ); #else dma_resv_add_shared_fence(resv, &data->update_fence->base); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_counting_timeline.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_counting_timeline.c index 3fa890316dcc..240872e0b0fd 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_counting_timeline.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_counting_timeline.c @@ -145,6 +145,12 @@ struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( if (!timeline->context) goto err_free_timeline; + timeline->current_value = 0; + timeline->next_value = 1; + kref_init(&timeline->kref); + spin_lock_init(&timeline->active_fences_lock); + INIT_LIST_HEAD(&timeline->active_fences); + srv_err = PVRSRVRegisterDriverDbgRequestNotify( &timeline->dbg_request_handle, pvr_counting_fence_timeline_debug_request, @@ -156,12 +162,6 @@ struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( goto err_free_timeline_ctx; } - timeline->current_value = 0; - timeline->next_value = 1; - kref_init(&timeline->kref); - spin_lock_init(&timeline->active_fences_lock); - INIT_LIST_HEAD(&timeline->active_fences); - err_out: return timeline; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debug.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debug.c index 0c9cb650e0cc..cf59a2d5aa3e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debug.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debug.c @@ -42,6 +42,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
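One more aside before the pvr_debug.c hunk: the pvr_buffer_sync.c changes above follow the dma_resv API split at Linux 5.19, where fences are attached with an explicit usage hint instead of the older exclusive/shared calls. A hypothetical compatibility helper showing the same split (the caller is assumed to hold the reservation lock and, on newer kernels, to have already reserved a fence slot with dma_resv_reserve_fences):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/version.h>

/* Attach a fence that represents a write to the buffer. */
static void attach_write_fence(struct dma_resv *resv, struct dma_fence *fence)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
    /* 5.19+: one entry point, usage expressed explicitly. */
    dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
#else
    /* Older kernels: writes go on the exclusive slot. */
    dma_resv_add_excl_fence(resv, fence);
#endif
}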
*/ /**************************************************************************/ #include +#include #include #include "img_types.h" @@ -99,7 +100,7 @@ AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal); - OSStringLCopy(gsDebugCCB[giOffset].pcMesg, szBuffer, + OSStringSafeCopy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX); giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX; @@ -240,22 +241,13 @@ static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space; } -/*************************************************************************/ /*! -@Function PVRSRVReleasePrintf -@Description To output an important message to the user in release builds -@Input pszFormat The message format string -@Input ... Zero or more arguments for use by the format string -*/ /**************************************************************************/ -void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) +void PVRSRVReleasePrintfVArgs(const IMG_CHAR *pszFormat, va_list vaArgs) { - va_list vaArgs; unsigned long ulLockFlags = 0; IMG_CHAR *pszBuf = gszBuffer; IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); IMG_INT32 result; - va_start(vaArgs, pszFormat); - spin_lock_irqsave(&gsDebugLock, ulLockFlags); result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid); @@ -272,6 +264,20 @@ void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) } spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); +} + +/*************************************************************************/ /*! +@Function PVRSRVReleasePrintf +@Description To output an important message to the user in release builds +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
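/* Thin variadic wrapper: packages the caller's arguments into a va_list and
 * forwards them to PVRSRVReleasePrintfVArgs() above, so both printf-style
 * callers and callers that already hold a va_list share one code path. */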
+{ + va_list vaArgs; + + va_start(vaArgs, pszFormat); + PVRSRVReleasePrintfVArgs(pszFormat, vaArgs); va_end(vaArgs); } @@ -372,34 +378,34 @@ void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, { case DBGPRIV_FATAL: { - OSStringLCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz); PVRSRV_REPORT_ERROR(); break; } case DBGPRIV_ERROR: { - OSStringLCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz); PVRSRV_REPORT_ERROR(); break; } case DBGPRIV_WARNING: { - OSStringLCopy(pszBuf, "PVR_K:(Warn): ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K:(Warn): ", ui32BufSiz); break; } case DBGPRIV_MESSAGE: { - OSStringLCopy(pszBuf, "PVR_K:(Mesg): ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K:(Mesg): ", ui32BufSiz); break; } case DBGPRIV_VERBOSE: { - OSStringLCopy(pszBuf, "PVR_K:(Verb): ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K:(Verb): ", ui32BufSiz); break; } case DBGPRIV_DEBUG: { - OSStringLCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz); break; } case DBGPRIV_CALLTRACE: @@ -407,7 +413,7 @@ void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, case DBGPRIV_BUFFERED: default: { - OSStringLCopy(pszBuf, "PVR_K: ", ui32BufSiz); + OSStringSafeCopy(pszBuf, "PVR_K: ", ui32BufSiz); break; } } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debugfs.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debugfs.c index fa6a94c646bd..63c557bc8459 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debugfs.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_debugfs.c @@ -75,7 +75,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define _DRIVER_THREAD_ENTER() \ do { \ - PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \ + PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(NULL); \ if (eLocalError != PVRSRV_OK) \ { \ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ @@ -85,7 +85,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
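The hunk above splits the release-build printf into a va_list core plus a thin variadic wrapper. A minimal, self-contained sketch of the same pattern (generic names and plain libc only, not the driver's API) looks like this:

#include <stdarg.h>
#include <stdio.h>

/* Core routine: formats an already-started va_list. */
static void log_vargs(const char *fmt, va_list args)
{
	vfprintf(stderr, fmt, args);
}

/* Variadic wrapper: starts the va_list, forwards it exactly once, ends it. */
static void log_msg(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	log_vargs(fmt, args);
	va_end(args);
}

The forwarded va_list is consumed by the callee, so the wrapper must not reuse it after the call; it only ends it with va_end().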
} while (0) #define _DRIVER_THREAD_EXIT() \ - PVRSRVDriverThreadExit() + PVRSRVDriverThreadExit(NULL) #define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR @@ -121,14 +121,7 @@ static void _WriteData(void *pvNativeHandle, const void *pvData, static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) seq_vprintf(pvNativeHandle, pszFmt, pArgs); -#else - IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; - - vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); - seq_printf(pvNativeHandle, "%s", szBuffer); -#endif } static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) @@ -139,11 +132,7 @@ static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) static IMG_BOOL _HasOverflowed(void *pvNativeHandle) { struct seq_file *psSeqFile = pvNativeHandle; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) return seq_has_overflowed(psSeqFile); -#else - return psSeqFile->count == psSeqFile->size; -#endif } static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { @@ -419,14 +408,13 @@ static ssize_t _Write(struct file *psFile, const char __user *pszBuffer, PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL", -EIO); + _DRIVER_THREAD_ENTER(); /* Make sure we allocate the smallest amount of needed memory*/ ui64Count = psIter->ui32WriteLenMax; PVR_LOG_GOTO_IF_FALSE(uiCount <= ui64Count, "uiCount too long", return_); ui64Count = MIN(uiCount + 1, ui64Count); - _DRIVER_THREAD_ENTER(); - /* allocate buffer with one additional byte for NUL character */ pcLocalBuffer = OSAllocMem(ui64Count); PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed", diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drm.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drm.c index bd42146ad208..145f40c5b884 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drm.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drm.c @@ -61,6 +61,7 @@ #include #include #include +#include #include "module_common.h" #include "pvr_drm.h" @@ -68,13 +69,23 @@ #include "pvrversion.h" #include "services_kernel_client.h" #include "pvr_sync_ioctl_drm.h" +#include "physmem_dmabuf_internal.h" #include "kernel_compatibility.h" +#include "dkf_server.h" +#include "dkp_impl.h" + #define PVR_DRM_DRIVER_NAME PVR_DRM_NAME #define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM" #define PVR_DRM_DRIVER_DATE "20170530" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#define PVR_DRM_DRIVER_PRIME 0 +#else +#define PVR_DRM_DRIVER_PRIME DRIVER_PRIME +#endif + /* * Protects global PVRSRV_DATA on a multi device system. i.e. this is used to * protect the PVRSRVCommonDeviceXXXX() APIs in the Server common layer which @@ -222,10 +233,45 @@ const struct dev_pm_ops pvr_pm_ops = { .restore = pvr_pm_restore, }; +#if defined(SUPPORT_LINUX_FDINFO) +static void pvr_drm_show_drm_info(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + int pid, void *hPrivHandle) +{ + struct pvr_drm_private *priv; + struct drm_device *pdev; + + pdev = (struct drm_device *)hPrivHandle; + + /* The only field possibly valid in 'priv' is the dev_node field */ + priv = (struct pvr_drm_private *)pdev->dev_private; + + if (priv->dev_node == psDevNode) { + + /* For kernels post 6.5.0 the mandatory driver fields are produced + * by the 'drm_show_fdinfo' routine in the kernel. + * Avoid duplicating this information here. 
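	 * (On those kernels drm_show_fdinfo() emits the common keys itself,
	 *  such as drm-driver, drm-client-id and, for PCI devices, drm-pdev,
	 *  in the format described in Documentation/gpu/drm-usage-stats.rst.)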
+ */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)) + PVRDKPOutput(priv->hDeviceDKPRef, + "drm-driver:\t%s\n", + PVR_DRM_DRIVER_NAME); +#if defined(CONFIG_PCI) + if (dev_is_pci(pdev->dev)) { + struct pci_dev *pcidev = to_pci_dev(pdev->dev); + + PVRDKPOutput(priv->hDeviceDKPRef, + "drm-pdev:\t%04x:%02x:%02x.%d\n", + pci_domain_nr(pcidev->bus), + pcidev->bus->number, + PCI_SLOT(pcidev->devfn), + PCI_FUNC(pcidev->devfn)); + } +#endif /* defined(CONFIG_PCI) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0) */ + } +} +#endif /* SUPPORT_LINUX_FDINFO */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -static -#endif int pvr_drm_load(struct drm_device *ddev, unsigned long flags) { struct pvr_drm_private *priv; @@ -236,18 +282,10 @@ int pvr_drm_load(struct drm_device *ddev, unsigned long flags) dev_set_drvdata(ddev->dev, ddev); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) - /* - * Older kernels do not have render drm_minor member in drm_device, - * so we fallback to primary node for device identification - */ - deviceId = ddev->primary->index; -#else if (ddev->render) deviceId = ddev->render->index; else /* when render node is NULL, fallback to primary node */ deviceId = ddev->primary->index; -#endif priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { @@ -292,12 +330,29 @@ int pvr_drm_load(struct drm_device *ddev, unsigned long flags) } #endif +#if defined(SUPPORT_LINUX_FDINFO) + srv_err = PVRSRVRegisterDKP(ddev, + "drm-pvr-drm", + pvr_drm_show_drm_info, + DKP_CONNECTION_FLAG_ALL, + &priv->hDeviceDKPRef); + + if (srv_err != PVRSRV_OK) { + err = -ENODEV; + DRM_ERROR("device %p initialisation failed (err=%d)\n", + ddev->dev, err); + goto err_device_deinit; + } +#endif + mutex_unlock(&g_device_mutex); return 0; -#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) +#if defined(SUPPORT_LINUX_FDINFO) || (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) err_device_deinit: +#endif +#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) drm_mode_config_cleanup(ddev); PVRSRVDeviceDeinit(priv->dev_node); #endif @@ -312,9 +367,6 @@ int pvr_drm_load(struct drm_device *ddev, unsigned long flags) return err; } -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -static -#endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) int pvr_drm_unload(struct drm_device *ddev) #else @@ -336,6 +388,10 @@ void pvr_drm_unload(struct drm_device *ddev) if (ddev->dev->dma_parms == &priv->dma_parms) ddev->dev->dma_parms = NULL; +#if defined(SUPPORT_LINUX_FDINFO) + PVRSRVUnRegisterDKP(ddev, priv->hDeviceDKPRef); +#endif + kfree(priv); ddev->dev_private = NULL; @@ -376,23 +432,25 @@ static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile) module_put(THIS_MODULE); } -/* - * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set. 
- */ static struct drm_ioctl_desc pvr_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, - DRM_RENDER_ALLOW | DRM_UNLOCKED), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(PVR_SRVKM_INIT, drm_pvr_srvkm_init, - DRM_RENDER_ALLOW | DRM_UNLOCKED), + DRM_RENDER_ALLOW), #if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) DRM_IOCTL_DEF_DRV(PVR_SYNC_RENAME_CMD, pvr_sync_rename_ioctl, - DRM_RENDER_ALLOW | DRM_UNLOCKED), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(PVR_SYNC_FORCE_SW_ONLY_CMD, pvr_sync_force_sw_only_ioctl, - DRM_RENDER_ALLOW | DRM_UNLOCKED), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_CREATE_FENCE_CMD, pvr_sw_sync_create_fence_ioctl, - DRM_RENDER_ALLOW | DRM_UNLOCKED), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_INC_CMD, pvr_sw_sync_inc_ioctl, - DRM_RENDER_ALLOW | DRM_UNLOCKED), + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(PVR_EXP_FENCE_SYNC_FORCE_CMD, pvr_sync_ioctl_force_exp_only, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(PVR_SYNC_CREATE_EXPORT_FENCE_CMD, + pvr_export_fence_sync_create_fence_ioctl, + DRM_RENDER_ALLOW), #endif }; @@ -409,6 +467,43 @@ static long pvr_compat_ioctl(struct file *file, unsigned int cmd, } #endif /* defined(CONFIG_COMPAT) */ +#if defined(SUPPORT_LINUX_FDINFO) +/* + * Produce the PVR specific fdinfo (utilization figures etc.) when queried. + * + * For kernels post 6.5 there is a helper function called 'drm_show_fdinfo' + * which will generate the mandatory keys (drm-driver, drm-pdev, drm-client-id) + * so we do not need to generate these if running on a later kernel etc. + * + */ +static void pvr_show_fdinfo(struct seq_file *seq_file, struct file *file) +{ + struct drm_file *dfile = file->private_data; + struct drm_device *dev = dfile->minor->dev; + struct drm_printer p = drm_seq_file_printer(seq_file); + PVRSRV_CONNECTION_PRIV *pvr_connection = dfile->driver_priv; + struct pvr_drm_private *priv; + int my_pid; + + /* Grab the PID from the associated drm_file->pid->numbers[0].nr */ + my_pid = dfile->pid->numbers[0].nr; + + priv = (struct pvr_drm_private *)dev->dev_private; + + /* Generate driver-specific keys */ + PVRDKFTraverse((DKF_VPRINTF_FUNC*)drm_vprintf, + &p, + priv->dev_node, + my_pid, + pvr_connection->ui32Type); + + /* Call into OS-specific drm_show_fdinfo if it is supported */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)) + drm_show_fdinfo(seq_file, file); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0) */ +} +#endif /* SUPPORT_LINUX_FDINFO */ + const struct file_operations pvr_drm_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -420,24 +515,29 @@ const struct file_operations pvr_drm_fops = { .mmap = PVRSRV_MMap, .poll = drm_poll, .read = drm_read, -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) - .fasync = drm_fasync, -#endif +#if defined(SUPPORT_LINUX_FDINFO) + .show_fdinfo = pvr_show_fdinfo, +#endif /* SUPPORT_LINUX_FDINFO */ }; const struct drm_driver pvr_drm_generic_driver = { - .driver_features = DRIVER_MODESET | DRIVER_RENDER, + .driver_features = DRIVER_MODESET | DRIVER_RENDER | + DRIVER_GEM | PVR_DRM_DRIVER_PRIME, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) .load = NULL, .unload = NULL, -#else - .load = pvr_drm_load, - .unload = pvr_drm_unload, -#endif .open = pvr_drm_open, .postclose = pvr_drm_release, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + /* prime_fd_to_handle is not supported */ +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) + .gem_prime_export = PhysmemGEMPrimeExport, + 
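	/* Newer kernels expect these GEM export/free callbacks to live in the
	 * per-object drm_gem_object_funcs rather than struct drm_driver, which
	 * is why they are only populated behind the version gate above. */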
.gem_free_object = PhysmemGEMObjectFree, +#endif .ioctls = pvr_drm_ioctls, .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls), .fops = &pvr_drm_fops, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drv.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drv.h index ee4fdf44259c..827145d60c50 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drv.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_drv.h @@ -90,20 +90,24 @@ struct pvr_drm_private { * the OS called suspend on this device. */ atomic_t suspended; + + /* Padding for 8-byte alignment */ + unsigned int uiPad1; + + /* Handle associated with DKP for this DRM device */ + void *hDeviceDKPRef; }; extern const struct dev_pm_ops pvr_pm_ops; extern const struct drm_driver pvr_drm_generic_driver; extern const struct file_operations pvr_drm_fops; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) int pvr_drm_load(struct drm_device *ddev, unsigned long flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) int pvr_drm_unload(struct drm_device *ddev); #else void pvr_drm_unload(struct drm_device *ddev); #endif -#endif int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, struct drm_file *file); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.c new file mode 100644 index 000000000000..30640d542b0e --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.c @@ -0,0 +1,363 @@ +/*************************************************************************/ /*! +@File pvr_dvfs_common.c +@Title PowerVR devfreq device common utilities +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux DVFS and PDVFS shared code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(NO_HARDWARE) + +#include +#include +#include +#if defined(CONFIG_PM_OPP) +#include +#endif + +#include "pvrsrv.h" + +/* + * Common DVFS support code shared between SUPPORT_LINUX_DVFS and + * SUPPORT_PDVFS, primarily for OPP table support. + * + * Note that PDVFS implements the Linux/OS devfreq module in + * the firmware, so no devfreq API calls should be used here. + */ +#include "pvr_dvfs.h" +#include "pvr_dvfs_common.h" + +#include "kernel_compatibility.h" + + +/*************************************************************************/ /*! +@Function GetOPPValues + +@Description Common code to store the OPP points in the freq_table, + for use with the IMG DVFS/devfreq implementation, or with + the Proactive DVFS. + Requires CONFIG_PM_OPP support in the kernel. + +@Input dev OS Device node +@Output min_freq Min clock freq (Hz) +@Output min_volt Min voltage (V) +@Output max_freq Max clock freq (Hz) +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +#if defined(CONFIG_PM_OPP) +int GetOPPValues(struct device *dev, + unsigned long *min_freq, + unsigned long *min_volt, + unsigned long *max_freq, + struct pvr_opp_freq_table *pvr_freq_table) +{ + struct dev_pm_opp *opp; + int count, i, err = 0; + unsigned long freq; + + unsigned long *freq_table; + + count = dev_pm_opp_get_opp_count(dev); + if (count <= 0) + { + dev_err(dev, "Could not fetch OPP count, %d\n", count); + return -EINVAL; + } + + dev_info(dev, "Found %d OPP points.\n", count); + + freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC); + if (! freq_table) + { + return -ENOMEM; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + /* Start RCU read-side critical section to map frequency to OPP */ + rcu_read_lock(); +#endif + + /* Iterate over OPP table; Iteration 0 finds "opp w/ freq >= 0 Hz". */ + freq = 0; + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(dev, "Couldn't find lowest frequency, %d\n", err); + goto exit; + } + + *min_volt = dev_pm_opp_get_voltage(opp); + *max_freq = *min_freq = freq_table[0] = freq; + dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count, freq, *min_volt); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + + /* Iteration i > 0 finds "opp w/ freq >= (opp[i-1].freq + 1)". 
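	   dev_pm_opp_find_freq_ceil() rounds the requested frequency up to the
	   next available OPP and writes that value back through &freq, so
	   bumping freq by 1 Hz after each match steps through the table in
	   ascending order.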
*/ + for (i = 1; i < count; i++) + { + freq++; + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err); + goto exit; + } + + freq_table[i] = freq; + *max_freq = freq; + dev_info(dev, + "opp[%d/%d]: (%lu Hz, %lu uV)\n", + i + 1, + count, + freq, + dev_pm_opp_get_voltage(opp)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + } + +exit: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#endif + + if (!err) + { + pvr_freq_table->freq_table = freq_table; + pvr_freq_table->num_levels = count; + } + else + { + devm_kfree(dev, freq_table); + } + + return err; +} +#endif + +/*************************************************************************/ /*! +@Function DVFSCopyOPPTable + +@Description Copies the OPP points (voltage/frequency) to the target + firmware structure RGXFWIF_OPP_INFO which must be pre-allocated. + The source values are expected to be read into the kernel + Power Manager from the platform's Device Tree. + Requires CONFIG_OF and CONFIG_PM_OPP support in the kernel. + +@Input psDeviceNode Device node +@Input psOPPInfo Target OPP data buffer +@Input ui32MaxOPPLevels Maximum number of OPP levels allowed in buffer. +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +#if defined(SUPPORT_FW_OPP_TABLE) && defined(CONFIG_OF) && defined(CONFIG_PM_OPP) +PVRSRV_ERROR DVFSCopyOPPTable(PPVRSRV_DEVICE_NODE psDeviceNode, + RGXFWIF_OPP_INFO *psOPPInfo, + IMG_UINT32 ui32MaxOPPLevels) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + struct device *psDev = NULL; + OPP_LEVEL *psOPPValue; + struct dev_pm_opp *opp; + struct pvr_opp_freq_table pvr_freq_table = {0}; + unsigned long min_freq = 0, max_freq = 0, min_volt = 0; + unsigned int i, err; + unsigned long *freq_table; + + if (!psDeviceNode || !psOPPInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid device or argument", __func__)); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + psDev = psDeviceNode->psDevConfig->pvOSDevice; + + err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq, &pvr_freq_table); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: DVFS OPP table not initialised.", __func__)); + return PVRSRV_ERROR_NOT_INITIALISED; + } + + freq_table = pvr_freq_table.freq_table; + + if (pvr_freq_table.num_levels > ui32MaxOPPLevels) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Too many OPP levels (%u), max (%u).", __func__, + pvr_freq_table.num_levels, ui32MaxOPPLevels)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto exit; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + /* Start RCU read-side critical section to map frequency to OPP */ + rcu_read_lock(); +#endif + + /* Loop over the OPP/frequency levels */ + psOPPValue = &psOPPInfo->asOPPValues[0]; + for (i=0; i<pvr_freq_table.num_levels; i++) + { + psOPPValue->ui32Freq = freq_table[i]; + opp = dev_pm_opp_find_freq_exact(psDev, freq_table[i], IMG_TRUE); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(psDev, "Couldn't find %dth frequency, %d\n", i, err); + eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + goto exit; + } + psOPPValue->ui32Volt = dev_pm_opp_get_voltage(opp); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + psOPPValue++; + } + + PVR_DPF((PVR_DBG_WARNING, "%s: Copied %u OPP points to the FW processor table.", __func__, + pvr_freq_table.num_levels)); + psOPPInfo->ui32MaxOPPPoint = pvr_freq_table.num_levels - 1; +exit: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#endif
+ + /* Free memory allocated in GetOPPValues */ + devm_kfree(psDev, freq_table); + + return eError; +} +#endif + +/*************************************************************************/ /*! +@Function InitPDVFS + +@Description Initialise the device for Proactive DVFS support. + Prepares the OPP table from the devicetree, if enabled. + +@Input psDeviceNode Device node +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +#if defined(SUPPORT_PDVFS) +PVRSRV_ERROR InitPDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) +{ +#if !(defined(CONFIG_PM_OPP) && defined(CONFIG_OF)) + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + return PVRSRV_OK; +#else + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; + struct device *psDev; + int err; + + if (!psDeviceNode) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_ASSERT(psDeviceNode->psDevConfig); + + psDev = psDeviceNode->psDevConfig->pvOSDevice; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + + /* Setup the OPP table from the device tree for Proactive DVFS. */ + err = dev_pm_opp_of_add_table(psDev); + if (err == 0) + { + psDVFSDeviceCfg->bDTConfig = IMG_TRUE; + } + else + { + /* + * If there are no device tree or system layer provided operating points + * then return an error + */ + if (psDVFSDeviceCfg->pasOPPTable) + { + psDVFSDeviceCfg->bDTConfig = IMG_FALSE; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "No system or device tree opp points found, %d", err)); + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + } + return PVRSRV_OK; +#endif +} + +/*************************************************************************/ /*! +@Function DeinitPDVFS + +@Description De-Initialise the device for Proactive DVFS support. + +@Input psDeviceNode Device node +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +void DeinitPDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) +{ +#if !(defined(CONFIG_PM_OPP) && defined(CONFIG_OF)) + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +#else + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; + struct device *psDev = NULL; + + /* Check the device exists */ + if (!psDeviceNode) + { + return; + } + + psDev = psDeviceNode->psDevConfig->pvOSDevice; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + + if (psDVFSDeviceCfg->bDTConfig) + { + /* + * Remove OPP entries for this device; only static entries from + * the device tree are present. + */ + dev_pm_opp_of_remove_table(psDev); + } +#endif +} +#endif /* SUPPORT_PDVFS */ + +#endif /* !NO_HARDWARE */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.h new file mode 100644 index 000000000000..87eccce5762e --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_common.h @@ -0,0 +1,123 @@ +/*************************************************************************/ /*! +@File pvr_dvfs_common.h +@Title System level interface for DVFS and PDVFS +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_DVFS_COMMON_H +#define PVR_DVFS_COMMON_H + +#include "opaque_types.h" +#include "pvrsrv_error.h" +#if defined(SUPPORT_FW_OPP_TABLE) && defined(CONFIG_OF) && defined(CONFIG_PM_OPP) +#include "rgx_fwif_km.h" +#endif + +struct pvr_opp_freq_table +{ + unsigned long *freq_table; + int num_levels; +}; + +/*************************************************************************/ /*! +@Function GetOPPValues + +@Description Common code to store the OPP points in the pvr_freq_table, + for use with the IMG DVFS/devfreq implementation, or with + the Proactive DVFS. + Requires CONFIG_PM_OPP support in the kernel. + +@Input dev OS Device node +@Output min_freq Min clock freq (Hz) +@Output min_volt Min voltage (V) +@Output max_freq Max clock freq (Hz) +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +#if defined(CONFIG_PM_OPP) +int GetOPPValues(struct device *dev, + unsigned long *min_freq, + unsigned long *min_volt, + unsigned long *max_freq, + struct pvr_opp_freq_table *pvr_freq_table); +#endif + +/*************************************************************************/ /*! +@Function DVFSCopyOPPTable + +@Description Copies the OPP points (voltage/frequency) to the target + firmware structure RGXFWIF_OPP_INFO which must be pre-allocated. + The source values are expected to be read into the kernel + Power Manager from the platform's Device Tree. + Requires CONFIG_OF and CONFIG_PM_OPP support in the kernel. 
+ +@Input psDeviceNode Device node +@Input psOPPInfo Target OPP data buffer +@Input ui32MaxOPPLevels Maximum number of OPP levels allowed in buffer. +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +#if defined(SUPPORT_FW_OPP_TABLE) && defined(CONFIG_OF) && defined(CONFIG_PM_OPP) +PVRSRV_ERROR DVFSCopyOPPTable(PPVRSRV_DEVICE_NODE psDeviceNode, + RGXFWIF_OPP_INFO *psOPPInfo, + IMG_UINT32 ui32MaxOPPLevels); +#endif + +#if defined(SUPPORT_PDVFS) +/*************************************************************************/ /*! +@Function InitPDVFS + +@Description Initialise the device for Proactive DVFS support. + Prepares the OPP table from the devicetree, if enabled. + +@Input psDeviceNode Device node +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR InitPDVFS(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*************************************************************************/ /*! +@Function DeinitPDVFS + +@Description De-Initialise the device for Proactive DVFS support. + +@Input psDeviceNode Device node +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +void DeinitPDVFS(PPVRSRV_DEVICE_NODE psDeviceNode); +#endif + +#endif /* PVR_DVFS_COMMON_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_device.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_device.c index e65a4ab8c6a5..f5e1a057a0ee 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_device.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_dvfs_device.c @@ -1,8 +1,8 @@ /*************************************************************************/ /*! -@File +@File pvr_dvfs_device.c @Title PowerVR devfreq device implementation @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Linux module setup +@Description Linux DVFS module setup @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -67,27 +67,39 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
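For orientation, a minimal usage sketch of the helper declared above (the caller name is hypothetical; it assumes CONFIG_PM_OPP and a device whose OPPs are already registered, e.g. via dev_pm_opp_of_add_table()):

#include <linux/device.h>

#include "pvr_dvfs_common.h"

/* Hypothetical helper: log the OPP range visible to the driver. */
static int example_log_opp_range(struct device *dev)
{
	struct pvr_opp_freq_table tbl = { 0 };
	unsigned long min_freq = 0, min_volt = 0, max_freq = 0;
	int i, err;

	err = GetOPPValues(dev, &min_freq, &min_volt, &max_freq, &tbl);
	if (err)
		return err;

	dev_info(dev, "%d OPP levels, %lu..%lu Hz (min %lu uV)\n",
		 tbl.num_levels, min_freq, max_freq, min_volt);

	for (i = 0; i < tbl.num_levels; i++)
		dev_info(dev, "  level %d: %lu Hz\n", i, tbl.freq_table[i]);

	/* tbl.freq_table is devm-allocated against dev inside GetOPPValues(),
	 * so no explicit free is needed here. */
	return 0;
}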
#include "syscommon.h" +#include "pvr_dvfs.h" #include "pvr_dvfs_device.h" +#include "pvr_dvfs_common.h" +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + +#if !defined(CONFIG_PM_DEVFREQ) +#error "PVR DVFS governor requires kernel support for devfreq (CONFIG_PM_DEVFREQ = 1)" +#endif +#include "linux/governor.h" +#include "pvr_dvfs_governor.h" + +#if defined(CONFIG_PM_DEVFREQ_EVENT) +#include "linux/devfreq-event.h" +#include "pvr_dvfs_events.h" +#endif +#endif /* SUPPORT_PVR_DVFS_GOVERNOR */ #include "kernel_compatibility.h" + +/* Default constants for PVR-Balanced (PVR) governor */ +#define PVR_UPTHRESHOLD (90) +#define PVR_DOWNDIFFERENTIAL (5) + static int _device_get_devid(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); int deviceId; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) - /* - * Older kernels do not have render drm_minor member in drm_device, - * so we fallback to primary node for device identification - */ - deviceId = ddev->primary->index; -#else if (ddev->render) deviceId = ddev->render->index; else /* when render node is NULL, fallback to primary node */ deviceId = ddev->primary->index; -#endif return deviceId; } @@ -95,17 +107,13 @@ static int _device_get_devid(struct device *dev) static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags) { int deviceId = _device_get_devid(dev); - PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId); + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId); RGX_DATA *psRGXData = NULL; IMG_DVFS_DEVICE *psDVFSDevice = NULL; IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; IMG_UINT32 ui32Freq, ui32CurFreq, ui32Volt; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - struct opp *opp; -#else struct dev_pm_opp *opp; -#endif /* Check the device is registered */ if (!psDeviceNode) @@ -124,7 +132,7 @@ static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_fre } psRGXTimingInfo = psRGXData->psRGXTimingInfo; - if (!psDVFSDevice->bEnabled) + if (psDVFSDevice->eState != PVR_DVFS_STATE_READY) { *requested_freq = psRGXTimingInfo->ui32CoreClockSpeed; return 0; @@ -170,15 +178,15 @@ static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_fre /* Increasing frequency, change voltage first */ if (ui32Freq > ui32CurFreq) { - psDVFSDeviceCfg->pfnSetVoltage(ui32Volt); + psDVFSDeviceCfg->pfnSetVoltage(psDeviceNode->psDevConfig->hSysData, ui32Volt); } - psDVFSDeviceCfg->pfnSetFrequency(ui32Freq); + psDVFSDeviceCfg->pfnSetFrequency(psDeviceNode->psDevConfig->hSysData, ui32Freq); /* Decreasing frequency, change frequency first */ if (ui32Freq < ui32CurFreq) { - psDVFSDeviceCfg->pfnSetVoltage(ui32Volt); + psDVFSDeviceCfg->pfnSetVoltage(psDeviceNode->psDevConfig->hSysData, ui32Volt); } psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq; @@ -192,13 +200,17 @@ static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_fre static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) { int deviceId = _device_get_devid(dev); - PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId); + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId); PVRSRV_RGXDEV_INFO *psDevInfo = NULL; IMG_DVFS_DEVICE *psDVFSDevice = NULL; RGX_DATA *psRGXData = NULL; RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; - RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; + RGXFWIF_GPU_UTIL_STATS *psGpuUtilStats = NULL; PVRSRV_ERROR 
eError; +#if defined(CONFIG_PM_DEVFREQ_EVENT) && defined(SUPPORT_PVR_DVFS_GOVERNOR) + struct pvr_profiling_dev_status *pvr_stat = stat->private_data; + int err; +#endif /* Check the device is registered */ if (!psDeviceNode) @@ -227,26 +239,41 @@ static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status return 0; } + psGpuUtilStats = kzalloc(sizeof(*psGpuUtilStats), GFP_KERNEL); + + if (!psGpuUtilStats) + { + return -ENOMEM; + } + eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, psDVFSDevice->hGpuUtilUserDVFS, - &sGpuUtilStats); + psGpuUtilStats); if (eError != PVRSRV_OK) { + kfree(psGpuUtilStats); return -EAGAIN; } - stat->busy_time = sGpuUtilStats.ui64GpuStatActive; - stat->total_time = sGpuUtilStats.ui64GpuStatCumulative; + stat->busy_time = psGpuUtilStats->ui64GpuStatActive; + stat->total_time = psGpuUtilStats->ui64GpuStatCumulative; + + kfree(psGpuUtilStats); + +#if defined(CONFIG_PM_DEVFREQ_EVENT) && defined(SUPPORT_PVR_DVFS_GOVERNOR) + err = pvr_get_dev_status_get_events(psDVFSDevice->psProfilingDevice, pvr_stat); + if (err) + return err; +#endif return 0; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq) { int deviceId = _device_get_devid(dev); - PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId); + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId); RGX_DATA *psRGXData = NULL; /* Check the device is registered */ @@ -267,180 +294,664 @@ static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq) return 0; } -#endif static struct devfreq_dev_profile img_devfreq_dev_profile = { + .polling_ms = 10, .target = devfreq_target, .get_dev_status = devfreq_get_dev_status, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) .get_cur_freq = devfreq_cur_freq, -#endif }; -static int FillOPPTable(struct device *dev, PVRSRV_DEVICE_NODE *psDeviceNode) +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + +/* DEVFREQ governor name */ +#define DEVFREQ_GOV_PVR_BALANCED "pvr_balanced" + +static unsigned long _get_max_freq(struct devfreq *devfreq_dev) { - const IMG_OPP *iopp; - int i, err = 0; - IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) + return devfreq_dev->scaling_max_freq; +#else + return devfreq_dev->max_freq; +#endif +} - /* Check the device exists */ - if (!dev || !psDeviceNode) +/* + * Calculate the utilisation percentage of X cycles as a proportion + * of Y cycles + */ +static inline +unsigned long UTILISATION_PC(unsigned long X, unsigned long Y) +{ + PVR_ASSERT(Y > 0); + +/* + if (X < UINT_MAX / 100UL) + return min(100UL, X * 100UL / Y); + else + return min(100UL, X / (Y / 100UL)); +*/ + /* Prevent overflow */ + if (X < UINT32_MAX / 100U) + return (X * 100U / Y); + else + return (X / (Y / 100U)); +} + +static int pvr_governor_get_target(struct devfreq *devfreq_dev, + unsigned long *freq) +{ + struct devfreq_dev_status *stat; + struct pvr_profiling_dev_status *pvr_stat; + IMG_DVFS_GOVERNOR_CFG *data = devfreq_dev->data; + unsigned long long a, b; + unsigned int pvr_upthreshold = PVR_UPTHRESHOLD; + unsigned int pvr_downdifferential = PVR_DOWNDIFFERENTIAL; + unsigned long ui32Util, ui32UtilDM, ui32UtilBus; + int iGeomEventID, iFragEventID, iCompEventID, iSLCReadEventID, iSLCWriteEventID; + int iSLCReadEventID2, iSLCWriteEventID2; + int err; + + if (!devfreq_dev) { return -ENODEV; } - psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + /* + * Governor main 
function for setting target frequency, based on + * metrics for + - utilisation + - memory bus bandwidth + - ALU counters etc. + */ - for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; - i < psDVFSDeviceCfg->ui32OPPTableSize; - i++, iopp++) + err = devfreq_update_stats(devfreq_dev); + if (err) { - err = dev_pm_opp_add(dev, iopp->ui32Freq, iopp->ui32Volt); - if (err) { - dev_err(dev, "Could not add OPP entry, %d\n", err); - return err; + dev_warn(&devfreq_dev->dev, "get_dev_status: error (%d)", err); + return err; + } + + stat = &devfreq_dev->last_status; + pvr_stat = stat->private_data; + if (!pvr_stat) + { + return -EINVAL; + } + + iGeomEventID = pvr_find_event_by_name("geom-cycle"); + iFragEventID = pvr_find_event_by_name("3d-cycle"); + iCompEventID = pvr_find_event_by_name("comp-cycle"); + if (iGeomEventID < 0 || iFragEventID < 0 || iCompEventID < 0) + { + return -EINVAL; + } + + if (pvr_stat->event_data[iGeomEventID].total_count == 0 || + pvr_stat->event_data[iFragEventID].total_count == 0 || + pvr_stat->event_data[iCompEventID].total_count == 0) + { + /* no error to avoid verbose kernel logs before the firmware/GPU + * is available. */ + return 0; + } + + /* GEOM utilisation */ + if (iGeomEventID >= 0) + { + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iGeomEventID].load_count, + pvr_stat->event_data[iGeomEventID].total_count); + ui32UtilDM = max(ui32UtilDM, ui32Util); + } + + /* 3D utilisation */ + if (iFragEventID >= 0) + { + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iFragEventID].load_count, + pvr_stat->event_data[iFragEventID].total_count); + ui32UtilDM = max(ui32UtilDM, ui32Util); + } + + /* Compute utilisation */ + if (iCompEventID >= 0) + { + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iCompEventID].load_count, + pvr_stat->event_data[iCompEventID].total_count); + ui32UtilDM = max(ui32UtilDM, ui32Util); + } + + if (ui32UtilDM > 1000U) + { + /* Illegal value, keep current frequency */ + *freq = stat->current_frequency; + return 0; + } + + iSLCReadEventID = pvr_find_event_by_name("slc-read"); + iSLCWriteEventID = pvr_find_event_by_name("slc-write"); + iSLCReadEventID2 = pvr_find_event_by_name("slc-read-2"); + iSLCWriteEventID2 = pvr_find_event_by_name("slc-write-2"); + if (iSLCReadEventID2 >= 0 && iSLCWriteEventID2 >= 0) + { + /* Use XE-series memory bus counters */ + + ui32UtilBus = 0; + + while (iSLCWriteEventID < ARRAY_SIZE(g_pvr_governor_events)) + { + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iSLCReadEventID].load_count, + pvr_stat->event_data[iSLCReadEventID].total_count); + ui32UtilBus = ui32UtilBus + ui32Util; + + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iSLCWriteEventID].load_count, + pvr_stat->event_data[iSLCWriteEventID].total_count); + ui32UtilBus = ui32UtilBus + ui32Util; + iSLCReadEventID += 2; + iSLCWriteEventID += 2; } } + else if (iSLCReadEventID >= 0 && iSLCWriteEventID >= 0) + { + /* Use DXT-series memory bus counters */ + + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iSLCReadEventID].load_count / data->uiNumMembus, + pvr_stat->event_data[iSLCReadEventID].total_count); + ui32UtilBus = ui32Util; + + ui32Util = + UTILISATION_PC(pvr_stat->event_data[iSLCWriteEventID].load_count / data->uiNumMembus, + pvr_stat->event_data[iSLCWriteEventID].total_count); + ui32UtilBus = ui32UtilBus + ui32Util; + } + + if (ui32UtilBus > 1000U) + { + /* Illegal value, use current frequency */ + *freq = stat->current_frequency; + return 0; + } + +#if defined(DEBUG) + iSLCReadEventID = pvr_find_event_by_name("slc-read"); + iSLCWriteEventID = 
pvr_find_event_by_name("slc-write"); + dev_info(&devfreq_dev->dev, "Geom: %10lu, 3D: %10lu, Comp: %10lu, SLC: %10lu cycles. DM %lu pc, Bus %lu pc.", + pvr_stat->event_data[iGeomEventID].load_count, + pvr_stat->event_data[iFragEventID].load_count, + pvr_stat->event_data[iCompEventID].load_count, + pvr_stat->event_data[iSLCReadEventID].load_count + pvr_stat->event_data[iSLCWriteEventID].load_count, + ui32UtilDM, + ui32UtilBus); +#endif + + if (ui32UtilDM > 2 * ui32UtilBus) { + // DM pipeline limited + ui32Util = ui32UtilDM; + } + else if (ui32UtilBus > 2 * ui32UtilDM) { + // Mem bandwidth limited + ui32Util = ui32UtilBus; + } + else { + // Average utilisation to smooth random fluctuations + ui32Util = (ui32UtilDM + ui32UtilBus) / 2; + } + + /* Set MAX if it's busy enough */ + if (ui32Util > pvr_upthreshold) + { + *freq = _get_max_freq(devfreq_dev); + return 0; + } + + /* Set MAX if we do not know the initial frequency */ + if (stat->current_frequency == 0) { + *freq = _get_max_freq(devfreq_dev); + return 0; + } + a = (unsigned long long) ui32Util * stat->current_frequency; + b = div_u64(a, (pvr_upthreshold - pvr_downdifferential / 2)); + *freq = (unsigned long) b; return 0; } -static void ClearOPPTable(struct device *dev, PVRSRV_DEVICE_NODE *psDeviceNode) +static void pvr_governor_suspend(struct devfreq *devfreq_dev) { -#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) - const IMG_OPP *iopp; - int i; - IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; + /* Nothing to do */ +} - /* Check the device exists */ - if (!dev || !psDeviceNode) - { +static void pvr_governor_resume(struct devfreq *devfreq_dev) +{ + int deviceId; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_DVFS_DEVICE *psDVFSDevice = NULL; + struct pvr_profiling_device *psProfilingDevice; + + if (!devfreq_dev || !dev_get_drvdata(&devfreq_dev->dev)) return; - } - psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + deviceId = _device_get_devid(&devfreq_dev->dev); + psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId); - for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; - i < psDVFSDeviceCfg->ui32OPPTableSize; - i++, iopp++) + /* Check the device is registered */ + if (!psDeviceNode) { - dev_pm_opp_remove(dev, iopp->ui32Freq); + return; } -#endif + + psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + psProfilingDevice = psDVFSDevice->psProfilingDevice; + + /* Reset the profiling events */ + pvr_governor_reset_events(psProfilingDevice); } -static int GetOPPValues(struct device *dev, - unsigned long *min_freq, - unsigned long *min_volt, - unsigned long *max_freq) +static int pvr_governor_event_handler(struct devfreq *devfreq_dev, + unsigned int event, void *data) { -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - struct opp *opp; + if (!devfreq_dev) + { + pr_err("%s: devfreq_dev not ready.\n", __func__); + return -ENODEV; + } + + /* + * We cannot take the deviceId here, as the DRM device + * may not be initialised. 
Null pointer in + * struct drm_device *ddev = dev_get_drvdata(dev) + */ + + switch (event) { + case DEVFREQ_GOV_START: + dev_info(&devfreq_dev->dev,"GOV_START event.\n"); + devfreq_monitor_start(devfreq_dev); + break; + + case DEVFREQ_GOV_STOP: + dev_info(&devfreq_dev->dev,"GOV_STOP event.\n"); + devfreq_monitor_stop(devfreq_dev); + break; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) + case DEVFREQ_GOV_UPDATE_INTERVAL: + dev_info(&devfreq_dev->dev,"GOV_UPDATE_INTERVAL event.\n"); + devfreq_update_interval(devfreq_dev, (unsigned int *)data); + break; + #else - struct dev_pm_opp *opp; + case DEVFREQ_GOV_INTERVAL: + dev_info(&devfreq_dev->dev,"GOV_INTERVAL event.\n"); + devfreq_interval_update(devfreq_dev, (unsigned int *)data); + break; #endif - int count, i, err = 0; - unsigned long freq; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ - (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) - unsigned int *freq_table; + case DEVFREQ_GOV_SUSPEND: + dev_info(&devfreq_dev->dev,"GOV_SUSPEND event.\n"); + pvr_governor_suspend(devfreq_dev); + devfreq_monitor_suspend(devfreq_dev); + break; + + case DEVFREQ_GOV_RESUME: + dev_info(&devfreq_dev->dev,"GOV_RESUME event.\n"); + devfreq_monitor_resume(devfreq_dev); + pvr_governor_resume(devfreq_dev); + break; + + default: + printk("Unknown event.\n"); + break; + } + + return 0; +} + +static struct devfreq_governor pvr_balanced_governor = { + .name = DEVFREQ_GOV_PVR_BALANCED, + .get_target_freq = pvr_governor_get_target, + .event_handler = pvr_governor_event_handler, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) + .attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL + | DEVFREQ_GOV_ATTR_TIMER, #else - unsigned long *freq_table; + .immutable = true, #endif +}; + +int pvr_governor_init(void) +{ + int ret; - count = dev_pm_opp_get_opp_count(dev); - if (count < 0) + ret = devfreq_add_governor(&pvr_balanced_governor); + if (ret) { - dev_err(dev, "Could not fetch OPP count, %d\n", count); - return count; + pr_err("%s: failed to install governor %d\n", __func__, ret); } - dev_info(dev, "Found %d OPP points.\n", count); + return ret; +} -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) - freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC); -#else - freq_table = kcalloc(count, sizeof(*freq_table), GFP_ATOMIC); -#endif - if (! 
freq_table) +void pvr_governor_exit(void) +{ + int ret; + + ret = devfreq_remove_governor(&pvr_balanced_governor); + if (ret) + { + pr_err("Failed to remove governor (%u)\n", ret); + } +} + +#if defined(CONFIG_PM_DEVFREQ_EVENT) + +static int pvr_get_dev_status_get_events(struct pvr_profiling_device *pvr_prof_dev, + struct pvr_profiling_dev_status *pvr_stat) +{ + struct devfreq_event_data event_data; + int i, ret = 0; + + for (i = 0; i < pvr_prof_dev->num_events; i++) + { + if (!pvr_prof_dev->edev[i]) + continue; + + ret = devfreq_event_get_event(pvr_prof_dev->edev[i], &event_data); + if (ret < 0) + return ret; + + pvr_stat->event_data[i].load_count = event_data.load_count; + pvr_stat->event_data[i].total_count = event_data.total_count; + } + + return ret; +} + +static int pvr_governor_reset_events(struct pvr_profiling_device *pvr_prof_dev) +{ + int i, ret = 0; + + for (i = 0; i < pvr_prof_dev->num_events; i++) + { + if (!pvr_prof_dev->edev[i]) + continue; + + ret = devfreq_event_set_event(pvr_prof_dev->edev[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int pvr_events_enable(struct devfreq_event_dev *edev) +{ + /* Enable counters */ + + dev_dbg(&edev->dev, "%s (Enable)\n", edev->desc->name); + + return 0; +} + +static int pvr_events_disable(struct devfreq_event_dev *edev) +{ + /* Disable counters */ + + dev_dbg(&edev->dev, "%s (Disable)\n", edev->desc->name); + + return 0; +} + +static int pvr_find_event_by_name(const char *name) +{ + int idx; + + for (idx = 0; idx < ARRAY_SIZE(g_pvr_governor_events); idx++) + if (strcmp(name, g_pvr_governor_events[idx].name) == 0) + return idx; + + return -EINVAL; +} + +static int pvr_governor_set_event(struct devfreq_event_dev *edev) +{ + int id = pvr_find_event_by_name(edev->desc->name); + if (id < 0) + return -EINVAL; + + /* clear the last value */ + g_pvr_governor_events[id].last_value = 0; + + return 0; +} + +static int pvr_governor_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct pvr_profiling_device *info = devfreq_event_get_drvdata(edev); + volatile unsigned long perf_ctr; + int id = pvr_find_event_by_name(edev->desc->name); + if (id < 0) + return -EINVAL; + + /* + * Read performance counter + */ + perf_ctr = OSReadHWReg32(info->pvRegsBaseKM, g_pvr_governor_events[id].cntr); + + /* Calculate deltas */ + if (perf_ctr >= g_pvr_governor_events[id].last_value) + { + edata->load_count = perf_ctr - g_pvr_governor_events[id].last_value; + } + else + { + edata->load_count = UINT32_MAX - g_pvr_governor_events[id].last_value + perf_ctr + 1; + } + + g_pvr_governor_events[id].last_value = perf_ctr; + + /* + * Calc the utilisation as a fraction of the total cycles + */ + if (edev->desc->event_type & PVR_EVENT_OP_PERF_CNTR) + { + edata->total_count = info->stat->event_data[0].load_count; + + /* + * Timer has 256-cycle granularity + * Cycle counters have 1-cycle or 256-cycle granularity depending on GPU variant + * SLC read/write counters have 1 event granularity + */ + if ((g_pvr_governor_events[0].shift > 0) && (g_pvr_governor_events[id].shift == 0)) + { + int timer_shift = g_pvr_governor_events[0].shift; + if (edata->total_count < (UINT32_MAX >> timer_shift)) + { + edata->total_count <<= timer_shift; /*!< convert total to cycles */ + } + else + { + /* avoid overflow but some loss of precision will occur */ + edata->load_count >>= timer_shift; /*!< convert load to ticks */ + } + } + } + else { + /* timer runs continuously, effective load is 100% */ + edata->total_count = edata->load_count; + } + + 
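	/* Worked example of the wrap handling above: with last_value = 0xFFFFFFF0
	 * and a new reading of 0x00000010, load_count becomes
	 * UINT32_MAX - 0xFFFFFFF0 + 0x10 + 1 = 0x20, i.e. 32 events across the
	 * 32-bit counter wrap instead of a huge bogus delta. */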
return 0; +} + +static const struct devfreq_event_ops pvr_governor_event_ops = { + .enable = pvr_events_enable, + .disable = pvr_events_disable, + .set_event = pvr_governor_set_event, + .get_event = pvr_governor_get_event, +}; + +int pvr_events_register(struct device *dev, PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_DVFS_DEVICE *psDVFSDevice; + struct pvr_profiling_device *pvr_prof_dev; + struct devfreq_event_dev **edev; + struct devfreq_event_desc *desc; + int size, err; + int idx; + + if (!psDeviceNode) + return -ENODEV; + + pvr_prof_dev = devm_kzalloc(dev, sizeof(*pvr_prof_dev), GFP_KERNEL); + if (!pvr_prof_dev) return -ENOMEM; + + pvr_prof_dev->dev = dev; + pvr_prof_dev->num_events = ARRAY_SIZE(g_pvr_governor_events); + + /* Alloc the event descriptor table */ + desc = devm_kcalloc(dev, pvr_prof_dev->num_events, sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + for (idx = 0; idx < pvr_prof_dev->num_events; idx++) + { + if (!g_pvr_governor_events[idx].name) + continue; + + desc[idx].name = g_pvr_governor_events[idx].name; + desc[idx].event_type = (idx < PERF_CNTR_OFF) ? PVR_EVENT_OP_TIMER : PVR_EVENT_OP_PERF_CNTR; + desc[idx].event_type |= PVR_EVENT_OP_PERF_VER; + desc[idx].driver_data = pvr_prof_dev; + desc[idx].ops = &pvr_governor_event_ops; } + pvr_prof_dev->desc = desc; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) - /* Start RCU read-side critical section to map frequency to OPP */ - rcu_read_lock(); -#endif + size = sizeof(struct devfreq_event_dev *) * pvr_prof_dev->num_events; + pvr_prof_dev->edev = devm_kzalloc(dev, size, GFP_KERNEL); + if (!pvr_prof_dev->edev) + return -ENOMEM; - /* Iterate over OPP table; Iteration 0 finds "opp w/ freq >= 0 Hz". */ - freq = 0; - opp = dev_pm_opp_find_freq_ceil(dev, &freq); - if (IS_ERR(opp)) + /* Populate the events */ + for (idx = 0, edev = pvr_prof_dev->edev; idx < pvr_prof_dev->num_events; idx++) { - err = PTR_ERR(opp); - dev_err(dev, "Couldn't find lowest frequency, %d\n", err); - goto exit; + edev[idx] = devm_devfreq_event_add_edev(dev, &desc[idx]); + if (IS_ERR(edev[idx])) + { + dev_err(dev, "failed to add devfreq-event device (idx=%d)\n", idx); + return PTR_ERR(edev[idx]); + } + + /* enable */ + err = devfreq_event_enable_edev(edev[idx]); + if (err) + { + dev_err(dev, "failed to enable devfreq-event device (idx=%d)\n", idx); + return PTR_ERR(edev[idx]); + } + + dev_info(dev, "%s: new PVR devfreq-event device registered %s (%s)\n", + __func__, dev_name(dev), desc[idx].name); } - *min_volt = dev_pm_opp_get_voltage(opp); - *max_freq = *min_freq = freq_table[0] = freq; - dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count, freq, *min_volt); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) - dev_pm_opp_put(opp); -#endif + /* Allocate the profiling stats, passed to governor */ + pvr_prof_dev->stat = devm_kzalloc(dev, sizeof(struct pvr_profiling_dev_status), GFP_KERNEL); + if (!pvr_prof_dev->stat) + return -ENOMEM; + + /* Map the reg bank */ + pvr_prof_dev->reg_size = psDeviceNode->psDevConfig->ui32RegsSize; + pvr_prof_dev->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, + psDeviceNode->psDevConfig->ui32RegsSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + if (!pvr_prof_dev->pvRegsBaseKM) + { + dev_err(dev, "failed to map register bank."); + return -ENODEV; + } + + dev_info(dev, "Mapped regbank at %p, size 0x%x Bytes.\n", pvr_prof_dev->pvRegsBaseKM, + pvr_prof_dev->reg_size); + + /* Update the DVFS device */ + psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + 
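	/* Caching the profiling device here lets the devfreq paths reach the
	 * registered devfreq-event devices later, e.g. devfreq_get_dev_status()
	 * via pvr_get_dev_status_get_events() and the governor's resume hook
	 * via pvr_governor_reset_events(). */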
psDVFSDevice->psProfilingDevice = pvr_prof_dev; + + return 0; +} + +void pvr_events_unregister(struct device *dev, IMG_DVFS_DEVICE *psDVFSDevice) +{ + struct pvr_profiling_device *pvr_prof_dev = psDVFSDevice->psProfilingDevice; + struct devfreq_event_dev **edev; + int idx, err; - /* Iteration i > 0 finds "opp w/ freq >= (opp[i-1].freq + 1)". */ - for (i = 1; i < count; i++) + /* Remove the events */ + for (idx = 0, edev = pvr_prof_dev->edev; idx < pvr_prof_dev->num_events; idx++) { - freq++; - opp = dev_pm_opp_find_freq_ceil(dev, &freq); - if (IS_ERR(opp)) + err = devfreq_event_disable_edev(edev[idx]); + if (err) { - err = PTR_ERR(opp); - dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err); - goto exit; + dev_warn(dev, "failed to disable devfreq-event device (idx=%d)\n", idx); } + devm_devfreq_event_remove_edev(dev, edev[idx]); + } - freq_table[i] = freq; - *max_freq = freq; - dev_info(dev, - "opp[%d/%d]: (%lu Hz, %lu uV)\n", - i + 1, - count, - freq, - dev_pm_opp_get_voltage(opp)); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) - dev_pm_opp_put(opp); -#endif + /* Unmap the register bank */ + if (pvr_prof_dev != NULL && pvr_prof_dev->pvRegsBaseKM != NULL) + { + OSUnMapPhysToLin((void __force *) pvr_prof_dev->pvRegsBaseKM, + pvr_prof_dev->reg_size); } -exit: -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) - rcu_read_unlock(); -#endif + /* devfreq_event resources are managed by the kernel */ +} +#endif /* CONFIG_PM_DEVFREQ_EVENT */ + +#endif /* SUPPORT_PVR_DVFS_GOVERNOR */ + +static int FillOPPTable(struct device *dev, PVRSRV_DEVICE_NODE *psDeviceNode) +{ + const IMG_OPP *iopp; + int i, err = 0; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) - if (!err) + /* Check the device exists */ + if (!dev || !psDeviceNode) { - img_devfreq_dev_profile.freq_table = freq_table; - img_devfreq_dev_profile.max_state = count; + return -ENODEV; } - else -#endif + + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + if (!psDVFSDeviceCfg->pasOPPTable) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) - devm_kfree(dev, freq_table); -#else - kfree(freq_table); -#endif + dev_err(dev, "No DVFS OPP table provided in system layer and no device tree support."); + return -ENODATA; } - return err; + for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; + i < psDVFSDeviceCfg->ui32OPPTableSize; + i++, iopp++) + { + err = dev_pm_opp_add(dev, iopp->ui32Freq, iopp->ui32Volt); + if (err) { + dev_err(dev, "Could not add OPP entry, %d\n", err); + return err; + } + } + + return 0; } + #if defined(CONFIG_DEVFREQ_THERMAL) static int RegisterCoolingDevice(struct device *dev, IMG_DVFS_DEVICE *psDVFSDevice, @@ -448,7 +959,6 @@ static int RegisterCoolingDevice(struct device *dev, { struct device_node *of_node; int err = 0; - PVRSRV_VZ_RET_IF_MODE(GUEST, err); if (!psDVFSDevice) { @@ -488,8 +998,6 @@ PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) PVRSRV_ERROR eError; int err; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); - #if !defined(CONFIG_PM_OPP) return PVRSRV_ERROR_NOT_SUPPORTED; #endif @@ -499,7 +1007,11 @@ PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) return PVRSRV_ERROR_INVALID_PARAMS; } - if (psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending) + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); + + PVR_ASSERT(psDeviceNode->psDevConfig); + + if (psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.eState == PVR_DVFS_STATE_INIT_PENDING) { PVR_DPF((PVR_DBG_ERROR, "DVFS initialise pending for device node %p", @@ 
-510,13 +1022,13 @@ PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) psDev = psDeviceNode->psDevConfig->pvOSDevice; psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; - psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending = IMG_TRUE; + psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.eState = PVR_DVFS_STATE_INIT_PENDING; #if defined(SUPPORT_SOC_TIMER) if (! psDeviceNode->psDevConfig->pfnSoCTimerRead) { PVR_DPF((PVR_DBG_ERROR, "System layer SoC timer callback not implemented")); - return PVRSRV_ERROR_NOT_IMPLEMENTED; + //return PVRSRV_ERROR_NOT_IMPLEMENTED; } #endif @@ -527,33 +1039,57 @@ PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) return eError; } -#if defined(CONFIG_OF) err = dev_pm_opp_of_add_table(psDev); - if (err) + if (err == 0) + { + psDVFSDeviceCfg->bDTConfig = IMG_TRUE; + } + else { /* * If there are no device tree or system layer provided operating points * then return an error */ - if (err != -ENODEV || !psDVFSDeviceCfg->pasOPPTable) + if ((err == -ENOTSUPP || err == -ENODEV) && psDVFSDeviceCfg->pasOPPTable) + { + err = FillOPPTable(psDev, psDeviceNode); + if (err != 0 && err != -ENODATA) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err)); + eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + goto err_exit; + } + } + else { PVR_DPF((PVR_DBG_ERROR, "Failed to init opp table from devicetree, %d", err)); - eError = TO_IMG_ERR(err); + eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; goto err_exit; } } + + +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) +#if defined(CONFIG_PM_DEVFREQ_EVENT) + err = pvr_events_register(psDev, psDeviceNode); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to init PVR profiling events, %d", err)); + goto err_exit; + } #endif - if (psDVFSDeviceCfg->pasOPPTable) + err = pvr_governor_init(); + if (err != 0) { - err = FillOPPTable(psDev, psDeviceNode); - if (err) - { - PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err)); - eError = TO_IMG_ERR(err); - goto err_exit; - } + PVR_DPF((PVR_DBG_ERROR, "Failed to init PVR governor, %d", err)); + goto err_exit; } + psDVFSDevice->bGovernorReady = true; +#endif + + PVR_ASSERT(psDev); + PVR_ASSERT(psDeviceNode); PVR_TRACE(("PVR DVFS init pending: dev = %p, PVR device = %p", psDev, psDeviceNode)); @@ -572,6 +1108,7 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) IMG_DVFS_GOVERNOR_CFG *psDVFSGovernorCfg = NULL; RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; struct device *psDev; + struct pvr_opp_freq_table pvr_freq_table = {0}; unsigned long min_freq = 0, max_freq = 0, min_volt = 0; PVRSRV_ERROR eError; int err; @@ -581,7 +1118,7 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) return PVRSRV_ERROR_INVALID_PARAMS; } - if (!psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending) + if (psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.eState != PVR_DVFS_STATE_INIT_PENDING) { PVR_DPF((PVR_DBG_ERROR, "DVFS initialise not yet pending for device node %p", @@ -594,10 +1131,9 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; psDVFSGovernorCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSGovernorCfg; psRGXTimingInfo = ((RGX_DATA *)psDeviceNode->psDevConfig->hDevData)->psRGXTimingInfo; - psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending = IMG_FALSE; - psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bReady = IMG_TRUE; + psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.eState = 
PVR_DVFS_STATE_READY; - err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq); + err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq, &pvr_freq_table); if (err) { PVR_DPF((PVR_DBG_ERROR, "Failed to read OPP points, %d", err)); @@ -605,27 +1141,49 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) goto err_exit; } + img_devfreq_dev_profile.freq_table = pvr_freq_table.freq_table; + img_devfreq_dev_profile.max_state = pvr_freq_table.num_levels; img_devfreq_dev_profile.initial_freq = min_freq; img_devfreq_dev_profile.polling_ms = psDVFSDeviceCfg->ui32PollMs; psRGXTimingInfo->ui32CoreClockSpeed = min_freq; - psDVFSDeviceCfg->pfnSetFrequency(min_freq); - psDVFSDeviceCfg->pfnSetVoltage(min_volt); + psDVFSDeviceCfg->pfnSetFrequency(psDeviceNode->psDevConfig->hSysData, min_freq); + psDVFSDeviceCfg->pfnSetVoltage(psDeviceNode->psDevConfig->hSysData, min_volt); +#if !defined(SUPPORT_PVR_DVFS_GOVERNOR) + /* Use the Linux 'simple_ondemand' governor */ psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold; psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential; +#endif + +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + psDVFSDevice->data.ui32UpThreshold = psDVFSGovernorCfg->ui32UpThreshold; + psDVFSDevice->data.ui32DownDifferential = psDVFSGovernorCfg->ui32DownDifferential; + +#if defined(SUPPORT_RGX) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice; + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_MEMBUS)) + { + psDVFSDevice->data.uiNumMembus = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_MEMBUS); + } + else + { + psDVFSDevice->data.uiNumMembus = 1; + } + } +#endif +#endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) psDVFSDevice->psDevFreq = devm_devfreq_add_device(psDev, &img_devfreq_dev_profile, - "simple_ondemand", +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + "pvr_balanced", &psDVFSDevice->data); #else - psDVFSDevice->psDevFreq = devfreq_add_device(psDev, - &img_devfreq_dev_profile, - "simple_ondemand", - &psDVFSDevice->data); + "simple_ondemand", + &psDVFSDevice->data); #endif if (IS_ERR(psDVFSDevice->psDevFreq)) @@ -637,6 +1195,17 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq)); goto err_exit; } +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 50))) + /* Handle Linux kernel bug where a NULL return can occur. 
*/ + if (psDVFSDevice->psDevFreq == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to add as devfreq device %p, NULL return", + psDVFSDevice->psDevFreq)); + eError = TO_IMG_ERR(-EINVAL); + goto err_exit; + } +#endif eError = SuspendDVFS(psDeviceNode); if (eError != PVRSRV_OK) @@ -645,16 +1214,17 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) goto err_exit; } -#if defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) - psDVFSDevice->psDevFreq->policy.user.min_freq = min_freq; - psDVFSDevice->psDevFreq->policy.user.max_freq = max_freq; -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) psDVFSDevice->psDevFreq->scaling_min_freq = min_freq; psDVFSDevice->psDevFreq->scaling_max_freq = max_freq; #else psDVFSDevice->psDevFreq->min_freq = min_freq; psDVFSDevice->psDevFreq->max_freq = max_freq; #endif +#if defined(CONFIG_PM_DEVFREQ_EVENT) && defined(SUPPORT_PVR_DVFS_GOVERNOR) + psDVFSDevice->psDevFreq->last_status.private_data = + (void *)psDVFSDevice->psProfilingDevice->stat; +#endif err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq); if (err) @@ -665,11 +1235,14 @@ PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) } #if defined(CONFIG_DEVFREQ_THERMAL) - err = RegisterCoolingDevice(psDev, psDVFSDevice, psDVFSDeviceCfg->psPowerOps); - if (err) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) { - eError = TO_IMG_ERR(err); - goto err_exit; + err = RegisterCoolingDevice(psDev, psDVFSDevice, psDVFSDeviceCfg->psPowerOps); + if (err) + { + eError = TO_IMG_ERR(err); + goto err_exit; + } } #endif @@ -697,7 +1270,7 @@ void UnregisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) return; } - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDeviceNode); psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; psDev = psDeviceNode->psDevConfig->pvOSDevice; @@ -723,22 +1296,11 @@ void UnregisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode) PVR_DPF((PVR_DBG_ERROR, "Failed to unregister OPP notifier")); } -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) - devfreq_remove_device(psDVFSDevice->psDevFreq); -#else devm_devfreq_remove_device(psDev, psDVFSDevice->psDevFreq); -#endif - psDVFSDevice->psDevFreq = NULL; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) && \ - LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - kfree(img_devfreq_dev_profile.freq_table); -#endif - - psDVFSDevice->bInitPending = IMG_FALSE; - psDVFSDevice->bReady = IMG_FALSE; + psDVFSDevice->eState = PVR_DVFS_STATE_DEINIT; } void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) @@ -752,30 +1314,40 @@ void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) return; } - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDeviceNode); psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; psDev = psDeviceNode->psDevConfig->pvOSDevice; - /* Remove OPP entries for this device */ - ClearOPPTable(psDev, psDeviceNode); - -#if defined(CONFIG_OF) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || \ - (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) - dev_pm_opp_of_remove_table(psDev); -#endif +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + if (psDVFSDevice->psProfilingDevice) + { + pvr_events_unregister(psDev, psDVFSDevice); + } + if (psDVFSDevice->bGovernorReady) + { + pvr_governor_exit(); + psDVFSDevice->bGovernorReady = false; + } #endif + /* + * Remove OPP entries for this device; 
both static entries from + * the device tree and dynamic entries. + */ + dev_pm_opp_remove_table(psDev); + SORgxGpuUtilStatsUnregister(psDVFSDevice->hGpuUtilUserDVFS); psDVFSDevice->hGpuUtilUserDVFS = NULL; - psDVFSDevice->bInitPending = IMG_FALSE; - psDVFSDevice->bReady = IMG_FALSE; + psDVFSDevice->eState = PVR_DVFS_STATE_NONE; } PVRSRV_ERROR SuspendDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) { IMG_DVFS_DEVICE *psDVFSDevice = NULL; +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + int err; +#endif /* Check the device is registered */ if (!psDeviceNode) @@ -784,7 +1356,21 @@ PVRSRV_ERROR SuspendDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) } psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; - psDVFSDevice->bEnabled = IMG_FALSE; + if (psDVFSDevice->eState == PVR_DVFS_STATE_DEINIT) + { + /* Device is shutting down, nothing to do. */ + return PVRSRV_OK; + } + psDVFSDevice->eState = PVR_DVFS_STATE_OFF; +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + /* Communicate power suspend to devfreq framework */ + err = devfreq_suspend_device(psDVFSDevice->psDevFreq); + if (err < 0) + { + PVR_DPF((PVR_DBG_WARNING, "Failed to suspend DVFS (%d)", err)); + return PVRSRV_ERROR_INVALID_DEVICE; + } +#endif return PVRSRV_OK; } @@ -792,6 +1378,9 @@ PVRSRV_ERROR SuspendDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) PVRSRV_ERROR ResumeDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) { IMG_DVFS_DEVICE *psDVFSDevice = NULL; +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + int err; +#endif /* Check the device is registered */ if (!psDeviceNode) @@ -802,9 +1391,22 @@ PVRSRV_ERROR ResumeDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; /* Not supported in GuestOS drivers */ - psDVFSDevice->bEnabled = !PVRSRV_VZ_MODE_IS(GUEST); + psDVFSDevice->eState = PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode) ? PVR_DVFS_STATE_NONE : PVR_DVFS_STATE_READY; +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, psDeviceNode)) + { + /* Communicate power resume to devfreq framework */ + err = devfreq_resume_device(psDVFSDevice->psDevFreq); + if (err < 0) + { + PVR_DPF((PVR_DBG_WARNING, "Failed to resume DVFS (%d)", err)); + return PVRSRV_ERROR_INVALID_DEVICE; + } + } +#endif return PVRSRV_OK; } + #endif /* !NO_HARDWARE */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.c new file mode 100644 index 000000000000..9a192b1b6fd1 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.c @@ -0,0 +1,472 @@ +/* + * @File + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +#include "pvr_export_fence.h" +#include "osfunc_common.h" + +struct pvr_exp_fence_context { + struct kref kref; + unsigned int context; + char context_name[32]; + char driver_name[32]; + atomic_t seqno; + atomic_t fence_count; + + void *cmd_complete_handle; + + /* lock for signal and fence lists */ + spinlock_t list_lock; + struct list_head signal_list; + struct list_head fence_list; +}; + +struct pvr_exp_fence { + struct dma_fence base; + struct pvr_exp_fence_context *fence_context; + PSYNC_CHECKPOINT checkpoint_handle; + /* dma_fence fd (needed for hwperf) */ + int fd; + /* Lock for the dma fence */ + spinlock_t lock; + /* fence will point to the dma_fence in base */ + struct dma_fence *fence; + struct list_head fence_head; + struct list_head signal_head; +}; + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +static inline bool +pvr_exp_fence_sync_is_signaled(struct pvr_exp_fence *exp_fence, u32 fence_sync_flags) +{ + if (exp_fence->checkpoint_handle) { + return SyncCheckpointIsSignalled(exp_fence->checkpoint_handle, + fence_sync_flags); + } + return false; +} + +const char *pvr_exp_fence_context_name(struct pvr_exp_fence_context *fctx) +{ + return fctx->context_name; +} + +void pvr_exp_fence_context_value_str(struct pvr_exp_fence_context *fctx, + char *str, int size) +{ + snprintf(str, size, "%d", atomic_read(&fctx->seqno)); +} + +static void +pvr_exp_fence_context_fences_dump(struct pvr_exp_fence_context *fctx, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + unsigned long flags; + char value[128]; + + spin_lock_irqsave(&fctx->list_lock, flags); + pvr_exp_fence_context_value_str(fctx, value, sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "exp_fence_ctx: @%s", value); + spin_unlock_irqrestore(&fctx->list_lock, flags); +} + +static inline unsigned +pvr_exp_fence_context_seqno_next(struct pvr_exp_fence_context *fence_context) +{ + if (fence_context) + return atomic_inc_return(&fence_context->seqno) - 1; + else + return 0xfeedface; +} + +static void +pvr_exp_fence_context_signal_fences(void *data) +{ + struct pvr_exp_fence_context *fctx = (struct pvr_exp_fence_context *)data; + struct pvr_exp_fence *pvr_exp_fence, *tmp; + unsigned long flags1; + int chkpt_ct = 0; + int chkpt_sig_ct = 0; + + LIST_HEAD(signal_list); + + /* + * We can't call fence_signal while holding the lock as we can end up + * in a situation whereby pvr_fence_foreign_signal_sync, which also + * takes the list lock, ends up being called as a result of the + * fence_signal below, i.e. fence_signal(fence) -> fence->callback() + * -> fence_signal(foreign_fence) -> foreign_fence->callback() where + * the foreign_fence callback is pvr_fence_foreign_signal_sync. + * + * So extract the items we intend to signal and add them to their own + * queue. 
+ */ + spin_lock_irqsave(&fctx->list_lock, flags1); + list_for_each_entry_safe(pvr_exp_fence, tmp, &fctx->signal_list, signal_head) { + chkpt_ct++; + if (pvr_exp_fence_sync_is_signaled(pvr_exp_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { + chkpt_sig_ct++; + list_move_tail(&pvr_exp_fence->signal_head, &signal_list); + } + } + spin_unlock_irqrestore(&fctx->list_lock, flags1); + + list_for_each_entry_safe(pvr_exp_fence, tmp, &signal_list, signal_head) { + spin_lock_irqsave(&pvr_exp_fence->fence_context->list_lock, flags1); + list_del(&pvr_exp_fence->signal_head); + spin_unlock_irqrestore(&pvr_exp_fence->fence_context->list_lock, flags1); + dma_fence_signal(pvr_exp_fence->fence); + dma_fence_put(pvr_exp_fence->fence); + } +} + +static const char *pvr_exp_fence_get_driver_name(struct dma_fence *fence) +{ + struct pvr_exp_fence *pvr_exp_fence = to_pvr_exp_fence(fence); + + if (pvr_exp_fence && pvr_exp_fence->fence_context) + return pvr_exp_fence->fence_context->driver_name; + else + return "***NO_DRIVER***"; +} + +static const char *pvr_exp_fence_get_timeline_name(struct dma_fence *fence) +{ + struct pvr_exp_fence *pvr_exp_fence = to_pvr_exp_fence(fence); + + if (pvr_exp_fence && pvr_exp_fence->fence_context) + return pvr_exp_fence_context_name(pvr_exp_fence->fence_context); + else + return "***NO_TIMELINE***"; +} + +static void pvr_exp_fence_value_str(struct dma_fence *fence, char *str, int size) +{ + snprintf(str, size, "%llu", (u64) fence->seqno); +} + +static void pvr_exp_fence_timeline_value_str(struct dma_fence *fence, + char *str, int size) +{ + struct pvr_exp_fence *pvr_exp_fence = to_pvr_exp_fence(fence); + + if (pvr_exp_fence && pvr_exp_fence->fence_context) + pvr_exp_fence_context_value_str(pvr_exp_fence->fence_context, str, size); +} + +static bool pvr_exp_fence_enable_signaling(struct dma_fence *fence) +{ + struct pvr_exp_fence *exp_fence = to_pvr_exp_fence(fence); + unsigned long flags; + + if (!exp_fence) + return false; + + if (pvr_exp_fence_sync_is_signaled(exp_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return false; + + dma_fence_get(&exp_fence->base); + + spin_lock_irqsave(&exp_fence->fence_context->list_lock, flags); + list_add_tail(&exp_fence->signal_head, &exp_fence->fence_context->signal_list); + spin_unlock_irqrestore(&exp_fence->fence_context->list_lock, flags); + + return true; +} + +static void pvr_exp_fence_context_destroy_kref(struct kref *kref) +{ + struct pvr_exp_fence_context *fence_context = + container_of(kref, struct pvr_exp_fence_context, kref); + unsigned int fence_count; + + if (WARN_ON(!list_empty_careful(&fence_context->fence_list))) + pvr_exp_fence_context_fences_dump(fence_context, NULL, NULL); + + PVRSRVUnregisterCmdCompleteNotify(fence_context->cmd_complete_handle); + + fence_count = atomic_read(&fence_context->fence_count); + if (WARN_ON(fence_count)) + pr_debug("%s context has %u fence(s) remaining\n", + fence_context->context_name, fence_count); + + kfree(fence_context); +} + +static void pvr_exp_fence_release(struct dma_fence *fence) +{ + struct pvr_exp_fence *pvr_exp_fence = to_pvr_exp_fence(fence); + unsigned long flags; + + if (pvr_exp_fence) { + if (pvr_exp_fence->fence_context) { + spin_lock_irqsave(&pvr_exp_fence->fence_context->list_lock, flags); + list_del(&pvr_exp_fence->fence_head); + atomic_dec(&pvr_exp_fence->fence_context->fence_count); + spin_unlock_irqrestore(&pvr_exp_fence->fence_context->list_lock, flags); + + kref_put(&pvr_exp_fence->fence_context->kref, + pvr_exp_fence_context_destroy_kref); + } + + if 
(pvr_exp_fence->checkpoint_handle) { + SyncCheckpointFree(pvr_exp_fence->checkpoint_handle); + pvr_exp_fence->checkpoint_handle = NULL; + } + + kfree(pvr_exp_fence); + } +} + +static const struct dma_fence_ops pvr_exp_fence_ops = { + .get_driver_name = pvr_exp_fence_get_driver_name, + .get_timeline_name = pvr_exp_fence_get_timeline_name, + .fence_value_str = pvr_exp_fence_value_str, + .timeline_value_str = pvr_exp_fence_timeline_value_str, + .enable_signaling = pvr_exp_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = pvr_exp_fence_release, +}; + +struct pvr_exp_fence_context * +pvr_exp_fence_context_create(const char *context_name, const char *driver_name) +{ + struct pvr_exp_fence_context *fence_context; + PVRSRV_ERROR srv_err; + + fence_context = kzalloc(sizeof(*fence_context), GFP_KERNEL); + if (!fence_context) + return NULL; + + fence_context->context = dma_fence_context_alloc(1); + OSStringSafeCopy(fence_context->context_name, context_name, + sizeof(fence_context->context_name)); + OSStringSafeCopy(fence_context->driver_name, driver_name, + sizeof(fence_context->driver_name)); + atomic_set(&fence_context->seqno, 0); + atomic_set(&fence_context->fence_count, 0); + kref_init(&fence_context->kref); + + spin_lock_init(&fence_context->list_lock); + INIT_LIST_HEAD(&fence_context->signal_list); + INIT_LIST_HEAD(&fence_context->fence_list); + + srv_err = PVRSRVRegisterCmdCompleteNotify(&fence_context->cmd_complete_handle, + pvr_exp_fence_context_signal_fences, + fence_context); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register command complete callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + kfree(fence_context); + return NULL; + } + + return fence_context; +} + +void pvr_exp_fence_context_destroy(struct pvr_exp_fence_context *fence_context) +{ + if (fence_context) { + kref_put(&fence_context->kref, pvr_exp_fence_context_destroy_kref); + } +} + +struct dma_fence * +pvr_exp_fence_create(struct pvr_exp_fence_context *fence_context, int fd, u64 *sync_pt_idx) +{ + struct pvr_exp_fence *pvr_exp_fence; + unsigned long flags; + unsigned int seqno; + struct pvr_exp_fence_context *pfence_context = fence_context; + + if (WARN_ON(!fence_context)) + return NULL; + + pvr_exp_fence = kzalloc(sizeof(*pvr_exp_fence), GFP_KERNEL); + if (WARN_ON(!pvr_exp_fence)) + return NULL; + + spin_lock_init(&pvr_exp_fence->lock); + + INIT_LIST_HEAD(&pvr_exp_fence->fence_head); + INIT_LIST_HEAD(&pvr_exp_fence->signal_head); + + pvr_exp_fence->fence_context = pfence_context; + + /* No sync checkpoint is assigned until attached to a kick */ + pvr_exp_fence->checkpoint_handle = NULL; + + seqno = pvr_exp_fence_context_seqno_next(pfence_context); + + pvr_exp_fence->fence = &pvr_exp_fence->base; + + dma_fence_init(&pvr_exp_fence->base, &pvr_exp_fence_ops, + &pvr_exp_fence->lock, pfence_context->context, seqno); + + atomic_inc(&pfence_context->fence_count); + + spin_lock_irqsave(&fence_context->list_lock, flags); + list_add_tail(&pvr_exp_fence->fence_head, &fence_context->fence_list); + spin_unlock_irqrestore(&fence_context->list_lock, flags); + + kref_get(&pfence_context->kref); + + pvr_exp_fence->fd = fd; + + *sync_pt_idx = pvr_exp_fence_context_seqno_next(pfence_context); + + return &pvr_exp_fence->base; +} + +enum PVRSRV_ERROR_TAG +pvr_exp_fence_assign_checkpoint(PVRSRV_FENCE fence_to_resolve, + struct dma_fence *fence, + PSYNC_CHECKPOINT_CONTEXT checkpoint_context, + PSYNC_CHECKPOINT *assigned_checkpoint) +{ + struct SYNC_CHECKPOINT_TAG *new_sync_checkpoint; + struct 
pvr_exp_fence *pvr_exp_fence; + PVRSRV_FENCE export_fence_fd = fence_to_resolve; + PVRSRV_ERROR err; + + pvr_exp_fence = to_pvr_exp_fence(fence); + if (!pvr_exp_fence) { + pr_err("%s: Invalid fence_to_resolve\n", __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + if (pvr_exp_fence->checkpoint_handle) { + /* export fence already has a sync checkpoint assigned */ + *assigned_checkpoint = pvr_exp_fence->checkpoint_handle; + return PVRSRV_OK; + } + + /* Ensure fd assigned to export fence when it was created is passed + * to SyncCheckpointAlloc() (so correct fd is used in HWPerf event) + * as the export fence may only be part of the check fence. + */ + if (pvr_exp_fence->fd != PVRSRV_NO_FENCE) { + export_fence_fd = pvr_exp_fence->fd; + } + + err = SyncCheckpointAlloc(checkpoint_context, + PVRSRV_NO_TIMELINE, export_fence_fd, + fence->ops->get_timeline_name(fence), &new_sync_checkpoint); + if (unlikely(err != PVRSRV_OK)) { + pr_err("%s: SyncCheckpointAlloc() failed (err%d)\n", + __func__, err); + *assigned_checkpoint = NULL; + goto err_out; + } + + pvr_exp_fence->checkpoint_handle = new_sync_checkpoint; + *assigned_checkpoint = new_sync_checkpoint; + return PVRSRV_OK; + +err_out: + return err; +} + +enum PVRSRV_ERROR_TAG +pvr_exp_fence_rollback(struct dma_fence *fence) +{ + struct pvr_exp_fence *pvr_exp_fence; + PVRSRV_ERROR err; + + pvr_exp_fence = to_pvr_exp_fence(fence); + if (!pvr_exp_fence) { + pr_err("%s: Invalid fence\n", __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + if (pvr_exp_fence->checkpoint_handle) { + /* Free the assigned sync checkpoint */ + SyncCheckpointFree(pvr_exp_fence->checkpoint_handle); + pvr_exp_fence->checkpoint_handle = NULL; + } + + return PVRSRV_OK; + +err_out: + return err; +} + +bool pvr_is_exp_fence(struct dma_fence *fence) +{ + return (fence->ops == &pvr_exp_fence_ops); +} + +struct pvr_exp_fence *to_pvr_exp_fence(struct dma_fence *fence) +{ + if (pvr_is_exp_fence(fence)) + return container_of(fence, struct pvr_exp_fence, base); + + return NULL; +} + +struct SYNC_CHECKPOINT_TAG * +pvr_exp_fence_get_checkpoint(struct pvr_exp_fence *export_fence) +{ + if (export_fence) { + return export_fence->checkpoint_handle; + } + else + return NULL; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.h new file mode 100644 index 000000000000..0ffe8a11ce40 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_export_fence.h @@ -0,0 +1,77 @@ +/* + * @File + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__PVR_EXPORT_FENCES_H__) +#define __PVR_EXPORT_FENCES_H__ + +#include "pvr_linux_fence.h" +#include "services_kernel_client.h" + +struct pvr_exp_fence_context; +struct pvr_exp_fence; + +struct pvr_exp_fence_context *pvr_exp_fence_context_create(const char *name, + const char *driver_name); +void pvr_exp_fence_context_destroy(struct pvr_exp_fence_context *fence_context); +struct dma_fence *pvr_exp_fence_create(struct pvr_exp_fence_context *fence_context, + int fd, + u64 *sync_pt_idx); + +const char *pvr_exp_fence_context_name(struct pvr_exp_fence_context *fctx); +void pvr_exp_fence_context_value_str(struct pvr_exp_fence_context *fctx, + char *str, int size); + +enum PVRSRV_ERROR_TAG pvr_exp_fence_assign_checkpoint(PVRSRV_FENCE fence_to_resolve, + struct dma_fence *fence, + PSYNC_CHECKPOINT_CONTEXT checkpoint_context, + PSYNC_CHECKPOINT *assigned_checkpoint); + +enum PVRSRV_ERROR_TAG pvr_exp_fence_rollback(struct dma_fence *fence); + +bool pvr_is_exp_fence(struct dma_fence *fence); + +struct pvr_exp_fence *to_pvr_exp_fence(struct dma_fence *fence); + +struct SYNC_CHECKPOINT_TAG * +pvr_exp_fence_get_checkpoint(struct pvr_exp_fence *export_fence); + +#endif /* !defined(__PVR_EXPORT_FENCES_H__) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.c index e94522a647ae..130c68d825fc 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.c @@ -49,6 +49,7 @@ #include "pvr_fence.h" #include "services_kernel_client.h" #include "sync_checkpoint_external.h" +#include "osfunc_common.h" #define CREATE_TRACE_POINTS #include "pvr_fence_trace.h" @@ -157,7 +158,7 @@ pvr_fence_context_fences_dump(struct pvr_fence_context *fctx, } PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - " | @%s (foreign)", value); + " | @%s (foreign)", fence_value_str); } spin_unlock_irqrestore(&fctx->list_lock, flags); } @@ -362,7 +363,7 @@ pvr_fence_context_create_internal(struct workqueue_struct 
*fence_status_wq, fctx->fence_wq = fence_status_wq; fctx->fence_context = dma_fence_context_alloc(1); - strlcpy(fctx->name, name, sizeof(fctx->name)); + OSStringSafeCopy(fctx->name, name, sizeof(fctx->name)); srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle, pvr_fence_context_signal_fences, @@ -480,6 +481,8 @@ pvr_fence_context_create(void *dev_cookie, goto err_out; } + fctx->dev_cookie = dev_cookie; + eError = pvr_fence_context_register_dbg(&fctx->dbg_request_handle, dev_cookie, fctx); @@ -489,6 +492,10 @@ pvr_fence_context_create(void *dev_cookie, goto err_destroy_ctx; } + PVR_FENCE_CTX_TRACE(fctx, + "%s: Created fence context (%s)[%p], dev_cookie %p\n", + __func__, fctx->name, fctx, dev_cookie); + return fctx; err_destroy_ctx: @@ -506,7 +513,7 @@ static void pvr_fence_context_destroy_kref(struct kref *kref) trace_pvr_fence_context_destroy_kref(fctx); - schedule_work(&fctx->destroy_work); + queue_work(NativeSyncGetFenceCtxDestroyWq(), &fctx->destroy_work); } /** @@ -800,7 +807,7 @@ pvr_fence_foreign_release(struct dma_fence *fence) PVR_FENCE_TRACE(&pvr_fence->base, "released fence for foreign fence %llu#%d (%s)\n", (u64) pvr_fence->fence->context, - pvr_fence->fence->seqno, pvr_fence->name); + (int)pvr_fence->fence->seqno, pvr_fence->name); trace_pvr_fence_foreign_release(pvr_fence); spin_lock_irqsave(&fctx->list_lock, flags); @@ -831,8 +838,6 @@ pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb) struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb); struct pvr_fence_context *fctx = pvr_fence->fctx; - WARN_ON_ONCE(is_pvr_fence(fence)); - /* Callback registered by dma_fence_add_callback can be called from an atomic ctx */ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC); @@ -843,7 +848,7 @@ pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb) PVR_FENCE_TRACE(&pvr_fence->base, "foreign fence %llu#%d signalled (%s)\n", (u64) pvr_fence->fence->context, - pvr_fence->fence->seqno, pvr_fence->name); + (int)pvr_fence->fence->seqno, pvr_fence->name); /* Drop the reference on the base fence */ dma_fence_put(&pvr_fence->base); @@ -882,15 +887,30 @@ pvr_fence_create_from_fence(struct pvr_fence_context *fctx, unsigned long flags; PVRSRV_ERROR srv_err; int err; + bool mirror_other_dev_fence = false; + char tempString[40] = {0}; - if (pvr_fence) { - if (WARN_ON(fence->ops == &pvr_fence_foreign_ops)) - return NULL; - dma_fence_get(fence); + PVR_FENCE_TRACE(fence, "%s: fence @%p, pvr_fence = %p fctx = %p, dev %p\n", + __func__, fence, pvr_fence, fctx, (fctx ? 
fctx->dev_cookie : NULL)); - PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n", - name); - return pvr_fence; + if (pvr_fence) { + if ((SyncCheckpointCommonDeviceIDs(sync_checkpoint_ctx, + pvr_fence->fctx->dev_cookie)) || + (fctx->dev_cookie == pvr_fence->fctx->dev_cookie)) { + if (WARN_ON(fence->ops == &pvr_fence_foreign_ops)) + return NULL; + dma_fence_get(fence); + + PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n", + name); + return pvr_fence; + } else { + snprintf(tempString, sizeof(tempString), "Mirror(FWAddr0x%x)", + SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint)); + mirror_other_dev_fence = true; + + PVR_FENCE_TRACE(fence, "MIRRORED FENCE '%s'", tempString); + } } if (!try_module_get(THIS_MODULE)) @@ -901,15 +921,24 @@ pvr_fence_create_from_fence(struct pvr_fence_context *fctx, * here */ pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); - if (!pvr_fence) + if (!pvr_fence) { + pr_err("%s: kmem_cache_alloc() failed", + __func__); goto err_module_put; + } srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, - SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, + (mirror_other_dev_fence ? + SYNC_CHECKPOINT_MIRRORED_CHECKPOINT : + SYNC_CHECKPOINT_FOREIGN_CHECKPOINT), fence_fd, - name, &pvr_fence->sync_checkpoint); - if (srv_err != PVRSRV_OK) + mirror_other_dev_fence ? tempString : name, + &pvr_fence->sync_checkpoint); + if (srv_err != PVRSRV_OK) { + pr_err("%s: SyncCheckpointAlloc() failed (srv_err=%d)", + __func__, srv_err); goto err_free_pvr_fence; + } INIT_LIST_HEAD(&pvr_fence->fence_head); INIT_LIST_HEAD(&pvr_fence->signal_head); @@ -941,7 +970,7 @@ pvr_fence_create_from_fence(struct pvr_fence_context *fctx, PVR_FENCE_TRACE(&pvr_fence->base, "created fence from foreign fence %llu#%d (%s)\n", (u64) pvr_fence->fence->context, - pvr_fence->fence->seqno, name); + (int)pvr_fence->fence->seqno, name); err = dma_fence_add_callback(fence, &pvr_fence->cb, pvr_fence_foreign_signal_sync); @@ -952,6 +981,7 @@ pvr_fence_create_from_fence(struct pvr_fence_context *fctx, goto err_put_ref; } + /* * The fence has already signalled so set the sync as signalled. * The "signalled" hwperf packet should be emitted because the @@ -962,7 +992,7 @@ pvr_fence_create_from_fence(struct pvr_fence_context *fctx, PVR_FENCE_TRACE(&pvr_fence->base, "foreign fence %llu#%d already signaled (%s)\n", (u64) pvr_fence->fence->context, - pvr_fence->fence->seqno, + (int)pvr_fence->fence->seqno, name); dma_fence_put(&pvr_fence->base); } @@ -1085,6 +1115,7 @@ pvr_fence_get_checkpoint(struct pvr_fence *update_fence) * pvr_sync_file.c if the driver determines any GPU work * is stuck waiting for a sync checkpoint representing a * foreign sync to be signalled. + * @fctx: fence context * @nr_ufos: number of ufos in vaddrs * @vaddrs: array of FW addresses of UFOs which the * driver is waiting on. 
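For reference, a minimal sketch of the deferred-signalling pattern used by pvr_exp_fence_context_signal_fences() above: collect the ready fences on a private list while holding the spinlock, then call dma_fence_signal() after the lock is dropped. The demo_* names are hypothetical and stand in for the driver's own types.

#include <linux/dma-fence.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_fence {
	struct dma_fence base;
	struct list_head signal_head;
	bool ready;			/* stand-in for the sync-checkpoint test */
};

struct demo_fence_context {
	spinlock_t list_lock;
	struct list_head signal_list;	/* of demo_fence.signal_head */
};

static void demo_signal_ready_fences(struct demo_fence_context *ctx)
{
	struct demo_fence *f, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	/* Phase 1: pick out the ready fences while holding the lock. */
	spin_lock_irqsave(&ctx->list_lock, flags);
	list_for_each_entry_safe(f, tmp, &ctx->signal_list, signal_head) {
		if (f->ready)
			list_move_tail(&f->signal_head, &local);
	}
	spin_unlock_irqrestore(&ctx->list_lock, flags);

	/*
	 * Phase 2: signal with the lock dropped, since a fence callback may
	 * itself take list_lock and would otherwise deadlock.
	 */
	list_for_each_entry_safe(f, tmp, &local, signal_head) {
		list_del_init(&f->signal_head);
		dma_fence_signal(&f->base);
		dma_fence_put(&f->base);	/* ref taken in enable_signaling */
	}
}

The driver's version above has the same two-phase shape, except that it re-takes list_lock just for the list_del() in the second phase.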
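The foreign-fence path in pvr_fence.c above relies on the dma_fence_add_callback() contract: it returns 0 on success and -ENOENT if the fence has already signalled, in which case the callback will never run and the caller must perform the signalled-path work itself. A condensed sketch of that contract, with a hypothetical demo_on_signal callback:

#include <linux/dma-fence.h>

struct demo_waiter {
	struct dma_fence_cb cb;
};

/* May be called from the signaller's (possibly atomic) context. */
static void demo_on_signal(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* e.g. mark the associated sync object as signalled */
}

static int demo_track_foreign_fence(struct dma_fence *fence,
				    struct demo_waiter *w)
{
	int err = dma_fence_add_callback(fence, &w->cb, demo_on_signal);

	if (err == -ENOENT) {
		/* Already signalled: run the callback's work inline. */
		demo_on_signal(fence, &w->cb);
		return 0;
	}

	return err;	/* 0 on success, other negative errno on failure */
}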
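The DVFS changes earlier in this patch push the system-layer OPP table into the kernel with dev_pm_opp_add() and then register the GPU with devfreq (under SUPPORT_PVR_DVFS_GOVERNOR using the driver's own "pvr_balanced" governor). A minimal sketch of the generic flow with the stock simple_ondemand governor; all demo_* names and numbers are illustrative, not the driver's:

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

struct demo_opp { unsigned long freq_hz; unsigned long volt_uv; };

static const struct demo_opp demo_opps[] = {
	{ 250000000, 800000 },
	{ 500000000, 900000 },
};

static struct devfreq_simple_ondemand_data demo_gov_data = {
	.upthreshold      = 90,	/* % busy before scaling up */
	.downdifferential = 10,
};

static int demo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Snap the request to a valid OPP; program clock/regulator here. */
	struct dev_pm_opp *opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);
	return 0;
}

static int demo_get_dev_status(struct device *dev,
			       struct devfreq_dev_status *stat)
{
	/* Report utilisation for the last polling period. */
	stat->busy_time  = 0;
	stat->total_time = 1;
	stat->current_frequency = 250000000;
	return 0;
}

static struct devfreq_dev_profile demo_profile = {
	.polling_ms     = 100,
	.initial_freq   = 250000000,
	.target         = demo_target,
	.get_dev_status = demo_get_dev_status,
};

static int demo_register_devfreq(struct device *dev)
{
	struct devfreq *df;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(demo_opps); i++) {
		err = dev_pm_opp_add(dev, demo_opps[i].freq_hz,
				     demo_opps[i].volt_uv);
		if (err)
			return err;
	}

	df = devm_devfreq_add_device(dev, &demo_profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND,
				     &demo_gov_data);
	if (IS_ERR(df))
		return PTR_ERR(df);

	/* Power transitions would then be bracketed with
	 * devfreq_suspend_device(df) / devfreq_resume_device(df),
	 * as SuspendDVFS()/ResumeDVFS() do in the patch above. */
	return 0;
}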
diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.h index 21870ba9113e..4e2b17096669 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence.h @@ -44,13 +44,6 @@ #if !defined(__PVR_FENCE_H__) #define __PVR_FENCE_H__ -#include - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) -static inline void pvr_fence_cleanup(void) -{ -} -#else #include "services_kernel_client.h" #include "pvr_linux_fence.h" #include @@ -98,6 +91,7 @@ struct pvr_fence_context { struct kref kref; struct work_struct destroy_work; + void *dev_cookie; }; /** @@ -188,14 +182,16 @@ u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, u32 nr_ufos, u32 *vaddrs); +#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) static inline void pvr_fence_cleanup(void) { /* * Ensure all PVR fence contexts have been destroyed, by flushing - * the global workqueue. + * the context destruction workqueue. */ - flush_scheduled_work(); + flush_workqueue(NativeSyncGetFenceCtxDestroyWq()); } +#endif #if defined(PVR_FENCE_DEBUG) #define PVR_FENCE_CTX_TRACE(c, fmt, ...) \ @@ -235,5 +231,4 @@ static inline void pvr_fence_cleanup(void) #define PVR_FENCE_ERR(f, fmt, ...) \ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ #endif /* !defined(__PVR_FENCE_H__) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence_trace.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence_trace.h index e2f044caad1c..95b2a0dc6c7c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence_trace.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_fence_trace.h @@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(pvr_fence_foreign, fence->fence->ops->fence_value_str( fence->fence, __entry->foreign_val, sizeof(__entry->foreign_val)) : - (void) strlcpy(__entry->foreign_val, + (void) OSStringSafeCopy(__entry->foreign_val, "unknown", sizeof(__entry->foreign_val)); __entry->foreign_context = fence->fence->context; ), diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_gputrace.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_gputrace.c index b451a4de02d0..bbfa3504495a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_gputrace.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_gputrace.c @@ -41,11 +41,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) +#include #include -#else -#include -#endif #include "pvrsrv_error.h" #include "pvrsrv_apphint.h" @@ -58,13 +55,32 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pvrsrv.h" #include "pvrsrv_tlstreams.h" #include "tlclient.h" +#include "tlstream.h" #include "pvr_debug.h" +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) #define CREATE_TRACE_POINTS #include "rogue_trace_events.h" +#endif + +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) +#define CREATE_TRACE_POINTS +#include "gpu_work.h" +#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ + +#if defined(SUPPORT_RGX) +#define TRACE_CLK RGXTIMECORR_CLOCK_SCHED +#define TRACE_CLK_STR "local\n" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) #if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -#include "pvr_gpuwork.h" +#define TRACE_FS_CLK "/sys/kernel/tracing/trace_clock" +#else /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)*/ +#define TRACE_FS_CLK "/sys/kernel/debug/tracing/trace_clock" #endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) */ + +#endif /* defined(SUPPORT_RGX) */ /****************************************************************************** Module internal implementation @@ -85,15 +101,21 @@ typedef struct RGX_HWPERF_FTRACE_DATA { POS_LOCK hFTraceResourceLock; IMG_HANDLE hGPUTraceCmdCompleteHandle; - IMG_HANDLE hGPUTraceTLStream; + IMG_HANDLE hGPUFTraceTLStream; IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp; IMG_UINT32 ui32FTraceLastOrdinal; + IMG_BOOL bTrackOrdinals; } RGX_HWPERF_FTRACE_DATA; /* This lock ensures state change of GPU_TRACING on/off is done atomically */ static POS_LOCK ghGPUTraceStateLock; static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) +/* This is the FTrace clock source prior to driver initialisation */ +static IMG_CHAR gszLastClockSource[32] = {0}; +#endif + /* This lock ensures that the reference counting operation on the FTrace UFO * events and enable/disable operation on firmware event are performed as * one atomic operation. 
This should ensure that there are no race conditions @@ -119,6 +141,119 @@ static PVRSRV_ERROR _GpuTraceDisable( IMG_BOOL bDeInit); static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE); +static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, void *pBuffer, + IMG_UINT32 ui32ReadLen); + +static void _FTrace_FWOnReaderOpenCB(void *pvArg) +{ + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; + psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_FTRACE] = IMG_FALSE; +} + +static void _FTrace_FWOnReaderCloseCB(void *pvArg) +{ + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; + psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_FTRACE] = IMG_TRUE; +} + +/* Currently supported by default */ +#if defined(SUPPORT_TL_PRODUCER_CALLBACK) +static PVRSRV_ERROR GPUTraceTLCB(IMG_HANDLE hStream, + IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) +{ + /* IN DEV: Not required as the streams goal is to be a copy of HWPerf */ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(hStream); + PVR_UNREFERENCED_PARAMETER(ui32ReqOp); + PVR_UNREFERENCED_PARAMETER(ui32Resp); + PVR_UNREFERENCED_PARAMETER(pvUser); + + return eError; +} +#endif /* defined(SUPPORT_TL_PRODUCER_CALLBACK) */ + +#if defined(SUPPORT_RGX) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) +/* Configure the FTrace clock source to use the DDK apphint clock source */ +static void PVRGpuTraceInitFTraceClockSource(void) +{ + int ret, i, j; + bool bFound = false; + loff_t pos = 0; + char str[64]; + + /* Just force the value to be what the DDK says it is + Note for filp_open, the mode is only used for O_CREAT + Hence its value doesn't matter in this context + */ + struct file *filep = filp_open(TRACE_FS_CLK, O_RDWR, 0); + PVR_LOG_RETURN_VOID_IF_FALSE(!IS_ERR(filep), "TraceFS not found"); + + PVR_LOG_VA(PVR_DBG_MESSAGE, + "Writing %s to %s to enable parallel HWPerf and FTrace support", + TRACE_CLK_STR, TRACE_FS_CLK); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + ret = kernel_read(filep, str, sizeof(str)-1, &pos); +#else + ret = kernel_read(filep, pos, str, sizeof(str)-1); +#endif + PVR_LOG_RETURN_VOID_IF_FALSE((ret > 0), "TraceFS Read failed"); + str[ret] = 0; + pos = 0; + + /* Determine clock value. 
trace_clock value is stored like [<clock name>] + * File example: [local] global counter mono mono_raw x86-tsc + */ + for (i = 0, j = 0; i < sizeof(str) && j < sizeof(gszLastClockSource); i++) + { + if (str[i] == ']') + { + break; + } + else if (str[i] == '[') + { + bFound = true; + } + else if (bFound) + { + gszLastClockSource[j] = str[i]; + j++; + } + } + PVR_LOG_VA(PVR_DBG_MESSAGE, "Got %s from FTraceFS", gszLastClockSource); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + ret = kernel_write(filep, TRACE_CLK_STR, sizeof(TRACE_CLK_STR), &pos); +#else + ret = kernel_write(filep, TRACE_CLK_STR, sizeof(TRACE_CLK_STR), pos); +#endif + PVR_LOG_IF_FALSE((ret > 0), "Setting FTrace clock source failed"); + + filp_close(filep, NULL); +} + +/* Reset the FTrace clock source to the original clock source */ +static void PVRGpuTraceDeinitFTraceClockSource(void) +{ + int ret; + loff_t pos = 0; + + /* Return the original value of the file */ + struct file *filep = filp_open(TRACE_FS_CLK, O_WRONLY, 0); + PVR_LOG_RETURN_VOID_IF_FALSE(!IS_ERR(filep), "TraceFS not found"); + + /* FTraceFS write will ignore any writes to it that don't match a clock source */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + ret = kernel_write(filep, gszLastClockSource, sizeof(gszLastClockSource), &pos); +#else + ret = kernel_write(filep, gszLastClockSource, sizeof(gszLastClockSource), pos); +#endif + PVR_LOG_IF_FALSE((ret >= 0), "Setting FTrace clock source failed"); + + filp_close(filep, NULL); +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) */ +#endif /* defined(SUPPORT_RGX) */ PVRSRV_ERROR PVRGpuTraceSupportInit(void) { @@ -137,19 +272,23 @@ PVRSRV_ERROR PVRGpuTraceSupportInit(void) eError = OSLockCreate(&ghGPUTraceStateLock); PVR_LOG_RETURN_IF_ERROR (eError, "OSLockCreate"); -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) - eError = GpuTraceWorkPeriodInitialize(); - PVR_LOG_RETURN_IF_ERROR (eError, "GpuTraceWorkPeriodInitialize"); -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ +#if defined(SUPPORT_RGX) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) + PVRGpuTraceInitFTraceClockSource(); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) */ +#endif /* defined(SUPPORT_RGX) */ return PVRSRV_OK; } void PVRGpuTraceSupportDeInit(void) { -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) - GpuTraceSupportDeInitialize(); -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ +#if defined(SUPPORT_RGX) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) + PVRGpuTraceDeinitFTraceClockSource(); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) */ +#endif /* defined(SUPPORT_RGX) */ + if (ghGPUTraceStateLock) { OSLockDestroy(ghGPUTraceStateLock); @@ -162,13 +301,57 @@ void PVRGpuTraceSupportDeInit(void) } } +/* Called from RGXHWPerfInitOnDemandL2Stream() which is always called from + * a critical section protected by hHWPerfLock. 
*/ +PVRSRV_ERROR PVRGpuTraceInitStream(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError; + IMG_CHAR pszFTraceStreamName[sizeof(PVRSRV_TL_FTRACE_RGX_FW_STREAM) + 4]; + /* + 4 is used to allow names up to "ftrace_999", which is enough */ + + IMG_HANDLE hStream = NULL; + + /* form the FTrace stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszFTraceStreamName, sizeof(pszFTraceStreamName), "%s%d", + PVRSRV_TL_FTRACE_RGX_FW_STREAM, + psDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form FTrace stream name for device %d", + __func__, + psDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = TLStreamCreate(&hStream, + pszFTraceStreamName, + psDevInfo->ui32RGXL2HWPerfBufSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, + _FTrace_FWOnReaderOpenCB, psDevInfo, + _FTrace_FWOnReaderCloseCB, psDevInfo, +#if !defined(SUPPORT_TL_PRODUCER_CALLBACK) + NULL, NULL +#else + /* Not enabled by default */ + GPUTraceTLCB, psDevInfo +#endif + ); + PVR_RETURN_IF_ERROR(eError); + + psDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_FTRACE] = hStream; + psDevInfo->uiHWPerfStreamCount++; + PVR_ASSERT(psDevInfo->uiHWPerfStreamCount <= RGX_HWPERF_L2_STREAM_LAST); + + return PVRSRV_OK; +} + PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) { PVRSRV_ERROR eError; RGX_HWPERF_FTRACE_DATA *psData; PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVRSRV_VZ_RET_IF_MODE(GUEST, DEVNODE, psDeviceNode, PVRSRV_OK); psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA)); psDevInfo->pvGpuFtraceData = psData; @@ -193,7 +376,7 @@ void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData; - PVRSRV_VZ_RETN_IF_MODE(GUEST); + PVRSRV_VZ_RETN_IF_MODE(GUEST, DEVNODE, psDeviceNode); if (psData) { /* first disable the tracing, to free up TL resources */ @@ -211,7 +394,7 @@ void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) } } -IMG_BOOL PVRGpuTraceIsEnabled(void) +static INLINE IMG_BOOL PVRGpuTraceIsEnabled(void) { return gbFTraceGPUEventsEnabled; } @@ -220,6 +403,8 @@ void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) { if (PVRGpuTraceIsEnabled()) { + IMG_BOOL bEnable = IMG_FALSE; + PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE); if (eError != PVRSRV_OK) { @@ -227,6 +412,7 @@ void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) " (%s)", PVRSRVGetErrorString(eError))); } +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) /* below functions will enable FTrace events which in turn will * execute HWPerf callbacks that set appropriate filter values * note: unfortunately the functions don't allow to pass private @@ -237,17 +423,36 @@ void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) /* single events can be enabled by calling trace_set_clr_event() * with the event name, e.g.: * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */ -#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */ - if (trace_set_clr_event("rogue", NULL, 1)) + if (trace_set_clr_event("rogue", NULL, 1) != 0) { PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event" - " group")); + " group")); } else { PVR_LOG(("FTrace events from \"rogue\" group enabled")); + bEnable = IMG_TRUE; + } +#endif + +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + if 
(trace_set_clr_event("power", "gpu_work_period", 1) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu_work_period\" event")); + } + else + { + PVR_LOG(("FTrace event from \"gpu_work_period\" enabled")); + bEnable = IMG_TRUE; + } +#endif + + if (bEnable) + { + /* this enables FTrace globally (if not enabled nothing will appear + * in the FTrace buffer) */ + tracing_on(); } -#endif /* defined(CONFIG_EVENT_TRACING) */ } } @@ -258,7 +463,8 @@ static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) PVRSRV_ERROR eError = PVRSRV_OK; RGX_HWPERF_FTRACE_DATA *psFtraceData; PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; - IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; + IMG_CHAR pszFTraceStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 4]; + IMG_UINT64 uiFilter; PVR_DPF_ENTERED; @@ -269,66 +475,75 @@ static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); /* return if already enabled */ - if (psFtraceData->hGPUTraceTLStream) + if (psFtraceData->hGPUFTraceTLStream != NULL) { return PVRSRV_OK; } -#if defined(SUPPORT_RGX) + uiFilter = +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO | +#endif +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_TRACE_EVENT_GPU_WORK_PERIOD) | +#endif + 0; + +#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) /* Signal FW to enable event generation */ if (psRgxDevInfo->bFirmwareInitialised) { - IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter & - (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO); - - eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, - RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, - RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | - ui64UFOFilter); - PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out); + eError = PVRSRVRGXCtrlHWPerfFW(psRgxDevNode, + RGX_HWPERF_L2_STREAM_FTRACE, + uiFilter, HWPERF_FILTER_OPERATION_SET); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfFW", err_out); } else #endif { /* only set filter and exit */ - psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | - ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) & - psRgxDevInfo->ui64HWPerfFilter); - - PVR_DPF((PVR_DBG_WARNING, - "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", - psRgxDevInfo->ui64HWPerfFilter)); + (void) RGXHWPerfFwSetEventFilter(psRgxDevInfo, RGX_HWPERF_L2_STREAM_FTRACE, uiFilter); return PVRSRV_OK; } - /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ - if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", - PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32OsDeviceID) < 0) + /* form the FTrace stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszFTraceStreamName, sizeof(pszFTraceStreamName), "%s%d", + PVRSRV_TL_FTRACE_RGX_FW_STREAM, psRgxDevNode->sDevId.i32KernelDeviceID) < 0) { PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to form HWPerf stream name for device %d", + "%s: Failed to form FTrace stream name for device %d", __func__, - psRgxDevNode->sDevId.i32OsDeviceID)); + psRgxDevNode->sDevId.i32KernelDeviceID)); return PVRSRV_ERROR_INVALID_PARAMS; } - /* Open the TL Stream for HWPerf data consumption */ + /* In case there was any data copied from the L1 buffer to the FTrace L2 + * buffer in the narrow window between disabling 
the FTrace and MISR + * running, drop this data to make the ordinal tracking correct. */ + TLStreamReset(psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_FTRACE]); + + /* Open the TL Stream for FTrace data consumption */ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, - pszHWPerfStreamName, + pszFTraceStreamName, PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, - &psFtraceData->hGPUTraceTLStream); + &psFtraceData->hGPUFTraceTLStream); PVR_LOG_GOTO_IF_ERROR(eError, "TLClientOpenStream", err_out); #if defined(SUPPORT_RGX) - if (RGXTimeCorrGetClockSource(psRgxDevNode) != RGXTIMECORR_CLOCK_SCHED) + if (RGXTimeCorrGetClockSource(psRgxDevNode) != TRACE_CLK) { - /* Set clock source for timer correlation data to sched_clock */ - psRgxDevInfo->ui32LastClockSource = RGXTimeCorrGetClockSource(psRgxDevNode); - RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED); + /* Set clock source for timer correlation data to hwperf clock */ + eError = RGXTimeCorrSetClockSource(psRgxDevNode, TRACE_CLK); + PVR_LOG_IF_ERROR(eError, "RGXTimeCorrSetClockSource"); } #endif + /* Reset the ordinal tracking flag. We should skip checking for gaps in + * ordinal on the first run after FTrace is enabled in case another HWPerf + * consumer was connected while FTrace was disabled. */ + psFtraceData->bTrackOrdinals = IMG_FALSE; + /* Reset the OS timestamp coming from the timer correlation data * associated with the latest HWPerf event we processed. */ @@ -345,11 +560,10 @@ static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) err_out: PVR_DPF_RETURN_RC(eError); - err_close_stream: TLClientCloseStream(DIRECT_BRIDGE_HANDLE, - psFtraceData->hGPUTraceTLStream); - psFtraceData->hGPUTraceTLStream = NULL; + psFtraceData->hGPUFTraceTLStream); + psFtraceData->hGPUFTraceTLStream = NULL; goto err_out; } @@ -374,16 +588,12 @@ static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL /* if FW is not yet initialised, just set filter and exit */ if (!psRgxDevInfo->bFirmwareInitialised) { - psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE; -#if !defined(NO_HARDWARE) - PVR_DPF((PVR_DBG_WARNING, - "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", - psRgxDevInfo->ui64HWPerfFilter)); -#endif + (void) RGXHWPerfFwSetEventFilter(psRgxDevInfo, RGX_HWPERF_L2_STREAM_FTRACE, + RGX_HWPERF_EVENT_MASK_NONE); return PVRSRV_OK; } - if (NULL == psFtraceData->hGPUTraceTLStream) + if (psFtraceData->hGPUFTraceTLStream == NULL) { /* Tracing already disabled, just return */ return PVRSRV_OK; @@ -392,10 +602,11 @@ static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL #if defined(SUPPORT_RGX) if (!bDeInit) { - eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, - RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, - (RGX_HWPERF_EVENT_MASK_NONE)); - PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + eError = PVRSRVRGXCtrlHWPerfFW(psRgxDevNode, + RGX_HWPERF_L2_STREAM_FTRACE, + RGX_HWPERF_EVENT_MASK_NONE, + HWPERF_FILTER_OPERATION_SET); + PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfFW"); } #endif @@ -408,35 +619,16 @@ static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL psFtraceData->hGPUTraceCmdCompleteHandle = NULL; } - if (psFtraceData->hGPUTraceTLStream) - { - IMG_PBYTE pbTmp = NULL; - IMG_UINT32 ui32Tmp = 0; - - /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there - * are some events left unprocessed in this FTrace/systrace "session" - * (note that even if we have just disabled HWPerf on the FW some packets - * could have been generated and 
already copied to L2 by the MISR handler). - * - * With the following calls we will both copy new data to the Host buffer - * (done by the producer callback in TLClientAcquireData) and advance - * the read offset in the buffer to catch up with the latest events. - */ - eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, - psFtraceData->hGPUTraceTLStream, - &pbTmp, &ui32Tmp); - PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); - - /* Let close stream perform the release data on the outstanding acquired data */ - eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, - psFtraceData->hGPUTraceTLStream); - PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + /* Let close stream perform the release data on the outstanding acquired + * data */ + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUFTraceTLStream); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); - psFtraceData->hGPUTraceTLStream = NULL; - } + psFtraceData->hGPUFTraceTLStream = NULL; #if defined(SUPPORT_RGX) - if (psRgxDevInfo->ui32LastClockSource != RGXTIMECORR_CLOCK_SCHED) + if (psRgxDevInfo->ui32LastClockSource != TRACE_CLK) { RGXTimeCorrSetClockSource(psRgxDevNode, psRgxDevInfo->ui32LastClockSource); } @@ -451,8 +643,6 @@ static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, PVRSRV_ERROR eError = PVRSRV_OK; RGX_HWPERF_FTRACE_DATA *psFtraceData; - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); - PVR_DPF_ENTERED; PVR_ASSERT(psRgxDevInfo); @@ -473,7 +663,7 @@ static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue) { - PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED; PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_DEVICE_NODE *psDeviceNode; @@ -483,11 +673,17 @@ static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue) /* enable/disable GPU trace on all devices */ while (psDeviceNode) { - eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); - if (eError != PVRSRV_OK) + PVRSRV_RGXDEV_INFO *psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psRgxDevInfo)) +#endif { - break; + eError = _GpuTraceSetEnabled(psRgxDevInfo, bNewValue); + if (eError != PVRSRV_OK) + break; } + psDeviceNode = psDeviceNode->psNext; } @@ -502,6 +698,8 @@ PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode, return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); } +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + /* ----- HWPerf to FTrace packet processing and events injection ------------ */ static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType) @@ -532,7 +730,6 @@ static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType) return aszKickType[eKickType]; } - void PVRGpuTraceEnqueueEvent( PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FirmwareCtx, @@ -625,21 +822,31 @@ static void _GpuTraceEventsLost( { trace_rogue_events_lost(eStreamId, ui32GpuId, ui32LastOrdinal, ui32CurrOrdinal); } +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) /* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. 
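 * Roughly, using the time-correlation fields read below (the exact
 * scaling macro lives elsewhere in the DDK, so treat this as an
 * illustrative sketch rather than the literal implementation):
 *
 *   delta_cr = ui64EventTimestamp - psTimeCorr->ui64CRTimeStamp;
 *   event_os = psTimeCorr->ui64OSTimeStamp
 *            + scale_to_ns(delta_cr, psTimeCorr->ui64CRDeltaToOSDeltaKNs);
 *
 * i.e. the CR-timer delta since the last correlation sample is rescaled
 * to nanoseconds and added to the correlated OS timestamp.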
*/ static uint64_t CalculateEventTimestamp( PVRSRV_RGXDEV_INFO *psDevInfo, uint32_t ui32TimeCorrIndex, uint64_t ui64EventTimestamp) { - RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGXFWIF_GPU_UTIL_FW *psGpuUtilFW = psDevInfo->psRGXFWIfGpuUtilFW; RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; - RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex]; - uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp; - uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp; - uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs; + RGXFWIF_TIME_CORR *psTimeCorr; + uint64_t ui64CRTimeStamp; + uint64_t ui64OSTimeStamp; + uint64_t ui64CRDeltaToOSDeltaKNs; uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns; + RGXFwSharedMemCacheOpValue(psGpuUtilFW->sTimeCorr[ui32TimeCorrIndex], INVALIDATE); + psTimeCorr = &psGpuUtilFW->sTimeCorr[ui32TimeCorrIndex]; + ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp; + /* This is configurable using the AppHint 'SecondaryTimerCorrOSClockSource' + * and can be: sched, mono, mono_raw. */ + ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp; + ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs; + if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp) { /* The previous packet had a time reference (time correlation data) more @@ -687,8 +894,10 @@ static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo, psHWPerfPkt->ui64Timestamp); PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d", - pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType)); + pszWorkName != NULL ? pszWorkName : "?", psHWPerfPktData->ui32DMContext, + psHWPerfPktData->ui32IntJobRef, eSwType)); +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) _GpuTraceWorkSwitch(ui64Timestamp, psDevInfo->psDeviceNode->sDevId.ui32InternalID, psHWPerfPktData->ui32DMContext, @@ -697,19 +906,13 @@ static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo, psHWPerfPktData->ui32IntJobRef, pszWorkName, eSwType); - -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) - GpuTraceWorkPeriod(psHWPerfPktData->ui32PID, - psDevInfo->psDeviceNode->sDevId.ui32InternalID, - ui64Timestamp, - psHWPerfPktData->ui32IntJobRef, - (eSwType == PVR_GPUTRACE_SWITCH_TYPE_BEGIN) ? 
- PVR_GPU_WORK_EVENT_START : PVR_GPU_WORK_EVENT_END); -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ PVR_DPF_RETURN; } +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) { @@ -750,12 +953,14 @@ static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo, _GpuTraceFirmware(ui64Timestamp, psDevInfo->psDeviceNode->sDevId.ui32InternalID, pszWorkName, eSwType); } +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) { RGX_HWPERF_EVENT_TYPE eType; RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) IMG_UINT32 ui32HwEventTypeIndex; static const struct { IMG_CHAR* pszName; @@ -790,33 +995,48 @@ static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, { "TDM", _T(BEGIN) }, /* RGX_HWPERF_HW_TDMKICK */ { "TDM", _T(END) }, /* RGX_HWPERF_HW_TDMFINISHED */ { "NULL", _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */ + { "GPU_WORK_PERIOD",_T(SINGLE) }, /* RGX_HWPERF_HW_GPU_WORK_PERIOD */ #undef _T }; static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1, "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE"); +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ PVR_ASSERT(psHWPerfPkt); eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt); - if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1) + if (psFtraceData->bTrackOrdinals) { - RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt); - _GpuTraceEventsLost(eStreamId, psDevInfo->psDeviceNode->sDevId.ui32InternalID, - psFtraceData->ui32FTraceLastOrdinal, - psHWPerfPkt->ui32Ordinal); - PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)", - eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal)); + if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1) + { +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + _GpuTraceEventsLost(RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt), + psDevInfo->psDeviceNode->sDevId.ui32InternalID, + psFtraceData->ui32FTraceLastOrdinal, + psHWPerfPkt->ui32Ordinal); +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ + PVR_DPF((PVR_DBG_WARNING, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)", + RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt), psFtraceData->ui32FTraceLastOrdinal, + psHWPerfPkt->ui32Ordinal)); + } + } + else + { + psFtraceData->bTrackOrdinals = IMG_TRUE; } psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal; +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) /* Process UFO packets */ if (eType == RGX_HWPERF_UFO) { _GpuTraceUfoEvent(psDevInfo, psHWPerfPkt); return IMG_TRUE; } +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) { /* this ID belongs to range 0, so index directly in range 0 */ @@ -830,7 +1050,9 @@ static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, } if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap)) + { goto err_unsupported; + } if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL) { @@ 
-855,28 +1077,57 @@ static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); } + + return IMG_TRUE; } - else if (HWPERF_PACKET_IS_FW_TYPE(eType)) +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + else if (psDevInfo->psDeviceNode->bGPUWorkPeriodFTraceEnabled && + eType == RGX_HWPERF_TRACE_EVENT_GPU_WORK_PERIOD && + RGX_HWPERF_GET_OSID(psHWPerfPkt) == RGXFW_HOST_DRIVER_ID) + { + RGX_HWPERF_GPU_WORK_PERIOD_DATA *psHWPerfPktData; + IMG_UINT64 ui64StartTimestamp, ui64EndTimestamp; + + psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + ui64StartTimestamp = CalculateEventTimestamp(psDevInfo, + psHWPerfPktData->ui32StartTimeCorrIndex, + psHWPerfPktData->ui64GPUWorkPeriodStartTime); + ui64EndTimestamp = CalculateEventTimestamp(psDevInfo, + psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + PVR_ASSERT(ui64EndTimestamp > ui64StartTimestamp); + + trace_gpu_work_period(psDevInfo->psDeviceNode->sDevId.ui32InternalID, + psHWPerfPktData->ui32UID, + ui64StartTimestamp, + ui64EndTimestamp, + ui64EndTimestamp - ui64StartTimestamp); + + return IMG_TRUE; + } +#endif +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + if (HWPERF_PACKET_IS_FW_TYPE(eType)) { _GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt, aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); - } - else - { - goto err_unsupported; - } - return IMG_TRUE; + return IMG_TRUE; + } +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) err_unsupported: +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType)); return IMG_FALSE; } - -static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, - void *pBuffer, IMG_UINT32 ui32ReadLen) +static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, void *pBuffer, + IMG_UINT32 ui32ReadLen) { IMG_UINT32 ui32TlPackets = 0; IMG_UINT32 ui32HWPerfPackets = 0; @@ -946,15 +1197,13 @@ static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, PVR_DPF_RETURN; } - static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) { - PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle; - RGX_HWPERF_FTRACE_DATA* psFtraceData; - PVRSRV_ERROR eError; - IMG_PBYTE pBuffer; - IMG_UINT32 ui32ReadLen; - IMG_BOOL bFTraceLockAcquired = IMG_FALSE; + PVRSRV_RGXDEV_INFO *psDeviceInfo = hCmdCompHandle; + RGX_HWPERF_FTRACE_DATA *psFtraceData; + PVRSRV_ERROR eError; + IMG_PBYTE pBuffer; + IMG_UINT32 ui32ReadLen; PVR_DPF_ENTERED; @@ -962,79 +1211,61 @@ static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) psFtraceData = psDeviceInfo->pvGpuFtraceData; - /* Command-complete notifiers can run concurrently. If this is - * happening, just bail out and let the previous call finish. + /* Command-complete notifiers can run concurrently. If this is happening, + * just bail out and let the previous call finish. * This is ok because we can process the queued packets on the next call. 
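 * (In the refactored code below this is expressed as an early return when
 * OSTryLockAcquire() reports the lock is already held, instead of carrying
 * the old bFTraceLockAcquired flag through to the end of the function.)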
*/ - bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock); - if (IMG_FALSE == bFTraceLockAcquired) + if (!OSTryLockAcquire(psFtraceData->hFTraceResourceLock)) { PVR_DPF_RETURN; } - /* If this notifier is called, it means the TL resources will be valid at-least - * until the end of this call, since the DeInit function will wait on the hFTraceResourceLock - * to clean-up the TL resources and un-register the notifier, so just assert here. + /* If this notifier is called, it means the TL resources will be valid + * at-least until the end of this call, since the DeInit function will wait + * on the hFTraceResourceLock to clean-up the TL resources and un-register + * the notifier, so just assert here. */ - PVR_ASSERT(psFtraceData->hGPUTraceTLStream); + PVR_ASSERT(psFtraceData->hGPUFTraceTLStream != NULL); /* If we have a valid stream attempt to acquire some data */ - eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen); - if (eError == PVRSRV_OK) + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUFTraceTLStream, + &pBuffer, &ui32ReadLen); + if (eError != PVRSRV_OK) { - /* Process the HWPerf packets and release the data */ - if (ui32ReadLen > 0) + if (eError != PVRSRV_ERROR_TIMEOUT) { - PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen)); + PVR_LOG_ERROR(eError, "TLClientAcquireData"); + } - /* Process the transport layer data for HWPerf packets... */ - _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen); + goto unlock; + } - eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream); - if (eError != PVRSRV_OK) - { - PVR_LOG_ERROR(eError, "TLClientReleaseData"); + /* Process the HWPerf packets and release the data */ + if (ui32ReadLen > 0) + { + PVR_DPF((PVR_DBG_VERBOSE, "%s: DATA AVAILABLE offset=%p, length=%d", + __func__, pBuffer, ui32ReadLen)); - /* Serious error, disable FTrace GPU events */ + /* Process the transport layer data for HWPerf packets... 
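 * (each TLClientAcquireData()/TLClientReleaseData() pair hands this
 * notifier one contiguous window of the FTrace L2 stream; the per-packet
 * walk itself is done in _GpuTraceProcessPackets())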
*/ + _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen); - /* Release TraceLock so we always have the locking - * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/ - OSLockRelease(psFtraceData->hFTraceResourceLock); - OSLockAcquire(psFtraceData->hFTraceResourceLock); - _GpuTraceDisable(psDeviceInfo, IMG_FALSE); - OSLockRelease(psFtraceData->hFTraceResourceLock); - goto out; + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUFTraceTLStream); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "TLClientReleaseData"); - } - } /* else no data, ignore */ - } - else if (eError != PVRSRV_ERROR_TIMEOUT) - { - PVR_LOG_ERROR(eError, "TLClientAcquireData"); - } - if (bFTraceLockAcquired) - { - OSLockRelease(psFtraceData->hFTraceResourceLock); + /* Serious error, disable FTrace GPU events */ + _GpuTraceDisable(psDeviceInfo, IMG_FALSE); + } } -out: - PVR_DPF_RETURN; -} -#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -PVRSRV_ERROR -PVRSRVGpuTraceWorkPeriodEventStatsRegister(IMG_HANDLE - *phGpuWorkPeriodEventStats) -{ - return GpuTraceWorkPeriodEventStatsRegister(phGpuWorkPeriodEventStats); -} +unlock: + OSLockRelease(psFtraceData->hFTraceResourceLock); -void -PVRSRVGpuTraceWorkPeriodEventStatsUnregister( - IMG_HANDLE hGpuWorkPeriodEventStats) -{ - GpuTraceWorkPeriodEventStatsUnregister(hGpuWorkPeriodEventStats); + PVR_DPF_RETURN; } -#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ /* ----- AppHint interface -------------------------------------------------- */ @@ -1043,12 +1274,44 @@ static PVRSRV_ERROR _GpuTraceIsEnabledCallback( const void *private_data, IMG_BOOL *value) { - PVR_UNREFERENCED_PARAMETER(device); PVR_UNREFERENCED_PARAMETER(private_data); - *value = gbFTraceGPUEventsEnabled; +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (device && PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, device)) + { + *value = IMG_FALSE; - return PVRSRV_OK; + return PVRSRV_OK; + } + else +#endif + { +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData ? psPVRSRVData->psDeviceNodeList : NULL; + + while (psDeviceNode) + { + PVRSRV_RGXDEV_INFO *psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psRgxDevInfo)) + { + *value = gbFTraceGPUEventsEnabled; + return PVRSRV_OK; + } + + psDeviceNode = psDeviceNode->psNext; + } + + *value = IMG_FALSE; + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#else + *value = gbFTraceGPUEventsEnabled; + + return PVRSRV_OK; +#endif + } } static PVRSRV_ERROR _GpuTraceSetEnabledCallback( @@ -1056,37 +1319,76 @@ static PVRSRV_ERROR _GpuTraceSetEnabledCallback( const void *private_data, IMG_BOOL value) { - PVR_UNREFERENCED_PARAMETER(device); +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + if (device && PVRSRV_VZ_MODE_IS(GUEST, DEVNODE, device)) + { + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif /* Lock down the state to avoid concurrent writes */ OSLockAcquire(ghGPUTraceStateLock); if (value != gbFTraceGPUEventsEnabled) { -#if defined(PVRSRV_NEED_PVR_TRACE) +#if defined(PVRSRV_NEED_PVR_DPF) const IMG_CHAR *pszOperation = value ? 
"enable" : "disable"; + /* in case MESSAGE level is compiled out */ + PVR_UNREFERENCED_PARAMETER(pszOperation); #endif - if (_GpuTraceSetEnabledForAllDevices(value) != PVRSRV_OK) +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + if (trace_set_clr_event("rogue", NULL, (int) value) != 0) { - PVR_TRACE(("FAILED to %s GPU FTrace for all devices", pszOperation)); + PVR_DPF((PVR_DBG_ERROR, "FAILED to %s GPU FTrace event group", + pszOperation)); goto err_restore_state; } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "FTrace events from \"rogue\" group %sd", + pszOperation)); + } +#endif -#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */ - if (trace_set_clr_event("rogue", NULL, (int) value) != 0) +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + if (trace_set_clr_event("power", "gpu_work_period", (int) value) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "FAILED to %s gpu_work_period GPU FTrace event", + pszOperation)); + goto err_restore_state; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "FTrace event from \"gpu_work_period\" %sd", + pszOperation)); + } +#endif + + if (value) + { + /* this enables FTrace globally (if not enabled nothing will appear + * in the FTrace buffer) */ + tracing_on(); + } + + /* The HWPerf supplier is activated here, + The FTrace consumer is activated above, + The consumer should be active before the supplier */ + if (_GpuTraceSetEnabledForAllDevices(value) != PVRSRV_OK) { - PVR_TRACE(("FAILED to %s GPU FTrace event group", pszOperation)); + PVR_DPF((PVR_DBG_ERROR, "FAILED to %s GPU FTrace for all devices", + pszOperation)); goto err_restore_state; } -#endif /* defined(CONFIG_EVENT_TRACING) */ - PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED")); + PVR_DPF((PVR_DBG_MESSAGE, "%s GPU FTrace", value ? "ENABLED" : "DISABLED")); gbFTraceGPUEventsEnabled = value; } else { - PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled")); + PVR_DPF((PVR_DBG_MESSAGE, "GPU FTrace already %s!", + value ? "enabled" : "disabled")); } OSLockRelease(ghGPUTraceStateLock); @@ -1096,7 +1398,16 @@ static PVRSRV_ERROR _GpuTraceSetEnabledCallback( err_restore_state: /* On failure, partial enable/disable might have resulted. Try best to * restore to previous state. 
Ignore error */ - _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled); + (void) _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled); + +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + (void) trace_set_clr_event("rogue", NULL, + (int) gbFTraceGPUEventsEnabled); +#endif +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + (void) trace_set_clr_event("power", "gpu_work_period", + (int) gbFTraceGPUEventsEnabled); +#endif OSLockRelease(ghGPUTraceStateLock); @@ -1105,11 +1416,6 @@ static PVRSRV_ERROR _GpuTraceSetEnabledCallback( void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) { - /* Do not register callback handlers if we are in GUEST mode */ - if (PVRSRV_VZ_MODE_IS(GUEST)) - { - return; - } PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU, _GpuTraceIsEnabledCallback, _GpuTraceSetEnabledCallback, @@ -1122,10 +1428,6 @@ void PVRGpuTraceEnableUfoCallback(void) { PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_DEVICE_NODE *psDeviceNode; -#if defined(SUPPORT_RGX) - PVRSRV_RGXDEV_INFO *psRgxDevInfo; - PVRSRV_ERROR eError; -#endif /* Lock down events state, for consistent value of guiUfoEventRef */ OSLockAcquire(ghLockFTraceEventLock); @@ -1138,16 +1440,14 @@ void PVRGpuTraceEnableUfoCallback(void) while (psDeviceNode) { #if defined(SUPPORT_RGX) - IMG_UINT64 ui64Filter; + PVRSRV_ERROR eError; - psRgxDevInfo = psDeviceNode->pvDevice; - ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) | - psRgxDevInfo->ui64HWPerfFilter; /* Small chance exists that ui64HWPerfFilter can be changed here and * the newest filter value will be changed to the old one + UFO event. * This is not a critical problem. */ - eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, - IMG_FALSE, ui64Filter); + eError = PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO), + HWPERF_FILTER_OPERATION_BIT_OR); if (eError == PVRSRV_ERROR_NOT_INITIALISED) { /* If we land here that means that the FW is not initialised yet. @@ -1156,7 +1456,7 @@ void PVRGpuTraceEnableUfoCallback(void) } else if (eError != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32OsDeviceID)); + PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32KernelDeviceID)); } #endif psDeviceNode = psDeviceNode->psNext; @@ -1169,9 +1469,6 @@ void PVRGpuTraceEnableUfoCallback(void) void PVRGpuTraceDisableUfoCallback(void) { -#if defined(SUPPORT_RGX) - PVRSRV_ERROR eError; -#endif PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_DEVICE_NODE *psDeviceNode; @@ -1195,16 +1492,14 @@ void PVRGpuTraceDisableUfoCallback(void) while (psDeviceNode) { #if defined(SUPPORT_RGX) - IMG_UINT64 ui64Filter; - PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; - ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) & - psRgxDevInfo->ui64HWPerfFilter; /* Small chance exists that ui64HWPerfFilter can be changed here and * the newest filter value will be changed to the old one + UFO event. * This is not a critical problem. */ - eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, - IMG_FALSE, ui64Filter); + eError = PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO), + HWPERF_FILTER_OPERATION_BIT_CLR); if (eError == PVRSRV_ERROR_NOT_INITIALISED) { /* If we land here that means that the FW is not initialised yet. 
@@ -1214,7 +1509,7 @@ void PVRGpuTraceDisableUfoCallback(void) else if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d", - psDeviceNode->sDevId.i32OsDeviceID)); + psDeviceNode->sDevId.i32KernelDeviceID)); } #endif @@ -1231,14 +1526,13 @@ void PVRGpuTraceEnableFirmwareActivityCallback(void) #if defined(SUPPORT_RGX) PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_DEVICE_NODE *psDeviceNode; - PVRSRV_RGXDEV_INFO *psRgxDevInfo; - uint64_t ui64Filter, ui64FWEventsFilter = 0; + IMG_UINT64 uiFilter = 0; int i; for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) { - ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); + uiFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); } OSLockAcquire(ghLockFTraceEventLock); @@ -1249,11 +1543,9 @@ void PVRGpuTraceEnableFirmwareActivityCallback(void) while (psDeviceNode) { PVRSRV_ERROR eError; - psRgxDevInfo = psDeviceNode->pvDevice; - ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter; - eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, - IMG_FALSE, ui64Filter); + eError = PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, + uiFilter, HWPERF_FILTER_OPERATION_BIT_OR); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware" @@ -1274,7 +1566,7 @@ void PVRGpuTraceDisableFirmwareActivityCallback(void) #if defined(SUPPORT_RGX) PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); PVRSRV_DEVICE_NODE *psDeviceNode; - IMG_UINT64 ui64FWEventsFilter = ~0; + IMG_UINT64 uiFilter = 0; int i; /* We have to check if lock is valid because on driver unload @@ -1294,17 +1586,14 @@ void PVRGpuTraceDisableFirmwareActivityCallback(void) for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) { - ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i); + uiFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); } /* Disable all FW events on all the devices */ while (psDeviceNode) { - PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; - IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter; - - if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, - IMG_FALSE, ui64Filter) != PVRSRV_OK) + if (PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, + uiFilter, HWPERF_FILTER_OPERATION_BIT_CLR) != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings.")); } @@ -1317,6 +1606,75 @@ void PVRGpuTraceDisableFirmwareActivityCallback(void) #endif /* defined(SUPPORT_RGX) */ } +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int PVRGpuTraceEnableWorkPeriodCallback(void) +#else +void PVRGpuTraceEnableWorkPeriodCallback(void) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + psDeviceNode = psPVRSRVData->psDeviceNodeList; + + /* enable/disable GPU trace on all devices */ + while (psDeviceNode) + { +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + PVRSRV_RGXDEV_INFO *psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psRgxDevInfo)) +#endif + { + psDeviceNode->bGPUWorkPeriodFTraceEnabled = true; + eError = PVRSRV_OK; + } + + psDeviceNode = 
psDeviceNode->psNext; + } + + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + if (eError != PVRSRV_OK) + return -ENODEV; + return 0; +#else + return; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ +} + +void PVRGpuTraceDisableWorkPeriodCallback(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + + if (!psPVRSRVData) + return; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + psDeviceNode = psPVRSRVData->psDeviceNodeList; + + while (psDeviceNode) + { +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + PVRSRV_RGXDEV_INFO *psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVINFO, psRgxDevInfo)) +#endif + { + psDeviceNode->bGPUWorkPeriodFTraceEnabled = false; + } + + psDeviceNode = psDeviceNode->psNext; + } + + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); +} +#endif /****************************************************************************** End of file (pvr_gputrace.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_ion_stats.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_ion_stats.h index c34180785453..c4748138ccd8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_ion_stats.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_ion_stats.h @@ -56,6 +56,11 @@ void PVRSRVIonStatsDestroy(void); void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf); void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf); + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf); +void PVRSRVIonReviveMemAllocRecord(const struct dma_buf *psDmaBuf); +#endif #else static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void) { @@ -75,6 +80,17 @@ static INLINE void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf) { PVR_UNREFERENCED_PARAMETER(psDmaBuf); } + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +static INLINE void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf) +{ + PVR_UNREFERENCED_PARAMETER(psDmaBuf); +} +static INLINE void PVRSRVIonReviveMemAllocRecord(const struct dma_buf *psDmaBuf) +{ + PVR_UNREFERENCED_PARAMETER(psDmaBuf); +} +#endif #endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */ #endif /* PVR_ION_STATS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_pci_drv.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_pci_drv.c deleted file mode 100644 index 89bdf48d0489..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_pci_drv.c +++ /dev/null @@ -1,229 +0,0 @@ -/* - * @File - * @Title PowerVR DRM PCI driver - * @Codingstyle LinuxKernel - * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved - * @License Dual MIT/GPLv2 - * - * The contents of this file are subject to the MIT license as set out below. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * Alternatively, the contents of this file may be used under the terms of - * the GNU General Public License Version 2 ("GPL") in which case the provisions - * of GPL are applicable instead of those above. - * - * If you wish to allow use of your version of this file only under the terms of - * GPL, and not to allow others to use your version of this file under the terms - * of the MIT license, indicate your decision by deleting the provisions above - * and replace them with the notice and other provisions required by GPL as set - * out in the file called "GPL-COPYING" included in this distribution. If you do - * not delete the provisions above, a recipient may use your version of this file - * under the terms of either the MIT license or GPL. - * - * This License is also included in this distribution in the file called - * "MIT-COPYING". - * - * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS - * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING - * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR - * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR - * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -#include -#include -#else -#include -#endif - -#include -#include - -#include "module_common.h" -#include "pvr_drv.h" -#include "pvrmodule.h" -#include "sysinfo.h" - -/* This header must always be included last */ -#include "kernel_compatibility.h" - -static struct drm_driver pvr_drm_pci_driver; - -static int pvr_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - struct drm_device *ddev; - int ret; - - DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); - - ddev = drm_dev_alloc(&pvr_drm_pci_driver, &pdev->dev); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) - if (IS_ERR(ddev)) - return PTR_ERR(ddev); -#else - if (!ddev) - return -ENOMEM; -#endif - - ret = pci_enable_device(pdev); - if (ret) - goto err_drm_dev_put; - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)) - ddev->pdev = pdev; -#endif - - /* - * drm_get_pci_dev calls sets the drvdata at this point, to ddev. - * We set the drvdata in the load callback, so there is no need - * to do it again here. The platform driver equivalent of - * drm_get_pci_dev, drm_platform_init, doesn't set the drvdata, - * which is why it is done in the load callback. - * - * The load callback, called from drm_dev_register, is deprecated, - * because of potential race conditions. Calling the function here, - * before calling drm_dev_register, avoids those potential races. 
- */ - BUG_ON(pvr_drm_pci_driver.load != NULL); - ret = pvr_drm_load(ddev, 0); - if (ret) - goto err_pci_dev_disable; - - ret = drm_dev_register(ddev, 0); - if (ret) - goto err_drm_dev_unload; - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) - DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", - pvr_drm_pci_driver.name, - pvr_drm_pci_driver.major, - pvr_drm_pci_driver.minor, - pvr_drm_pci_driver.patchlevel, - pvr_drm_pci_driver.date, - pci_name(pdev), - ddev->primary->index); -#endif - return 0; - -err_drm_dev_unload: - pvr_drm_unload(ddev); -err_pci_dev_disable: - pci_disable_device(pdev); -err_drm_dev_put: - drm_dev_put(ddev); - return ret; -#else - DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); - - return drm_get_pci_dev(pdev, ent, &pvr_drm_pci_driver); -#endif -} - -static void pvr_remove(struct pci_dev *pdev) -{ - struct drm_device *ddev = pci_get_drvdata(pdev); - - DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - drm_dev_unregister(ddev); - - /* The unload callback, called from drm_dev_unregister, is - * deprecated. Call the unload function directly. - */ - BUG_ON(pvr_drm_pci_driver.unload != NULL); - pvr_drm_unload(ddev); - - pci_disable_device(pdev); - - drm_dev_put(ddev); -#else - drm_put_dev(ddev); -#endif -} - -static void pvr_shutdown(struct pci_dev *pdev) -{ - struct drm_device *ddev = pci_get_drvdata(pdev); - - DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); - - PVRSRVDeviceShutdown(ddev); -} - -static const struct pci_device_id pvr_pci_ids[] = { - { PCI_DEVICE(SYS_RGX_DEV_VENDOR_ID, SYS_RGX_DEV_DEVICE_ID) }, -#if defined(SYS_RGX_DEV1_DEVICE_ID) - { PCI_DEVICE(SYS_RGX_DEV_VENDOR_ID, SYS_RGX_DEV1_DEVICE_ID) }, -#endif -#if defined(SYS_RGX_DEV_FROST_VENDOR_ID) - { PCI_DEVICE(SYS_RGX_DEV_FROST_VENDOR_ID, SYS_RGX_DEV_FROST_DEVICE_ID) }, -#endif - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, pvr_pci_ids); - -static struct pci_driver pvr_pci_driver = { - .name = DRVNAME, - .driver.pm = &pvr_pm_ops, - .id_table = pvr_pci_ids, - .probe = pvr_probe, - .remove = pvr_remove, - .shutdown = pvr_shutdown, -}; - -static int __init pvr_init(void) -{ - int err; - - DRM_DEBUG_DRIVER("\n"); - - pvr_drm_pci_driver = pvr_drm_generic_driver; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) && \ - LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) - pvr_drm_pci_driver.set_busid = drm_pci_set_busid; -#endif - - err = PVRSRVDriverInit(); - if (err) - return err; - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - return pci_register_driver(&pvr_pci_driver); -#else - return drm_pci_init(&pvr_drm_pci_driver, &pvr_pci_driver); -#endif -} - -static void __exit pvr_exit(void) -{ - DRM_DEBUG_DRIVER("\n"); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - pci_unregister_driver(&pvr_pci_driver); -#else - drm_pci_exit(&pvr_drm_pci_driver, &pvr_pci_driver); -#endif - PVRSRVDriverDeinit(); - - DRM_DEBUG_DRIVER("done\n"); -} - -module_init(pvr_init); -module_exit(pvr_exit); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_platform_drv.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_platform_drv.c index 9f8c3608bd12..57e27ebcc1c8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_platform_drv.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_platform_drv.c @@ -66,12 +66,12 @@ /* This header must always be included last */ #include "kernel_compatibility.h" -static struct drm_driver pvr_drm_platform_driver; - unsigned int _corefreq_div = 800000000; 
module_param_named(corefreq_div, _corefreq_div, uint, 0644); MODULE_PARM_DESC(corefreq_div, "core frequency div from 1600M, default 800M"); +MODULE_IMPORT_NS(DMA_BUF); +static struct drm_driver pvr_drm_platform_driver; #if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) /* @@ -79,7 +79,6 @@ MODULE_PARM_DESC(corefreq_div, "core frequency div from 1600M, default 800M"); * parameter description should also be updated to match. */ #define MAX_DEVICES 16 - static unsigned int pvr_num_devices = 1; static struct platform_device **pvr_devices; @@ -93,7 +92,7 @@ static int pvr_num_devices_set(const char *val, if (err) return err; - if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES) + if (pvr_num_devices == 0 || pvr_num_devices > PVRSRV_MAX_DEVICES) return -EINVAL; return 0; @@ -104,9 +103,13 @@ static const struct kernel_param_ops pvr_num_devices_ops = { .get = param_get_uint, }; +#define STR(s) #s +#define STRINGIFY(s) STR(s) + module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444); MODULE_PARM_DESC(num_devices, - "Number of platform devices to register (default: 1 - max: 16)"); + "Number of platform devices to register (default: 1 - max: " + STRINGIFY(PVRSRV_MAX_DEVICES) ")"); #endif /* defined(NO_HARDWARE) */ #endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ @@ -128,7 +131,7 @@ static int pvr_devices_register(void) }; unsigned int i; - BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES); + BUG_ON(pvr_num_devices == 0 || pvr_num_devices > PVRSRV_MAX_DEVICES); pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices), GFP_KERNEL); @@ -166,25 +169,15 @@ static void pvr_devices_unregister(void) static int pvr_probe(struct platform_device *pdev) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) struct drm_device *ddev; int ret; DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) if (IS_ERR(ddev)) return PTR_ERR(ddev); -#else - if (!ddev) - return -ENOMEM; -#endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) - /* Needed by drm_platform_set_busid */ - ddev->platformdev = pdev; -#endif /* * The load callback, called from drm_dev_register, is deprecated, @@ -216,11 +209,6 @@ static int pvr_probe(struct platform_device *pdev) err_drm_dev_put: drm_dev_put(ddev); return ret; -#else - DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); - - return drm_platform_init(&pvr_drm_platform_driver, pdev); -#endif } static int pvr_remove(struct platform_device *pdev) @@ -229,7 +217,6 @@ static int pvr_remove(struct platform_device *pdev) DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) drm_dev_unregister(ddev); /* The unload callback, called from drm_dev_unregister, is @@ -239,9 +226,6 @@ static int pvr_remove(struct platform_device *pdev) pvr_drm_unload(ddev); drm_dev_put(ddev); -#else - drm_put_dev(ddev); -#endif return 0; } @@ -254,7 +238,6 @@ static void pvr_shutdown(struct platform_device *pdev) PVRSRVDeviceShutdown(ddev); } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) static const struct of_device_id pvr_of_ids[] = { #if defined(SYS_RGX_OF_COMPATIBLE) { .compatible = SYS_RGX_OF_COMPATIBLE, }, @@ -262,28 +245,37 @@ static const struct of_device_id pvr_of_ids[] = { {}, }; -#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) +#if !defined(CHROMIUMOS_KERNEL) MODULE_DEVICE_TABLE(of, pvr_of_ids); #endif -#endif static struct platform_device_id pvr_platform_ids[] = { #if 
defined(SYS_RGX_DEV_NAME) { SYS_RGX_DEV_NAME, 0 }, +#endif +#if defined(SYS_RGX_DEV_NAME_0) + { SYS_RGX_DEV_NAME_0, 0 }, +#endif +#if defined(SYS_RGX_DEV_NAME_1) + { SYS_RGX_DEV_NAME_1, 0 }, +#endif +#if defined(SYS_RGX_DEV_NAME_2) + { SYS_RGX_DEV_NAME_2, 0 }, +#endif +#if defined(SYS_RGX_DEV_NAME_3) + { SYS_RGX_DEV_NAME_3, 0 }, #endif { } }; -#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) +#if !defined(CHROMIUMOS_KERNEL) MODULE_DEVICE_TABLE(platform, pvr_platform_ids); #endif static struct platform_driver pvr_platform_driver = { .driver = { .name = DRVNAME, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) .of_match_table = of_match_ptr(pvr_of_ids), -#endif .pm = &pvr_pm_ops, }, .id_table = pvr_platform_ids, @@ -299,10 +291,6 @@ static int __init pvr_init(void) DRM_DEBUG_DRIVER("\n"); pvr_drm_platform_driver = pvr_drm_generic_driver; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ - (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) - pvr_drm_platform_driver.set_busid = drm_platform_set_busid; -#endif err = PVRSRVDriverInit(); if (err) @@ -326,5 +314,5 @@ static void __exit pvr_exit(void) DRM_DEBUG_DRIVER("done\n"); } -late_initcall(pvr_init); +module_init(pvr_init); module_exit(pvr_exit); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_procfs.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_procfs.c index af8c9c7524f2..34408fc64875 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_procfs.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_procfs.c @@ -72,10 +72,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_bridge_k.h" #include "pvr_uaccess.h" #include "osdi_impl.h" +#include "kernel_compatibility.h" #define _DRIVER_THREAD_ENTER() \ do { \ - PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \ + PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(NULL); \ if (eLocalError != PVRSRV_OK) \ { \ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ @@ -85,7 +86,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
} while (0) #define _DRIVER_THREAD_EXIT() \ - PVRSRVDriverThreadExit() + PVRSRVDriverThreadExit(NULL) #define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR @@ -121,14 +122,7 @@ static void _WriteData(void *pvNativeHandle, const void *pvData, static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) seq_vprintf(pvNativeHandle, pszFmt, pArgs); -#else - IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; - - vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); - seq_printf(pvNativeHandle, "%s", szBuffer); -#endif } static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) @@ -139,11 +133,7 @@ static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) static IMG_BOOL _HasOverflowed(void *pvNativeHandle) { struct seq_file *psSeqFile = pvNativeHandle; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) return seq_has_overflowed(psSeqFile); -#else - return psSeqFile->count == psSeqFile->size; -#endif } static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { @@ -418,13 +408,13 @@ static ssize_t _Write(struct file *psFile, const char __user *pszBuffer, PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL", -EIO); + _DRIVER_THREAD_ENTER(); + /* Make sure we allocate the smallest amount of needed memory*/ ui64Count = psIter->ui32WriteLenMax; PVR_LOG_GOTO_IF_FALSE(uiCount <= ui64Count, "uiCount too long", return_); ui64Count = MIN(uiCount+1, ui64Count); - _DRIVER_THREAD_ENTER(); - /* allocate buffer with one additional byte for NUL character */ pcLocalBuffer = OSAllocMem(ui64Count); PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed", diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sw_fence.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sw_fence.c index 4f2404a21161..cccc4a64e321 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sw_fence.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sw_fence.c @@ -48,6 +48,7 @@ #include #include "pvr_sw_fence.h" +#include "osfunc_common.h" struct pvr_sw_fence_context { struct kref kref; @@ -159,9 +160,9 @@ pvr_sw_fence_context_create(const char *context_name, const char *driver_name) return NULL; fence_context->context = dma_fence_context_alloc(1); - strlcpy(fence_context->context_name, context_name, + OSStringSafeCopy(fence_context->context_name, context_name, sizeof(fence_context->context_name)); - strlcpy(fence_context->driver_name, driver_name, + OSStringSafeCopy(fence_context->driver_name, driver_name, sizeof(fence_context->driver_name)); atomic_set(&fence_context->seqno, 0); atomic_set(&fence_context->fence_count, 0); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync2.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync2.c deleted file mode 100644 index d454de883bca..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync2.c +++ /dev/null @@ -1,2759 +0,0 @@ -/* - * @File pvr_sync.c - * @Title Kernel driver for Android's sync mechanism - * @Codingstyle LinuxKernel - * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved - * @License Dual MIT/GPLv2 - * - * The contents of this file are subject to the MIT license as set out below. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * Alternatively, the contents of this file may be used under the terms of - * the GNU General Public License Version 2 ("GPL") in which case the provisions - * of GPL are applicable instead of those above. - * - * If you wish to allow use of your version of this file only under the terms of - * GPL, and not to allow others to use your version of this file under the terms - * of the MIT license, indicate your decision by deleting the provisions above - * and replace them with the notice and other provisions required by GPL as set - * out in the file called "GPL-COPYING" included in this distribution. If you do - * not delete the provisions above, a recipient may use your version of this file - * under the terms of either the MIT license or GPL. - * - * This License is also included in this distribution in the file called - * "MIT-COPYING". - * - * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS - * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING - * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR - * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR - * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include "pvr_drv.h" -#include "pvr_fd_sync_kernel.h" -#include "services_kernel_client.h" -#include "pvr_sync.h" -#include "pvrsrv_sync_km.h" -#include "sync_checkpoint_external.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) -#include -#include -#else -#include <../drivers/staging/android/sync.h> -#include <../drivers/staging/android/sw_sync.h> -#endif - -#include "linux_sw_sync.h" - -#include "pvr_sync_api.h" - -#include "kernel_compatibility.h" - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) - -static inline int sync_fence_get_status(struct sync_fence *psFence) -{ - return psFence->status; -} - -static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) -{ - return pt->parent; -} - -static inline int sync_pt_get_status(struct sync_pt *pt) -{ - return pt->status; -} - -static inline ktime_t sync_pt_get_timestamp(struct sync_pt *pt) -{ - return pt->timestamp; -} - -#define for_each_sync_pt(s, f, c) \ - list_for_each_entry((s), &(f)->pt_list_head, pt_list) - -#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ - -static inline int sync_fence_get_status(struct sync_fence *psFence) -{ - int iStatus = atomic_read(&psFence->status); - - /* - * When Android sync was rebased on top of fences the sync_fence status - * values changed from 0 meaning 'active' to 'signalled' and, likewise, - * values greater than 0 went from meaning 'signalled' to 'active' - * (where the value corresponds to the number of active sync points). - * - * Convert to the old style status values. - */ - return iStatus > 0 ? 0 : iStatus ? iStatus : 1; -} - -static inline int sync_pt_get_status(struct sync_pt *pt) -{ - /* No error state for raw dma-buf fences */ - return fence_is_signaled(&pt->base) ? 1 : 0; -} - -static inline ktime_t sync_pt_get_timestamp(struct sync_pt *pt) -{ - return pt->base.timestamp; -} - -#define for_each_sync_pt(s, f, c) \ - for ((c) = 0, (s) = (f)->num_fences == 0 ? \ - NULL : (struct sync_pt *)(f)->cbs[0].sync_pt; \ - (c) < (f)->num_fences; \ - (c)++, (s) = (c) < (f)->num_fences ? \ - (struct sync_pt *)(f)->cbs[c].sync_pt : NULL) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ - -/* #define DEBUG_OUTPUT 1 */ - -#ifdef DEBUG_OUTPUT -#define DPF(fmt, ...) pr_err("pvr_sync2: " fmt "\n", __VA_ARGS__) -#else -#define DPF(fmt, ...) do {} while (0) -#endif - -#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ...) \ - do { \ - if (pfnDumpDebugPrintf) { \ - pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \ - } else { \ - pr_info("pvr_sync2: " __VA_ARGS__); \ - } \ - } while (0) - -#if defined(PDUMP) -#define SYNC_MAX_POOL_SIZE 0 -#else -#define SYNC_MAX_POOL_SIZE 10 -#endif - -enum { - SYNC_TL_TYPE = 0, - SYNC_PT_FENCE_TYPE = 1, - SYNC_PT_CLEANUP_TYPE = 2, - SYNC_PT_FOREIGN_FENCE_TYPE = 3, - SYNC_PT_FOREIGN_CLEANUP_TYPE = 4, -}; - -/* Services client sync prim wrapper. This is used to hold debug information - * and make it possible to cache unused syncs. - */ -struct pvr_sync_native_sync_prim { - /* List for the sync pool support. */ - struct list_head list; - - /* Base services sync prim structure */ - struct PVRSRV_CLIENT_SYNC_PRIM_TAG *client_sync; - - /* The next queued value which should be used */ - u32 next_value; - - /* Every sync data will get some unique id */ - u32 id; - - /* FWAddr used by the client sync */ - u32 vaddr; - - /* The type this sync is used for in our driver. Used in - * pvr_sync_debug_request(). 
- */ - u8 type; - - /* A debug class name also printed in pvr_sync_debug_request(). */ - char class[32]; -}; - -struct pvr_sync_native_sync_checkpoint { - /* List for the sync pool support. */ - struct list_head list; - - /* Base services sync checkpoint */ - PSYNC_CHECKPOINT client_sync_checkpoint; - - /* Every sync data will get some unique id */ - u32 id; - - /* FWAddr used by the client sync */ - u32 vaddr; - - /* The type this sync is used for in our driver. Used in - * pvr_sync_debug_request(). - */ - u8 type; - - /* A debug class name also printed in pvr_sync_debug_request(). */ - char class[32]; - - /* We store the foreign sync fence (if applicable), for debug purposes. */ - struct sync_fence *foreign_sync_fence; - char foreign_sync_fence_name[32]; -}; - -struct pvr_sw_sync_timeline { - /* sw_sync_timeline must come first to allow casting of a ptr */ - /* to the wrapping struct to a ptr to the sw_sync_timeline */ - struct sw_sync_timeline *sw_sync_timeline; - u64 current_value; - u64 next_value; - /* Reference count for this object */ - struct kref kref; -}; - -/* This is the actual timeline metadata. We might keep this around after the - * base sync driver has destroyed the pvr_sync_timeline_wrapper object. - */ -struct pvr_sync_timeline { - /* Back reference to the sync_timeline. Not always valid */ - struct sync_timeline *obj; - - /* Global timeline list support */ - struct list_head list; - - /* List of sync points alive on this timeline. */ - struct list_head sync_list; - - /* Timeline sync */ - struct pvr_sync_timeline_kernel_pair *kernel; - - /* Reference count for this object */ - struct kref kref; - - /* Used only by pvr_sync_update_all_timelines(). False if the timeline - * has been detected as racing with pvr_sync_destroy_timeline(). - */ - bool valid; -}; - -/* This is the IMG extension of a sync_timeline */ -struct pvr_sync_timeline_wrapper { - /* Original timeline struct. Needs to come first. */ - struct sync_timeline obj; - - /* Pointer to extra timeline data. Separated life-cycle. */ - struct pvr_sync_timeline *timeline; -}; - -struct pvr_sync_timeline_kernel_pair { - /* Binary sync point representing the android native sync in hw. */ - struct pvr_sync_native_sync_prim *fence_sync; - - /* Sync points can go away when there are deferred hardware operations - * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until - * the hardware is finished, so we add it to a defer list which is - * processed periodically ("defer-free"). - * - * Note that the defer-free list is global, not per-timeline. - */ - struct list_head list; -}; - -struct pvr_sync_kernel_pair { - /* Binary sync point representing the android native sync in hw. */ - struct pvr_sync_native_sync_checkpoint *fence_sync; - - /* Sync points can go away when there are deferred hardware operations - * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until - * the hardware is finished, so we add it to a defer list which is - * processed periodically ("defer-free"). - * - * Note that the defer-free list is global, not per-timeline. - */ - struct list_head list; -}; - -struct pvr_sync_data { - /* Every sync point has a services sync object. This object is used - * by the hardware to enforce ordering -- it is attached as a source - * dependency to various commands. - */ - struct pvr_sync_kernel_pair *kernel; - - /* The timeline update value for this sync point. */ - u32 timeline_update_value; - - /* This refcount is incremented at create and dup time, and decremented - * at free time. 
It ensures the object doesn't start the defer-free - * process until it is no longer referenced. - */ - struct kref kref; -}; - -/* This is the IMG extension of a sync_pt */ -struct pvr_sync_pt { - /* Original sync_pt structure. Needs to come first. */ - struct sync_pt pt; - - /* Private shared data */ - struct pvr_sync_data *sync_data; - - /* The timeline on which this pvr_sync_pt was created */ - struct pvr_sync_timeline *timeline; -}; - -/* This is the IMG extension of a sync_fence */ -struct pvr_sync_fence { - /* Original sync_fence structure. Needs to come first. */ - struct sync_fence *fence; - - /* To ensure callbacks are always received for fences / sync_pts, even - * after the fence has been 'put' (freed), we must take a reference to - * the fence. We still need to 'put' the fence ourselves, but this might - * happen in irq context, where fput() is not allowed (in kernels <3.6). - * We must add the fence to a list which is processed in WQ context. - */ - struct list_head list; -}; - -/* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow" - * sync prim. This is modelled as a software operation. The foreign driver - * completes the operation by calling a callback we registered with it. - */ -struct pvr_sync_fence_waiter { - /* Base sync driver waiter structure */ - struct sync_fence_waiter waiter; - - /* "Shadow" sync prim backing the foreign driver's sync_pt */ - struct pvr_sync_kernel_pair *kernel; - - /* Optimizes lookup of fence for defer-put operation */ - struct pvr_sync_fence *sync_fence; -}; - -/* Global data for the sync driver */ -static struct { - /* Complete notify handle */ - void *command_complete_handle; - - /* Defer-free workqueue. Syncs may still be in use by the HW when freed, - * so we have to keep them around until the HW is done with them at - * some later time. This workqueue iterates over the list of free'd - * syncs, checks if they are in use, and frees the sync device memory - * when done with. - */ - struct workqueue_struct *defer_free_wq; - struct work_struct defer_free_work; - - struct work_struct check_status_work; - - /* Context used to create client sync prims. */ - struct SYNC_PRIM_CONTEXT_TAG *sync_prim_context; - - /* Unique id counter for the sync prims */ - atomic_t sync_id; - - /* The global event object (used to wait between checks for - * deferred-free sync status). - */ - void *event_object_handle; - - /* struct used to register with sync_checkpoint.c */ - PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops; -} pvr_sync_data; - -/* List of timelines created by this driver */ -static LIST_HEAD(timeline_list); -static DEFINE_SPINLOCK(timeline_list_lock); - -/* Sync pool support */ -static LIST_HEAD(sync_pool_free_list); -static LIST_HEAD(sync_pool_active_list); -static DEFINE_MUTEX(sync_pool_mutex); -static s32 sync_pool_size;// = 0; -static u32 sync_pool_created;// = 0; -static u32 sync_pool_reused;// = 0; - -/* pvr_sync_pt_active_list is used for debug - when a - * pvr sync_native_sync_checkpoint is created it is added - * to this list (which contains all existing points for - * all pvr timelines). - */ -static LIST_HEAD(pvr_sync_pt_active_list); -static DEFINE_SPINLOCK(pvr_sync_pt_active_list_spinlock); -/* pvr_sw_sync_pt_active_list is used for debug - when a - * pvr sw_sync_native_sync_checkpoint is created it is added - * to this list (which contains all existing points for - * all pvr sw timelines). 
- */ -static LIST_HEAD(pvr_sw_sync_pt_active_list); -static DEFINE_MUTEX(pvr_sw_sync_pt_active_list_mutex); - -/* The "defer-free" sync_checkpoint list. Driver global. */ -static LIST_HEAD(sync_checkpoint_free_list); -static DEFINE_SPINLOCK(sync_checkpoint_free_list_spinlock); - -/* The "defer-free-timeline" object list. Driver global. */ -static LIST_HEAD(timeline_free_list); -static DEFINE_SPINLOCK(timeline_free_list_spinlock); - -/* The "defer-put" object list. Driver global. */ -static LIST_HEAD(sync_fence_put_list); -static DEFINE_SPINLOCK(sync_fence_put_list_spinlock); - -static void pvr_sync_update_all_timelines(void *command_complete_handle); -static void pvr_sync_free_checkpoint_list_mem(void *mem_ptr); - -static void _dump_fence(struct sync_fence *fence, - DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, - void *dump_debug_file) -{ -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0)) - struct sync_pt *sync_point; - char time_str[16] = { '\0' }; - char pt_value_str[64] = { '\0' }; - char timeline_value_str[64] = { '\0' }; - char value_str[132] = { '\0' }; - int status = sync_fence_get_status(fence); - int i; - - PVR_DUMPDEBUG_LOG(dump_debug_printf, - dump_debug_file, - "[%p] %s: %s ref=%u Sync Points:\n", - fence, - fence->name, - (status > 0 ? - "Signalled" : status ? - "Error" : "Active"), - atomic_read(&fence->kref.refcount)); - - for_each_sync_pt(sync_point, fence, i) { - - struct sync_timeline *timeline = sync_pt_parent(sync_point); - ktime_t timestamp = sync_pt_get_timestamp(sync_point); - struct timeval tv = ktime_to_timeval(timestamp); - int i_pt_status = sync_pt_get_status(sync_point); - - char time_pt[16] = { '\0' }; - const struct fence_ops *fence_ops = sync_point->base.ops; - - snprintf(time_str, - sizeof(time_str), - "@%ld.%06ld", - tv.tv_sec, - tv.tv_usec); - - if (timeline->ops->pt_value_str && - timeline->ops->timeline_value_str) { - timeline->ops->pt_value_str(sync_point, pt_value_str, sizeof(pt_value_str)); - timeline->ops->timeline_value_str(timeline, - timeline_value_str, - sizeof(timeline_value_str)); - snprintf(value_str, sizeof(value_str), "%s / %s", - timeline_value_str, pt_value_str); - } - fence_ops->timeline_value_str(&sync_point->base, time_pt, sizeof(time_pt)); - - PVR_DUMPDEBUG_LOG(dump_debug_printf, - dump_debug_file, - "\t@%u Ref=%u TS=%s State=%s %s TLN=%s\n", - sync_point->base.seqno, - atomic_read(&sync_point->base.refcount.refcount), - time_pt, - (i_pt_status > 0 ? "signalled" : i_pt_status ? 
- "error" : "active"), - value_str, - fence_ops->get_timeline_name(&sync_point->base)); - } -#else - PVR_DUMPDEBUG_LOG(dump_debug_printf, - dump_debug_file, - "Fence stats not available on this platform!"); -#endif -} - -/* Sync prim helpers */ -static inline void set_sync_prim_value(struct pvr_sync_native_sync_prim *sync, - u32 value) -{ - *(sync->client_sync->pui32LinAddr) = value; -} - -static inline u32 get_sync_prim_value(struct pvr_sync_native_sync_prim *sync) -{ - return *(sync->client_sync->pui32LinAddr); -} - -static inline void complete_sync_prim(struct pvr_sync_native_sync_prim *sync) -{ - *(sync->client_sync->pui32LinAddr) = sync->next_value; -} - -static inline int is_sync_prim_met(struct pvr_sync_native_sync_prim *sync) -{ - return *(sync->client_sync->pui32LinAddr) == sync->next_value; -} - -/* Checkpoint helpers */ -static inline u32 get_sync_checkpoint_value(struct pvr_sync_native_sync_checkpoint *sync) -{ - PVRSRV_SYNC_CHECKPOINT_STATE checkpoint_state = PVRSRV_SYNC_CHECKPOINT_ACTIVE; - - if (SyncCheckpointIsSignalled(sync->client_sync_checkpoint, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) - checkpoint_state = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; - else if (SyncCheckpointIsErrored(sync->client_sync_checkpoint, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) - checkpoint_state = PVRSRV_SYNC_CHECKPOINT_ERRORED; - - return (u32)checkpoint_state; -} - -static inline char get_sync_checkpoint_char(struct pvr_sync_native_sync_checkpoint *sync) -{ - char cState = 'A'; - - if (SyncCheckpointIsSignalled(sync->client_sync_checkpoint, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) - cState = 'S'; - else if (SyncCheckpointIsErrored(sync->client_sync_checkpoint, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) - cState = 'E'; - - return cState; -} - -static inline void error_sync_checkpoint(struct pvr_sync_native_sync_checkpoint *sync, - u32 fence_sync_flags) -{ - SyncCheckpointError(sync->client_sync_checkpoint, fence_sync_flags); -} - -static inline void complete_sync_checkpoint(struct pvr_sync_native_sync_checkpoint *sync, - u32 fence_sync_flags) -{ - SyncCheckpointSignal(sync->client_sync_checkpoint, fence_sync_flags); -} - -static inline int is_sync_checkpoint_met(struct pvr_sync_native_sync_checkpoint *sync, - u32 fence_sync_flags) -{ - return (int)SyncCheckpointIsSignalled(sync->client_sync_checkpoint, fence_sync_flags); -} - -static inline int is_sync_checkpoint_errored(struct pvr_sync_native_sync_checkpoint *sync, - u32 fence_sync_flags) -{ - return (int)SyncCheckpointIsErrored(sync->client_sync_checkpoint, fence_sync_flags); -} - -/* Timeline helpers */ -static inline struct pvr_sync_timeline *get_timeline(struct sync_timeline *obj) -{ - return ((struct pvr_sync_timeline_wrapper *)obj)->timeline; -} - -static inline struct pvr_sync_timeline *get_timeline_pt(struct sync_pt *pt) -{ - return get_timeline(sync_pt_parent(pt)); -} - -static inline int -pvr_sync_has_kernel_signaled(struct pvr_sync_kernel_pair *kernel, u32 fence_sync_flags) -{ - /* Idle syncs are always signaled */ - if (!kernel) - return 1; - - return is_sync_checkpoint_met(kernel->fence_sync, fence_sync_flags); -} - -#ifdef DEBUG_OUTPUT - -static char *debug_info_timeline(struct pvr_sync_timeline *timeline) -{ - static char info[256]; - - if (timeline->kernel->fence_sync) { - snprintf(info, sizeof(info), - "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u", - timeline->obj ? 
timeline->obj->name : "?", - timeline->kernel->fence_sync->id, - timeline->kernel->fence_sync->vaddr, - get_sync_prim_value(timeline->kernel->fence_sync), - timeline->kernel->fence_sync->next_value); - } else { - snprintf(info, sizeof(info), - "n='%s' id=n/a fw=n/a tl_curr=n/a tl_next=n/a", - timeline->obj ? timeline->obj->name : "?"); - } - - return info; -} - -static char *debug_info_sync_pt(struct sync_pt *pt) -{ - //struct pvr_sync_timeline *timeline = get_timeline_pt(pt); - //struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt; - //struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel; - static char info[256], info1[256]; - -#if 1 - info[0] = '\0'; - info1[0] = '\0'; -#else - if (kernel) { - if (timeline->kernel->fence_sync) { - snprintf(info, sizeof(info), - "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s", - pvr_sync_has_kernel_signaled(kernel, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT), - pvr_pt->sync_data->timeline_update_value, - atomic_read(&pvr_pt->sync_data->kref.refcount), - kernel->fence_sync->id, - kernel->fence_sync->vaddr, - get_sync_prim_value(timeline->kernel->fence_sync), - kernel->fence_sync->next_value, - info1, debug_info_timeline(timeline)); - } - } else { - snprintf(info, sizeof(info), - "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s", - pvr_sync_has_kernel_signaled(kernel, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT), - pvr_pt->sync_data->timeline_update_value, - atomic_read(&pvr_pt->sync_data->kref.refcount), - debug_info_timeline(timeline)); - } -#endif - return info; -} - -#endif /* DEBUG_OUTPUT */ - -static u32 sync_pool_get_callers; -static enum PVRSRV_ERROR_TAG -sync_pool_get(struct pvr_sync_native_sync_prim **_sync, - const char *class_name, u8 type) -{ - struct pvr_sync_native_sync_prim *sync; - enum PVRSRV_ERROR_TAG error = PVRSRV_OK; - u32 sync_addr; - - mutex_lock(&sync_pool_mutex); - sync_pool_get_callers++; - - if (list_empty(&sync_pool_free_list)) { - /* If there is nothing in the pool, create a new sync prim. 
*/ - sync = kmalloc(sizeof(*sync), - GFP_KERNEL); - if (!sync) { - pr_err("pvr_sync2: %s: Failed to allocate sync data\n", - __func__); - error = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_unlock; - } - - error = SyncPrimAlloc(pvr_sync_data.sync_prim_context, - &sync->client_sync, class_name); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to allocate sync prim (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_free; - } - - error = SyncPrimGetFirmwareAddr(sync->client_sync, &sync_addr); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to get FW address (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_sync_prim_free; - } - sync->vaddr = sync_addr; - - list_add_tail(&sync->list, &sync_pool_active_list); - ++sync_pool_created; - } else { - sync = list_first_entry(&sync_pool_free_list, - struct pvr_sync_native_sync_prim, list); - list_move_tail(&sync->list, &sync_pool_active_list); - --sync_pool_size; - ++sync_pool_reused; - } - - sync->id = atomic_inc_return(&pvr_sync_data.sync_id); - sync->type = type; - - strlcpy(sync->class, class_name, sizeof(sync->class)); - /* It's crucial to reset the sync to zero */ - set_sync_prim_value(sync, 0); - sync->next_value = 0; - - *_sync = sync; - -err_unlock: - sync_pool_get_callers--; - mutex_unlock(&sync_pool_mutex); - return error; - -err_sync_prim_free: - SyncPrimFree(sync->client_sync); - -err_free: - kfree(sync); - goto err_unlock; -} - -static u32 sync_pool_put_callers; - -static void sync_pool_put(struct pvr_sync_native_sync_prim *sync) -{ - mutex_lock(&sync_pool_mutex); - sync_pool_put_callers++; - - if (sync_pool_size < SYNC_MAX_POOL_SIZE) { - /* Mark it as unused */ - set_sync_prim_value(sync, 0xffffffff); - - list_move(&sync->list, &sync_pool_free_list); - ++sync_pool_size; - } else { - /* Mark it as invalid */ - set_sync_prim_value(sync, 0xdeadbeef); - - list_del(&sync->list); - SyncPrimFree(sync->client_sync); - kfree(sync); - } - - sync_pool_put_callers--; - mutex_unlock(&sync_pool_mutex); -} - -static void sync_pool_clear(void) -{ - struct pvr_sync_native_sync_prim *sync, *n; - - mutex_lock(&sync_pool_mutex); - - list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) { - /* Mark it as invalid */ - set_sync_prim_value(sync, 0xdeadbeef); - - list_del(&sync->list); - SyncPrimFree(sync->client_sync); - kfree(sync); - --sync_pool_size; - } - - mutex_unlock(&sync_pool_mutex); -} - -static void pvr_sync_debug_request(void *hDebugRequestHandle, - u32 ui32VerbLevel, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile) -{ - struct pvr_sync_timeline *tl; - struct pvr_sync_native_sync_checkpoint *sync; - unsigned long flags; - - static const char *const type_names[] = { - "Timeline", "Fence", "Cleanup", - "Foreign Fence", "Foreign Cleanup" - }; - - if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { - /* if timeline_list_lock and pvr_sync_pt_active_list_spinlock - * are acquired together timeline_list_lock must be always acquired first - */ - spin_lock_irqsave(&timeline_list_lock, flags); - spin_lock(&pvr_sync_pt_active_list_spinlock); - - PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - "------[ Native Fence Sync: timelines ]------"); - PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - "foreign timeline:\n"); - - list_for_each_entry(sync, &pvr_sync_pt_active_list, list) { - BUG_ON(sync->type >= ARRAY_SIZE(type_names)); - - PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - " @%u: fwaddr=%#08x enqu=%u ref=%u state=%s %s (%s)\n", - sync->id, - 
sync->vaddr, - SyncCheckpointGetEnqueuedCount(sync->client_sync_checkpoint), - SyncCheckpointGetReferenceCount(sync->client_sync_checkpoint), - SyncCheckpointGetStateString(sync->client_sync_checkpoint), - sync->class, - type_names[sync->type]); - } - - list_for_each_entry(tl, &timeline_list, list) { - if (tl->kernel->fence_sync) { - PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - "%s: @%u/%u refs=%u fwaddr=%#08x\n", - tl->obj->name, - get_sync_prim_value(tl->kernel->fence_sync), - tl->kernel->fence_sync->next_value, - refcount_read(&tl->kref.refcount), - tl->kernel->fence_sync->vaddr); - } else { - PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - "%s: refs=%u\n", - tl->obj->name, - refcount_read(&tl->kref.refcount)); - } - - list_for_each_entry(sync, &tl->sync_list, list) { - BUG_ON(sync->type >= ARRAY_SIZE(type_names)); - - PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, - " @%u: fwaddr=%#08x enq=%u ref=%u state=%s %s (%s)\n", - sync->id, - sync->vaddr, - SyncCheckpointGetEnqueuedCount(sync->client_sync_checkpoint), - SyncCheckpointGetReferenceCount(sync->client_sync_checkpoint), - SyncCheckpointGetStateString(sync->client_sync_checkpoint), - sync->class, - type_names[sync->type]); - } - } - - spin_unlock(&pvr_sync_pt_active_list_spinlock); - spin_unlock_irqrestore(&timeline_list_lock, flags); - } -} - -static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt) -{ - struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt; - struct pvr_sync_pt *pvr_pt_b = NULL; - - DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt)); - - pvr_pt_b = (struct pvr_sync_pt *) - sync_pt_create(sync_pt_parent(sync_pt), - sizeof(*pvr_pt_b)); - if (!pvr_pt_b) { - pr_err("pvr_sync2: %s: Failed to dup sync pt\n", __func__); - goto err_out; - } - - kref_get(&pvr_pt_a->sync_data->kref); - - pvr_pt_b->sync_data = pvr_pt_a->sync_data; - -err_out: - return (struct sync_pt *)pvr_pt_b; -} - -static int pvr_sync_has_signaled(struct sync_pt *sync_pt) -{ - struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; - - DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt)); - - return pvr_sync_has_kernel_signaled(pvr_pt->sync_data->kernel, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); -} - -static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b) -{ - u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value; - u32 b1 = ((struct pvr_sync_pt *)b)->sync_data->timeline_update_value; - - DPF("%s: a # %s", __func__, debug_info_sync_pt(a)); - DPF("%s: b # %s", __func__, debug_info_sync_pt(b)); - - if (a1 == b1) - return 0; - - /* Take integer wrapping into account */ - return ((s32)a1 - (s32)b1) < 0 ? 
-1 : 1; -} - -static void check_for_sync_prim(struct pvr_sync_native_sync_prim *sync) -{ -#ifndef NO_HARDWARE - void *event_object; - enum PVRSRV_ERROR_TAG error = PVRSRV_OK; - - if (!sync || is_sync_prim_met(sync)) - return; - - error = OSEventObjectOpen( - pvr_sync_data.event_object_handle, - &event_object); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Error opening event object (%s)\n", - __func__, - PVRSRVGetErrorString(error)); - return; - } - - if (!is_sync_prim_met(sync)) { - /* This debug will indicate if pvr_sync is stuck waiting for a sync prim */ - pr_err("pvr_sync2: %s: sync prim<%p> %s (%d != %d)\n", - __func__, sync->client_sync, sync->class, - *(sync->client_sync->pui32LinAddr), sync->next_value); - } - - OSEventObjectClose(event_object); -#endif /* NO_HARDWARE */ -} - -static void pvr_sync_defer_free_checkpoints(struct pvr_sync_kernel_pair *kernel) -{ - unsigned long flags; - - spin_lock_irqsave(&sync_checkpoint_free_list_spinlock, flags); - list_add_tail(&kernel->list, &sync_checkpoint_free_list); - spin_unlock_irqrestore(&sync_checkpoint_free_list_spinlock, flags); - - queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work); -} - -static void pvr_sync_timeline_defer_free(struct pvr_sync_timeline_kernel_pair *kernel) -{ - unsigned long flags; - - spin_lock_irqsave(&timeline_free_list_spinlock, flags); - list_add_tail(&kernel->list, &timeline_free_list); - spin_unlock_irqrestore(&timeline_free_list_spinlock, flags); - - queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work); -} - -/* This function assumes the timeline_list_lock is held while it runs */ - -static void pvr_sync_destroy_timeline_locked(struct kref *kref) -{ - unsigned long flags; - struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *) - container_of(kref, struct pvr_sync_timeline, kref); - - pvr_sync_timeline_defer_free(timeline->kernel); - /* timeline_list_lock is already locked so it's safe to acquire this here */ - spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); - list_del(&timeline->sync_list); - spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); - list_del(&timeline->list); - kfree(timeline); -} - -static void pvr_sw_sync_destroy_timeline(struct kref *kref) -{ - struct pvr_sw_sync_timeline *pvr_sw_timeline = (struct pvr_sw_sync_timeline *) - container_of(kref, struct pvr_sw_sync_timeline, kref); - struct sync_timeline *obj = (void *)pvr_sw_timeline->sw_sync_timeline; - u32 unsignalled_points = 0; - - /* signal any unsignalled points on the sw timeline */ - while (pvr_sw_timeline->current_value < pvr_sw_timeline->next_value-1) { - pvr_sync_sw_timeline_advance(pvr_sw_timeline, NULL); - unsignalled_points++; - } - - if (unsignalled_points > 0) { - pr_err("pvr_sync2: %s: signalled %d sw sync pts for timeline <%p> %s\n", - __func__, unsignalled_points, pvr_sw_timeline, obj->name); - } - - sync_timeline_destroy(obj); - kfree(pvr_sw_timeline); -} - -static void pvr_sync_release_timeline(struct sync_timeline *obj) -{ - struct pvr_sync_timeline *timeline = get_timeline(obj); - unsigned long flags; - - /* If pvr_sync_open failed after calling sync_timeline_create, this - * can be called with a timeline that has not got a timeline sync - * or been added to our timeline list. 
Use a NULL timeline to - * detect and handle this condition - */ - if (!timeline) - return; - - DPF("%s: # %s", __func__, debug_info_timeline(timeline)); - - if (timeline->kernel->fence_sync) - check_for_sync_prim(timeline->kernel->fence_sync); - - /* Take timeline_list_lock before clearing timeline->obj, to - * avoid the chance of doing so while the list is being iterated - * by pvr_sync_update_all_timelines(). - */ - spin_lock_irqsave(&timeline_list_lock, flags); - - /* Whether or not we're the last reference, obj is going away - * after this function returns, so remove our back reference - * to it. - */ - timeline->obj = NULL; - - /* This might be the last reference to the timeline object. - * If so, we'll go ahead and delete it now. - */ - kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked); - - spin_unlock_irqrestore(&timeline_list_lock, flags); -} - -/* The print_obj() and print_pt() functions have been removed, so we're forced - * to use the timeline_value_str() and pt_value_str() functions. These are - * worse because we're limited to 64 characters, and the strings for sync - * pts have to be formatted like: - * - * pt active: pt_info / tl_info - * - * For us, the tl_info is complicated and doesn't need to be repeated over - * and over. So try to detect the way sync_print_pt() calls the two value_str - * functions and change what pvr_sync_timeline_value_str() returns dynamically. - */ -static struct sync_timeline *last_pt_timeline; - -static void pvr_sync_timeline_value_str(struct sync_timeline *sync_timeline, - char *str, int size) -{ - struct pvr_sync_timeline *timeline = get_timeline(sync_timeline); - - if (timeline->kernel->fence_sync) { - if (sync_timeline != last_pt_timeline) { - snprintf(str, size, "%u 0x%x %u/%u", - timeline->kernel->fence_sync->id, - timeline->kernel->fence_sync->vaddr, - get_sync_prim_value(timeline->kernel->fence_sync), - timeline->kernel->fence_sync->next_value); - } else { - snprintf(str, size, "%u", - get_sync_prim_value(timeline->kernel->fence_sync)); - } - } else { - snprintf(str, size, "n/a"); - } -} - -static void pvr_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size) -{ - struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; - struct pvr_sync_kernel_pair *kernel; - - if (!pvr_pt->sync_data) - return; - - kernel = pvr_pt->sync_data->kernel; - - /* Messages must be at most 64 bytes (including the null terminator): - * - * 123456789012345678901234567890123456789012345678901234567890123 - * - * ID FW ADDR C/N # REF TAKEN - * 123456 0xdeadbeef 0/1 # r=2 123456 - * - * ID FW ADDR C/N # ID FW ADDR C/N # REF TAKEN - * 123456 0xdeadbeef 0/1 # 123456 0xdeadbeef 0/1 # r=2 123456 - */ - if (kernel && kernel->fence_sync) { - snprintf(str, size, - "%u 0x%x %c e=%d r=%d %u", - kernel->fence_sync->id, - kernel->fence_sync->vaddr, - get_sync_checkpoint_char(kernel->fence_sync), - SyncCheckpointGetEnqueuedCount(kernel->fence_sync->client_sync_checkpoint), - atomic_read(&pvr_pt->sync_data->kref.refcount), - pvr_pt->sync_data->timeline_update_value); - } else { - snprintf(str, size, "idle # r=%d %u", - atomic_read(&pvr_pt->sync_data->kref.refcount), - pvr_pt->sync_data->timeline_update_value); - } - - last_pt_timeline = sync_pt_parent(sync_pt); -} - -/* pvr_sync_create_sync_data() should be called with the bridge lock held */ -static struct pvr_sync_data * -pvr_sync_create_sync_data(struct pvr_sync_timeline *timeline, - const s32 timeline_fd, - PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, - const char *pt_name) -{ - struct 
pvr_sync_data *sync_data = NULL; - enum PVRSRV_ERROR_TAG error; - unsigned long flags; - - sync_data = kzalloc(sizeof(*sync_data), GFP_KERNEL); - if (!sync_data) - goto err_out; - - kref_init(&sync_data->kref); - - sync_data->kernel = - kzalloc(sizeof(*sync_data->kernel), - GFP_KERNEL); - - if (!sync_data->kernel) - goto err_free_data; - - INIT_LIST_HEAD(&sync_data->kernel->list); - - sync_data->kernel->fence_sync = - kzalloc(sizeof(struct pvr_sync_native_sync_checkpoint), GFP_KERNEL); - if (!sync_data->kernel->fence_sync) - goto err_free_kernel; - INIT_LIST_HEAD(&sync_data->kernel->fence_sync->list); - - error = SyncCheckpointAlloc(psSyncCheckpointContext, - (PVRSRV_TIMELINE)timeline_fd, - PVRSRV_NO_FENCE, - pt_name, - &sync_data->kernel->fence_sync->client_sync_checkpoint); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to allocate sync checkpoint (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_free_fence; - } - - sync_data->kernel->fence_sync->foreign_sync_fence = NULL; - sync_data->kernel->fence_sync->foreign_sync_fence_name[0] = '\0'; - - sync_data->kernel->fence_sync->vaddr = - SyncCheckpointGetFirmwareAddr(sync_data->kernel->fence_sync->client_sync_checkpoint); - sync_data->kernel->fence_sync->id = - SyncCheckpointGetId(sync_data->kernel->fence_sync->client_sync_checkpoint); - sync_data->kernel->fence_sync->type = SYNC_PT_FENCE_TYPE; - strlcpy(sync_data->kernel->fence_sync->class, pt_name, - sizeof(sync_data->kernel->fence_sync->class)); - - /* Update list (for debug ) */ - spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); - list_add_tail(&sync_data->kernel->fence_sync->list, &timeline->sync_list); - spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); - -err_out: - return sync_data; - -err_free_fence: - kfree(sync_data->kernel->fence_sync); -err_free_kernel: - kfree(sync_data->kernel); -err_free_data: - kfree(sync_data); - sync_data = NULL; - goto err_out; -} - -static void pvr_sync_free_sync_data(struct kref *kref) -{ - struct pvr_sync_data *sync_data = (struct pvr_sync_data *) - container_of(kref, struct pvr_sync_data, kref); - - if (sync_data->kernel) - pvr_sync_defer_free_checkpoints(sync_data->kernel); - - kfree(sync_data); -} - -static void pvr_sync_free_sync(struct sync_pt *sync_pt) -{ - struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; - - DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt)); - - kref_put(&pvr_pt->sync_data->kref, pvr_sync_free_sync_data); -} - -/* this function uses pvr_sync_timeline_ops defined below */ -static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int); - -static struct sync_timeline_ops pvr_sync_timeline_ops = { - .driver_name = PVRSYNC_MODNAME, - .dup = pvr_sync_dup, - .has_signaled = pvr_sync_has_signaled, - .compare = pvr_sync_compare, - .free_pt = pvr_sync_free_sync, - .release_obj = pvr_sync_release_timeline, - .timeline_value_str = pvr_sync_timeline_value_str, - .pt_value_str = pvr_sync_pt_value_str, - .fill_driver_data = pvr_sync_fill_driver_data, -}; - -static inline bool is_pvr_timeline(struct sync_timeline *obj) -{ - return obj->ops == &pvr_sync_timeline_ops; -} - -static inline bool is_pvr_timeline_pt(struct sync_pt *pt) -{ - return is_pvr_timeline(sync_pt_parent(pt)); -} - -static int -pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size) -{ - struct pvr_sync_pt_info *info = (struct pvr_sync_pt_info *)data; - struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt; - struct pvr_sync_data *sync_data = pvr_pt->sync_data; - struct 
pvr_sync_kernel_pair *kernel = sync_data->kernel; - - if (size < sizeof(*info)) - return -ENOMEM; - - info->ui32TlTaken = sync_data->timeline_update_value; - - if (kernel && kernel->fence_sync) { - info->id = kernel->fence_sync->id; - info->ui32FWAddr = kernel->fence_sync->vaddr; - info->ui32CurrOp = get_sync_checkpoint_value(kernel->fence_sync); - info->ui32NextOp = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; - } else { - info->id = 0; - info->ui32FWAddr = 0; - info->ui32CurrOp = 0; - info->ui32NextOp = 0; - } - - return sizeof(*info); -} - -/* foreign sync handling */ - -static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence, - struct sync_fence_waiter *_waiter) -{ - struct pvr_sync_fence_waiter *waiter = - (struct pvr_sync_fence_waiter *)_waiter; - unsigned long flags; - - /* Complete the SW operation and free the sync if we can. If we can't, - * it will be checked by a later workqueue kick. - */ - if (is_sync_checkpoint_errored(waiter->kernel->fence_sync, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT) || - !is_sync_checkpoint_met(waiter->kernel->fence_sync, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { - - if (!is_sync_checkpoint_met(waiter->kernel->fence_sync, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) - complete_sync_checkpoint(waiter->kernel->fence_sync, - PVRSRV_FENCE_FLAG_CTX_ATOMIC); - - /* We can 'put' the fence now, but this function might be called in - * irq context so we must defer to WQ. - * This WQ is triggered in pvr_sync_defer_free, so adding it to the - * put list before that should guarantee it's cleaned up on the next - * wq run. - */ - spin_lock_irqsave(&sync_fence_put_list_spinlock, flags); - list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list); - spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags); - - pvr_sync_defer_free_checkpoints(waiter->kernel); - - /* The completed sw-sync may allow other tasks to complete, - * so we need to allow them to progress. 
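/*
 * The defer-to-workqueue idiom used by the signal callback above, reduced
 * to its core: from a context that may be atomic (IRQ), only queue the item
 * and kick the work; the sleeping cleanup (fput(), frees) runs later in
 * process context. Illustrative names; the workqueue and work item are
 * assumed to be set up at init time with create_freezable_workqueue() and
 * INIT_WORK() (not shown).
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct deferred_item {
	struct list_head list;
};

static LIST_HEAD(deferred_list);
static DEFINE_SPINLOCK(deferred_list_lock);
static struct workqueue_struct *deferred_wq;	/* created at init (not shown) */
static struct work_struct deferred_work;	/* INIT_WORK()'d at init (not shown) */

/* Safe to call from IRQ context: nothing here sleeps. */
static void defer_cleanup(struct deferred_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&deferred_list_lock, flags);
	list_add_tail(&item->list, &deferred_list);
	spin_unlock_irqrestore(&deferred_list_lock, flags);

	queue_work(deferred_wq, &deferred_work);
}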
- */ - queue_work(NativeSyncGetFenceStatusWq(), - &pvr_sync_data.check_status_work); - - kfree(waiter); - } else { - pr_err("pvr_sync2: %s: this sync checkpoint has already been signalled - " - "why are we asked to do this more than once?!\n", __func__); - } -} - -static PSYNC_CHECKPOINT -pvr_sync_create_waiter_for_foreign_sync(int fd, PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) -{ - struct pvr_sync_kernel_pair *kernel = NULL; - struct pvr_sync_fence_waiter *waiter; - struct pvr_sync_fence *sync_fence; - PSYNC_CHECKPOINT checkpoint = NULL; - struct sync_fence *fence; - enum PVRSRV_ERROR_TAG error; - int err; - unsigned long flags; - - fence = sync_fence_fdget(fd); - if (!fence) { - pr_err("pvr_sync2: %s: Failed to take reference on fence\n", - __func__); - goto err_out; - } - - kernel = kmalloc(sizeof(*kernel), GFP_KERNEL); - if (!kernel) { - pr_err("pvr_sync2: %s: Failed to allocate sync kernel\n", - __func__); - goto err_put_fence; - } - - sync_fence = kmalloc(sizeof(*sync_fence), GFP_KERNEL); - if (!sync_fence) { - pr_err("pvr_sync2: %s: Failed to allocate pvr sync fence\n", - __func__); - goto err_free_kernel; - } - - sync_fence->fence = fence; - - kernel->fence_sync = kzalloc(sizeof(struct pvr_sync_native_sync_checkpoint), GFP_KERNEL); - if (!kernel->fence_sync) - goto err_free_fence; - - INIT_LIST_HEAD(&kernel->fence_sync->list); - - /* Create sync checkpoint for the foreign sync, with an invalid - * timeline (as we do not know it) - */ - error = SyncCheckpointAlloc(psSyncCheckpointContext, - SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, - fd, /* fence_to_resolve */ - fence->name, - &checkpoint); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to allocate sync checkpoint (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_free_fence_sync; - } - kernel->fence_sync->client_sync_checkpoint = checkpoint; - - kernel->fence_sync->foreign_sync_fence = fence; - strlcpy(kernel->fence_sync->foreign_sync_fence_name, - fence->name, - sizeof(kernel->fence_sync->foreign_sync_fence_name)); - - kernel->fence_sync->vaddr = - SyncCheckpointGetFirmwareAddr(kernel->fence_sync->client_sync_checkpoint); - kernel->fence_sync->id = - SyncCheckpointGetId(kernel->fence_sync->client_sync_checkpoint); - kernel->fence_sync->type = SYNC_PT_FOREIGN_FENCE_TYPE; - strlcpy(kernel->fence_sync->class, fence->name, sizeof(kernel->fence_sync->class)); - - /* The custom waiter structure is freed in the waiter callback */ - waiter = kmalloc(sizeof(*waiter), GFP_KERNEL); - if (!waiter) { - pr_err("pvr_sync2: %s: Failed to allocate waiter\n", __func__); - goto err_free_cleanup_sync; - } - - waiter->kernel = kernel; - waiter->sync_fence = sync_fence; - - /* Take an extra ref on the checkpoint for the reference handed over to - * the firmware. - * This must be done before the waiter_init, as the waiter can be called - * and it's reference dropped at _any time_ - */ - SyncCheckpointTakeRef(checkpoint); - - sync_fence_waiter_init(&waiter->waiter, - pvr_sync_foreign_sync_pt_signaled); - - spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); - err = sync_fence_wait_async(fence, &waiter->waiter); - if (err) { - spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); - /* -1 means the fence was broken, 1 means the fence already - * signalled. In either case, roll back what we've done and - * skip using this sync_pt for synchronisation. 
- */ - goto err_put_checkpoint_ref; - } - - /* Update list (for debug ) */ - list_add_tail(&kernel->fence_sync->list, &pvr_sync_pt_active_list); - spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); - -err_out: - return checkpoint; -err_put_checkpoint_ref: - SyncCheckpointDropRef(checkpoint); - kfree(waiter); -err_free_cleanup_sync: - SyncCheckpointFree(checkpoint); - checkpoint = NULL; -err_free_fence_sync: - kfree(kernel->fence_sync); - kernel->fence_sync = NULL; -err_free_fence: - kfree(sync_fence); - sync_fence = NULL; -err_free_kernel: - kfree(kernel); - kernel = NULL; -err_put_fence: - sync_fence_put(fence); - goto err_out; -} - -static -struct pvr_sync_pt *pvr_sync_create_pt(struct pvr_sync_timeline *timeline, - const s32 timeline_fd, - PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, - const char *pt_name) -{ - struct pvr_sync_data *sync_data; - struct pvr_sync_pt *pvr_pt = NULL; - - sync_data = pvr_sync_create_sync_data(timeline, timeline_fd, - psSyncCheckpointContext, pt_name); - if (!sync_data) { - pr_err("pvr_sync2: %s: Failed to create sync data\n", __func__); - goto err_out; - } - - pvr_pt = (struct pvr_sync_pt *) - sync_pt_create(timeline->obj, sizeof(struct pvr_sync_pt)); - if (!pvr_pt) { - pr_err("pvr_sync2: %s: Failed to create sync pt\n", __func__); - goto err_rollback_fence; - } - - pvr_pt->sync_data = sync_data; - - pvr_pt->timeline = timeline; - - /* Increment the timeline next value */ - pvr_pt->sync_data->timeline_update_value = - timeline->kernel->fence_sync->next_value++; - - return pvr_pt; - -err_rollback_fence: - /* Error the sync checkpoint (so the deferred free considers it 'met') */ - error_sync_checkpoint(sync_data->kernel->fence_sync, PVRSRV_FENCE_FLAG_NONE); - kref_put(&sync_data->kref, pvr_sync_free_sync_data); -err_out: - return NULL; -} - -int pvr_sync_api_init(void *file_handle, void **api_priv) -{ - struct pvr_sync_timeline_wrapper *timeline_wrapper; - struct pvr_sync_timeline *timeline; - char task_comm[TASK_COMM_LEN]; - unsigned long flags; - - get_task_comm(task_comm, current); - - timeline_wrapper = (struct pvr_sync_timeline_wrapper *) - sync_timeline_create(&pvr_sync_timeline_ops, - sizeof(*timeline_wrapper), task_comm); - if (!timeline_wrapper) { - pr_err("pvr_sync2: %s: sync_timeline_create failed\n", __func__); - goto err_out; - } - - timeline = kmalloc(sizeof(*timeline), GFP_KERNEL); - if (!timeline) { - pr_err("pvr_sync2: %s: Out of memory\n", __func__); - goto err_free_timeline_wrapper; - } - - timeline->kernel = kzalloc(sizeof(*timeline->kernel), - GFP_KERNEL); - if (!timeline->kernel) { - pr_err("pvr_sync2: %s: Out of memory\n", __func__); - goto err_free_timeline; - } - - timeline_wrapper->timeline = timeline; - - timeline->obj = &timeline_wrapper->obj; - kref_init(&timeline->kref); - INIT_LIST_HEAD(&timeline->sync_list); - - spin_lock_irqsave(&timeline_list_lock, flags); - list_add_tail(&timeline->list, &timeline_list); - spin_unlock_irqrestore(&timeline_list_lock, flags); - - DPF("%s: # %s", __func__, debug_info_timeline(timeline)); - - *api_priv = (void *)timeline_wrapper; - - return 0; - -err_free_timeline: - kfree(timeline); - - /* Use a NULL timeline to detect this partially-setup timeline in the - * timeline release function (called by sync_timeline_destroy) and - * handle it appropriately. 
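/*
 * A sketch of the NULL-sentinel idiom the comment above relies on: the
 * wrapper's private pointer is only set once construction has fully
 * succeeded, so the release callback can treat "priv == NULL" as a
 * partially built object and skip teardown steps that were never reached.
 * All names and the teardown_priv() helper are illustrative.
 */
#include <linux/kernel.h>

struct base_obj { int placeholder; };	/* stands in for the framework object */
struct priv_data { int placeholder; };	/* stands in for the driver's extra state */

static void teardown_priv(struct priv_data *priv) { /* illustrative */ }

struct wrapper {
	struct base_obj base;	/* owned and destroyed by the core framework */
	struct priv_data *priv;	/* stays NULL until init fully succeeds */
};

static void wrapper_release(struct base_obj *base)
{
	struct wrapper *w = container_of(base, struct wrapper, base);

	if (!w->priv)		/* init failed part-way: nothing more to undo */
		return;

	teardown_priv(w->priv);
}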
- */ - timeline_wrapper->timeline = NULL; -err_free_timeline_wrapper: - sync_timeline_destroy(&timeline_wrapper->obj); -err_out: - return -ENOMEM; -} - -int pvr_sync_api_deinit(void *api_priv, bool is_sw) -{ - if (!api_priv) - return 0; - - if (!is_sw) { - struct sync_timeline *obj = api_priv; - - DPF("%s: # %s", __func__, - debug_info_timeline(get_timeline(obj))); - - sync_timeline_destroy(obj); - } else { - struct pvr_sw_sync_timeline *pvr_sw_sync_timeline = api_priv; - - /* SW timeline */ - kref_put(&pvr_sw_sync_timeline->kref, pvr_sw_sync_destroy_timeline); - } - return 0; -} - -/* - * This is the function that kick code will call in order to 'finalise' a - * created output fence just prior to returning from the kick function. - * The OS native sync code needs to implement a function meeting this - * specification - the implementation may be a nop if the OS does not need - * to perform any actions at this point. - * - * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value - * will have been returned by an earlier call to - * pvr_sync_create_fence(). - * Input: finalise_data The finalise data returned by an earlier call - * to pvr_sync_create_fence(). - */ -static enum PVRSRV_ERROR_TAG -pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data) -{ - struct sync_fence *native_fence = (struct sync_fence *)finalise_data; - - if (!native_fence || (fence_fd < 0)) - return PVRSRV_ERROR_INVALID_PARAMS; - - sync_fence_install(native_fence, fence_fd); - return PVRSRV_OK; -} - -/* - * This is the function that kick code will call in order to obtain a new - * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used - * in that fence. The OS native sync code needs to implement a function - * meeting this specification. - * - * Input: device Not currently used. - * Input: fence_name A string to annotate the fence with (for - * debug). - * Input: timeline The timeline on which the new fence is to be - * created. - * Output: new_fence The new PVRSRV_FENCE to be returned by the - * kick call. - * Output: fence_uid Unique ID of the update fence. - * Output: fence_finalise_data Pointer to data needed to finalise the fence. - * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence. - */ -static enum PVRSRV_ERROR_TAG -pvr_sync_create_fence(struct _PVRSRV_DEVICE_NODE_ *device, - const char *fence_name, - PVRSRV_TIMELINE new_fence_timeline, - PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, - PVRSRV_FENCE *new_fence, - u64 *fence_uid, - void **fence_finalise_data, - PSYNC_CHECKPOINT *new_checkpoint_handle, - void **timeline_update_sync, - __u32 *timeline_update_value) -{ - PVRSRV_ERROR err; - PVRSRV_FENCE new_fence_fd = -1; - struct file *timeline_file; - struct sync_timeline *obj; - struct pvr_sync_timeline *timeline; - struct pvr_sync_pt *native_sync_point = NULL; - struct sync_fence *native_fence = NULL; - struct pvr_sync_kernel_pair *sync_kernel; - - if (new_fence_timeline < 0 || !new_fence || - !new_checkpoint_handle || !fence_finalise_data) { - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_out; - } - - /* We reserve the new fence FD before taking any operations - * as we do not want to fail (e.g. 
run out of FDs) - */ - new_fence_fd = get_unused_fd_flags(O_CLOEXEC); - if (new_fence_fd < 0) { - err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; - goto err_out; - } - - timeline_file = fget(new_fence_timeline); - if (!timeline_file) { - pr_err("pvr_sync2: %s: Failed to open supplied timeline fd (%d)\n", - __func__, new_fence_timeline); - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_put_fd; - } - - obj = pvr_sync_get_api_priv(timeline_file); - if (!obj) { - pr_err("pvr_sync2: %s: Supplied timeline not pvr_sync timeline\n", - __func__); - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_put_timeline; - } - - timeline = get_timeline(obj); - if (!timeline) { - pr_err("pvr_sync2: %s: Supplied timeline has no private data\n", - __func__); - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_put_timeline; - } - - /* Check if this timeline already has a sync prim, if not create it now */ - if (!timeline->kernel->fence_sync) { - err = sync_pool_get(&timeline->kernel->fence_sync, - timeline->obj->name, - SYNC_TL_TYPE); - - if (err != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to allocate timeline sync prim (%s)\n", - __func__, PVRSRVGetErrorString(err)); - err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_put_timeline; - } - } - - native_sync_point = pvr_sync_create_pt(timeline, new_fence_timeline, - psSyncCheckpointContext, fence_name); - if (!native_sync_point) { - pr_err("pvr_sync2: %s: Failed to create sync point\n", - __func__); - err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_put_timeline; - } - - native_fence = sync_fence_create(fence_name, &native_sync_point->pt); - if (!native_fence) { - struct pvr_sync_native_sync_prim *timeline_prim = - timeline->kernel->fence_sync; - - pr_err("pvr_sync2: %s: Failed to create sync fence\n", - __func__); - err = PVRSRV_ERROR_OUT_OF_MEMORY; - - /* If the point was created but the fence failed to be - * created, the point must be manually freed as a - * fence has not yet taken ownership. - */ - timeline_prim->next_value--; - /* Error the new fence's sync checkpoint - * (so the deferred free considers it 'met') - */ - error_sync_checkpoint(native_sync_point->sync_data->kernel->fence_sync, - PVRSRV_FENCE_FLAG_NONE); - pvr_sync_free_sync(&native_sync_point->pt); - goto err_put_timeline; - } - - sync_kernel = native_sync_point->sync_data->kernel; - - /* For Linux, we do not return the fence fd here, but via - * pvr_sync_finalise_fence() - this is because once we - * associate the fd with the fence, it can only be closed - * from client code so it should only be done once we - * know we will definitely require it. - */ - *new_fence = new_fence_fd; - *fence_finalise_data = (void *)native_fence; - *new_checkpoint_handle = sync_kernel->fence_sync->client_sync_checkpoint; - - if (timeline_update_sync && timeline_update_value) { - *timeline_update_sync = (void *)timeline->kernel->fence_sync->client_sync; - *timeline_update_value = timeline->kernel->fence_sync->next_value; - } - - *fence_uid = OSGetCurrentClientProcessIDKM(); - *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX); - - fput(timeline_file); - - return PVRSRV_OK; - -err_put_timeline: - fput(timeline_file); -err_put_fd: - pr_err("pvr_sync2: %s: putting fd %d back to unused\n", __func__, new_fence_fd); - put_unused_fd(new_fence_fd); - *fence_uid = PVRSRV_NO_FENCE; -err_out: - return err; -} - -/* - * This is the function that kick code will call in order to 'rollback' a - * created output fence should an error occur when submitting the kick. 
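/*
 * The fence-FD handling in the create path above follows the usual
 * reserve-early / install-late pattern: reserve a free fd number first
 * (cheap and easy to undo), attach the file only after everything else has
 * succeeded (above this is deferred to the finalise step), and hand the
 * number back with put_unused_fd() on error. Condensed sketch; make_file()
 * is a hypothetical stand-in for building the fence.
 */
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>

static struct file *make_file(void);	/* hypothetical constructor */

static int create_object_fd(void)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;		/* out of fds: nothing to undo */

	file = make_file();
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* release the reserved number */
		return PTR_ERR(file);
	}

	fd_install(fd, file);		/* fd becomes visible to userspace */
	return fd;
}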
- * The OS native sync code needs to implement a function meeting this - * specification. - * - * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence - * should be destroyed and any actions taken due to - * its creation that need to be undone should be - * reverted. - * Input: finalise_data The finalise data for the fence to be 'rolled back'. - */ -static enum PVRSRV_ERROR_TAG -pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback, - void *fence_data_to_rollback) -{ - PVRSRV_ERROR err = PVRSRV_OK; - struct sync_fence *sync_fence = (struct sync_fence *)fence_data_to_rollback; - struct sync_pt *sync_pt; - struct pvr_sync_pt *pvr_pt = NULL; - int j = 0; - - if (!sync_fence) { - pr_err("pvr_sync2: %s: Failed to recognise fence_to_rollback(%d)\n", - __func__, fence_to_rollback); - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_out; - } - - (void)j; - for_each_sync_pt(sync_pt, sync_fence, j) { - if (!is_pvr_timeline_pt(sync_pt)) { - pr_err("pvr_sync2: %s: Fence(%d) contains non-pvr timeline sync_pt\n", - __func__, fence_to_rollback); - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_out2; - } - - pvr_pt = (struct pvr_sync_pt *)sync_pt; - - SyncCheckpointError(pvr_pt->sync_data->kernel->fence_sync->client_sync_checkpoint, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); - - /* rollback timeline next_value */ - pvr_pt->timeline->kernel->fence_sync->next_value--; - } - - /* close the fence */ - sync_fence_put(sync_fence); - -err_out2: - put_unused_fd(fence_to_rollback); - -err_out: - return err; -} - -/* - * This is the function that kick code will call in order to obtain a list of - * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function. - * The OS native sync code will allocate the memory to hold the returned list - * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has - * finished referencing it. - * - * Input: fence The input (check) fence - * Output: nr_checkpoints The number of PVRSRV_SYNC_CHECKPOINT ptrs - * returned in the checkpoint_handles - * parameter. - * Output: fence_uid Unique ID of the check fence - * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs. 
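/*
 * Shape of the bounded hand-out used by the resolve path that follows: the
 * returned array is sized for the worst case up front, each stored entry
 * carries its own reference, and on overflow every reference taken so far
 * is dropped before failing. Illustrative types and bound; the real code
 * stores PSYNC_CHECKPOINT handles and uses MAX_SYNC_CHECKPOINTS_PER_FENCE.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MAX_POINTS 32			/* illustrative bound */

struct point {
	struct list_head list;
	struct kref kref;
};

static void point_release(struct kref *kref)
{
	kfree(container_of(kref, struct point, kref));
}

static int collect_points(struct list_head *src, struct point ***out, u32 *nr)
{
	struct point **arr, *p;
	u32 n = 0, i;

	arr = kmalloc_array(MAX_POINTS, sizeof(*arr), GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	list_for_each_entry(p, src, list) {
		if (n >= MAX_POINTS) {
			for (i = 0; i < n; i++)
				kref_put(&arr[i]->kref, point_release);
			kfree(arr);
			return -E2BIG;
		}
		kref_get(&p->kref);	/* dropped later by the caller */
		arr[n++] = p;
	}

	*out = arr;
	*nr = n;
	return 0;
}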
- */ -static enum PVRSRV_ERROR_TAG -pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, - PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints, - PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid) -{ - PVRSRV_ERROR err = PVRSRV_OK; - - if (!nr_checkpoints || !checkpoint_handles) { - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_out; - } - - if (fence_to_resolve < 0) { - /* Null fence passed, so return 0 checkpoints */ - *nr_checkpoints = 0; - *checkpoint_handles = NULL; - *fence_uid = 0; - } else { - struct sync_fence *sync_fence = sync_fence_fdget(fence_to_resolve); - struct sync_pt *sync_pt; - struct pvr_sync_kernel_pair *sync_kernel; - u32 points_on_fence = 0; - PSYNC_CHECKPOINT foreign_checkpoint = NULL; - PSYNC_CHECKPOINT *next_checkpoint; - bool add_foreign_sync = true; - int j = 0; - - if (!sync_fence) { - pr_err("pvr_sync2: %s: Failed to read sync private data for fd %d\n", - __func__, fence_to_resolve); - err = PVRSRV_ERROR_HANDLE_NOT_FOUND; - goto err_out; - } - - /* Alloc memory to hold list of PSYNC_CHECKPOINTs */ - /* (Alloc memory for MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoint handles) */ - *checkpoint_handles = - kmalloc_array(MAX_SYNC_CHECKPOINTS_PER_FENCE, - sizeof(PSYNC_CHECKPOINT), GFP_KERNEL); - if (!(*checkpoint_handles)) { - pr_err("pvr_sync2: %s: Failed to alloc memory for returned list of sync checkpoints\n", - __func__); - err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_out2; - } - - next_checkpoint = *checkpoint_handles; - - (void)j; - for_each_sync_pt(sync_pt, sync_fence, j) { - struct pvr_sync_pt *pvr_pt = NULL; - - /* Make sure that we do not overrun the memory we allocated */ - if (points_on_fence >= MAX_SYNC_CHECKPOINTS_PER_FENCE) { - pr_err("pvr_sync2: Maximum number of sync checkpoints in a fence exceeded (greater than %d)", - MAX_SYNC_CHECKPOINTS_PER_FENCE); - err = PVRSRV_ERROR_INVALID_PARAMS; - - for (j = 0; j < points_on_fence; j++) - SyncCheckpointDropRef((*checkpoint_handles)[j]); - - kfree(*checkpoint_handles); - goto err_out2; - } - - if (is_pvr_timeline_pt(sync_pt)) { - pvr_pt = (struct pvr_sync_pt *)sync_pt; - sync_kernel = pvr_pt->sync_data->kernel; - - if (!sync_kernel || - is_sync_checkpoint_met(sync_kernel->fence_sync, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { - continue; - } - - /* Take ref on sync_checkpoint - this will be dropped by the - * caller (Kick code) once it has incremented the checkpoint's - * CCB enqueued count. We only really need to do this for - * foreign sync checkpoints, to prevent the sync_checkpoint - * from being destroyed if it gets signalled while being processed - * by the Kick code, but the Kick code has no knowledge of whether a - * sync_checkpoint is foreign, so we take a ref on all checkpoints. - */ - SyncCheckpointTakeRef(sync_kernel->fence_sync->client_sync_checkpoint); - - *next_checkpoint = sync_kernel->fence_sync->client_sync_checkpoint; - next_checkpoint++; - points_on_fence++; - } else if (add_foreign_sync) { - foreign_checkpoint = pvr_sync_create_waiter_for_foreign_sync(fence_to_resolve, psSyncCheckpointContext); - - if (foreign_checkpoint) { - /* Take ref on sync_checkpoint - this will be dropped - * by the caller (see comment for the other call to - * SyncCheckpointTakeRef, above). 
- */ - /* For foreign points, an extra - * checkpoint reference was taken at - * creation time to ensure it wasn't - * completed and free'd before we got - * here, so ownership of that reference - * is effectively passed to the firmware - */ - *next_checkpoint = foreign_checkpoint; - next_checkpoint++; - points_on_fence++; - add_foreign_sync = false; - } - } - } - - if (0) { - int ii; - - pr_err("pvr_sync2: %s: returning nr_checkpoints=%d\n", - __func__, points_on_fence); - for (ii = 0; ii < points_on_fence; ii++) { - PSYNC_CHECKPOINT *psTmp = *(checkpoint_handles + ii); - - pr_err("pvr_sync2: %s: pt %d: sync checkpoint <%p>,\n", - __func__, ii, psTmp); - pr_err("pvr_sync2: %s: ID=%d\n", - __func__, SyncCheckpointGetId(*psTmp)); - } - } - *nr_checkpoints = points_on_fence; - *fence_uid = OSGetCurrentClientProcessIDKM(); - *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX); - -err_out2: - sync_fence_put(sync_fence); - } - -err_out: - return err; -} - -#if defined(PDUMP) -static enum PVRSRV_ERROR_TAG -pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, - struct SYNC_CHECKPOINT_TAG ***checkpoint_handles) -{ - enum PVRSRV_ERROR_TAG err; - struct sync_fence *sync_fence; - struct sync_pt *sync_pt; - struct pvr_sync_kernel_pair *sync_kernel; - u32 points_on_fence = 0; - struct SYNC_CHECKPOINT_TAG **next_checkpoint; - struct SYNC_CHECKPOINT_TAG **checkpoints = NULL; - int j = 0; - - if (!nr_checkpoints || !checkpoint_handles) { - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_out; - } - - if (fence_to_pdump < 0) { - /* Null fence passed, so return 0 checkpoints */ - err = PVRSRV_ERROR_INVALID_PARAMS; - goto err_out; - } - - sync_fence = sync_fence_fdget(fence_to_pdump); - if (!sync_fence) { - pr_err("pvr_sync2: %s: Failed to read sync private data for fd %d\n", - __func__, fence_to_pdump); - err = PVRSRV_ERROR_HANDLE_NOT_FOUND; - goto err_out; - } - - /* Alloc memory to hold list of PSYNC_CHECKPOINTs */ - checkpoints = kmalloc_array(MAX_SYNC_CHECKPOINTS_PER_FENCE, - sizeof(*checkpoints), GFP_KERNEL); - if (!checkpoints) { - pr_err("pvr_sync2: %s: Failed to alloc memory for returned list of sync checkpoints\n", - __func__); - err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_put_fence; - } - - next_checkpoint = checkpoints; - - (void)j; - for_each_sync_pt(sync_pt, sync_fence, j) { - struct pvr_sync_pt *pvr_pt = NULL; - - /* Make sure that we do not overrun the memory we allocated */ - if (points_on_fence >= MAX_SYNC_CHECKPOINTS_PER_FENCE) { - pr_err("pvr_sync2: Maximum number of sync checkpoints in a fence exceeded (greater than %d)", - MAX_SYNC_CHECKPOINTS_PER_FENCE); - err = PVRSRV_ERROR_INVALID_PARAMS; - kfree(*checkpoint_handles); - goto err_put_fence; - } - - if (is_pvr_timeline_pt(sync_pt)) { - pvr_pt = (struct pvr_sync_pt *)sync_pt; - sync_kernel = pvr_pt->sync_data->kernel; - if (!sync_kernel) - continue; - *next_checkpoint = sync_kernel->fence_sync->client_sync_checkpoint; - next_checkpoint++; - points_on_fence++; - } - } - - *checkpoint_handles = checkpoints; - *nr_checkpoints = points_on_fence; - err = PVRSRV_OK; -err_put_fence: - sync_fence_put(sync_fence); -err_out: - return err; - -} -#endif - -static u32 -pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs) -{ - u32 our_ufo_ct = 0; - struct pvr_sync_native_sync_checkpoint *sync; - unsigned long flags; - - spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); - /* dump info on any ufos in our active list */ - list_for_each_entry(sync, &pvr_sync_pt_active_list, list) { - u32 
*this_ufo_vaddr = vaddrs; - u32 ufo_num; - DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL; - - for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++, this_ufo_vaddr++) { - if (sync->vaddr == *this_ufo_vaddr) { - static const char *const type_names[] = { - "Timeline", "Fence", "Cleanup", - "Foreign Fence", "Foreign Cleanup" - }; - - /* Dump sync info */ - PVR_DUMPDEBUG_LOG(pfnDummy, NULL, - "\tSyncID = %d, FWAddr = 0x%08x: %s (%s - [%p] %s)", - sync->id, sync->vaddr, - sync->class, - type_names[sync->type], - sync->foreign_sync_fence, - sync->foreign_sync_fence_name); - our_ufo_ct++; - } - } - } - spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); - return our_ufo_ct; -} - -int pvr_sync_api_rename(void *api_priv, void *user_data) -{ - struct sync_timeline *obj = api_priv; - struct pvr_sync_timeline *timeline = get_timeline(obj); - struct pvr_sync_rename_ioctl_data *data = user_data; - - data->szName[sizeof(data->szName) - 1] = '\0'; - strlcpy(timeline->obj->name, data->szName, sizeof(timeline->obj->name)); - - return 0; -} - -int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new) -{ - struct sync_timeline *obj = api_priv; - struct pvr_sync_timeline *timeline = get_timeline(obj); - struct pvr_sw_sync_timeline *pvr_sw_sync_timeline; - - /* We can only convert an empty GPU timeline */ - if (timeline->kernel->fence_sync && - timeline->kernel->fence_sync->next_value) { - pr_err("pvr_sync2: %s ERROR! timeline->kernel->fence_sync=<%p>, timeline->kernel->fence_sync->next_value=%d\n", - __func__, timeline->kernel->fence_sync, - timeline->kernel->fence_sync->next_value); - return -EFAULT; - } - - /* Create a pvr_sw_sync timeline */ - pvr_sw_sync_timeline = kmalloc(sizeof(*pvr_sw_sync_timeline), GFP_KERNEL); - if (!pvr_sw_sync_timeline) { - pr_err("pvr_sync2: %s ERROR! no memory to allocate pvr_sw_sync_timeline struct\n", - __func__); - return -ENOMEM; - } - - pvr_sw_sync_timeline->current_value = 0; - pvr_sw_sync_timeline->next_value = 1; - kref_init(&pvr_sw_sync_timeline->kref); - - /* Create a sw_sync timeline with the old GPU timeline's name */ - pvr_sw_sync_timeline->sw_sync_timeline = sw_sync_timeline_create(timeline->obj->name); - if (!pvr_sw_sync_timeline->sw_sync_timeline) { - pr_err("pvr_sync2: %s ERROR! 
error returned from sw_sync_timeline_create() for timeline->obj->name '%s'\n", - __func__, timeline->obj->name); - kfree(pvr_sw_sync_timeline); - return -ENOMEM; - } - - /* Destroy the old GPU timeline and update the struct file */ - DPF("%s: # %s", __func__, debug_info_timeline(timeline)); - - sync_timeline_destroy(timeline->obj); - DPF("%s pvr_sw_sync_timeline<%p>, sw_sync_timeline<%p> curr=%llu,next=%llu", - pvr_sw_sync_timeline, - pvr_sw_sync_timeline->sw_sync_timeline, - pvr_sw_sync_timeline->current_value, - pvr_sw_sync_timeline->next_value); - - *api_priv_new = (void *)pvr_sw_sync_timeline; - - return 0; -} - -int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data) -{ - struct pvr_sw_sync_timeline *pvr_sw_timeline = api_priv; - struct pvr_sw_sync_create_fence_data *data = user_data; - struct sync_fence *fence; - int fd = get_unused_fd_flags(O_CLOEXEC); - struct sync_pt *sync_pt; - struct sw_sync_timeline *timeline; - int err; - - if (fd < 0) { - pr_err("pvr_sync2: %s: Failed to find unused fd (%d)\n", - __func__, fd); - err = -EMFILE; - goto err_out; - } - - timeline = pvr_sw_timeline->sw_sync_timeline; - - sync_pt = sw_sync_pt_create(timeline, pvr_sw_timeline->next_value); - if (!sync_pt) { - pr_err("pvr_sync2: %s: Failed to create a sync point (%d)\n", - __func__, fd); - err = -ENOMEM; - goto err_put_fd; - } - - data->name[sizeof(data->name) - 1] = '\0'; - fence = sync_fence_create(data->name, sync_pt); - if (!fence) { - pr_err("pvr_sync2: %s: Failed to create a fence (%d)\n", - __func__, fd); - sync_pt_free(sync_pt); - err = -ENOMEM; - goto err_put_fd; - } - - data->fence = fd; - data->sync_pt_idx = pvr_sw_timeline->next_value; - - sync_fence_install(fence, fd); - pvr_sw_timeline->next_value++; - - return 0; - -err_put_fd: - pr_err("pvr_sync2: %s: putting fd %d back to unused\n", __func__, fd); - put_unused_fd(fd); -err_out: - return err; -} - -int pvr_sync_api_sw_inc(void *api_priv, void *user_data) -{ - struct pvr_sw_sync_timeline *pvr_timeline = api_priv; - struct sw_sync_timeline *timeline; - struct pvr_sw_timeline_advance_data *data = user_data; - - timeline = pvr_timeline->sw_sync_timeline; - - /* Don't allow sw timeline to be advanced beyond the last defined point */ - if (pvr_timeline->current_value == (pvr_timeline->next_value-1)) { - pr_err("pvr_sync2: attempt to advance SW timeline beyond last defined point\n"); - return -EPERM; - } - - sw_sync_timeline_inc(timeline, 1); - pvr_timeline->current_value++; - data->sync_pt_idx = pvr_timeline->current_value; - - return 0; -} - -static void -pvr_sync_check_status_work_queue_function(struct work_struct *data) -{ - /* A completed SW operation may un-block the GPU */ - PVRSRVCheckStatus(NULL); -} - -/* Returns true if the freelist still has entries, else false if empty */ -static bool -pvr_sync_clean_freelist(void) -{ - struct pvr_sync_kernel_pair *kernel, *k; - struct pvr_sync_timeline_kernel_pair *tl_kernel, *tl_k; - struct pvr_sync_fence *sync_fence, *f; - LIST_HEAD(unlocked_free_checkpoint_list); - LIST_HEAD(unlocked_free_timeline_list); - LIST_HEAD(unlocked_free_list); - unsigned long flags; - bool freelist_empty; - - /* We can't free the sync directly in this loop because - * that will take the mmap mutex. We can't take mutexes while we have - * this list locked with a spinlock. So move all the items we want to - * free to another, local list (no locking required) and process it - * in a second loop. 
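/*
 * The two-phase drain described in the comment above, in isolation: phase
 * one moves completed entries onto a private list while the spinlock is
 * held, phase two frees them with no lock held so it is allowed to sleep or
 * take mutexes. Illustrative names.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct pending {
	struct list_head list;
	bool done;
};

static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);

/* Returns true if entries remain, so the caller knows to run again later. */
static bool drain_pending(void)
{
	struct pending *p, *tmp;
	LIST_HEAD(local);		/* private list: no locking required */
	unsigned long flags;
	bool more;

	spin_lock_irqsave(&pending_lock, flags);
	list_for_each_entry_safe(p, tmp, &pending_list, list) {
		if (p->done)
			list_move_tail(&p->list, &local);
	}
	more = !list_empty(&pending_list);
	spin_unlock_irqrestore(&pending_lock, flags);

	list_for_each_entry_safe(p, tmp, &local, list) {
		list_del(&p->list);
		kfree(p);		/* safe to sleep here: lock not held */
	}

	return more;
}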
- */ - - spin_lock_irqsave(&sync_checkpoint_free_list_spinlock, flags); - list_for_each_entry_safe(kernel, k, &sync_checkpoint_free_list, list) { - /* Check if this sync is not used anymore. */ - if ((kernel->fence_sync) && - !is_sync_checkpoint_met(kernel->fence_sync, - PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) { - continue; - } - - /* Remove the entry from the free list. */ - list_move_tail(&kernel->list, &unlocked_free_checkpoint_list); - } - - /* Wait and loop if there are still syncs on the free list (IE - * are still in use by the HW). - */ - freelist_empty = list_empty(&sync_checkpoint_free_list); - - spin_unlock_irqrestore(&sync_checkpoint_free_list_spinlock, flags); - - spin_lock_irqsave(&timeline_free_list_spinlock, flags); - list_for_each_entry_safe(tl_kernel, tl_k, &timeline_free_list, list) { - /* Check if this sync is not used anymore. */ - if (tl_kernel->fence_sync && !is_sync_prim_met(tl_kernel->fence_sync)) - continue; - - /* Remove the entry from the free list. */ - list_move_tail(&tl_kernel->list, &unlocked_free_timeline_list); - } - - /* Wait and loop if there are still syncs on the free list (IE - * are still in use by the HW). - */ - freelist_empty &= list_empty(&timeline_free_list); - - spin_unlock_irqrestore(&timeline_free_list_spinlock, flags); - - - list_for_each_entry_safe(kernel, k, &unlocked_free_checkpoint_list, list) { - list_del(&kernel->list); - - if (kernel->fence_sync && kernel->fence_sync->client_sync_checkpoint) { - spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags); - if (!list_empty(&kernel->fence_sync->list)) - list_del_init(&kernel->fence_sync->list); - - spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags); - SyncCheckpointFree(kernel->fence_sync->client_sync_checkpoint); - kernel->fence_sync->client_sync_checkpoint = NULL; - } - kfree(kernel->fence_sync); - kfree(kernel); - } - - list_for_each_entry_safe(tl_kernel, tl_k, &unlocked_free_timeline_list, list) { - list_del(&tl_kernel->list); - - if (tl_kernel->fence_sync) - sync_pool_put(tl_kernel->fence_sync); - kfree(tl_kernel); - } - - /* sync_fence_put() must be called from process/WQ context - * because it uses fput(), which is not allowed to be called - * from interrupt context in kernels <3.6. 
- */ - INIT_LIST_HEAD(&unlocked_free_list); - - spin_lock_irqsave(&sync_fence_put_list_spinlock, flags); - list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list) - list_move_tail(&sync_fence->list, &unlocked_free_list); - - spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags); - - list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) { - list_del(&sync_fence->list); - sync_fence_put(sync_fence->fence); - kfree(sync_fence); - } - - return !freelist_empty; -} - -static void -pvr_sync_defer_free_work_queue_function(struct work_struct *data) -{ - enum PVRSRV_ERROR_TAG error = PVRSRV_OK; - void *event_object; - - error = OSEventObjectOpen(pvr_sync_data.event_object_handle, - &event_object); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Error opening event object (%s)\n", - __func__, PVRSRVGetErrorString(error)); - return; - - } - - while (pvr_sync_clean_freelist()) { - - error = OSEventObjectWait(event_object); - - switch (error) { - - case PVRSRV_OK: - case PVRSRV_ERROR_TIMEOUT: - /* Timeout is normal behaviour */ - continue; - default: - pr_err("pvr_sync2: %s: Error waiting for event object (%s)\n", - __func__, PVRSRVGetErrorString(error)); - break; - } - } - error = OSEventObjectClose(event_object); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Error closing event object (%s)\n", - __func__, PVRSRVGetErrorString(error)); - } -} - -static -void pvr_sync_free_checkpoint_list_mem(void *mem_ptr) -{ - kfree(mem_ptr); -} - -static -void pvr_sync_update_all_timelines(void *command_complete_handle) -{ - struct pvr_sync_timeline *timeline, *n; - u32 num_signalled = 0; - unsigned long flags; - - spin_lock_irqsave(&timeline_list_lock, flags); - - list_for_each_entry(timeline, &timeline_list, list) { - /* If a timeline is destroyed via pvr_sync_release_timeline() - * in parallel with a call to pvr_sync_update_all_timelines(), - * the timeline_list_lock will block destruction of the - * 'timeline' pointer. Use kref_get_unless_zero() to detect - * and handle this race. Skip the timeline if it's being - * destroyed, blocked only on the timeline_list_lock. - */ - timeline->valid = - kref_get_unless_zero(&timeline->kref) ? true : false; - } - - list_for_each_entry_safe(timeline, n, &timeline_list, list) { - /* We know timeline is valid at this point because we're - * holding the list lock (so pvr_sync_destroy_timeline() has - * to wait). - */ - void *obj = timeline->obj; - - /* If we're racing with pvr_sync_release_timeline(), ignore */ - if (!timeline->valid) - continue; - - /* If syncs have signaled on the GPU, echo this in pvr_sync. - * - * At this point we know the timeline is valid, but obj might - * have raced and been set to NULL. It's only important that - * we use NULL / non-NULL consistently with the if() and call - * to sync_timeline_signal() -- the timeline->obj can't be - * freed (pvr_sync_release_timeline() will be stuck waiting - * for the timeline_list_lock) but it might have been made - * invalid by the base sync driver, in which case this call - * will bounce harmlessly. 
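/*
 * The race handling described in the comments above, reduced to a single
 * pass: while the list lock is held, try to take a reference with
 * kref_get_unless_zero(); an object whose refcount has already reached zero
 * is mid-destruction (blocked only on this lock) and is skipped. The
 * matching kref_put() uses a release callback that expects the lock to be
 * held, mirroring the "_locked" destroy used above. Illustrative names.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct node {
	struct list_head list;
	struct kref kref;
};

static LIST_HEAD(node_list);
static DEFINE_SPINLOCK(node_list_lock);

static void node_release_locked(struct kref *kref)
{
	struct node *n = container_of(kref, struct node, kref);

	list_del(&n->list);		/* node_list_lock held by the caller */
	kfree(n);
}

static void visit_all_nodes(void (*visit)(struct node *))
{
	struct node *n, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&node_list_lock, flags);
	list_for_each_entry_safe(n, tmp, &node_list, list) {
		if (!kref_get_unless_zero(&n->kref))
			continue;	/* racing with destruction: skip it */
		visit(n);
		kref_put(&n->kref, node_release_locked);
	}
	spin_unlock_irqrestore(&node_list_lock, flags);
}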
- */ - if (obj) { - sync_timeline_signal(obj); - num_signalled++; - } - - /* We're already holding the timeline_list_lock */ - kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked); - } - - spin_unlock_irqrestore(&timeline_list_lock, flags); -} - -enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void) -{ - /* Initialise struct and register with sync_checkpoint.c */ - pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence; - pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence; - pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data; - pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; - pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_update_all_timelines; - pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = - pvr_sync_free_checkpoint_list_mem; - pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = - pvr_sync_dump_info_on_stalled_ufos; - strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, - "pvr_sync2", SYNC_CHECKPOINT_IMPL_MAX_STRLEN); -#if defined(PDUMP) - pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = - pvr_sync_fence_get_checkpoints; -#endif - - return SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); -} - -int pvr_sync_init(void) -{ - return pvr_sync_ioctl_init(); -} - -void pvr_sync_deinit(void) -{ - pvr_sync_ioctl_deinit(); -} - -enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct pvr_drm_private *priv = ddev->dev_private; - enum PVRSRV_ERROR_TAG error; - - /* Multi-Device not supported for sync2, if we attempt to init - * another device then print a big warning to kernel log - */ - if (WARN_ON(pvr_sync_data.defer_free_wq)) { - pr_err("pvr_sync2: Multi-Device not supported\n"); - return PVRSRV_ERROR_ALREADY_EXISTS; - } - - DPF("%s", __func__); - - atomic_set(&pvr_sync_data.sync_id, 0); - - error = PVRSRVAcquireGlobalEventObjectKM( - &pvr_sync_data.event_object_handle); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to acquire global event object (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_out; - } - - error = SyncPrimContextCreate(priv->dev_node, - &pvr_sync_data.sync_prim_context); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to create sync prim context (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_release_event_object; - } - - pvr_sync_data.defer_free_wq = - create_freezable_workqueue("pvr_sync_defer_free_workqueue"); - if (!pvr_sync_data.defer_free_wq) { - pr_err("pvr_sync2: %s: Failed to create pvr_sync defer_free workqueue\n", - __func__); - goto err_free_sync_context; - } - - INIT_WORK(&pvr_sync_data.defer_free_work, - pvr_sync_defer_free_work_queue_function); - - INIT_WORK(&pvr_sync_data.check_status_work, - pvr_sync_check_status_work_queue_function); - error = PVRSRVRegisterCmdCompleteNotify( - &pvr_sync_data.command_complete_handle, - &pvr_sync_update_all_timelines, - &priv->dev_node); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to register MISR notification (%s)\n", - __func__, PVRSRVGetErrorString(error)); - goto err_destroy_defer_free_wq; - } - - error = PVRSRVRegisterDeviceDbgRequestNotify( - &priv->sync_debug_notify_handle, - priv->dev_node, - pvr_sync_debug_request, - DEBUG_REQUEST_ANDROIDSYNC, - NULL); - if (error != PVRSRV_OK) { - pr_err("pvr_sync2: %s: Failed to register debug notifier (%s)\n", - __func__, PVRSRVGetErrorString(error)); - 
goto err_unregister_cmd_complete; - } - - error = PVRSRV_OK; - return error; - -err_unregister_cmd_complete: - PVRSRVUnregisterCmdCompleteNotify( - pvr_sync_data.command_complete_handle); -err_destroy_defer_free_wq: - destroy_workqueue(pvr_sync_data.defer_free_wq); -err_free_sync_context: - SyncPrimContextDestroy(pvr_sync_data.sync_prim_context); -err_release_event_object: - PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle); -err_out: - - return error; -} - -void pvr_sync_device_deinit(struct device *dev) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct pvr_drm_private *priv = ddev->dev_private; - - DPF("%s", __func__); - - PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle); - - PVRSRVUnregisterCmdCompleteNotify( - pvr_sync_data.command_complete_handle); - - /* This will drain the workqueue, so we guarantee that all deferred - * syncs are free'd before returning. - */ - destroy_workqueue(pvr_sync_data.defer_free_wq); - - sync_pool_clear(); - - SyncPrimContextDestroy(pvr_sync_data.sync_prim_context); - - - PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle); -} - -enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms) -{ - int err; - - DPF("fence<%p>, to=%d", fence, timeout_in_ms); - - err = sync_fence_wait(fence, timeout_in_ms); - /* -ETIME means active. In this case we will retry later again. If the - * return value is an error or zero we will close this fence and - * proceed. This makes sure that we are not getting stuck here when a - * fence changes into an error state for whatever reason. - */ - if (err == -ETIME) { - DPF("timeout", __func__); -#ifdef DEBUG_OUTPUT - _dump_fence(fence, NULL, NULL); -#endif - return PVRSRV_ERROR_TIMEOUT; - } else if (err != 0) { - pr_err("%s: failed dependencies\n", __func__); - return PVRSRV_ERROR_FAILED_DEPENDENCIES; - } - - return PVRSRV_OK; -} - -enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence) -{ - sync_fence_put(fence); - - return PVRSRV_OK; -} - -enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **pfence) -{ - struct file *file; - - file = fget(fence_fd); - if (file == NULL || file->private_data == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - - *pfence = file->private_data; - - return PVRSRV_OK; -} - -enum PVRSRV_ERROR_TAG -pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node, - int timeline_fd, - const char *fence_name, - int *fence_fd_out, - u64 *sync_pt_idx) -{ - enum PVRSRV_ERROR_TAG srv_err; - struct file *file; - struct pvr_sw_sync_timeline *pvr_sw_timeline; - struct sync_fence *fence = NULL; - struct sync_pt *sync_point; - int fd; - - (void)(pvrsrv_dev_node); - - fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) { - pr_err("%s: invalid fd\n", __func__); - - return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; - } - - file = fget(timeline_fd); - pvr_sw_timeline = pvr_sync_get_api_priv(file); - if (!pvr_sw_timeline) { - /* unrecognised timeline */ - pr_err("%s: unrecognised timeline\n", __func__); - - srv_err = PVRSRV_ERROR_INVALID_PARAMS; - if (file) - goto err_put_file; - else - goto err_put_fd; - } - - DPF("pvr_sw_timeline<%p>", pvr_sw_timeline); - DPF("psSWTimeline<%p>", pvr_sw_timeline->sw_sync_timeline); - - sync_point = sw_sync_pt_create(pvr_sw_timeline->sw_sync_timeline, - pvr_sw_timeline->next_value); - if (!sync_point) { - srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_put_file; - } - - fence = sync_fence_create(fence_name, sync_point); - if (!fence) { - srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_free_pt; 
- } - - sync_fence_install(fence, fd); - - if (sync_pt_idx) - *sync_pt_idx = pvr_sw_timeline->next_value; - pvr_sw_timeline->next_value++; - - fput(file); - - *fence_fd_out = fd; - - DPF("returned fence fd %d <%p> '%s'", *fence_fd_out, fence, fence_name); - - return PVRSRV_OK; - -err_free_pt: - sync_pt_free(sync_point); -err_put_file: - fput(file); -err_put_fd: - put_unused_fd(fd); - return srv_err; -} - -enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx) -{ - struct sw_sync_timeline *sw_timeline; - struct pvr_sw_sync_timeline *pvr_sw_timeline; - - if (timeline == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - - pvr_sw_timeline = (struct pvr_sw_sync_timeline *)timeline; - sw_timeline = (struct sw_sync_timeline *)pvr_sw_timeline->sw_sync_timeline; - - if (pvr_sw_timeline->current_value == (pvr_sw_timeline->next_value - 1)) { - pr_err("%s: attempt to advance SW timeline beyond last defined point\n", - __func__); - return PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT; - } - - sw_sync_timeline_inc(sw_timeline, 1); - pvr_sw_timeline->current_value++; - - if (sync_pt_idx) - *sync_pt_idx = pvr_sw_timeline->current_value; - - return PVRSRV_OK; -} - -enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline) -{ - struct pvr_sw_sync_timeline *pvr_sw_timeline; - - if (timeline == NULL) - return PVRSRV_ERROR_INVALID_PARAMS; - - pvr_sw_timeline = (struct pvr_sw_sync_timeline *)timeline; - kref_put(&pvr_sw_timeline->kref, pvr_sw_sync_destroy_timeline); - - return PVRSRV_OK; -} - -enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, - void **timeline_out) -{ - enum PVRSRV_ERROR_TAG srv_err; - struct file *file; - struct pvr_sw_sync_timeline *pvr_sw_timeline; - int ret; - - file = fget(timeline_fd); - pvr_sw_timeline = pvr_sync_get_api_priv(file); - if (!pvr_sw_timeline) { - pr_err("%s: invalid params\n", __func__); - srv_err = PVRSRV_ERROR_INVALID_PARAMS; - if (file) - goto err_put_file; - else - goto err_out; - } - - *timeline_out = (void *)pvr_sw_timeline; - - /* Take ref on pvr_sw_timeline */ - ret = kref_get_unless_zero(&pvr_sw_timeline->kref); - if (ret) - srv_err = PVRSRV_OK; - else - srv_err = PVRSRV_ERROR_INVALID_PARAMS; - - DPF("pvr_sw_timeline=<%p>, pvr_sw_timeline->c=%llu, n=%llu", - pvr_sw_timeline->sw_sync_timeline, pvr_sw_timeline->current_value, - pvr_sw_timeline->next_value); - DPF("&pvr_sw_timeline->current_value=<%p>", - &pvr_sw_timeline->current_value); - DPF("returned, *timeline_out=<%p>", *timeline_out); - -err_put_file: - fput(file); -err_out: - return srv_err; -} - -enum PVRSRV_ERROR_TAG sync_dump_fence(void *sw_fence_obj, - DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, - void *dump_debug_file) -{ - struct sync_fence *fence = (struct sync_fence *) sw_fence_obj; - - _dump_fence(fence, dump_debug_printf, dump_debug_file); - - return PVRSRV_OK; -} - -enum PVRSRV_ERROR_TAG sync_sw_dump_timeline(void *sw_timeline_obj, - DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, - void *dump_debug_file) -{ -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0)) - struct pvr_sw_sync_timeline *timeline = - (struct pvr_sw_sync_timeline *) sw_timeline_obj; - - PVR_DUMPDEBUG_LOG(dump_debug_printf, - dump_debug_file, - "TL:%s SeqNum: %llu/%llu", - timeline->sw_sync_timeline->obj.name, - timeline->current_value, - timeline->next_value); -#else - PVR_DUMPDEBUG_LOG(dump_debug_printf, - dump_debug_file, - "Timeline Stats not available on this kernel!"); -#endif - return PVRSRV_OK; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_api.h 
b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_api.h index 5226654cd623..97dd746439b9 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_api.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_api.h @@ -44,7 +44,7 @@ #ifndef _PVR_SYNC_API_H #define _PVR_SYNC_API_H -//#include +#include "img_types.h" int pvr_sync_api_init(void *file_handle, void **api_priv); int pvr_sync_api_deinit(void *api_priv, bool is_sw); @@ -52,6 +52,8 @@ int pvr_sync_api_rename(void *api_priv, void *user_data); int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new); int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data); int pvr_sync_api_sw_inc(void *api_priv, void *user_data); +int pvr_sync_api_force_exp_only(void *api_priv, void *api_data); +int pvr_sync_api_create_export_fence(void *api_priv, void *user_data); struct file; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_file.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_file.c index e3656108e963..c6cec6d9dd0d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_file.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_file.c @@ -46,6 +46,7 @@ #include "pvr_sync.h" #include "pvr_fence.h" #include "pvr_counting_timeline.h" +#include "pvr_export_fence.h" #include "linux_sw_sync.h" @@ -58,6 +59,7 @@ #include #include "pvr_sync_api.h" +#include "osfunc_common.h" /* This header must always be included last */ #include "kernel_compatibility.h" @@ -107,10 +109,14 @@ struct pvr_sync_timeline { char name[32]; void *file_handle; bool is_sw; + bool is_export; /* Fence context used for hw fences */ struct pvr_fence_context *hw_fence_context; /* Timeline and context for sw fences */ - struct pvr_counting_fence_timeline *sw_fence_timeline; + union { + struct pvr_counting_fence_timeline *sw_fence_timeline; + struct pvr_exp_fence_context *exp_fence_context; + }; #if defined(NO_HARDWARE) /* List of all timelines (used to advance all timelines in nohw builds) */ struct list_head list; @@ -136,6 +142,17 @@ void pvr_sync_nohw_signal_fence(void *fence_data_to_signal) } mutex_unlock(&pvr_timeline_active_list_lock); } +static +void pvr_sync_nohw_signal_exp_fence(PVRSRV_FENCE fence_to_signal) +{ + struct dma_fence *fence; + + fence = sync_file_get_fence(fence_to_signal); + if (fence && pvr_is_exp_fence(fence)) + dma_fence_signal(fence); + + dma_fence_put(fence); +} #endif static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd) @@ -176,9 +193,10 @@ int pvr_sync_api_init(void *file_handle, void **api_priv) if (!timeline) return -ENOMEM; - strlcpy(timeline->name, task_comm, sizeof(timeline->name)); + OSStringSafeCopy(timeline->name, task_comm, sizeof(timeline->name)); timeline->file_handle = file_handle; timeline->is_sw = false; + timeline->is_export = false; *api_priv = (void *)timeline; @@ -192,7 +210,7 @@ int pvr_sync_api_deinit(void *api_priv, bool is_sw) if (!timeline) return 0; - if (timeline->sw_fence_timeline) { + if (timeline->is_sw && timeline->sw_fence_timeline) { /* This makes sure any outstanding SW syncs are marked as * complete at timeline close time. 
Otherwise it'll leak the * timeline (as outstanding fences hold a ref) and possibly @@ -202,6 +220,8 @@ int pvr_sync_api_deinit(void *api_priv, bool is_sw) pvr_counting_fence_timeline_force_complete( timeline->sw_fence_timeline); pvr_counting_fence_timeline_put(timeline->sw_fence_timeline); + } else if (timeline->is_export && timeline->exp_fence_context) { + pvr_exp_fence_context_destroy(timeline->exp_fence_context); } if (timeline->hw_fence_context) { @@ -374,7 +394,7 @@ pvr_sync_create_fence( err = PVRSRV_ERROR_OUT_OF_MEMORY; goto err_destroy_fence; } - strlcpy(sync_file_user_name(sync_file), + OSStringSafeCopy(sync_file_user_name(sync_file), pvr_fence->name, sizeof(sync_file_user_name(sync_file))); dma_fence_put(&pvr_fence->base); @@ -518,24 +538,39 @@ pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, &fences[i]->flags)) #endif { - struct pvr_fence *pvr_fence = - pvr_fence_create_from_fence( - pvr_sync_data.foreign_fence_context, - psSyncCheckpointContext, - fences[i], - fence_to_resolve, - "foreign"); - if (!pvr_fence) { - pr_err(FILE_NAME ": %s: Failed to create fence\n", - __func__); - err = PVRSRV_ERROR_OUT_OF_MEMORY; - goto err_free_checkpoints; + struct pvr_fence *pvr_fence; + + /* Check if fences[i] is an export fence */ + if (pvr_is_exp_fence(fences[i])) { + /* Assign export fence a sync checkpoint if it does not already + * have one. + */ + err = pvr_exp_fence_assign_checkpoint(PVRSRV_NO_FENCE, + fences[i], + psSyncCheckpointContext, + &checkpoints[num_used_fences]); + SyncCheckpointTakeRef(checkpoints[num_used_fences]); + ++num_used_fences; + } + else { + pvr_fence = pvr_fence_create_from_fence( + pvr_sync_data.foreign_fence_context, + psSyncCheckpointContext, + fences[i], + fence_to_resolve, + "foreign"); + if (!pvr_fence) { + pr_err(FILE_NAME ": %s: Failed to create fence\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_free_checkpoints; + } + checkpoints[num_used_fences] = + pvr_fence_get_checkpoint(pvr_fence); + SyncCheckpointTakeRef(checkpoints[num_used_fences]); + ++num_used_fences; + dma_fence_put(&pvr_fence->base); } - checkpoints[num_used_fences] = - pvr_fence_get_checkpoint(pvr_fence); - SyncCheckpointTakeRef(checkpoints[num_used_fences]); - ++num_used_fences; - dma_fence_put(&pvr_fence->base); } } /* If we don't return any checkpoints, delete the array because @@ -565,6 +600,97 @@ pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, goto err_put_fence; } +/* + * This is the function that kick code will call in order to obtain the + * PSYNC_CHECKPOINT for a given export fence (PVRSRV_FENCE) passed to a kick + * function. + * If export_fence is not a valid export fence, (ie fops != &pvr_exp_fence_ops) + * this function will return (error). + * + * Input: export_fence The export fence to resolve + * Input: checkpoint_context The context in which to create the new + * sync checkpoint for the export fence + * Output: checkpoint_handle The returned PVRSRV_SYNC_CHECKPOINT. 
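Resolving an export fence as described above starts from a PVRSRV_FENCE, i.e. a sync_file fd, and must first establish that the underlying dma_fence really belongs to the export-fence implementation before a checkpoint is assigned to it. A reduced sketch of that fd-to-fence step, with a hypothetical my_exp_fence_ops table standing in for the driver's pvr_exp_fence_ops:

/* Sketch only: turn a sync_file fd into a dma_fence and check ownership. */
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/sync_file.h>

extern const struct dma_fence_ops my_exp_fence_ops;	/* hypothetical */

static int resolve_export_fence_fd(int fd, struct dma_fence **out)
{
	struct dma_fence *fence;

	fence = sync_file_get_fence(fd);	/* takes a reference, or NULL */
	if (!fence)
		return -ENOENT;

	/* Ownership test: only fences created by this implementation count. */
	if (fence->ops != &my_exp_fence_ops) {
		dma_fence_put(fence);
		return -EINVAL;
	}

	*out = fence;				/* caller must dma_fence_put() */
	return 0;
}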
+ */ +static enum PVRSRV_ERROR_TAG +pvr_sync_resolve_export_fence(PVRSRV_FENCE fence_to_resolve, + PSYNC_CHECKPOINT_CONTEXT checkpoint_context, + PSYNC_CHECKPOINT *checkpoint_handle) +{ + PVRSRV_ERROR err = PVRSRV_OK; + struct dma_fence *fence; + + fence = sync_file_get_fence(fence_to_resolve); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_resolve); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + if (!pvr_is_exp_fence(fence)) { + pr_err(FILE_NAME ": %s: Fence not a pvr export fence\n", __func__); + dma_fence_put(fence); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + err = pvr_exp_fence_assign_checkpoint(fence_to_resolve, + fence, + checkpoint_context, + checkpoint_handle); + if (err != PVRSRV_OK) { + pr_err(FILE_NAME ": %s: Failed to assign export fence a sync checkpoint\n", + __func__); + } + + dma_fence_put(fence); + +err_out: + return err; +} + +/* + * This is the function that kick code will call in order to rollback the + * PSYNC_CHECKPOINT assigned to an export fence (PVRSRV_FENCE) passed to a kick + * function. + * If export_fence is not a valid export fence, (ie fops != &pvr_exp_fence_ops) + * this function will return (error). + * + * Input: fence_to_rollback The export fence to rollback + */ +static enum PVRSRV_ERROR_TAG +pvr_sync_rollback_export_fence(PVRSRV_FENCE fence_to_rollback) +{ + PVRSRV_ERROR err = PVRSRV_OK; + struct dma_fence *fence; + + fence = sync_file_get_fence(fence_to_rollback); + if (!fence) { + pr_err("%s: Failed to read sync private data for fd %d\n", + __func__, fence_to_rollback); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_get_fence; + } + + if (!pvr_is_exp_fence(fence)) { + pr_err(FILE_NAME ": %s: Fence not a pvr export fence\n", __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_is_exp_fence; + } + + err = pvr_exp_fence_rollback(fence); + if (err != PVRSRV_OK) { + pr_err("%s: Failed to rollback export fence\n", + __func__); + } + +err_is_exp_fence: + dma_fence_put(fence); + +err_get_fence: + return err; +} + /* * This is the function that driver code will call in order to request the * sync implementation to output debug information relating to any sync @@ -640,11 +766,20 @@ pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, } for (i = 0; i < num_fences; i++) { - pvr_fence = to_pvr_fence(fences[i]); - if (!pvr_fence) + if (is_pvr_fence(fences[i])) { + pvr_fence = to_pvr_fence(fences[i]); + if (pvr_fence) { + checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence); + ++num_used_fences; + } + } else if (pvr_is_exp_fence(fences[i])) { + struct pvr_exp_fence *pvr_exp_fence = to_pvr_exp_fence(fences[i]); + + checkpoints[num_used_fences] = pvr_exp_fence_get_checkpoint(pvr_exp_fence); + if (checkpoints[num_used_fences]) + ++num_used_fences; + } else continue; - checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence); - ++num_used_fences; } *checkpoint_handles = checkpoints; @@ -664,9 +799,9 @@ int pvr_sync_api_rename(void *api_priv, void *user_data) struct pvr_sync_rename_ioctl_data *data = user_data; data->szName[sizeof(data->szName) - 1] = '\0'; - strlcpy(timeline->name, data->szName, sizeof(timeline->name)); + OSStringSafeCopy(timeline->name, data->szName, sizeof(timeline->name)); if (timeline->hw_fence_context) - strlcpy(timeline->hw_fence_context->name, data->szName, + OSStringSafeCopy(timeline->hw_fence_context->name, data->szName, sizeof(timeline->hw_fence_context->name)); return 0; @@ -688,7 +823,83 @@ int 
pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new) timeline->is_sw = true; + *api_priv_new = (void *)timeline; + + return 0; +} + +/* We simply treat an export-fence as a SW fence and tweak + * the timeline structure to flag it as an 'is_export' type. + */ +int pvr_sync_api_force_exp_only(void *api_priv, void *api_data) +{ + struct pvr_sync_timeline *timeline = api_priv; + + if (timeline->is_export) { + pr_err(FILE_NAME ": %s: Already marked export timeline\n", __func__); + return 0; + } + + timeline->exp_fence_context = pvr_exp_fence_context_create("pvr_exp_fence_ctx", "pvr_sync"); + if (!timeline->exp_fence_context) + return -ENOMEM; + + timeline->is_export = true; + + return 0; +} + +int pvr_sync_api_create_export_fence(void *api_priv, void *user_data) +{ + struct pvr_sync_timeline *timeline = api_priv; + struct pvr_exp_fence_context *exp_fence_context = timeline->exp_fence_context; + pvr_exp_sync_create_fence_data_t *data = user_data; + struct sync_file *sync_file; + int fd; + struct dma_fence *fence; + int err; + + if (data == NULL) { + pr_err(FILE_NAME ": %s: Unexpected NULL user_data\n", __func__); + err = -EINVAL; + goto err_out; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n", + __func__, fd); + err = -EMFILE; + goto err_out; + } + + fence = pvr_exp_fence_create(exp_fence_context, fd, &data->sync_pt_idx); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to create a dma fence (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fd; + } + + sync_file = sync_file_create(fence); + dma_fence_put(fence); + if (!sync_file) { + pr_err(FILE_NAME ": %s: Failed to create a sync_file (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fd; + } + + data->fence = fd; + + fd_install(fd, sync_file->file); + return 0; + +err_put_fd: + put_unused_fd(fd); +err_out: + return err; } int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data) @@ -779,19 +990,23 @@ enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void) pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; #if defined(NO_HARDWARE) pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence; + pvr_sync_data.sync_checkpoint_ops.pfnNoHWSignalExpFence = pvr_sync_nohw_signal_exp_fence; #else pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL; + pvr_sync_data.sync_checkpoint_ops.pfnNoHWSignalExpFence = NULL; #endif pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = pvr_sync_free_checkpoint_list_mem; pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = pvr_sync_dump_info_on_stalled_ufos; - strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", + OSStringSafeCopy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", SYNC_CHECKPOINT_IMPL_MAX_STRLEN); #if defined(PDUMP) pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = pvr_sync_fence_get_checkpoints; #endif + pvr_sync_data.sync_checkpoint_ops.pfnExportFenceResolve = pvr_sync_resolve_export_fence; + pvr_sync_data.sync_checkpoint_ops.pfnExportFenceRollback = pvr_sync_rollback_export_fence; return SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.c index 60ba3555e779..0e1facde86cb 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.c +++ 
b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.c @@ -47,6 +47,9 @@ #include "pvr_sync_api.h" #include "pvr_sync_ioctl_common.h" +#include "pvr_debug.h" +#include "pvr_uaccess.h" + /* * The PVR Sync API is unusual in that some operations configure the * timeline for use, and are no longer allowed once the timeline is @@ -74,6 +77,7 @@ struct pvr_sync_file_data { atomic_t in_use; void *api_private; bool is_sw; + bool is_export; }; static bool pvr_sync_set_in_use(struct pvr_sync_file_data *fdata) @@ -174,22 +178,25 @@ int pvr_sync_close_common(void *connection_data) return 0; } -static inline int pvr_sync_ioctl_rename(void *api_priv, void *arg) -{ - struct pvr_sync_rename_ioctl_data *data = arg; +#define PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(name, type) \ +static inline int pvr_sync_ioctl_common_internal_ ## name(struct pvr_sync_file_data *fdata, type data) - return pvr_sync_api_rename(api_priv, data); +PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(rename, struct pvr_sync_rename_ioctl_data *) +{ + return pvr_sync_api_rename(fdata->api_private, data); } -static inline int pvr_sync_ioctl_force_sw_only(struct pvr_sync_file_data *fdata) +PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(force_sw_only, void*) { - void *data = fdata->api_private; + void *new_api_private = fdata->api_private; int err; - err = pvr_sync_api_force_sw_only(fdata->api_private, &data); + PVR_UNREFERENCED_PARAMETER(data); + + err = pvr_sync_api_force_sw_only(fdata->api_private, &new_api_private); if (!err) { - if (data != fdata->api_private) - fdata->api_private = data; + if (new_api_private != fdata->api_private) + fdata->api_private = new_api_private; fdata->is_sw = true; } @@ -197,81 +204,202 @@ static inline int pvr_sync_ioctl_force_sw_only(struct pvr_sync_file_data *fdata) return err; } -static inline int pvr_sync_ioctl_sw_create_fence(void *api_priv, void *arg) +PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(force_exp_only, void*) { - struct pvr_sw_sync_create_fence_data *data = arg; + int err; + PVR_UNREFERENCED_PARAMETER(data); + + err = pvr_sync_api_force_exp_only(fdata->api_private, data); + if (!err) + fdata->is_export = true; - return pvr_sync_api_sw_create_fence(api_priv, data); + return err; } -static inline int pvr_sync_ioctl_sw_inc(void *api_priv, void *arg) +PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(create_export_fence, void*) { - struct pvr_sw_timeline_advance_data *data = arg; - - return pvr_sync_api_sw_inc(api_priv, data); + return pvr_sync_api_create_export_fence(fdata->api_private, data); } -int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg) +PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(sw_create_fence, struct pvr_sw_sync_create_fence_data *) { - int err = -ENOTTY; - struct pvr_sync_file_data *fdata; - bool in_setup; - - fdata = pvr_sync_get_private_data(file); - if (!fdata) - return -EINVAL; - - switch (cmd) { - case DRM_PVR_SYNC_RENAME_CMD: - case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD: - if (!pvr_sync_set_in_setup(fdata)) - return -EBUSY; - - in_setup = true; - break; - default: - if (!pvr_sync_set_in_use(fdata)) - return -EBUSY; + return pvr_sync_api_sw_create_fence(fdata->api_private, data); +} - in_setup = false; - break; - } +PVR_SYNC_IOCTL_DISPATCH_INTERNAL_DECL(sw_inc, struct pvr_sw_timeline_advance_data *) +{ + return pvr_sync_api_sw_inc(fdata->api_private, data); +} - if (in_setup) { - if (fdata->is_sw) - err = -ENOTTY; - else - switch (cmd) { - case DRM_PVR_SYNC_RENAME_CMD: - err = pvr_sync_ioctl_rename(fdata->api_private, - arg); - break; - case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD: 
- err = pvr_sync_ioctl_force_sw_only(fdata); - break; - default: - break; - } - } else { - if (!fdata->is_sw) - err = -ENOTTY; - else - switch (cmd) { - case DRM_PVR_SW_SYNC_CREATE_FENCE_CMD: - err = pvr_sync_ioctl_sw_create_fence(fdata->api_private, - arg); - break; - case DRM_PVR_SW_SYNC_INC_CMD: - err = pvr_sync_ioctl_sw_inc(fdata->api_private, - arg); - break; - default: - break; - } - } +/* + * enum pvr_sync_ioctl_dispatch_type + * @pvr_sync_ioctl_dispatch_type_setup: The command may only be used during the setup phase + * @pvr_sync_ioctl_dispatch_type_export: The command may only be used with an export only timeline. + * @pvr_sync_ioctl_dispatch_type_software: The command may only be used with a software only timeline. + */ +enum pvr_sync_ioctl_dispatch_type { + pvr_sync_ioctl_dispatch_type_setup, + pvr_sync_ioctl_dispatch_type_export, + pvr_sync_ioctl_dispatch_type_software, +}; - if (in_setup) - pvr_sync_reset_in_setup(fdata); +/* Generates a function `from` which performs validation on the data before passing it to `into` */ +#define PVR_SYNC_IOCTL_DISPATCH_VALIDATE(name, structure, type) \ +PVR_SYNC_IOCTL_DISPATCH_DECL(name) \ +{ \ + int err = -ENOTTY; \ + structure server_data; \ + \ + struct pvr_sync_file_data *fdata = pvr_sync_get_private_data(file); \ + if (unlikely(!fdata)) \ + return -EINVAL; \ + \ + /* Check if the device is busy and the operation is valid for the timelines current state */ \ + if (type == pvr_sync_ioctl_dispatch_type_setup) \ + { \ + if (!pvr_sync_set_in_setup(fdata)) \ + return -EBUSY; \ + if (fdata->is_sw || fdata->is_export) \ + goto return_; \ + } \ + else \ + { \ + if (!pvr_sync_set_in_use(fdata)) \ + return -EBUSY; \ + \ + switch (type) \ + { \ + case pvr_sync_ioctl_dispatch_type_software: { \ + if (!fdata->is_sw) /* Not a software timeline, software operations cannot be used */ \ + goto return_; \ + } \ + break; \ + case pvr_sync_ioctl_dispatch_type_export: { \ + if (!fdata->is_export) /* Not an export timeline, export operations cannot be used */ \ + goto return_; \ + } \ + break; \ + default: \ + goto return_; /* Invalid Type */ \ + } \ + } \ + \ + /* copy_from_user */ \ + err = pvr_sync_ioctl_dispatch_copy_in__##name((structure __user *)user_data, &server_data); \ + if (unlikely(err)) \ + goto return_; \ + \ + /* Continue into api */ \ + err = pvr_sync_ioctl_common_internal_ ## name(fdata, (structure __force *) PVR_SYNC_IOCTL_DISPATCH_DATA(user_data, &server_data)); \ + \ + if (likely(!err)) \ + { \ + /* copy_to_user */ \ + err = pvr_sync_ioctl_dispatch_copy_out__##name((structure __user *)user_data, &server_data); \ + } \ + \ +return_: \ + if (type == pvr_sync_ioctl_dispatch_type_setup) \ + pvr_sync_reset_in_setup(fdata); \ + return err; \ +} - return err; +#if !defined(USE_PVRSYNC_DEVNODE) +/* drm_ioctl() already copies the data over, see comment on drm_ioctl_t */ +#define PVR_SYNC_IOCTL_DISPATCH_DATA(pUM, pKM) pUM +#define PVR_SYNC_IOCTL_DISPATCH_COPY_WRAPPER(dir, name, structure, copy) \ +INLINE static int pvr_sync_ioctl_dispatch_copy_ ## dir ## __ ## name (structure __user *pUM, structure *pKM) \ +{ return 0; } +#else /* !defined(USE_PVRSYNC_DEVNODE) */ +/* Generates a function to copy over the arguments to/from user-mode */ +#define PVR_SYNC_IOCTL_DISPATCH_DATA(pUM, pKM) pKM +#define PVR_SYNC_IOCTL_DISPATCH_COPY_WRAPPER(dir, name, structure, copy) \ +INLINE static int pvr_sync_ioctl_dispatch_copy_ ## dir ## __ ## name (structure __user *pUM, structure *pKM) \ +{ \ + /* May be unused if there are no in/out args */ \ + 
PVR_UNREFERENCED_PARAMETER(pUM); \ + PVR_UNREFERENCED_PARAMETER(pKM); \ + /* Copy over the data */ \ + { copy } \ + return 0; \ +} +#endif /* !defined(USE_PVRSYNC_DEVNODE) */ + +/*************************************************************************/ /*! +@Function PVR_SYNC_IOCTL_DISPATCH_COPY +@Description Generates the code to copy from/to user mode depending on @dir. +@Input dir Either `from` or `to` corresponding to `copy_from_user` + and `copy_to_user` respectively. +@Input to Pointer for the dest. +@Input from Pointer for the src. +*/ /**************************************************************************/ +#define PVR_SYNC_IOCTL_DISPATCH_COPY(dir, to, from) \ +if (pvr_copy_##dir##_user(to, from, sizeof(*pKM))) \ +{ \ + PVR_DPF((PVR_DBG_ERROR, "Failed copy " #dir " user")); \ + return -EFAULT; \ } + +/* Copy data from user */ +#define PVR_SYNC_IOCTL_DISPATCH_COPY_IN \ +PVR_SYNC_IOCTL_DISPATCH_COPY(from, pKM, pUM) + +/* Copy data to user */ +#define PVR_SYNC_IOCTL_DISPATCH_COPY_OUT \ +PVR_SYNC_IOCTL_DISPATCH_COPY(to, pUM, pKM) + +/* Copy no data */ +#define PVR_SYNC_IOCTL_DISPATCH_COPY_NONE + +/*************************************************************************/ /*! +@Function PVR_SYNC_IOCTL_DISPATCH_FUNCTION +@Description Generates a dispatch function + `pvr_sync_ioctl_common_##name(struct file*, void*)` + which validates the ioctl command and dispatches it to + an internal function + `pvr_sync_ioctl_common_internal_##name(struct pvr_sync_file_data*, structure*)` +@Input name A name for the dispatch functions, must be unique. +@Input structure The type of the user_data. +@Input type Specifies when the ioctl is allowed, i.e. which + timeline configuration (setup, software or + export) the command is valid for.
+@Input copy_in Either `COPY_IN` or `COPY_NONE` +@Input copy_out Either `COPY_OUT` or `COPY_NONE` +*/ /**************************************************************************/ +#define PVR_SYNC_IOCTL_DISPATCH_FUNCTION(name, structure, type, copy_in, copy_out) \ + PVR_SYNC_IOCTL_DISPATCH_COPY_WRAPPER(in, name, structure, PVR_SYNC_IOCTL_DISPATCH_ ## copy_in) \ + PVR_SYNC_IOCTL_DISPATCH_COPY_WRAPPER(out, name, structure, PVR_SYNC_IOCTL_DISPATCH_ ## copy_out) \ + PVR_SYNC_IOCTL_DISPATCH_VALIDATE(name, structure, pvr_sync_ioctl_dispatch_type_ ## type) + + +PVR_SYNC_IOCTL_DISPATCH_FUNCTION(rename, struct pvr_sync_rename_ioctl_data, + setup, + COPY_IN, + COPY_NONE); + +PVR_SYNC_IOCTL_DISPATCH_FUNCTION(force_sw_only, void*, + setup, + COPY_NONE, + COPY_NONE); + +PVR_SYNC_IOCTL_DISPATCH_FUNCTION(force_exp_only, void*, + setup, + COPY_NONE, + COPY_NONE); + +PVR_SYNC_IOCTL_DISPATCH_FUNCTION(sw_create_fence, struct pvr_sw_sync_create_fence_data, + software, + COPY_IN, + COPY_OUT); + +PVR_SYNC_IOCTL_DISPATCH_FUNCTION(create_export_fence, pvr_exp_sync_create_fence_data_t, + export, + COPY_IN, + COPY_OUT); + +PVR_SYNC_IOCTL_DISPATCH_FUNCTION(sw_inc, struct pvr_sw_timeline_advance_data, + software, + COPY_NONE, + COPY_OUT); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.h index ef12dc298368..0959e7673164 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_common.h @@ -46,16 +46,30 @@ struct file; -/* Functions provided by pvr_sync_ioctl_common */ +/* Functions provided by pvr_sync_ioctl_common.c */ int pvr_sync_open_common(void *connection_data, void *file_handle); int pvr_sync_close_common(void *connection_data); -int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg); void *pvr_sync_get_api_priv_common(struct file *file); +#if defined(USE_PVRSYNC_DEVNODE) +#define PVR_SYNC_IOCTL_DISPATCH_DECL(name) \ + int pvr_sync_ioctl_common_ ## name(struct file *file, void __user *user_data) +#else /* defined(USE_PVRSYNC_DEVNODE) */ +#define PVR_SYNC_IOCTL_DISPATCH_DECL(name) \ + int pvr_sync_ioctl_common_ ## name(struct file *file, void *user_data) +#endif /* defined(USE_PVRSYNC_DEVNODE) */ + +PVR_SYNC_IOCTL_DISPATCH_DECL(rename); +PVR_SYNC_IOCTL_DISPATCH_DECL(force_sw_only); +PVR_SYNC_IOCTL_DISPATCH_DECL(force_exp_only); +PVR_SYNC_IOCTL_DISPATCH_DECL(sw_create_fence); +PVR_SYNC_IOCTL_DISPATCH_DECL(create_export_fence); +PVR_SYNC_IOCTL_DISPATCH_DECL(sw_inc); + struct pvr_sync_file_data; -/* Functions required by pvr_sync_ioctl_common */ +/* Functions required by pvr_sync_ioctl_common.c */ bool pvr_sync_set_private_data(void *connection_data, struct pvr_sync_file_data *fdata); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.c index 423c8d3a75ef..313393d09c1f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.c @@ -127,35 +127,51 @@ void pvr_sync_close(void *connection_data) if (iErr < 0) pr_err("%s: ERROR (%d) returned by pvr_sync_close_common()\n", __func__, iErr); + + iErr = OSConnectionPrivateDataDeInit(PVRSRVConnectionPrivateData(connection_data)); + if (iErr != 0) + pr_err("%s: ERROR (%d) 
returned by OSConnectionPrivateDataDeInit()\n", + __func__, iErr); + + if (connection_data) + kfree(connection_data); } int pvr_sync_rename_ioctl(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *file) { - return pvr_sync_ioctl_common(file->filp, - DRM_PVR_SYNC_RENAME_CMD, arg); + return pvr_sync_ioctl_common_rename(file->filp, arg); } int pvr_sync_force_sw_only_ioctl(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *file) { - return pvr_sync_ioctl_common(file->filp, - DRM_PVR_SYNC_FORCE_SW_ONLY_CMD, arg); + return pvr_sync_ioctl_common_force_sw_only(file->filp, arg); } int pvr_sw_sync_create_fence_ioctl(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *file) { - return pvr_sync_ioctl_common(file->filp, - DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, arg); + return pvr_sync_ioctl_common_sw_create_fence(file->filp, arg); } int pvr_sw_sync_inc_ioctl(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *file) { - return pvr_sync_ioctl_common(file->filp, - DRM_PVR_SW_SYNC_INC_CMD, arg); + return pvr_sync_ioctl_common_sw_inc(file->filp, arg); +} + +int pvr_sync_ioctl_force_exp_only(struct drm_device __maybe_unused *dev, + void *arg, struct drm_file *file) +{ + return pvr_sync_ioctl_common_force_exp_only(file->filp, arg); +} + +int pvr_export_fence_sync_create_fence_ioctl(struct drm_device __maybe_unused *dev, + void *arg, struct drm_file *file) +{ + return pvr_sync_ioctl_common_create_export_fence(file->filp, arg); } int pvr_sync_ioctl_init(void) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.h index 756ce4bf71e6..93182205ca0b 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_sync_ioctl_drm.h @@ -58,5 +58,9 @@ int pvr_sw_sync_create_fence_ioctl(struct drm_device *dev, void *arg, struct drm_file *file); int pvr_sw_sync_inc_ioctl(struct drm_device *dev, void *arg, struct drm_file *file); +int pvr_sync_ioctl_force_exp_only(struct drm_device *dev, void *arg, + struct drm_file *file); +int pvr_export_fence_sync_create_fence_ioctl(struct drm_device *dev, void *arg, + struct drm_file *file); #endif /* _PVR_SYNC_IOCTL_DRM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/rogue_trace_events.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/rogue_trace_events.h index b079d73af784..8952bec0f102 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/rogue_trace_events.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/rogue_trace_events.h @@ -59,9 +59,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
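For readability, this is roughly the shape one generated dispatcher takes once the PVR_SYNC_IOCTL_DISPATCH_* machinery above is unrolled for the software-timeline sw_inc command on the devnode path (where the driver copies the arguments itself); the lookup and state-check helpers here are simplified stand-ins, not the real functions:

/* Sketch only: the approximate expansion of one generated dispatcher. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct pvr_sw_timeline_advance_data { __u64 sync_pt_idx; };	/* simplified */

struct pvr_sync_file_data;					/* opaque here */
extern struct pvr_sync_file_data *get_fdata(struct file *file);
extern bool fdata_in_use(struct pvr_sync_file_data *fdata);
extern bool fdata_is_sw(struct pvr_sync_file_data *fdata);
extern int do_sw_inc(struct pvr_sync_file_data *fdata,
		     struct pvr_sw_timeline_advance_data *data);

static int sw_inc_dispatch(struct file *file, void __user *user_data)
{
	struct pvr_sw_timeline_advance_data kdata = { 0 };
	struct pvr_sync_file_data *fdata = get_fdata(file);
	int err;

	if (!fdata)
		return -EINVAL;
	if (!fdata_in_use(fdata))	/* timeline must be past setup */
		return -EBUSY;
	if (!fdata_is_sw(fdata))	/* command only valid on SW timelines */
		return -ENOTTY;

	err = do_sw_inc(fdata, &kdata);	/* COPY_NONE on the way in */
	if (err)
		return err;

	/* COPY_OUT: hand the advanced sync point index back to user space. */
	if (copy_to_user(user_data, &kdata, sizeof(kdata)))
		return -EFAULT;

	return 0;
}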
#define show_usecs_from_ns(ns) \ ({ \ u64 t = ns + (NSEC_PER_USEC / 2); \ - u32 rem; \ do_div(t, NSEC_PER_USEC); \ - rem = do_div(t, USEC_PER_SEC); \ + do_div(t, USEC_PER_SEC); \ }) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) @@ -558,7 +557,7 @@ TRACE_EVENT_FN(rogue_firmware_activity, TP_fast_assign( __entry->timestamp = timestamp; - __entry->gpu_id = gpu_id, + __entry->gpu_id = gpu_id; __assign_str(task, task); __entry->fw_event = fw_event; ), diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/services_kernel_client.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/services_kernel_client.h index 64e3b9941ea1..4ec5356abbf0 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/services_kernel_client.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/services_kernel_client.h @@ -58,6 +58,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* included for the define PVRSRV_LINUX_DEV_INIT_ON_PROBE */ #include "pvr_drm.h" +#include "pvr_dma_resv.h" + #ifndef __pvrsrv_defined_struct_enum__ /* sync_external.h */ @@ -83,6 +85,7 @@ struct PVRSRV_CLIENT_SYNC_PRIM_OP; struct _PMR_; struct _PVRSRV_DEVICE_NODE_; struct dma_buf; +struct dma_resv; struct SYNC_PRIM_CONTEXT_TAG; /* pvr_notifier.h */ @@ -140,6 +143,8 @@ enum PVRSRV_ERROR_TAG PVRSRVUnregisterDriverDbgRequestNotify(void *hNotify); struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR); +struct dma_resv *PhysmemGetDmaResv(struct _PMR_ *psPMR); + /* pvrsrv.h */ enum PVRSRV_ERROR_TAG PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject); @@ -169,9 +174,9 @@ __u32 OSStringUINT32ToStr(char *pszBuf, size_t uSize, __u32 ui32Num); /* srvkm.h */ enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceCreate(void *pvOSDevice, - int i32OsDeviceID, + int i32KernelDeviceID, struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); -enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceDestroy( +void PVRSRVCommonDeviceDestroy( struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); const char *PVRSRVGetErrorString(enum PVRSRV_ERROR_TAG eError); #if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) @@ -182,6 +187,9 @@ enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceInitialise( #ifndef CHECKPOINT_PFNS typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_EXPORT_FENCE_RESOLVE_FN)(PVRSRV_FENCE export_fence, PSYNC_CHECKPOINT_CONTEXT checkpoint_context, PSYNC_CHECKPOINT *checkpoint_handle); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_EXPORT_FENCE_ROLLBACK_FN)(PVRSRV_FENCE export_fence); + #ifndef CHECKPOINT_PFNS typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)( struct _PVRSRV_DEVICE_NODE_ *device, @@ -227,6 +235,7 @@ typedef PVRSRV_ERROR(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE */ #ifndef CHECKPOINT_PFNS typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); +typedef void (*PFN_SYNC_CHECKPOINT_NOHW_SIGNAL_EXPORT_FENCE_FN)(PVRSRV_FENCE fence_to_signal); typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); #define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 @@ -237,12 +246,15 @@ typedef struct { PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; + PFN_SYNC_CHECKPOINT_NOHW_SIGNAL_EXPORT_FENCE_FN 
pfnNoHWSignalExpFence; PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; #if defined(PDUMP) PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; #endif + PFN_SYNC_CHECKPOINT_EXPORT_FENCE_RESOLVE_FN pfnExportFenceResolve; + PFN_SYNC_CHECKPOINT_EXPORT_FENCE_ROLLBACK_FN pfnExportFenceRollback; } PFN_SYNC_CHECKPOINT_STRUCT; enum PVRSRV_ERROR_TAG SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); @@ -273,6 +285,8 @@ const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); #if defined(SUPPORT_NATIVE_FENCE_SYNC) struct _PVRSRV_DEVICE_NODE_ *SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); #endif +IMG_BOOL SyncCheckpointCommonDeviceIDs(PSYNC_CHECKPOINT_CONTEXT psSyncContext, IMG_HANDLE hDevRef); +enum PVRSRV_ERROR_TAG SyncCheckpointGetCounters(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 *puiInUse, IMG_UINT32 *puiMax, IMG_UINT32 *puiXDInUse, IMG_UINT32*puiXDMax); #endif @@ -284,6 +298,11 @@ struct _PVRSRV_DEVICE_NODE_ *SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_ @Return struct workqueue_struct ptr on success, NULL otherwise. */ /**************************************************************************/ struct workqueue_struct *NativeSyncGetFenceStatusWq(void); +/*************************************************************************/ /*! +@Function NativeSyncGetFenceCtxDestroyWq +@Return struct workqueue_struct ptr on success, NULL otherwise. +*/ /**************************************************************************/ +struct workqueue_struct *NativeSyncGetFenceCtxDestroyWq(void); #endif #endif /* __SERVICES_KERNEL_CLIENT__ */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/srvinit_param.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/srvinit_param.c deleted file mode 100644 index b3103b6604c8..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/srvinit_param.c +++ /dev/null @@ -1,216 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title Services initialisation parameter routines -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Device specific functions -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. 
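The new pfnExportFenceResolve, pfnExportFenceRollback and pfnNoHWSignalExpFence members above extend an ops table that a sync implementation fills in and registers; hooks that do not apply (for example the NoHW signal hook on real hardware) are left NULL, so callers presumably test them before use. A schematic, non-kernel example of that shape, with made-up names:

/* Sketch only: an ops table with optional, NULL-checked hooks. */
#include <stddef.h>

typedef int  (*impl_resolve_cb)(int fence_fd);
typedef void (*impl_signal_cb)(int fence_fd);

struct sync_impl_ops {
	impl_resolve_cb resolve_export_fence;	/* needed for export fences */
	impl_signal_cb  nohw_signal_export;	/* optional: NULL on real HW */
};

static const struct sync_impl_ops *registered_ops;

int sync_impl_register(const struct sync_impl_ops *ops)
{
	registered_ops = ops;
	return 0;
}

void sync_nohw_signal(int fence_fd)
{
	/* Optional hook: only provided by NO_HARDWARE builds. */
	if (registered_ops && registered_ops->nohw_signal_export)
		registered_ops->nohw_signal_export(fence_fd);
}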
- -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -/* - * This is an example file which is presented as a means of providing the - * necessary access mechanisms to allow Services Server settings to be set - * and/or queried in non-Linux operating systems. - * - * The access mechanisms detailed here require the same functionality present - * in the Services Client/UM to access AppHints. This is an example and it may - * or may not be appropriate for the target OS. The implementation approach - * taken needs to be considered in the context of the target OS. - * - * Consult the PowerVR Rogue DDK Services AppHint Guide for more details on - * the Application Hints subsystem and the - * Rogue DDK Services OS Porting Reference for explanations of the interface(s) - * provided by the routines shown in this file. 
- */ - -#include "img_defs.h" -#include "pvr_debug.h" -#include "allocmem.h" - -#include "osfunc.h" -#include "os_srvinit_param.h" - -void * -SrvInitParamOpen(void) -{ - void *pvHintState = NULL; - - PVRSRVCreateAppHintState(IMG_SRVCLIENT, 0, &pvHintState); - - return pvHintState; -} - -void -SrvInitParamClose(void *pvState) -{ - PVRSRVFreeAppHintState(IMG_SRVCLIENT, pvState); -} - -void -_SrvInitParamGetBOOL(void *pvState, const IMG_CHAR *pszName, const IMG_BOOL *pbDefault, IMG_BOOL *pbValue) -{ - (void) PVRSRVGetAppHint(pvState, pszName, IMG_BOOL_TYPE, pbDefault, pbValue); -} - -void -_SrvInitParamGetUINT32(void *pvState, const IMG_CHAR *pszName, const IMG_UINT32 *pui32Default, IMG_UINT32 *pui32Value) -{ - (void) PVRSRVGetAppHint(pvState, pszName, IMG_UINT_TYPE, pui32Default, pui32Value); -} - -void -_SrvInitParamGetSTRING(void *pvState, const IMG_CHAR *pszName, const IMG_CHAR *psDefault, IMG_CHAR *pBuffer, size_t size) -{ - PVR_UNREFERENCED_PARAMETER(size); - (void) PVRSRVGetAppHint(pvState, pszName, IMG_STRING_TYPE, psDefault, pBuffer); -} - -void -_SrvInitParamGetUINT32BitField(void *pvState, const IMG_CHAR *pszBaseName, IMG_UINT32 uiDefault, const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, IMG_UINT32 uiSize, IMG_UINT32 *puiValue) -{ - IMG_UINT32 uiValue = uiDefault; - IMG_CHAR *pszName; - const IMG_UINT32 baseLen = OSStringLength(pszBaseName); - IMG_UINT32 extraLen = 0; - IMG_UINT32 i, ui32Size; - - for (i = 0; i < uiSize; i++) - { - unsigned len = OSStringLength(psLookup[i].pszValue); - - if (len > extraLen) - { - extraLen = len; - } - } - extraLen++; - - ui32Size = baseLen + extraLen; - pszName = OSAllocMem(ui32Size); - - if (pszName != NULL) - { - (void) OSStringLCopy(pszName, pszBaseName, ui32Size); - - for (i = 0; i < uiSize; i++) - { - const IMG_CHAR *pszExtra = psLookup[i].pszValue; - IMG_UINT32 uiBDefault = 0; - IMG_UINT32 uiBValue; - IMG_UINT32 j; - - for (j = 0; pszExtra[j] != 0; j++) - { - pszName[baseLen + j] = toupper(pszExtra[j]); - } - pszName[baseLen + j] = 0; - - PVRSRVGetAppHint(pvState, pszName, IMG_UINT_TYPE, &uiBDefault, &uiBValue); - - if (uiBValue != 0) - { - uiValue |= psLookup[i].ui32Value; - } - } - - OSFreeMem(pszName); - } - else - { - PVR_DPF((PVR_DBG_WARNING, "Failed to get apphint %s, will use the default value", pszBaseName)); - } - - *puiValue = uiValue; -} - -void -_SrvInitParamGetUINT32List(void *pvState, const IMG_CHAR *pszName, IMG_UINT32 uiDefault, const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, IMG_UINT32 uiSize, IMG_UINT32 *puiValue) -{ - IMG_UINT32 uiValue = uiDefault; - IMG_CHAR acValue[APPHINT_MAX_STRING_SIZE]; - IMG_UINT32 i; - - if (PVRSRVGetAppHint(pvState, pszName, IMG_STRING_TYPE, "", &acValue)) - { - IMG_CHAR *pszParam; - - acValue[APPHINT_MAX_STRING_SIZE - 1] = '\0'; - - pszParam = &acValue[OSStringLength(acValue)]; - - /* Strip trailing blanks */ - while (pszParam >= &acValue[0]) - { - if (*pszParam != '\0' && *pszParam != ' ' && *pszParam != '\t') - { - break; - } - - *(pszParam--) = '\0'; - } - - pszParam = &acValue[0]; - - /* Strip leading blanks */ - while (*pszParam == ' ' || *pszParam == '\t') - { - pszParam++; - } - - for (i = 0; i < uiSize; i++) - { - if (OSStringNCompare(pszParam, psLookup[i].pszValue, APPHINT_MAX_STRING_SIZE) == 0) - { - uiValue = psLookup[i].ui32Value; - break; - } - } - if (i == uiSize) - { - if (OSStringLength(acValue) == 0) - { - PVR_DPF((PVR_DBG_WARNING, "No value set for initialisation parameter %s", pszName)); - } - else - { - PVR_DPF((PVR_DBG_WARNING, "Unrecognised value (%s) for initialisation parameter 
%s", acValue, pszName)); - } - } - } - - *puiValue = uiValue; -} diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.c index 7201e99eb061..9fdd8598f2ef 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.c +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.c @@ -54,9 +54,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "trace_events.h" +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) #include "rogue_trace_events.h" +#endif #include "sync_checkpoint_external.h" +#if defined(PVRSRV_TRACE_ROGUE_EVENTS) + static bool fence_update_event_enabled, fence_check_event_enabled; bool trace_rogue_are_fence_updates_traced(void) @@ -229,30 +233,23 @@ void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); } } -#endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) int PVRGpuTraceEnableUfoCallbackWrapper(void) { - -#if defined(SUPPORT_RGX) PVRGpuTraceEnableUfoCallback(); -#endif - return 0; } int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void) { - -#if defined(SUPPORT_RGX) PVRGpuTraceEnableFirmwareActivityCallback(); -#endif - return 0; } #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ +#endif /* defined(SUPPORT_RGX) */ +#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, IMG_UINT64 ui64Size) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.h b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.h index 0c72161f8027..5a4d0567688b 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/trace_events.h @@ -49,7 +49,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * enabled, just like the actual trace event functions that the kernel * defines for us. 
*/ -#ifdef CONFIG_EVENT_TRACING + +#if defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) bool trace_rogue_are_fence_checks_traced(void); bool trace_rogue_are_fence_updates_traced(void); @@ -97,15 +98,7 @@ void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, IMG_UINT32 ui32UFOCount, const RGX_HWPERF_UFO_DATA_ELEMENT *puData); #endif /* if defined(SUPPORT_RGX) */ - -void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, - IMG_UINT64 ui64Size); - -void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, - IMG_UINT32 ui32Pid, - IMG_UINT64 ui64Size); - -#else /* CONFIG_TRACE_EVENTS */ +#else /* defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) */ static inline bool trace_rogue_are_fence_checks_traced(void) { @@ -176,7 +169,16 @@ void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, { } #endif /* if defined(SUPPORT_RGX)*/ +#endif /* defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) */ + +#if defined(CONFIG_EVENT_TRACING) +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size); +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size); +#else static inline void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, IMG_UINT64 ui64Size) @@ -189,7 +191,6 @@ void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, IMG_UINT64 ui64Size) { } - -#endif /* CONFIG_TRACE_EVENTS */ +#endif /* defined(CONFIG_EVENT_TRACING) */ #endif /* TRACE_EVENTS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/connection_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/connection_server.h index d11a6eae8bd9..673c37c02a3c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/connection_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/connection_server.h @@ -84,7 +84,7 @@ typedef struct _CONNECTION_DATA_ IMG_HANDLE hClientTLStream; -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) /* * Connection-based values per application which can be modified by the * AppHint settings 'OSid, OSidReg, bOSidAxiProtReg' for each application. @@ -94,12 +94,11 @@ typedef struct _CONNECTION_DATA_ IMG_UINT32 ui32OSid; IMG_UINT32 ui32OSidReg; IMG_BOOL bOSidAxiProtReg; -#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ +#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ #if defined(SUPPORT_DMA_TRANSFER) IMG_BOOL bAcceptDmaRequests; ATOMIC_T ui32NumDmaTransfersInFlight; - POS_LOCK hDmaReqLock; IMG_HANDLE hDmaEventObject; #endif /* Structure which is hooked into the cleanup thread work list */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/dc_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/dc_server.h deleted file mode 100644 index cf30a3dc4cfa..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/dc_server.h +++ /dev/null @@ -1,173 +0,0 @@ -/**************************************************************************/ /*! -@File -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. 
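The trace_events.h reshuffle above follows the usual pattern for optional tracepoints: real declarations when both CONFIG_EVENT_TRACING and PVRSRV_TRACE_ROGUE_EVENTS are defined, and empty static inline stubs otherwise so call sites stay free of #ifdefs. In miniature, with a made-up event name:

/* Sketch only: compile-time selectable tracepoint wrappers. */
#include <linux/types.h>

#if defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS)

bool trace_my_events_enabled(void);
void trace_my_event(unsigned int id, unsigned long long timestamp);

#else	/* tracing disabled: stubs keep the call sites unconditional */

static inline bool trace_my_events_enabled(void)
{
	return false;
}

static inline void trace_my_event(unsigned int id,
				  unsigned long long timestamp)
{
}

#endif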
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ /***************************************************************************/ - -#ifndef DC_SERVER_H -#define DC_SERVER_H - -#include "img_types.h" -#include "pvrsrv_error.h" -#include "pvrsrv_surface.h" -#include "pmr.h" -#include "kerneldisplay.h" -#include "sync_server.h" - -#include "dc_external.h" - -#define DC_MAX_DEVICE_COUNT (1) -#define DC_MAX_PANEL_COUNT (1) -#define DC_MAX_FORMATS (1) -#define DC_MAX_DIMENSIONS (1) -#define DC_MAX_PIPE_COUNT (1) - -typedef struct _DC_DEVICE_ DC_DEVICE; -typedef struct _DC_DISPLAY_CONTEXT_ DC_DISPLAY_CONTEXT; -typedef struct _DC_BUFFER_ DC_BUFFER; -typedef DC_BUFFER* DC_PIN_HANDLE; - -PVRSRV_ERROR DCDevicesQueryCount(IMG_UINT32 *pui32DeviceCount); - -PVRSRV_ERROR DCDevicesEnumerate(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32DeviceArraySize, - IMG_UINT32 *pui32DeviceCount, - IMG_UINT32 *paui32DeviceIndex); - -PVRSRV_ERROR DCDeviceAcquire(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32DeviceIndex, - DC_DEVICE **ppsDevice); - -PVRSRV_ERROR DCDeviceRelease(DC_DEVICE *psDevice); - -PVRSRV_ERROR DCGetInfo(DC_DEVICE *psDevice, - DC_DISPLAY_INFO *psDisplayInfo); - -PVRSRV_ERROR DCPanelQueryCount(DC_DEVICE *psDevice, - IMG_UINT32 *pui32NumPanels); - -PVRSRV_ERROR DCPanelQuery(DC_DEVICE *psDevice, - IMG_UINT32 ui32PanelsArraySize, - IMG_UINT32 *pui32NumPanels, - PVRSRV_PANEL_INFO *pasPanelInfo); - -PVRSRV_ERROR DCFormatQuery(DC_DEVICE *psDevice, - IMG_UINT32 ui32FormatArraySize, - PVRSRV_SURFACE_FORMAT *pasFormat, - IMG_UINT32 *pui32Supported); - -PVRSRV_ERROR DCDimQuery(DC_DEVICE *psDevice, - IMG_UINT32 ui32DimSize, - PVRSRV_SURFACE_DIMS *pasDim, - IMG_UINT32 *pui32Supported); - -PVRSRV_ERROR DCSetBlank(DC_DEVICE *psDevice, - IMG_BOOL bEnabled); - -PVRSRV_ERROR DCSetVSyncReporting(DC_DEVICE *psDevice, - IMG_BOOL bEnabled); - -PVRSRV_ERROR DCLastVSyncQuery(DC_DEVICE *psDevice, - IMG_INT64 *pi64Timestamp); - -PVRSRV_ERROR DCSystemBufferAcquire(DC_DEVICE *psDevice, - IMG_UINT32 *pui32ByteStride, - DC_BUFFER **ppsBuffer); - -PVRSRV_ERROR DCSystemBufferRelease(DC_BUFFER *psBuffer); - -PVRSRV_ERROR DCDisplayContextCreate(DC_DEVICE *psDevice, - DC_DISPLAY_CONTEXT **ppsDisplayContext); - -PVRSRV_ERROR DCDisplayContextFlush(void); - -PVRSRV_ERROR DCDisplayContextConfigureCheck(DC_DISPLAY_CONTEXT *psDisplayContext, - IMG_UINT32 ui32PipeCount, - PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib, - DC_BUFFER **papsBuffers); - -PVRSRV_ERROR DCDisplayContextConfigure(DC_DISPLAY_CONTEXT *psDisplayContext, - IMG_UINT32 ui32PipeCount, - PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib, - DC_BUFFER **papsBuffers, - IMG_UINT32 ui32DisplayPeriod, - IMG_UINT32 ui32MaxDepth, - PVRSRV_FENCE iAcquireFence, - PVRSRV_TIMELINE iReleaseFenceTimeline, - PVRSRV_FENCE *piReleaseFence); - -PVRSRV_ERROR DCDisplayContextDestroy(DC_DISPLAY_CONTEXT *psDisplayContext); - -PVRSRV_ERROR DCBufferAlloc(DC_DISPLAY_CONTEXT *psDisplayContext, - DC_BUFFER_CREATE_INFO *psSurfInfo, - IMG_UINT32 *pui32ByteStride, - DC_BUFFER **ppsBuffer); - -PVRSRV_ERROR DCBufferFree(DC_BUFFER *psBuffer); - -PVRSRV_ERROR DCBufferImport(DC_DISPLAY_CONTEXT *psDisplayContext, - IMG_UINT32 ui32NumPlanes, - PMR **papsImport, - DC_BUFFER_IMPORT_INFO *psSurfAttrib, - DC_BUFFER **ppsBuffer); - -PVRSRV_ERROR DCBufferUnimport(DC_BUFFER *psBuffer); - -PVRSRV_ERROR DCBufferAcquire(DC_BUFFER *psBuffer, - PMR **psPMR); - -PVRSRV_ERROR DCBufferRelease(PMR *psPMR); - -PVRSRV_ERROR DCBufferPin(DC_BUFFER *psBuffer, DC_PIN_HANDLE *phPin); - -PVRSRV_ERROR 
DCBufferUnpin(DC_PIN_HANDLE hPin); - -PVRSRV_ERROR DCResetDevice(DC_DEVICE *psDevice); - -PVRSRV_ERROR DCInit(void); -PVRSRV_ERROR DCDeInit(void); - -#if defined(INTEGRITY_OS) -IMG_HANDLE DCDisplayContextGetHandle(DC_DISPLAY_CONTEXT *psDisplayContext); -IMG_UINT32 DCDeviceGetIndex(IMG_HANDLE hDevice); -IMG_HANDLE DCDeviceGetDeviceAtIndex(IMG_UINT32 ui32DeviceIndex); -#endif - -#endif /*DC_SERVER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/debug_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/debug_common.h index e8b902f47114..aba576d273a1 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/debug_common.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/debug_common.h @@ -49,7 +49,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PVRSRV_ERROR DebugCommonInitDriver(void); void DebugCommonDeInitDriver(void); +const IMG_CHAR *PVRSRVGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState); +const IMG_CHAR *PVRSRVGetDebugHealthStatusString(PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus); +const IMG_CHAR *PVRSRVGetDebugHealthReasonString(PVRSRV_DEVICE_HEALTH_REASON eHealthReason); + PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); +typedef struct _IMG_FLAGS2DESC_ +{ + IMG_UINT32 uiFlag; + const IMG_CHAR *pszLabel; +} IMG_FLAGS2DESC; + +void DebugCommonFlagStrings(IMG_CHAR *psDesc, + IMG_UINT32 ui32DescSize, + const IMG_FLAGS2DESC *psConvTable, + IMG_UINT32 ui32TableSize, + IMG_UINT32 ui32Flags); + #endif /* DEBUG_COMMON_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/device.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/device.h index 40d91120ab8f..e301ef1738e8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/device.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/device.h @@ -52,7 +52,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "srvkm.h" #include "physheap.h" #include "sync_internal.h" -#include "sysinfo.h" #include "dllist.h" #include "rgx_bvnc_defs_km.h" @@ -77,6 +76,7 @@ struct _CONNECTION_DATA_; @Input psDeviceNode Pointer to device node to allocate the UFO for. + @Input ui32RequestedSize Minimum size of allocation requested @Output ppsMemDesc Pointer to pointer for the memdesc of the allocation @Output pui32SyncAddr FW Base address of the UFO block @@ -85,9 +85,10 @@ struct _CONNECTION_DATA_; @Return PVRSRV_OK if allocation was successful */ /**************************************************************************/ typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, - DEVMEM_MEMDESC **ppsMemDesc, - IMG_UINT32 *pui32SyncAddr, - IMG_UINT32 *puiSyncPrimBlockSize); + IMG_UINT32 ui32RequestedSize, + DEVMEM_MEMDESC **ppsMemDesc, + IMG_UINT32 *pui32SyncAddr, + IMG_UINT32 *puiSyncPrimBlockSize); /*************************************************************************/ /*! 
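
Annotation on the debug_common.h hunk above: it exports the IMG_FLAGS2DESC pair and DebugCommonFlagStrings() so other server modules can render flag words as text. A minimal usage sketch follows; the flag values, labels and ExampleDumpFlags() helper are invented for illustration, and it is assumed (not shown in this patch) that DebugCommonFlagStrings() appends the label of every set bit into the supplied buffer and that ui32TableSize is the entry count.

    #define EXAMPLE_FLAG_A 0x1U   /* hypothetical flag bits, not from the driver */
    #define EXAMPLE_FLAG_B 0x2U

    static const IMG_FLAGS2DESC asExampleFlags[] =
    {
        { EXAMPLE_FLAG_A, "FLAG_A " },
        { EXAMPLE_FLAG_B, "FLAG_B " },
    };

    static void ExampleDumpFlags(IMG_UINT32 ui32Flags)
    {
        IMG_CHAR acDesc[64] = "";

        /* Build a readable description of the set bits. */
        DebugCommonFlagStrings(acDesc, sizeof(acDesc),
                               asExampleFlags, ARRAY_SIZE(asExampleFlags),
                               ui32Flags);
        /* acDesc is expected to read e.g. "FLAG_A FLAG_B " when both bits are set. */
    }
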
@Function FreeUFOBlockCallback @@ -108,7 +109,7 @@ typedef struct _PVRSRV_DEVICE_IDENTIFIER_ IMG_CHAR *pszPDumpRegName; /* Under Linux, this is the minor number of RenderNode corresponding to this Device */ - IMG_INT32 i32OsDeviceID; + IMG_INT32 i32KernelDeviceID; /* Services layer enumeration of the device used in pvrdebug */ IMG_UINT32 ui32InternalID; } PVRSRV_DEVICE_IDENTIFIER; @@ -125,17 +126,12 @@ typedef struct _DEVICE_MEMORY_INFO_ } DEVICE_MEMORY_INFO; #define MMU_BAD_PHYS_ADDR (0xbadbad00badULL) -#define DUMMY_PAGE ("DUMMY_PAGE") -#define DEV_ZERO_PAGE ("DEV_ZERO_PAGE") -#define PVR_DUMMY_PAGE_INIT_VALUE (0x0) -#define PVR_ZERO_PAGE_INIT_VALUE (0x0) typedef struct __DEFAULT_PAGE__ { /*Page handle for the page allocated (UMA/LMA)*/ PG_HANDLE sPageHandle; POS_LOCK psPgLock; - ATOMIC_T atRefCounter; /*Default page size in terms of log2 */ IMG_UINT32 ui32Log2PgSize; IMG_UINT64 ui64PgPhysAddr; @@ -144,35 +140,61 @@ typedef struct __DEFAULT_PAGE__ #endif } PVRSRV_DEF_PAGE; +#define PVRSRV_DEVICE_STATE_LIST \ + X(UNDEFINED) \ + X(CREATING) \ + X(CREATED) \ + X(ACTIVE) \ + X(FROZEN) \ + X(DEINIT) \ + X(DESTRUCTING) \ + X(BAD) \ + X(PCI_ERROR) \ + X(LAST) \ + typedef enum _PVRSRV_DEVICE_STATE_ { - PVRSRV_DEVICE_STATE_UNDEFINED = 0, - PVRSRV_DEVICE_STATE_INIT, - PVRSRV_DEVICE_STATE_ACTIVE, - PVRSRV_DEVICE_STATE_DEINIT, - PVRSRV_DEVICE_STATE_BAD, +#define X(_name) PVRSRV_DEVICE_STATE_ ## _name, + PVRSRV_DEVICE_STATE_LIST +#undef X + } PVRSRV_DEVICE_STATE; +#define PVRSRV_DEVICE_HEALTH_STATUS_LIST \ + X(UNDEFINED) \ + X(OK) \ + X(NOT_RESPONDING) \ + X(DEAD) \ + X(FAULT) \ + X(LAST) \ + typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_ { - PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0, - PVRSRV_DEVICE_HEALTH_STATUS_OK, - PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING, - PVRSRV_DEVICE_HEALTH_STATUS_DEAD, - PVRSRV_DEVICE_HEALTH_STATUS_FAULT +#define X(_name) PVRSRV_DEVICE_HEALTH_STATUS_ ## _name, + PVRSRV_DEVICE_HEALTH_STATUS_LIST +#undef X + } PVRSRV_DEVICE_HEALTH_STATUS; +#define PVRSRV_DEVICE_HEALTH_REASON_LIST \ + X(NONE) \ + X(ASSERTED) \ + X(POLL_FAILING) \ + X(TIMEOUTS) \ + X(QUEUE_CORRUPT) \ + X(QUEUE_STALLED) \ + X(IDLING) \ + X(RESTARTING) \ + X(MISSING_INTERRUPTS) \ + X(PCI_ERROR) \ + X(LAST) \ + typedef enum _PVRSRV_DEVICE_HEALTH_REASON_ { - PVRSRV_DEVICE_HEALTH_REASON_NONE = 0, - PVRSRV_DEVICE_HEALTH_REASON_ASSERTED, - PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING, - PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS, - PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, - PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED, - PVRSRV_DEVICE_HEALTH_REASON_IDLING, - PVRSRV_DEVICE_HEALTH_REASON_RESTARTING, - PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS +#define X(_name) PVRSRV_DEVICE_HEALTH_REASON_ ## _name, + PVRSRV_DEVICE_HEALTH_REASON_LIST +#undef X + } PVRSRV_DEVICE_HEALTH_REASON; typedef enum _PVRSRV_DEVICE_DEBUG_DUMP_STATUS_ @@ -190,6 +212,13 @@ typedef struct DI_GROUP DI_GROUP; typedef struct DI_ENTRY DI_ENTRY; #endif +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) +#ifndef DI_VZ_DATA_DEFINED +#define DI_VZ_DATA_DEFINED +typedef struct DI_VZ_DATA DI_VZ_DATA; +#endif +#endif + typedef struct _PVRSRV_DEVICE_DEBUG_INFO_ { DI_GROUP *psGroup; @@ -200,25 +229,29 @@ typedef struct _PVRSRV_DEVICE_DEBUG_INFO_ DI_ENTRY *psFWGCOVEntry; #endif DI_ENTRY *psFWMappingsEntry; -#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) +#if defined(SUPPORT_RISCV_GDB) DI_ENTRY *psRiscvDmiDIEntry; IMG_UINT64 ui64RiscvDmi; #endif DI_ENTRY *psDevMemEntry; IMG_HANDLE hGpuUtilUserDebugFS; #endif /* SUPPORT_RGX */ -#ifdef SUPPORT_VALIDATION - DI_ENTRY 
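
Annotation on the device.h hunk above: the device state, health status and health reason enums are converted to X-macro lists, which pairs naturally with the PVRSRVGetDebugDevStateString()/HealthStatusString()/HealthReasonString() helpers now declared in debug_common.h. The patch does not include those implementations; the sketch below only illustrates how the same list can be expanded a second time into a name table (ExampleDevStateToString() is a hypothetical name, not part of the driver).

    static const IMG_CHAR *const apszExampleDevStateNames[] =
    {
    /* Expand the same X-macro list into string literals, in enum order. */
    #define X(_name) #_name,
        PVRSRV_DEVICE_STATE_LIST
    #undef X
    };

    static const IMG_CHAR *ExampleDevStateToString(PVRSRV_DEVICE_STATE eState)
    {
        if (eState >= PVRSRV_DEVICE_STATE_LAST)
        {
            return "UNKNOWN";
        }
        return apszExampleDevStateNames[eState];
    }
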
*psRGXRegsEntry; -#endif /* SUPPORT_VALIDATION */ -#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS - DI_ENTRY *psPowMonEntry; -#endif #ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS DI_ENTRY *psPowerDataEntry; #endif #if defined(PVRSRV_ENABLE_PROCESS_STATS) DI_ENTRY *psPowerTimingStatsEntry; #endif +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) + DI_GROUP *psVZGroup; + DI_GROUP *apsVZDriverGroups[RGX_NUM_DRIVERS_SUPPORTED]; + DI_ENTRY *apsVZDriverPriorityDIEntries[RGX_NUM_DRIVERS_SUPPORTED]; + DI_ENTRY *apsVZDriverTimeSliceDIEntries[RGX_NUM_DRIVERS_SUPPORTED]; + DI_ENTRY *psVZDriverTimeSliceIntervalDIEntry; + DI_ENTRY *apsVZDriverIsolationGroupDIEntries[RGX_NUM_DRIVERS_SUPPORTED]; + DI_VZ_DATA *apsVZDriverData[RGX_NUM_DRIVERS_SUPPORTED]; + DI_ENTRY *psVZDriverConnectionCooldownPeriodDIEntry; +#endif } PVRSRV_DEVICE_DEBUG_INFO; #if defined(PVRSRV_DEBUG_LISR_EXECUTION) @@ -237,7 +270,7 @@ typedef struct _LISR_EXECUTION_INFO_ /* snapshot from the last LISR invocation */ #if defined(RGX_FW_IRQ_OS_COUNTERS) - IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_OS_SUPPORTED]; + IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_DRIVERS_SUPPORTED]; #else IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM]; #endif @@ -246,12 +279,28 @@ typedef struct _LISR_EXECUTION_INFO_ IMG_UINT64 ui64Clockns; } LISR_EXECUTION_INFO; -#define UPDATE_LISR_DBG_STATUS(status) psDeviceNode->sLISRExecutionInfo.ui32Status = (status) +#if !defined(__CHECKER__) +#define UPDATE_LISR_DBG_STATUS(status) \ + do { \ + psDeviceNode->sLISRExecutionInfo.ui32Status = (status); \ + if ((status > RGX_LISR_INIT) && (status < RGX_LISR_PROCESSED)) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "%s: IRQ %" IMG_UINT64_FMTSPEC " rejected: %s", __func__, psDeviceNode->ui64nLISR, #status)); \ + } \ + } while (0) + #define UPDATE_LISR_DBG_SNAPSHOT(idx, val) psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[idx] = (val) #define UPDATE_LISR_DBG_TIMESTAMP() psDeviceNode->sLISRExecutionInfo.ui64Clockns = OSClockns64() #define UPDATE_LISR_DBG_COUNTER() psDeviceNode->ui64nLISR++ #define UPDATE_MISR_DBG_COUNTER() psDeviceNode->ui64nMISR++ -#else +#else /*!defined(__CHECKER__) */ +#define UPDATE_LISR_DBG_STATUS(status) +#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) +#define UPDATE_LISR_DBG_TIMESTAMP() +#define UPDATE_LISR_DBG_COUNTER() +#define UPDATE_MISR_DBG_COUNTER() +#endif /* !defined(__CHECKER__) */ +#else /* defined(PVRSRV_DEBUG_LISR_EXECUTION) */ #define UPDATE_LISR_DBG_STATUS(status) #define UPDATE_LISR_DBG_SNAPSHOT(idx, val) #define UPDATE_LISR_DBG_TIMESTAMP() @@ -262,8 +311,7 @@ typedef struct _LISR_EXECUTION_INFO_ typedef struct _PVRSRV_DEVICE_NODE_ { PVRSRV_DEVICE_IDENTIFIER sDevId; - - PVRSRV_DEVICE_STATE eDevState; + PVRSRV_DEVICE_STATE eDevState; /* Set using PVRSRVDeviceSetState, not directly. */ PVRSRV_DEVICE_FABRIC_TYPE eDevFabricType; ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */ @@ -284,26 +332,29 @@ typedef struct _PVRSRV_DEVICE_NODE_ POS_LOCK hPowerLock; IMG_PID uiPwrLockOwnerPID; /* Only valid between lock and corresponding unlock operations of hPowerLock */ +#if defined(DEBUG) + struct + { + const char *pszFile; /* Power lock acquired location (File) */ + IMG_UINT32 ui32LineNum; /* Power lock acquired location (Line number) */ + IMG_UINT64 ui64Timestamp; /* Power lock acquired timestamp */ + } sPowerLockOwner; +#endif + +#if defined(SUPPORT_PMR_DEFERRED_FREE) || defined(SUPPORT_MMU_DEFERRED_FREE) + IMG_UINT32 uiPowerOffCounter; /* Counts how many times the device has been powered + off. 
Incremented in PVRSRVSetDeviceCurrentPowerState().*/ + IMG_UINT32 uiPowerOffCounterNext; /* Value of next update to uiPowerOffCounter. */ +#endif /* current system device power state */ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; PPVRSRV_POWER_DEV psPowerDev; - /* multicore configuration information */ - IMG_UINT32 ui32MultiCoreNumCores; /* total cores primary + secondaries. 0 for non-multi core */ - IMG_UINT32 ui32MultiCorePrimaryId; /* primary core id for this device */ - IMG_UINT64 *pui64MultiCoreCapabilities; /* capabilities for each core */ - /* callbacks the device must support: */ - PVRSRV_ERROR (*pfnDevSLCFlushRange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, - MMU_CONTEXT *psMMUContext, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate); - PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, MMU_CONTEXT *psMMUContext, IMG_UINT64 ui64FBSCEntries); @@ -322,6 +373,18 @@ typedef struct _PVRSRV_DEVICE_NODE_ IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + /* Callback pfnMMUTopLevelPxWorkarounds may be NULL if not required */ + void (*pfnMMUTopLevelPxWorkarounds)(struct _CONNECTION_DATA_ *psConnection, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEV_PHYADDR sPhysAddrL1Px, + void *pxL1PxCpuVAddr); + + /* Callback pfnMMUTweakProtFlags may be NULL if not required */ + void (*pfnMMUTweakProtFlags)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + MMU_PROTFLAGS_T *uiMMUProtFlags); + void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); @@ -361,6 +424,8 @@ typedef struct _PVRSRV_DEVICE_NODE_ MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx); + PVRSRV_DEVICE_SNOOP_MODE (*pfnGetDeviceSnoopMode)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + PVRSRV_DEVICE_CONFIG *psDevConfig; /* device post-finalise compatibility check */ @@ -369,9 +434,15 @@ typedef struct _PVRSRV_DEVICE_NODE_ /* initialise device-specific physheaps */ PVRSRV_ERROR (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *); + /* determining the appropriate LMA allocation policy */ + PHYS_HEAP_POLICY (*pfnPhysHeapGetLMAPolicy) (PHYS_HEAP_USAGE_FLAGS, struct _PVRSRV_DEVICE_NODE_ *psDevNode); + /* initialise fw mmu, if FW not using GPU mmu, NULL otherwise. */ PVRSRV_ERROR (*pfnFwMMUInit) (struct _PVRSRV_DEVICE_NODE_ *); + /* Check device's FW Main physheap free memory */ + PVRSRV_ERROR (*pfnCheckForSufficientFWPhysMem) (struct _PVRSRV_DEVICE_NODE_ *); + /* information about the device's address space and heaps */ DEVICE_MEMORY_INFO sDevMemoryInfo; @@ -384,46 +455,48 @@ typedef struct _PVRSRV_DEVICE_NODE_ void *pvDevice; #if defined(SUPPORT_GPUVIRT_VALIDATION) - RA_ARENA *psOSSharedArena; + RA_ARENA *psOSSharedArena; RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS]; #endif - /* FW_MAIN, FW_CONFIG and FW_GUEST heaps. Acquired during device - * initialisation. Guest heaps may be acquired dynamically in PVZ call - * or initialisation for static carveout. */ - PHYS_HEAP *psFWMainPhysHeap; - PHYS_HEAP *psFWCfgPhysHeap; - PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_OS_SUPPORTED]; - - IMG_UINT32 ui32RegisteredPhysHeaps; - PHYS_HEAP **papsRegisteredPhysHeaps; - - /* PHYS_HEAP Mapping table to the platform's physical memory heap(s) - * used by this device. The physical heaps are created based on - * the PHYS_HEAP_CONFIG data from the platform's system layer at device - * creation time. 
- * - * Contains PVRSRV_PHYS_HEAP_LAST entries for all the possible physical heaps allowed in the design. - * It allows the system layer PhysHeaps for the device to be identified for use in creating new PMRs. - * See PhysHeapCreatePMR() + /* When virtualisation support is enabled the Firmware heaps of virtualised + * drivers can be entirely premapped into the Fw's VA space, during init + * or during runtime on explicit request from Guest drivers. */ + PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_DRIVERS_SUPPORTED]; + + /* Head of the physical heap list. Tracks PhysHeap objects created from + * the PHYS_HEAP_CONFIG definitions supplied by the system layer at + * device creation time. There could be 1 or more and varies from system + * to system. + */ + struct _PHYS_HEAP_ *psPhysHeapList; + POS_LOCK hPhysHeapLock; + + /* The apsPhysHeap array is a mapping table to the system's, often fewer, + * physical memory heaps defined for this device. It contains + * PVRSRV_PHYS_HEAP_LAST entries, one for each possible physical + * heaps allowed in the design. Each PhysHeap in the design is acquired + * and stored in the mapping table during device create. Fall-back logic + * is employed to ensure a valid heap is always found from the set defined + * in the system layer for the device. Responsibility for this is shared + * between the common layer (PhysHeapInitDeviceHeaps) and sub-device + * layer (pfnPhysMemDeviceHeapsInit). + * It is used in the PhysMem module to create PMRs from a given PhysHeap + * of memory. See PhysHeapCreatePMR() */ PHYS_HEAP *apsPhysHeap[PVRSRV_PHYS_HEAP_LAST]; IMG_UINT32 ui32UserAllocHeapCount; -#if defined(SUPPORT_AUTOVZ) - /* Phys Heap reserved for storing the MMU mappings of firmware. The - * memory backing up this Phys Heap must persist across driver or OS - * reboots. Swapped into psMMUPhysHeap above when setting up FW MemCtx. - */ - PHYS_HEAP *psFwMMUReservedPhysHeap; -#endif - /* Flag indicating if the firmware has been initialised during the * 1st boot of the Host driver according to the AutoVz life-cycle. */ IMG_BOOL bAutoVzFwIsUp; +#if defined(SUPPORT_AUTOVZ) + IMG_BOOL bAutoVzAllowGPUPowerdown; +#endif + /* Flags indicating VM state and if PVZ calls originating from it are valid */ - IMG_UINT32 ui32VmState; + IMG_UINT32 ui32VmState; struct _PVRSRV_DEVICE_NODE_ *psNext; struct _PVRSRV_DEVICE_NODE_ **ppsThis; @@ -471,16 +544,28 @@ typedef struct _PVRSRV_DEVICE_NODE_ IMG_HANDLE hAppHintDbgReqNotify; IMG_HANDLE hPhysHeapDbgReqNotify; - PVRSRV_DEF_PAGE sDummyPage; + /* Device MMU common module can support one other larger page size + * (e.g. 16KB) in addition to the default OS page size (often 4KB). + * If supported, device code will store the size here as a log2 value + * during device creation/registration. A 0 value will imply it is + * not supported. + */ + IMG_UINT32 ui32Non4KPageSizeLog2; + + PVRSRV_DEF_PAGE sScratchPage; PVRSRV_DEF_PAGE sDevZeroPage; - POSWR_LOCK hMemoryContextPageFaultNotifyListLock; + /* Lock protects access to sMemoryContextPageFaultNotifyListHead and + * per memory context DEVMEMINT_CTX::sProcessNotifyListHead lists. 
*/ + POSWR_LOCK hPageFaultNotifyLock; DLLIST_NODE sMemoryContextPageFaultNotifyListHead; - /* System DMA capability */ - IMG_BOOL bHasSystemDMA; + /* System DMA channels */ + IMG_UINT32 ui32RefCountDMA; IMG_HANDLE hDmaTxChan; IMG_HANDLE hDmaRxChan; + POS_LOCK hDmaTxLock; + POS_LOCK hDmaRxLock; #if defined(PDUMP) /* @@ -513,9 +598,6 @@ typedef struct _PVRSRV_DEVICE_NODE_ #endif -#if defined(SUPPORT_VALIDATION) - POS_LOCK hValidationLock; -#endif /* Members for linking which connections are open on this device */ POS_LOCK hConnectionsLock; /*!< Lock protecting sConnections */ @@ -529,6 +611,28 @@ typedef struct _PVRSRV_DEVICE_NODE_ PVRSRV_DEVICE_DEBUG_INFO sDebugInfo; IMG_BOOL bEnablePFDebug; /*!< EnablePageFaultDebug AppHint setting for device */ + + DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread associated with the device */ + ATOMIC_T i32NumCleanupItems; /*!< Number of cleanup thread work items. Includes items being freed. */ +#if defined(SUPPORT_PMR_DEFERRED_FREE) + /* Data for the deferred freeing of a PMR physical pages for a given device */ + DLLIST_NODE sPMRZombieList; /*!< List of PMRs to free */ + POS_LOCK hPMRZombieListLock; /*!< List lock */ + IMG_UINT32 uiPMRZombieCount; /*!< Number of elements in the list */ + IMG_UINT32 uiPMRZombieCountInCleanup; /*!< Number of elements in cleanup items */ +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + ATOMIC_T eFrozen; /*< Frozen / Unfrozen indicator */ + IMG_HANDLE hDeviceThreadEvObj; /*< Event Object for Freeze indicator */ + IMG_HANDLE hDeviceFreezeThaw; /*< Event handle for Freeze/Thaw */ + POS_LOCK hFreezeThawLock; /*< Freeze/Thaw lock */ + ATOMIC_T iFreezeCount; /*< Number of blocked on frozen tasks */ + ATOMIC_T iTotalFreezes; /*< Total number of times device frozen */ + ATOMIC_T iThreadsActive; /*< Number of threads active on this device */ + IMG_UINT64 ui64LastDeviceOffTimestamp; /* Last device power off timestamp */ + IMG_UINT64 ui64LastDeviceOffHostTimestampNs; /* Last device power off host timestamp */ +#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) + IMG_BOOL bGPUWorkPeriodFTraceEnabled; +#endif } PVRSRV_DEVICE_NODE; /* @@ -548,6 +652,27 @@ PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions); +void PVRSRVDeviceSetState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_STATE eNewDevState); + +#define PVRSRVIsStatusRecoverable(eStatus) \ + (((eStatus == PVRSRV_DEVICE_HEALTH_STATUS_DEAD)) ? \ + IMG_FALSE : IMG_TRUE) + +#if defined(SUPPORT_PMR_DEFERRED_FREE) || defined(SUPPORT_MMU_DEFERRED_FREE) +/* Determines if a 32-bit `uiCurrent` counter advanced to or beyond + * `uiRequired` value. The function takes into consideration that the + * counter could have wrapped around. */ +static INLINE IMG_BOOL PVRSRVHasCounter32Advanced(IMG_UINT32 uiCurrent, + IMG_UINT32 uiRequired) +{ + return uiCurrent >= uiRequired ? + /* ... with the counter wrapped around ... + * There can't be ~4 billion transactions completed, so consider wrapped */ + (((uiCurrent - uiRequired) > 0xF0000000UL) ? IMG_FALSE : IMG_TRUE) : + /* There can't be ~4 billion transactions pending, so consider wrapped */ + (((uiRequired - uiCurrent) > 0xF0000000UL) ? 
IMG_TRUE : IMG_FALSE); +} +#endif #endif /* DEVICE_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_heapcfg.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_heapcfg.h index bf49a07cbefd..3b8c0aa08921 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_heapcfg.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_heapcfg.h @@ -49,16 +49,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "pvrsrv_error.h" -/* - * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID - */ -#define RGX_HEAP_4KB_PAGE_SHIFT (12U) -#define RGX_HEAP_16KB_PAGE_SHIFT (14U) -#define RGX_HEAP_64KB_PAGE_SHIFT (16U) -#define RGX_HEAP_256KB_PAGE_SHIFT (18U) -#define RGX_HEAP_1MB_PAGE_SHIFT (20U) -#define RGX_HEAP_2MB_PAGE_SHIFT (21U) - struct _PVRSRV_DEVICE_NODE_; struct _CONNECTION_DATA_; struct _DEVMEMINT_HEAP_; diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_history_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_history_server.h index 26dc7e2b8826..81fe702371ac 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_history_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_history_server.h @@ -149,10 +149,15 @@ typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_ typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_ { IMG_UINT32 ui32NumResults; + IMG_UINT64 ui64SearchCount; /* result 0 is the newest */ DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS]; } DEVICEMEM_HISTORY_QUERY_OUT; +void DevicememHistoryDumpRecordStats(PVRSRV_DEVICE_NODE *psDevNode, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server.h index 65eba6d11d86..8e000c663970 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server.h @@ -58,18 +58,9 @@ typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT; typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP; typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION; -typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING; +typedef struct _DEVMEMXINT_RESERVATION_ DEVMEMXINT_RESERVATION; typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY; - -/* Function prototypes for obsolete Devmem bridge calls kept - * to minimise compatibility impact. 
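
Looking back at PVRSRVHasCounter32Advanced() added in the device.h hunk, the 0xF0000000 threshold is what makes the comparison tolerant of a 32-bit wrap of the power-off counter. A small illustrative check, assuming the helper is visible to the caller (it is only compiled under SUPPORT_PMR_DEFERRED_FREE or SUPPORT_MMU_DEFERRED_FREE):

    static IMG_BOOL ExampleCounterWrapCheck(void)
    {
        /* Wrapped case: uiRequired was recorded just before the 32-bit wrap,
         * uiCurrent just after it. The apparent pending distance
         * (uiRequired - uiCurrent) exceeds 0xF0000000, so the helper treats
         * the counter as having wrapped and reports IMG_TRUE (advanced). */
        IMG_BOOL bAdvanced = PVRSRVHasCounter32Advanced(0x00000010U, 0xFFFFFFF0U);

        /* Non-wrapped, still pending: uiCurrent is only slightly behind
         * uiRequired, so the helper reports IMG_FALSE (not yet advanced). */
        IMG_BOOL bPending = PVRSRVHasCounter32Advanced(5U, 10U);

        return (IMG_BOOL)(bAdvanced && !bPending); /* expected: IMG_TRUE */
    }
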
- */ -PVRSRV_ERROR DevmemCompatReserve2(PMR *psPMR); -PVRSRV_ERROR DevmemCompatReserve4(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); -PVRSRV_ERROR DevmemCompatReserve1(PMR *psPMR); -PVRSRV_ERROR DevmemCompatReserve3(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); - /* * DevmemServerGetImportHandle() * @@ -110,28 +101,6 @@ PVRSRV_ERROR DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, IMG_HANDLE *phPrivData); -/* - * DevmemIntAllocDefBackingPage - * - * This function allocates default backing page and initializes it - * with a given default value - * - */ -PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_DEF_PAGE *psDefPage, - IMG_INT uiInitValue, - IMG_CHAR *pcDefPageName, - IMG_BOOL bInitPage); -/* - * DevmemIntFreeDefBackingPage - * - * Frees a given page - */ -void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_DEF_PAGE *psDefPage, - IMG_CHAR *pcDefPageName); - - /* * DevmemIntCtxCreate() * @@ -197,9 +166,6 @@ PVRSRV_ERROR DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, IMG_UINT32 uiHeapConfigIndex, IMG_UINT32 uiHeapIndex, - IMG_DEV_VIRTADDR sHeapBaseAddr, - IMG_DEVMEM_SIZE_T uiHeapLength, - IMG_UINT32 uiLog2DataPageSize, DEVMEMINT_HEAP **ppsDevmemHeapPtr); /* * DevmemIntHeapDestroy() @@ -219,148 +185,295 @@ DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap); IMG_DEV_VIRTADDR DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap); -/* - * DevmemIntMapPMR() - * - * Maps the given PMR to the virtual range previously allocated with - * DevmemIntReserveRange() - * - * If appropriate, the PMR must have had its physical backing committed, as - * this call will call into the MMU code to set up the page tables for this - * allocation, which shall in turn request the physical addresses from the - * PMR. Alternatively, the PMR implementation can choose to do so off the - * the back of the "lock" callback, which it will receive as a result - * (indirectly) of this call. - * - * This function makes no promise w.r.t. the circumstances that it can be - * called, and these would be "inherited" from the implementation of the PMR. - * For example if the PMR "lock" callback causes pages to be pinned at that - * time (which may cause scheduling or disk I/O etc.) then it would not be - * legal to "Map" the PMR in a context where scheduling events are disallowed. - * - * If you call DevmemIntMapPMR() (and the call succeeds) then you are promising - * that you shall later call DevmemIntUnmapPMR() - */ -PVRSRV_ERROR -DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, - DEVMEMINT_RESERVATION *psReservation, - PMR *psPMR, - PVRSRV_MEMALLOCFLAGS_T uiMapFlags, - DEVMEMINT_MAPPING **ppsMappingPtr); -/* - * DevmemIntUnmapPMR() - * - * Reverses the mapping caused by DevmemIntMapPMR() - */ +/*************************************************************************/ /*! + * @Function DevmemIntReserveRange() + * @Description Reserves a number of virtual addresses starting sReservationVAddr + * and continuing until sReservationVAddr + uiVirtualSize - 1. + * + * If you call DevmemIntReserveRange() (and the call succeeds) + * then you are promising that you shall later call DevmemIntUnreserveRange() + * + * @Input psConnectionData The connection data from the bridge. Used + * to determine where the call to this function + * originated from. + * @Input psDeviceNode The device node (unused). + * @Input psDevmemHeap The virtual heap the DevVAddr is within. + * @Input sReservationVAddr The first virtual address of the range. 
+ * @Input uiVirtualSize The number of bytes in the virtual range. + * @Input uiFlags Mem alloc flags + * @Output ppsReservationPtr A pointer to the created reservation. + * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ PVRSRV_ERROR -DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping); +DevmemIntReserveRange(CONNECTION_DATA *psConnectionData, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEMINT_RESERVATION **ppsReservationPtr); -/* DevmemIntMapPages() +/*************************************************************************/ /*! + * @Function DevmemIntUnreserveRange() + * @Description Unreserves the specified virtual range. In the case that the + * virtual range has not been unmapped, it will be unmapped. + * If any references are held on the reservation PVRSRV_ERROR_RETRY + * will be returned. * - * Maps an arbitrary amount of pages from a PMR to a reserved range + * @Input psDevmemReservation The reservation to unreserve * - * @input psReservation Reservation handle for the range - * @input psPMR PMR that is mapped - * @input ui32PageCount Number of consecutive pages that are - * mapped - * @input ui32PhysicalPgOffset Logical offset in the PMR - * @input uiFlags Mapping flags - * @input sDevVAddrBase Virtual address base to start the - * mapping from - */ + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ PVRSRV_ERROR -DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation, - PMR *psPMR, - IMG_UINT32 ui32PageCount, - IMG_UINT32 ui32PhysicalPgOffset, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddrBase); +DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation); -/* DevmemIntUnmapPages() +/*************************************************************************/ /*! + * @Function DevmemIntMapPMR * - * Unmaps an arbitrary amount of pages from a reserved range + * @Description Maps the given PMR to the virtual range previously reserved with + * DevmemIntReserveRange(). When calling this function, the reservation + * must be valid, and not mapped. Additionally, the PMRs logical + * size and the reservations virtual size must be equal. * - * @input psReservation Reservation handle for the range - * @input sDevVAddrBase Virtual address base to start from - * @input ui32PageCount Number of consecutive pages that are - * unmapped - */ + * If appropriate, the PMR must have had its physical backing + * committed, as this call will call into the MMU code to set + * up the page tables for this allocation, which shall in turn + * request the physical addresses from the PMR. Alternatively, + * the PMR implementation can choose to do so off the back of + * the "lock" callback, which it will receive as a result + * (indirectly) of this call. + * + * If you call DevmemIntMapPMR() (and the call succeeds) then you + * are promising that you shall later call DevmemIntUnmapPMR() + * + * @Input psReservation The reservation the PMR will be mapped into. + * @Input psPMR The PMR to be mapped. 
+ * + * @Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ PVRSRV_ERROR -DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation, - IMG_DEV_VIRTADDR sDevVAddrBase, - IMG_UINT32 ui32PageCount); +DevmemIntMapPMR(DEVMEMINT_RESERVATION *psReservation, PMR *psPMR); -/* - * DevmemIntReserveRange() +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +/*************************************************************************/ /*! + * @Function DevmemIntRemapPageInPMR + * + * @Description Distributes calls to the MMU module to remap a given PMR + * page offset into all associated mappings. + * + * @Input psPMR The PMR to be mapped. + * @Input psMappingListHead The mapping node list head where nodes are + * associated with the PMR via calls to + * PMRLinkGPUMapping. + * Expected type: + * DLLIST_NODE list head from the PMR + * (sGpuMappingListHead) + * @Input ui32LogicalPgOffset The logical page offset into the + * PMR and reservation. + * + * @Return PVRSRV_ERROR failure code. + * PVRSRV_ERROR_DEVICEMEM_REJECT_REMAP_REQUEST can be returned + * if remap is not possible on the given page offset. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntRemapPageInPMR(PMR *psPMR, DLLIST_NODE *psMappingListHead, IMG_UINT32 ui32LogicalPgOffset); +#endif + +/*************************************************************************/ /*! + * @Function DevmemIntUnmapPMR() * - * Indicates that the specified range should be reserved from the given heap. + * @Description Unmaps a previously mapped virtual range. * - * In turn causes the page tables to be allocated to cover the specified range. + * @Input psReservation The virtual range to unmap. * - * If you call DevmemIntReserveRange() (and the call succeeds) then you are - * promising that you shall later call DevmemIntUnreserveRange() - */ + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ PVRSRV_ERROR -DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, - IMG_DEV_VIRTADDR sAllocationDevVAddr, - IMG_DEVMEM_SIZE_T uiAllocationSize, - DEVMEMINT_RESERVATION **ppsReservationPtr); -/* - * DevmemIntUnreserveRange() - * - * Undoes the state change caused by DevmemIntReserveRage() - */ +DevmemIntUnmapPMR(DEVMEMINT_RESERVATION *psReservation); + + +/*************************************************************************/ /*! + * @Function DevmemIntReserveRangeAndMapPMR() + * + * @Description Reserve (with DevmemIntReserveRange), and map a virtual range + * to a PMR (with DevmemIntMapPMR). + * + * @Input psConnectionData The connection data from the bridge. Used + * to determine where the call to this function + * originated from. + * @Input psDeviceNode The device node. + * @Input psDevmemHeap The virtual heap DevVAddr is within. + * @Input sReservationVAddr The first virtual address of the range. + * @Input uiVirtualSize The number of bytes in the virtual range. + * @Input psPMR The PMR to be mapped. + * @Input uiFlags Mem alloc flags + * @Output ppsReservation A pointer to the created reservation. 
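
Annotation on the reworked devicemem_server.h API above: mapping is now an explicit reserve/map pair keyed on the reservation object, with no separate DEVMEMINT_MAPPING handle. A minimal lifecycle sketch under the signatures shown in this hunk; ExampleMapBuffer() and its abbreviated error handling are illustrative only, and the caller is assumed to hold a PMR whose logical size equals uiVirtualSize.

    static PVRSRV_ERROR ExampleMapBuffer(CONNECTION_DATA *psConnection,
                                         PVRSRV_DEVICE_NODE *psDevNode,
                                         DEVMEMINT_HEAP *psHeap,
                                         IMG_DEV_VIRTADDR sVAddr,
                                         IMG_DEVMEM_SIZE_T uiVirtualSize,
                                         PMR *psPMR,
                                         PVRSRV_MEMALLOCFLAGS_T uiFlags)
    {
        DEVMEMINT_RESERVATION *psRsrv;
        PVRSRV_ERROR eError;

        /* Reserve the virtual range first. */
        eError = DevmemIntReserveRange(psConnection, psDevNode, psHeap,
                                       sVAddr, uiVirtualSize, uiFlags, &psRsrv);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* Map the PMR into the reservation (no separate mapping handle). */
        eError = DevmemIntMapPMR(psRsrv, psPMR);
        if (eError != PVRSRV_OK)
        {
            (void) DevmemIntUnreserveRange(psRsrv);
            return eError;
        }

        /* ... use the mapping ... then tear down in reverse order. Note that
         * DevmemIntUnreserveRange() may return PVRSRV_ERROR_RETRY while
         * references are still held on the reservation. */
        (void) DevmemIntUnmapPMR(psRsrv);
        return DevmemIntUnreserveRange(psRsrv);
    }
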
+ * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ PVRSRV_ERROR -DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation); +DevmemIntReserveRangeAndMapPMR(CONNECTION_DATA *psConnectionData, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEMINT_RESERVATION **ppsReservation); /*************************************************************************/ /*! -@Function DevmemIntChangeSparse -@Description Changes the sparse allocations of a PMR by allocating and freeing - pages and changing their corresponding CPU and GPU mappings. - -@input psDevmemHeap Pointer to the heap we map on -@input psPMR The PMR we want to map -@input ui32AllocPageCount Number of pages to allocate -@input pai32AllocIndices The logical PMR indices where pages will - be allocated. May be NULL. -@input ui32FreePageCount Number of pages to free -@input pai32FreeIndices The logical PMR indices where pages will - be freed. May be NULL. -@input uiSparseFlags Flags passed in to determine which kind - of sparse change the user wanted. - See devicemem_typedefs.h for details. -@input uiFlags Memalloc flags for this virtual range. -@input sDevVAddrBase The base address of the virtual range of - this sparse allocation. -@input sCpuVAddrBase The CPU base address of this allocation. - May be 0 if not existing. -@Return PVRSRV_ERROR failure code + * @Function DevmemIntChangeSparse + * @Description Changes the sparse allocations of a PMR by allocating and freeing + * pages and changing their corresponding GPU mapping. + * + * Prior to calling this function DevmemIntMapPMR + * or DevmemIntReserveRangeAndMapPMR must be used. + * + * @Input psReservation The reservation that the PMR is mapped to. + * @Input ui32AllocPageCount Number of pages to allocate + * @Input pai32AllocIndices The logical PMR indices where pages will + * be allocated. May be NULL. + * @Input ui32FreePageCount Number of pages to free + * @Input pai32FreeIndices The logical PMR indices where pages will + * be freed. May be NULL. + * @Input uiSparseFlags Flags passed in to determine which kind + * of sparse change the user wanted. + * See devicemem_typedefs.h for details. + * @Return PVRSRV_ERROR */ /**************************************************************************/ PVRSRV_ERROR -DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, - PMR *psPMR, - IMG_UINT32 ui32AllocPageCount, +DevmemIntChangeSparse(IMG_UINT32 ui32AllocPageCount, IMG_UINT32 *pai32AllocIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 *pai32FreeIndices, SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_DEV_VIRTADDR sDevVAddrBase, - IMG_UINT64 sCpuVAddrBase); + DEVMEMINT_RESERVATION *psReservation); -/* - * DevmemIntFlushDevSLCRange() +PVRSRV_ERROR +DevmemIntGetReservationData(DEVMEMINT_RESERVATION* psReservation, PMR** ppsPMR, IMG_DEV_VIRTADDR* psDevVAddr); + +/*************************************************************************/ /*! + * @Function DevmemXIntReserveRange() + * @Description Indicates that the specified range should be reserved from the + * given heap. * - * Flush specified device context's virtual address range from SLC. - */ + * In turn causes the page tables to be allocated to cover the + * specified range. 
+ * + * If you call DevmemIntReserveRange() (and the call succeeds) + * then you are promising that you shall later call + * DevmemIntUnreserveRange(). + * + * @Input psDevmemHeap Pointer to the heap the reservation is made + * on + * @Input sReservationVAddr Virtual address of the reservation + * @Input uiVirtualSize Size of the reservation (in bytes) + * @Input ppsRsrv Return pointer to the reservation object + * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemXIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sReservationVAddr, + IMG_DEVMEM_SIZE_T uiVirtualSize, + DEVMEMXINT_RESERVATION **ppsRsrv); + +/*************************************************************************/ /*! + * @Function DevmemXIntUnreserveRange() + * @Description Undoes the state change caused by DevmemXIntReserveRage() + * + * @Input psRsrv Reservation handle for the range + * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemXIntUnreserveRange(DEVMEMXINT_RESERVATION *psRsrv); + +/*************************************************************************/ /*! +@Function DevmemIntReservationAcquire +@Description Acquire a reference to the provided device memory reservation. + Prevents releasing of the reservation if external device + resource components still require it. +@Return IMG_TRUE if referenced and IMG_FALSE in case of error +*/ /**************************************************************************/ +IMG_BOOL +DevmemIntReservationAcquire(DEVMEMINT_RESERVATION *psDevmemReservation); + +/*************************************************************************/ /*! +@Function DevmemIntReservationRelease +@Description Release the reference to the provided device memory reservation. + Once these references have been released the + reservation is allowed to be released from UM. +@Return None. +*/ /**************************************************************************/ +void +DevmemIntReservationRelease(DEVMEMINT_RESERVATION *psDevmemReservation); + +/*************************************************************************/ /*! + * @Function DevmemXIntMapPages() + * @Description Maps an arbitrary amount of pages from a PMR to a reserved range + * and takes references on the PMR. + * + * @Input psRsrv Reservation handle for the range + * @Input psPMR PMR that is mapped + * @Input uiPageCount Number of consecutive pages that are + * mapped + * @Input uiPhysPageOffset Logical offset in the PMR (measured in pages) + * @Input uiFlags Mapping flags + * @Input uiVirtPageOffset Offset from the reservation base to start the + * mapping from (measured in pages) + * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ PVRSRV_ERROR -DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevmemCtx, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate); +DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv, + PMR *psPMR, + IMG_UINT32 uiPageCount, + IMG_UINT32 uiPhysPageOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiVirtPageOffset); + +/*************************************************************************/ /*! + * @Function DevmemXIntUnmapPages() + * @Description Unmaps an arbitrary amount of pages from a reserved range and + * releases references on associated PMRs. 
+ * + * @Input psRsrv Reservation handle for the range + * @Input uiVirtPageOffset Offset from the reservation base to start the + * mapping from (measured in pages) + * @Input uiPageCount Number of consecutive pages that are + * unmapped + * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiVirtPageOffset, + IMG_UINT32 uiPageCount); + +/*************************************************************************/ /*! + * @Function DevmemXIntMapVRangeToBackingPage() + * @Description Maps a kernel internal backing page to a reserved range. + * + * @Input psRsrv Reservation handle for the range + * @Input uiPageCount Number of consecutive pages that are + * mapped + * @Input uiFlags Mapping flags + * @Input uiVirtPageOffset Offset from the reservation base to start the + * mapping from (measured in pages) + * + * @Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemXIntMapVRangeToBackingPage(DEVMEMXINT_RESERVATION *psRsrv, + IMG_UINT32 uiPageCount, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiVirtPageOffset); /* - * DevmemIntRGXInvalidateFBSCTable() + * DevmemIntInvalidateFBSCTable() * * Invalidate selected FBSC table indices. * @@ -386,14 +499,11 @@ DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection, @Description Registers a PID to be notified when a page fault occurs on a specific device memory context. @Input psDevmemCtx The context to be notified about. -@Input ui32PID The PID of the process that would like to be - notified. @Input bRegister If true, register. If false, de-register. @Return PVRSRV_ERROR */ /**************************************************************************/ PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, - IMG_INT32 ui32PID, IMG_BOOL bRegister); /*************************************************************************/ /*! @@ -411,6 +521,26 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, IMG_DEV_VIRTADDR sFaultAddress); #if defined(PDUMP) +PVRSRV_ERROR +DevmemIntPDumpGetValidRegions(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + DLLIST_NODE *psValidRegionsList); + +void +DevmemIntPDumpFreeValidRegions(DLLIST_NODE *psValidRegionsList); + +PVRSRV_ERROR +DevmemIntPDumpSaveFromRegionListToFileVirtual(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + DLLIST_NODE *psDevAddrRegions, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags); + /* * DevmemIntPDumpSaveToFileVirtual() * @@ -418,7 +548,9 @@ PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, * the given virtual address. */ PVRSRV_ERROR -DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, +DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, IMG_DEV_VIRTADDR sDevAddrStart, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 uiArraySize, @@ -426,6 +558,21 @@ DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, IMG_UINT32 ui32FileOffset, IMG_UINT32 ui32PDumpFlags); +/* + * DevmemIntPDumpSaveToFileVirtualNoValidate() + * + * Writes out PDump "SAB" commands with the data found in memory at + * the given virtual address. Doesn't perform address validation. 
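
Annotation on the DevmemXInt* additions above: unlike the non-X path, a DEVMEMXINT_RESERVATION accepts page-granular mappings at arbitrary offsets, so several PMRs (or parts of one) can populate a single reserved range. A sketch of that flow, using the signatures declared in this hunk; the page counts and offsets are arbitrary example values and must fit inside uiVirtualSize.

    static PVRSRV_ERROR ExampleSparseMap(DEVMEMINT_HEAP *psHeap,
                                         IMG_DEV_VIRTADDR sVAddr,
                                         IMG_DEVMEM_SIZE_T uiVirtualSize,
                                         PMR *psPMR,
                                         PVRSRV_MEMALLOCFLAGS_T uiFlags)
    {
        DEVMEMXINT_RESERVATION *psRsrv;
        PVRSRV_ERROR eError;

        eError = DevmemXIntReserveRange(psHeap, sVAddr, uiVirtualSize, &psRsrv);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* Map 4 pages of the PMR, starting at PMR page 0, at reservation
         * page offset 8. */
        eError = DevmemXIntMapPages(psRsrv, psPMR, 4, 0, uiFlags, 8);
        if (eError == PVRSRV_OK)
        {
            /* ... use the mapping, then remove just those 4 pages ... */
            eError = DevmemXIntUnmapPages(psRsrv, 8, 4);
        }

        (void) DevmemXIntUnreserveRange(psRsrv);
        return eError;
    }
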
+ */ +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtualNoValidate(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags); + IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext); @@ -464,11 +611,64 @@ DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, IMG_UINT32 ui32PDumpFlags); #else /* PDUMP */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpGetValidRegions) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpGetValidRegions(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psDevmemCtx); + PVR_UNREFERENCED_PARAMETER(sDevAddrStart); + PVR_UNREFERENCED_PARAMETER(uiSize); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpFreeValidRegions) +#endif +static INLINE void +DevmemIntPDumpFreeValidRegions(DLLIST_NODE *psDevAddrRegions) +{ + PVR_UNREFERENCED_PARAMETER(psDevAddrRegions); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpSaveFromRegionListToFileVirtual) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpSaveFromRegionListToFileVirtual(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, + DLLIST_NODE *psDevAddrRegions, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psDevmemCtx); + PVR_UNREFERENCED_PARAMETER(psDevAddrRegions); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + + return PVRSRV_OK; +} + #ifdef INLINE_IS_PRAGMA #pragma inline(DevmemIntPDumpSaveToFileVirtual) #endif static INLINE PVRSRV_ERROR -DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, +DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevmemCtx, IMG_DEV_VIRTADDR sDevAddrStart, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 uiArraySize, @@ -476,6 +676,8 @@ DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, IMG_UINT32 ui32FileOffset, IMG_UINT32 ui32PDumpFlags) { + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(psDevmemCtx); PVR_UNREFERENCED_PARAMETER(sDevAddrStart); PVR_UNREFERENCED_PARAMETER(uiSize); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server_utils.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server_utils.h index ad85c07cdcf6..923bfc9a8f18 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server_utils.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/devicemem_server_utils.h @@ -47,8 +47,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pvrsrv_memallocflags.h" #include "pvrsrv.h" -static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, - PVRSRV_MEMALLOCFLAGS_T ulFlags, +static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_MEMALLOCFLAGS_T ulFlags, IMG_UINT32 *pui32Ret) { IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); @@ -79,8 +78,7 @@ static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, * This avoids errors on arm64 when uncached is turned into ordered device memory * and suffers from problems with unaligned access. */ - if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) && - !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) + if (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) { ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; } @@ -107,8 +105,7 @@ static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, return eError; } -static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, - PVRSRV_MEMALLOCFLAGS_T ulFlags, +static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_MEMALLOCFLAGS_T ulFlags, IMG_UINT32 *pui32Ret) { IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); @@ -139,8 +136,7 @@ static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNod * This avoids errors on arm64 when uncached is turned into ordered device memory * and suffers from problems with unaligned access. */ - if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) && - !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) + if (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) { ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; } @@ -167,21 +163,6 @@ static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNod return eError; } -static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, - PVRSRV_MEMALLOCFLAGS_T ulFlags) -{ - IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); - IMG_BOOL bRet = IMG_FALSE; - - PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); - - if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) - { - bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig); - } - return bRet; -} - static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T ulFlags) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/di_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/di_server.h index a68894b1b430..a797890c069a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/di_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/di_server.h @@ -43,7 +43,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef DI_SERVER_H #define DI_SERVER_H -#if defined(__linux__) +#if defined(__KERNEL__) && defined(__linux__) #include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) @@ -53,7 +53,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ #else #include -#endif /* __linux__ */ +#endif /* __KERNEL__ && __linux__ */ #include "di_common.h" #include "pvrsrv_error.h" @@ -181,8 +181,8 @@ void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData, * @Input psEntry pointer to OSDI_IMPL_ENTRY object * @Input pszFmt NUL-terminated format string */ -void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) - __printf(2, 3); +__printf(2, 3) +void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...); /*! @Function DIVPrintf * @@ -194,6 +194,7 @@ void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) * @Input pszFmt NUL-terminated format string * @Input pArgs vs_list object */ +__printf(2, 0) void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, va_list pArgs); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/dkp_impl.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/dkp_impl.h new file mode 100644 index 000000000000..b5f47f8910a2 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/dkp_impl.h @@ -0,0 +1,163 @@ +/**************************************************************************/ /*! +@File dkp_impl.h +@Title Functions for supporting the DRM Key Provider +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef DKP_IMPL_H +#define DKP_IMPL_H + +typedef IMG_HANDLE PVRDKF_DKP_HANDLE; +typedef PVRDKF_DKP_HANDLE * PPVRDKF_DKP_HANDLE; + +/*! @Function DKP_PFN_SHOW + * + * @Description + * + * Describes the function called by the DKF infrastructure to request the DKP + * outputs all the mandatory and optional keys it supports. Each + * key / value pair should be output one per line using the provided + * PVRDPFOutput routine. + * + * The function should check that the passed 'psDevNode' references the + * same device as that associated with the module-specific 'hPrivData' handle. + * If it doesn't, no data should be displayed. + * + * @Input psDevNode Pointer to device node for which data must be shown. + * @Input pid Process-ID for which data must be shown. + * @Input hPrivData Private data passed to DKF from DKP_Register. The DKP + * must use this within calls to the DKP_PFN_SHOW to limit + * the data key/value pairs to only those that are relevant + * to this instance. + */ +typedef void (DKP_PFN_SHOW)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, int pid, + IMG_HANDLE hPrivData); + +typedef IMG_UINT32 DKP_CONNECTION_FLAGS; + +#define DKP_CONNECTION_FLAG_SYNC BIT(0) +#define DKP_CONNECTION_FLAG_SERVICES BIT(1) + +#define DKP_CONNECTION_FLAG_INVALID IMG_UINT32_C(0) +#define DKP_CONNECTION_FLAG_ALL (DKP_CONNECTION_FLAG_SYNC | DKP_CONNECTION_FLAG_SERVICES) + +/* @Function PVRSRVRegisterDKP + * + * @Description + * Registers a DKP with the specified module's DKF entry. + * + * @Input hPrivData DKP Private data handle that the DKF will pass into + * the ShowPFN when called. Can be NULL if there is no + * DKP instance private data. + * @Input pszDKPName Provider name associated with the caller. + * @Input psShowPfn Function to be used when the fdinfo statistics + * are queried. + * @Input ui32Filter The connection types this DKP should be output + * on. + * @Output phDkpHandle Location to store generated handle created by this + * function. + * + * @Returns PVRSRV_ERROR + */ +PVRSRV_ERROR PVRSRVRegisterDKP(IMG_HANDLE hPrivData, + const char *pszDKPName, + DKP_PFN_SHOW *psShowPfn, + DKP_CONNECTION_FLAGS ui32Filter, + PPVRDKF_DKP_HANDLE phDkpHandle); + +/* @Function PVRSRVUnRegisterDKP + * + * @Description + * Removes a previously registered key from the device's DKF entry. + * + * @Input hPrivData Private data handle. + * @Input hDkpHandle Handle to the DKP's registered entry. + * Obtained from earlier PVRSRV_DKP_Register call. + * + * @Returns PVRSRV_ERROR + */ +PVRSRV_ERROR PVRSRVUnRegisterDKP(IMG_HANDLE hPrivData, + PVRDKF_DKP_HANDLE hDkpHandle); + + +/* @Function PVRDKPOutput + * Wrapper function which passes the printf-style arguments to the registered + * DKF output function associated with the registered DKP handle. + * + * @Input hPrivData Handle for associated DKP instance producing the output. + * @Input fmt printf-style format string + * + */ +void PVRDKPOutput(IMG_HANDLE hPrivData, const char *fmt, ...) 
__printf(2, 3); + + +#if !defined(__linux__) || defined(INTEGRITY_OS) || defined(__QNXNTO__) + +/* Stub routines follow */ +static inline PVRSRV_ERROR PVRSRVRegisterDKP(IMG_HANDLE hPrivData, + const char *pszDKPName, + DKP_PFN_SHOW *psShowPfn, + DKP_CONNECTION_FLAGS ui32Filter, + PPVRDKF_DKP_HANDLE phDkpHandle) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(pszDKPName); + PVR_UNREFERENCED_PARAMETER(psShowPfn); + PVR_UNREFERENCED_PARAMETER(phDkpHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +static inline PVRSRV_ERROR PVRSRVUnRegisterDKP(IMG_HANDLE hPrivData, + PVRDKF_DKP_HANDLE hDkpHandle) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(hDkpHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +static inline void PVRDKPOutput(IMG_HANDLE hPrivData, const char *fmt, ...) __printf(2, 3) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + PVR_UNREFERENCED_PARAMETER(fmt); +} +#endif /* !__linux__ || INTEGRITY_OS || __QNXNTO__ */ + +#endif /* DKP_IMPL_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/dma_km.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/dma_km.h index ce4d1317279b..9bf633dfd98f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/dma_km.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/dma_km.h @@ -77,7 +77,9 @@ PVRSRV_ERROR DmaTransfer(CONNECTION_DATA *psConnection, IMG_UINT32 uiFlags, PVRSRV_TIMELINE iUpdateTimeline); -PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode); -void PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode, + CONNECTION_DATA *psConnectionData); +void PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode, + CONNECTION_DATA *psConnectionData); #endif /* DMA_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/handle.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/handle.h index 92946b6fbb36..2c132901be1e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/handle.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/handle.h @@ -152,6 +152,7 @@ typedef struct _PROCESS_HANDLE_BASE_ { PVRSRV_HANDLE_BASE *psHandleBase; ATOMIC_T iRefCount; + uintptr_t uiHashKey; } PROCESS_HANDLE_BASE; extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; @@ -195,12 +196,16 @@ PVRSRV_ERROR PVRSRVHandleInit(void); PVRSRV_ERROR PVRSRVHandleDeInit(void); +/* Only called from sync_fallback_server.c */ +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void); +#endif -PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase); -PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, IMG_UINT64 ui64MaxBridgeTime); +PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(PROCESS_HANDLE_BASE **ppsBase); +PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime); void LockHandle(PVRSRV_HANDLE_BASE *psBase); void UnlockHandle(PVRSRV_HANDLE_BASE *psBase); + #endif /* !defined(HANDLE_API_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_impl.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_impl.h index 94305979d130..e965e536fb5e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_impl.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_impl.h @@ -82,6 +82,7 @@ typedef struct 
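The new dkp_impl.h above defines a provider-style interface: a DRM Key Provider implements DKP_PFN_SHOW, registers it through PVRSRVRegisterDKP() with a connection-type filter, and emits one key/value pair per line via PVRDKPOutput() when fdinfo statistics are queried. A minimal sketch of such a provider, assuming dkp_impl.h is included (all Example* names and the stored handle are hypothetical):

/* Hypothetical DKP provider. */
static PVRDKF_DKP_HANDLE ghExampleDkpHandle;

static void ExampleDkpShow(struct _PVRSRV_DEVICE_NODE_ *psDevNode, int pid,
                           IMG_HANDLE hPrivData)
{
	/* A real provider must first check that psDevNode matches the device
	 * associated with hPrivData and show nothing if it does not. */
	PVR_UNREFERENCED_PARAMETER(psDevNode);
	PVR_UNREFERENCED_PARAMETER(pid);

	/* One key/value pair per line, as the DKF expects. */
	PVRDKPOutput(hPrivData, "example-allocs: %u\n", 0);
}

static PVRSRV_ERROR ExampleDkpRegister(IMG_HANDLE hPrivData)
{
	return PVRSRVRegisterDKP(hPrivData, "example-provider", ExampleDkpShow,
	                         DKP_CONNECTION_FLAG_ALL, &ghExampleDkpHandle);
}

static void ExampleDkpUnregister(IMG_HANDLE hPrivData)
{
	(void) PVRSRVUnRegisterDKP(hPrivData, ghExampleDkpHandle);
}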
_HANDLE_IMPL_FUNCTAB_ /* Destroy handle base */ PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase); + } HANDLE_IMPL_FUNCTAB; PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs); diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_types.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_types.h index 795e2061809f..776ab86cc097 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_types.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/handle_types.h @@ -53,6 +53,7 @@ HANDLETYPE(DEVMEMINT_CTX) HANDLETYPE(DEVMEMINT_CTX_EXPORT) HANDLETYPE(DEVMEMINT_HEAP) HANDLETYPE(DEVMEMINT_RESERVATION) +HANDLETYPE(DEVMEMXINT_RESERVATION) HANDLETYPE(DEVMEMINT_MAPPING) HANDLETYPE(RGX_FW_MEMDESC) HANDLETYPE(RGX_FREELIST) @@ -63,9 +64,6 @@ HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT) HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT) HANDLETYPE(RGX_SERVER_RAY_CONTEXT) HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT) -#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION) -HANDLETYPE(RGX_SERVER_GPUMAP_CONTEXT) -#endif HANDLETYPE(SYNC_PRIMITIVE_BLOCK) HANDLETYPE(SYNC_RECORD_HANDLE) HANDLETYPE(PVRSRV_TIMELINE_SERVER) @@ -86,3 +84,4 @@ HANDLETYPE(DEV_PRIV_DATA) HANDLETYPE(MM_PLAT_CLEANUP) HANDLETYPE(WORKEST_RETURN_DATA) HANDLETYPE(DI_CONTEXT) +HANDLETYPE(PVR_HWPERF_SD) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/htbserver.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/htbserver.h index c30556c3501a..4aaa3dd35e08 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/htbserver.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/htbserver.h @@ -71,8 +71,20 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "pvrsrv_error.h" -#include "pvrsrv.h" -#include "htbuffer.h" +#include "htbuffer_types.h" + +#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) { (void)HTBLogSimple(SF, ## args); } } while (0) + +/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */ +#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff)) +#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff)) + +/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */ +#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff)) +#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff)) + +/* Host Trace Buffer name */ +#define HTB_STREAM_NAME "PVRHTBuffer" /************************************************************************/ /*! @Function HTBInit @@ -82,7 +94,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. number */ /**************************************************************************/ PVRSRV_ERROR -HTBInit(void); +HTBInit_Impl(void); /************************************************************************/ /*! @Function HTBDeInit @@ -92,25 +104,7 @@ HTBInit(void); number */ /**************************************************************************/ PVRSRV_ERROR -HTBDeInit(void); - -/*************************************************************************/ /*! 
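The htbserver.h hunk above moves the HTBLOGK wrapper and the pointer/64-bit splitting macros into this header; Host Trace arguments are 32-bit words, so pointers and 64-bit values are passed as high/low pairs. A minimal usage sketch, assuming htbserver.h is included (HTB_SF_EXAMPLE stands in for a real HTB_LOG_SFids entry and is not defined by this patch):

/* Hypothetical trace point logging a pointer and a device virtual address
 * as four 32-bit words. HTBLOGK() only calls HTBLogSimple() when the
 * event's log group is enabled. */
static void ExampleTraceMapping(void *pvPMR, IMG_UINT64 ui64DevVAddr)
{
	HTBLOGK(HTB_SF_EXAMPLE,
	        HTBLOG_PTR_BITS_HIGH(pvPMR), HTBLOG_PTR_BITS_LOW(pvPMR),
	        HTBLOG_U64_BITS_HIGH(ui64DevVAddr), HTBLOG_U64_BITS_LOW(ui64DevVAddr));
}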
- @Function HTBConfigureKM - @Description Configure or update the configuration of the Host Trace Buffer - - @Input ui32NameSize Size of the pszName string - - @Input pszName Name to use for the underlying data buffer - - @Input ui32BufferSize Size of the underlying data buffer - - @Return eError Internal services call returned eError error - number -*/ /**************************************************************************/ -PVRSRV_ERROR -HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName, - const IMG_UINT32 ui32BufferSize); - +HTBDeInit_Impl(void); /*************************************************************************/ /*! @Function HTBControlKM @@ -132,7 +126,7 @@ HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName, number */ /**************************************************************************/ PVRSRV_ERROR -HTBControlKM(const IMG_UINT32 ui32NumFlagGroups, +HTBControlKM_Impl(const IMG_UINT32 ui32NumFlagGroups, const IMG_UINT32 *aui32GroupEnable, const IMG_UINT32 ui32LogLevel, const IMG_UINT32 ui32EnablePID, @@ -148,7 +142,7 @@ HTBControlKM(const IMG_UINT32 ui32NumFlagGroups, */ /**************************************************************************/ void -HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker); +HTBSyncPartitionMarker_Impl(const IMG_UINT32 ui32Marker); /*************************************************************************/ /*! @Function HTBSyncPartitionMarkerRpt @@ -162,7 +156,7 @@ HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker); */ /**************************************************************************/ void -HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker, +HTBSyncPartitionMarkerRepeat_Impl(const IMG_UINT32 ui32Marker, const IMG_UINT64 ui64SyncOSTS, const IMG_UINT64 ui64SyncCRTS, const IMG_UINT32 ui32ClkSpeed); @@ -183,22 +177,12 @@ HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker, */ /**************************************************************************/ void -HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, +HTBSyncScale_Impl(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd); /*************************************************************************/ /*! - @Function HTBLogKM - @Description Record a Host Trace Buffer log event - - @Input PID The PID of the process the event is associated - with. This is provided as an argument rather - than querying internally so that events associated - with a particular process, but performed by - another can be logged correctly. - - @Input TID The TID of the process the event is associated with. - - @Input ui64TimeStamp The timestamp to be associated with this log event + @Function HTBLogSimple + @Description Record a Host Trace Buffer log event with implicit PID and Timestamp @Input SF The log event ID @@ -207,10 +191,16 @@ HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, @Return PVRSRV_OK Success. 
*/ /**************************************************************************/ -PVRSRV_ERROR -HTBLogKM(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF, - IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args); +IMG_INTERNAL PVRSRV_ERROR +HTBLogSimple_Impl(IMG_UINT32 SF, ...); + +/* DEBUG log group enable */ +#if !defined(HTB_DEBUG_LOG_GROUP) +#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */ +#define HTB_LOG_TYPE_DBG __BUILDERROR__ +#endif +#if defined(PVRSRV_ENABLE_HTB) /*************************************************************************/ /*! @Function HTBIsConfigured @Description Determine if HTB stream has been configured @@ -222,7 +212,29 @@ HTBLogKM(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids */ /**************************************************************************/ IMG_BOOL -HTBIsConfigured(void); +HTBIsConfigured_Impl(void); + +#define HTBIsConfigured HTBIsConfigured_Impl +#define HTBLogSimple HTBLogSimple_Impl +#define HTBSyncScale(bLogValues, ui64OSTS, ui64CRTS, ui32CalcClkSpd) \ + HTBSyncScale_Impl((bLogValues), (ui64OSTS), (ui64CRTS), (ui32CalcClkSpd)) +#define HTBSyncPartitionMarkerRepeat(ui32Marker, ui64SyncOSTS, ui64SyncCRTS, ui32ClkSpeed) \ + HTBSyncPartitionMarkerRepeat_Impl((ui32Marker), (ui64SyncOSTS), (ui64SyncCRTS), (ui32ClkSpeed)) +#define HTBSyncPartitionMarker(a) HTBSyncPartitionMarker_Impl((a)) +#define HTBControlKM(ui32NumFlagGroups, aui32GroupEnable, ui32LogLevel, ui32EnablePID, eLogMode, eOpMode) \ + HTBControlKM_Impl((ui32NumFlagGroups), (aui32GroupEnable), (ui32LogLevel), (ui32EnablePID), (eLogMode), (eOpMode)) +#define HTBInit() HTBInit_Impl() +#define HTBDeInit() HTBDeInit_Impl() +#else /* !PVRSRV_ENABLE_HTB) */ +#define HTBIsConfigured() IMG_FALSE +#define HTBLogSimple(SF, args...) PVRSRV_OK +#define HTBSyncScale(a, b, c, d) +#define HTBSyncPartitionMarkerRepeat(a, b, c, d) +#define HTBSyncPartitionMarker(a) +#define HTBControlKM(a, b, c, d, e, f) PVRSRV_OK +#define HTBDeInit() PVRSRV_OK +#define HTBInit() PVRSRV_OK +#endif /* PVRSRV_ENABLE_HTB */ #endif /* HTBSERVER_H */ /* EOF */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/mmu_common.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/mmu_common.h index 1802fa3b86a8..d0c5e9f03aed 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/mmu_common.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/mmu_common.h @@ -88,18 +88,18 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvr_notifier.h" #include "pvrsrv_error.h" #include "servicesext.h" - +#include "sync_prim_internal.h" /*! 
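When PVRSRV_ENABLE_HTB is not defined, the block above maps every HTB entry point to a constant, so callers never need an #ifdef of their own. A minimal sketch of a caller that builds in either configuration, assuming htbserver.h is included (the group value and the HTB_LOGMODE_ALLPID / HTB_OPMODE_DROPLATEST enumerators are assumed to come from htbuffer_types.h and are only illustrative):

/* Hypothetical helper enabling one log group; compiles down to almost
 * nothing when PVRSRV_ENABLE_HTB is not defined, because HTBIsConfigured()
 * and HTBControlKM() become constants. */
static PVRSRV_ERROR ExampleEnableHostTrace(IMG_UINT32 ui32GroupEnable)
{
	if (HTBIsConfigured())
	{
		return PVRSRV_OK; /* already set up */
	}

	return HTBControlKM(1, &ui32GroupEnable,
	                    0 /* log level */, 0 /* all PIDs */,
	                    HTB_LOGMODE_ALLPID, HTB_OPMODE_DROPLATEST);
}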
The level of the MMU */ typedef enum { - MMU_LEVEL_0 = 0, /* Level 0 = Page */ + MMU_LEVEL_0 = 0, /* Level 0 = Page */ - MMU_LEVEL_1, - MMU_LEVEL_2, - MMU_LEVEL_3, + MMU_LEVEL_1, /* Level 1 = PT */ + MMU_LEVEL_2, /* Level 2 = PD */ + MMU_LEVEL_3, /* Level 3 = PC */ MMU_LEVEL_LAST } MMU_LEVEL; @@ -108,30 +108,79 @@ typedef enum #define MMU_MAX_LEVEL 3 -typedef struct _MMU_LEVEL_DATA_ -{ - IMG_UINT32 ui32Index; - IMG_UINT32 ui32NumOfEntries; - IMG_CHAR const *psDebugStr; - IMG_UINT8 uiBytesPerEntry; - IMG_UINT64 ui64Address; -} MMU_LEVEL_DATA; -typedef enum _MMU_FAULT_TYPE_ +typedef struct _MMU_CONTEXT_ MMU_CONTEXT; + + +/* + P(C/D/T) Entry Config: + + MSB-----------------------------------------------LSB + | PT Addr: | variable PT ctrl | protection flags: | + | bits c+v | b bits | a bits | + ----------------------------------------------------- + where v is the variable page table modifier and is optional +*/ +/*! + Generic MMU entry description. This is used to describe PC, PD and PT entries. +*/ +typedef struct _MMU_PxE_CONFIG_ { - MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */ - MMU_FAULT_TYPE_PM, - MMU_FAULT_TYPE_NON_PM, -} MMU_FAULT_TYPE; + MMU_LEVEL ePxLevel; /*! MMU Level this config describes */ + const IMG_CHAR *pszPxLevelStr; /*! Px string for this level */ + IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */ -typedef struct _MMU_FAULT_DATA_ + IMG_UINT64 uiAddrMask; /*! Physical address mask */ + IMG_UINT8 uiAddrShift; /*! Physical address shift */ + IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */ + + IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */ + IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */ + + IMG_UINT64 uiProtMask; /*! Protection flags mask */ + IMG_UINT8 uiProtShift; /*! Protection flags shift */ + IMG_UINT64 uiPendingEnMask; /*! Entry pending bit mask */ + IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */ + IMG_UINT8 uiValidEnShift; /*! Entry valid bit shift */ + IMG_UINT64 uiParityBitMask; /*! Entry parity bit mask */ + IMG_UINT8 uiParityBitShift; /*! Entry parity bit shift */ +} MMU_PxE_CONFIG; + +/*! + MMU virtual address split +*/ +typedef struct _MMU_DEVVADDR_CONFIG_ { - MMU_LEVEL eTopLevel; - MMU_FAULT_TYPE eType; - MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST]; -} MMU_FAULT_DATA; + /*! Page catalogue index mask */ + IMG_UINT64 uiPCIndexMask; + /*! Page catalogue index shift */ + IMG_UINT8 uiPCIndexShift; + /*! Total number of PC entries */ + IMG_UINT32 uiNumEntriesPC; + + /*! Page directory mask */ + IMG_UINT64 uiPDIndexMask; + /*! Page directory shift */ + IMG_UINT8 uiPDIndexShift; + /*! Total number of PD entries */ + IMG_UINT32 uiNumEntriesPD; -struct _MMU_DEVVADDR_CONFIG_; + /*! Page table mask */ + IMG_UINT64 uiPTIndexMask; + /*! Page index shift */ + IMG_UINT8 uiPTIndexShift; + /*! Total number of PT entries */ + IMG_UINT32 uiNumEntriesPT; + + /*! Page offset mask */ + IMG_UINT64 uiPageOffsetMask; + /*! Page offset shift */ + IMG_UINT8 uiPageOffsetShift; + + /*! First virtual address mappable for this config */ + IMG_UINT64 uiOffsetInBytes; + +} MMU_DEVVADDR_CONFIG; /*! MMU device attributes. This structure is the interface between the generic @@ -139,22 +188,28 @@ struct _MMU_DEVVADDR_CONFIG_; */ typedef struct _MMU_DEVICEATTRIBS_ { + /*! Page and address type */ PDUMP_MMU_TYPE eMMUType; + /*! Name string of the PDUMP memory space to use */ IMG_CHAR *pszMMUPxPDumpMemSpaceName; - /*! The type of the top level object */ - MMU_LEVEL eTopLevel; - - /*! 
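The relocated MMU_DEVVADDR_CONFIG above describes how a device virtual address splits into page-catalogue, page-directory and page-table indices plus a page offset, each with its own mask and shift. A minimal decode sketch using those fields (the helper name is hypothetical):

/* Hypothetical helper extracting the PC/PD/PT indices and byte offset from
 * a device virtual address with a given MMU_DEVVADDR_CONFIG. */
static void ExampleDecodeDevVAddr(const MMU_DEVVADDR_CONFIG *psCfg,
                                  IMG_UINT64 ui64DevVAddr,
                                  IMG_UINT32 *pui32PC,
                                  IMG_UINT32 *pui32PD,
                                  IMG_UINT32 *pui32PT,
                                  IMG_UINT64 *pui64Offset)
{
	*pui32PC = (IMG_UINT32)((ui64DevVAddr & psCfg->uiPCIndexMask) >> psCfg->uiPCIndexShift);
	*pui32PD = (IMG_UINT32)((ui64DevVAddr & psCfg->uiPDIndexMask) >> psCfg->uiPDIndexShift);
	*pui32PT = (IMG_UINT32)((ui64DevVAddr & psCfg->uiPTIndexMask) >> psCfg->uiPTIndexShift);
	*pui64Offset = (ui64DevVAddr & psCfg->uiPageOffsetMask) >> psCfg->uiPageOffsetShift;
}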
Alignment requirement of the base object */ + /*! Alignment requirement of the top/base object */ IMG_UINT32 ui32BaseAlign; - /*! HW config of the base object */ + /*! HW config of the top/base object */ struct _MMU_PxE_CONFIG_ *psBaseConfig; /*! Address split for the base object */ const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig; + /*! Supported page sizes validation mask */ + IMG_UINT32 ui32ValidPageSizeMask; + +#if defined(PVRSRV_MMU_PARITY_ON_PTALLOC_AND_PTEUNMAP) + IMG_UINT64* pui64PrecomputedAllocParity[2]; +#endif + /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */ IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); /*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */ @@ -188,71 +243,9 @@ typedef struct _MMU_DEVICEATTRIBS_ IMG_HANDLE hGetPageSizeFnPriv; } MMU_DEVICEATTRIBS; -/*! - MMU virtual address split -*/ -typedef struct _MMU_DEVVADDR_CONFIG_ -{ - /*! Page catalogue index mask */ - IMG_UINT64 uiPCIndexMask; - /*! Page catalogue index shift */ - IMG_UINT8 uiPCIndexShift; - /*! Total number of PC entries */ - IMG_UINT32 uiNumEntriesPC; - /*! Page directory mask */ - IMG_UINT64 uiPDIndexMask; - /*! Page directory shift */ - IMG_UINT8 uiPDIndexShift; - /*! Total number of PD entries */ - IMG_UINT32 uiNumEntriesPD; - /*! Page table mask */ - IMG_UINT64 uiPTIndexMask; - /*! Page index shift */ - IMG_UINT8 uiPTIndexShift; - /*! Total number of PT entries */ - IMG_UINT32 uiNumEntriesPT; - /*! Page offset mask */ - IMG_UINT64 uiPageOffsetMask; - /*! Page offset shift */ - IMG_UINT8 uiPageOffsetShift; - /*! First virtual address mappable for this config */ - IMG_UINT64 uiOffsetInBytes; - -} MMU_DEVVADDR_CONFIG; - -/* - P(C/D/T) Entry Config: - - MSB-----------------------------------------------LSB - | PT Addr: | variable PT ctrl | protection flags: | - | bits c+v | b bits | a bits | - ----------------------------------------------------- - where v is the variable page table modifier and is optional -*/ -/*! - Generic MMU entry description. This is used to describe PC, PD and PT entries. -*/ -typedef struct _MMU_PxE_CONFIG_ -{ - IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */ - - IMG_UINT64 uiAddrMask; /*! Physical address mask */ - IMG_UINT8 uiAddrShift; /*! Physical address shift */ - IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */ - - IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */ - IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */ - - IMG_UINT64 uiProtMask; /*! Protection flags mask */ - IMG_UINT8 uiProtShift; /*! Protection flags shift */ - - IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */ - IMG_UINT8 uiValidEnShift; /*! Entry valid bit shift */ -} MMU_PxE_CONFIG; /* MMU Protection flags */ - /* These are specified generically and in a h/w independent way, and are interpreted at each level (PC/PD/PT) separately. */ @@ -276,8 +269,6 @@ typedef IMG_UINT32 MMU_PROTFLAGS_T; MMU_PROTFLAGS_DEVICE_MASK) -typedef struct _MMU_CONTEXT_ MMU_CONTEXT; - struct _PVRSRV_DEVICE_NODE_; struct _CONNECTION_DATA_; @@ -291,6 +282,31 @@ typedef struct _MMU_PAGESIZECONFIG_ IMG_UINT32 uiMaxRefCount; } MMU_PAGESIZECONFIG; +/*************************************************************************/ /*! +@Function MMU_InitDevice + +@Description Creates MMU device specific resources. 
+ +@Input psDevNode Device node of the device to create the + MMU context for + +@Return PVRSRV_OK if the initialisation process was successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR MMU_InitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + +/*************************************************************************/ /*! +@Function MMU_DeInitDevice + +@Description Clean-up MMU device specific resources. + +@Input psDevNode Device node of the device + +@Return None +*/ +/*****************************************************************************/ +void MMU_DeInitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + /*************************************************************************/ /*! @Function MMU_ContextCreate @@ -335,8 +351,6 @@ MMU_ContextDestroy(MMU_CONTEXT *psMMUContext); @Input uSize The size of the allocation -@Output puActualSize Actual size of allocation - @Input uiProtFlags Generic MMU protection flags @Input uDevVAddrAlignment Alignment requirement of the virtual @@ -351,7 +365,6 @@ MMU_ContextDestroy(MMU_CONTEXT *psMMUContext); PVRSRV_ERROR MMU_Alloc(MMU_CONTEXT *psMMUContext, IMG_DEVMEM_SIZE_T uSize, - IMG_DEVMEM_SIZE_T *puActualSize, IMG_UINT32 uiProtFlags, IMG_DEVMEM_SIZE_T uDevVAddrAlignment, IMG_DEV_VIRTADDR *psDevVAddr, @@ -406,6 +419,13 @@ MMU_Free(MMU_CONTEXT *psMMUContext, @Input uiLog2PageSize Log2 page size of the pages to map @Return PVRSRV_OK if the mapping was successful + PVRSRV_ERROR_RETRY if SUPPORT_LINUX_OSPAGE_MIGRATION is + enabled and migrate is in progress. Requests to MMU_MapPages + may return retry if the target PMR to map is + in migrate state. This is because in order for migrate + to complete higher locking primitives are required to give + way to the migrate path. Expect PVRSRV_ERROR_RETRY return + from this function if this give way is required. */ /*****************************************************************************/ PVRSRV_ERROR @@ -439,10 +459,10 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext, @Input uiMemAllocFlags Indicates if the unmapped regions need to be backed by dummy or zero page -@Return None +@Return PVRSRV_OK if the unmap operation was successful */ /*****************************************************************************/ -void +PVRSRV_ERROR MMU_UnmapPages(MMU_CONTEXT *psMMUContext, PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, IMG_DEV_VIRTADDR sDevVAddr, @@ -451,6 +471,29 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, IMG_UINT32 uiLog2PageSize, PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags); +/*************************************************************************/ /*! +@Function MMUX_MapVRangeToBackingPage + +@Description Map virtual range to a backing page in the MMU. + Used in DevmemX calls which don't tie a PMR to a virtual + range implicitly. + +@Input psMMUContext MMU context to operate on +@Input uiMappingFlags Memalloc flags for the mapping +@Input sDevVAddrBase Device virtual address of the 1st page +@Input ui32MapPageCount Number of pages to map +@Input uiLog2PageSize Log2 page size of the pages to map + +@Return PVRSRV_OK if the mapping was successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMUX_MapVRangeToBackingPage(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32MapPageCount, + IMG_UINT32 uiLog2HeapPageSize); + /*************************************************************************/ /*! 
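The updated MMU_MapPages contract above states that, with SUPPORT_LINUX_OSPAGE_MIGRATION enabled, a map request can return PVRSRV_ERROR_RETRY while the target PMR is in a migrate state, because the caller must release its higher-level locks and give way to the migrate path. A minimal sketch of the caller-side pattern (ExampleDoMap() and the lock helpers are hypothetical stand-ins; the real MMU_MapPages() parameter list is not reproduced here):

/* Hypothetical retry loop around a mapping attempt that may observe an
 * in-flight page migration. Dropping the locks between attempts is what
 * lets the migrate path complete. */
static PVRSRV_ERROR ExampleMapWithRetry(void)
{
	PVRSRV_ERROR eError;

	for (;;)
	{
		ExampleLockMappingState();
		eError = ExampleDoMap();      /* e.g. a call into MMU_MapPages() */
		ExampleUnlockMappingState();

		if (eError != PVRSRV_ERROR_RETRY)
		{
			break;
		}
	}

	return eError;
}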
@Function MMU_MapPMRFast @@ -469,13 +512,20 @@ MMU_UnmapPages(MMU_CONTEXT *psMMUContext, @Input uiMappingFlags Memalloc flags for the mapping -@Return PVRSRV_OK if the PMR was successfully mapped +@Return PVRSRV_OK if the PMR was successfully mapped. + PVRSRV_ERROR_RETRY if SUPPORT_LINUX_OSPAGE_MIGRATION is + enabled and migrate is in progress. Requests to MMU_MapPages + may return retry if the target PMR to map is + in migrate state. This is because in order for migrate + to complete higher locking primitives are required to give + way to the migrate path. Expect PVRSRV_ERROR_RETRY return + from this function if this give way is required. */ /*****************************************************************************/ PVRSRV_ERROR MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, IMG_DEV_VIRTADDR sDevVAddr, - const PMR *psPMR, + PMR *psPMR, IMG_DEVMEM_SIZE_T uiSizeBytes, PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, IMG_UINT32 uiLog2PageSize); @@ -494,15 +544,46 @@ MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, @Input uiLog2PageSize log2 size of the page -@Return None +@Return PVRSRV_OK if the PMR was successfully unmapped */ /*****************************************************************************/ -void +PVRSRV_ERROR MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, IMG_DEV_VIRTADDR sDevVAddrBase, IMG_UINT32 ui32PageCount, IMG_UINT32 uiLog2PageSize); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +/*************************************************************************/ /*! +@Function MMU_RemapPage + +@Description Remap a single page from a PMR in place. + +@Input psMMUContext MMU context to operate on + +@Input uiMappingFlags Memalloc flags for the mapping + +@Input sDevVAddr Device virtual address of the page + +@Input uiLog2HeapPageSize log2 size of the page + +@Input psOriginPMR PMR to remap + +@Input ui32LogicalPgOffset Page offset into the PMR of the page + to remap. + +@Return PVRSRV_OK if the PMR was successfully re-mapped +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_RemapPage(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 uiLog2HeapPageSize, + PMR *psOriginPMR, + IMG_UINT32 ui32LogicalPgOffset); +#endif + /*************************************************************************/ /*! @Function MMU_AcquireBaseAddr @@ -548,7 +629,7 @@ MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr); void MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext); -#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(SUPPORT_CUSTOM_OSID_EMISSION) /***********************************************************************************/ /*! @Function MMU_SetOSid @@ -609,18 +690,39 @@ void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); /*************************************************************************/ /*! -@Function MMU_ExchangeCacheFlags +@Function MMU_GetAndResetCacheFlags -@Description Exchange MMU context flags with specified value, atomically. +@Description Clears MMU context flags, atomically. @Input psMMUContext MMU context -@Input ui32CacheFlags Cache flags to set. - @Return Previous MMU context cache flags. 
*/ /*****************************************************************************/ -IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); +IMG_UINT32 MMU_GetAndResetCacheFlags(MMU_CONTEXT *psMMUContext); + +typedef struct _MMU_LEVEL_DATA_ +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32NumOfEntries; + const IMG_CHAR *psDebugStr; + IMG_UINT8 uiBytesPerEntry; + IMG_UINT64 ui64Address; +} MMU_LEVEL_DATA; + +typedef enum _MMU_FAULT_TYPE_ +{ + MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */ + MMU_FAULT_TYPE_PM, + MMU_FAULT_TYPE_NON_PM, +} MMU_FAULT_TYPE; + +typedef struct _MMU_FAULT_DATA_ +{ + MMU_LEVEL eTopLevel; + MMU_FAULT_TYPE eType; + MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST]; +} MMU_FAULT_DATA; /*************************************************************************/ /*! @Function MMU_CheckFaultAddress @@ -752,6 +854,22 @@ MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, } #endif /* PDUMP */ -void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext); +#if defined(SUPPORT_PMR_DEFERRED_FREE) +/*************************************************************************/ /*! +@Function MMU_CacheInvalidateKick + +@Description Kicks the Firmware to invalidate caches + +@Input psDeviceNode Pointer to the device node +@Output puiRequiredSyncValue Value the associated sync prim will be + updated to after the kick is finished + (parameter ignored if NULL) + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR MMU_CacheInvalidateKick(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 *puiRequiredSyncValue); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ #endif /* #ifdef MMU_COMMON_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/oskm_apphint.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/os_apphint.h similarity index 73% rename from drivers/gpu/drm/img/img-volcanic/services/server/include/oskm_apphint.h rename to drivers/gpu/drm/img/img-volcanic/services/server/include/os_apphint.h index 8740a6a597e1..93c9272498e5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/oskm_apphint.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/os_apphint.h @@ -1,7 +1,7 @@ /*************************************************************************/ /*! -@File oskm_apphint.h +@File os_apphint.h @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description OS-independent interface for retrieving KM apphints +@Description OS-independent interface for retrieving apphints @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -46,53 +46,53 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #else #include "services_client_porting.h" #endif -#if !defined(OSKM_APPHINT_H) -#define OSKM_APPHINT_H +#if !defined(OS_APPHINT_H) +#define OS_APPHINT_H -/*! Supplied to os_get_km_apphint_XXX() functions when the param/AppHint is +/*! Supplied to os_get_apphint_XXX() functions when the param/AppHint is * applicable to all devices and not a specific device. Typically used * for server-wide build and module AppHints. 
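The MMU_FAULT_DATA / MMU_LEVEL_DATA definitions moved above describe a fault as a top level plus one per-level record giving the entry index, entry count, debug string and raw entry value. A minimal dump sketch, assuming mmu_common.h is included (the helper is hypothetical; PVR_LOG and IMG_UINT64_FMTSPECx are used only for illustration):

/* Hypothetical debug dump of the per-level fault records, walking from the
 * reported top level down towards the page level. */
static void ExampleDumpFaultData(const MMU_FAULT_DATA *psFaultData)
{
	IMG_UINT32 ui32Level;

	for (ui32Level = (IMG_UINT32)psFaultData->eTopLevel;
	     ui32Level > (IMG_UINT32)MMU_LEVEL_0;
	     ui32Level--)
	{
		const MMU_LEVEL_DATA *psLevel = &psFaultData->sLevelData[ui32Level];

		PVR_LOG(("%s entry %u of %u = 0x%016" IMG_UINT64_FMTSPECx,
		         psLevel->psDebugStr, psLevel->ui32Index,
		         psLevel->ui32NumOfEntries, psLevel->ui64Address));
	}
}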
*/ #define APPHINT_NO_DEVICE (NULL) #if defined(__linux__) && !defined(DOXYGEN) -static INLINE IMG_UINT os_get_km_apphint_UINT32(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) { +static INLINE IMG_UINT os_get_apphint_UINT32(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) { return !pvr_apphint_get_uint32(device, id, pVal); } -static INLINE IMG_UINT os_get_km_apphint_UINT64(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) { +static INLINE IMG_UINT os_get_apphint_UINT64(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) { return !pvr_apphint_get_uint64(device, id, pVal); } -static INLINE IMG_UINT os_get_km_apphint_BOOL(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) { +static INLINE IMG_UINT os_get_apphint_BOOL(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) { return !pvr_apphint_get_bool(device, id, pVal); } -static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) { +static INLINE IMG_UINT os_get_apphint_STRING(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) { return !pvr_apphint_get_string(device, id, buffer, size); } -#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \ - os_get_km_apphint_UINT32(device, state, APPHINT_ID_ ## name, appHintDefault, value) +#define OSGetAppHintUINT32(device, state, name, appHintDefault, value) \ + os_get_apphint_UINT32(device, state, APPHINT_ID_ ## name, appHintDefault, value) -#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \ - os_get_km_apphint_UINT64(device, state, APPHINT_ID_ ## name, appHintDefault, value) +#define OSGetAppHintUINT64(device, state, name, appHintDefault, value) \ + os_get_apphint_UINT64(device, state, APPHINT_ID_ ## name, appHintDefault, value) -#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \ - os_get_km_apphint_BOOL(device, state, APPHINT_ID_ ## name, appHintDefault, value) +#define OSGetAppHintBOOL(device, state, name, appHintDefault, value) \ + os_get_apphint_BOOL(device, state, APPHINT_ID_ ## name, appHintDefault, value) -#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ - os_get_km_apphint_STRING(device, state, APPHINT_ID_ ## name, appHintDefault, buffer, size) +#define OSGetAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ + os_get_apphint_STRING(device, state, APPHINT_ID_ ## name, appHintDefault, buffer, size) -#define OSCreateKMAppHintState(state) \ +#define OSCreateAppHintState(state) \ PVR_UNREFERENCED_PARAMETER(state) -#define OSFreeKMAppHintState(state) \ +#define OSFreeAppHintState(state) \ PVR_UNREFERENCED_PARAMETER(state) #else /* defined(__linux__) && !defined(DOXYGEN) */ /**************************************************************************/ /*! -@def OSGetKMAppHintUINT32(state, name, appHintDefault, value) -@Description Interface for retrieval of uint32 km app hint. +@def OSGetAppHintUINT32(state, name, appHintDefault, value) +@Description Interface for retrieval of uint32 server app hint. 
For non-linux operating systems, this macro implements a call from server code to PVRSRVGetAppHint() declared in services_client_porting.h, effectively making it 'shared' code. @@ -103,12 +103,12 @@ static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void app hint is found. @Output value Pointer to returned app hint value. */ /**************************************************************************/ -#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \ +#define OSGetAppHintUINT32(device, state, name, appHintDefault, value) \ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) /**************************************************************************/ /*! -@def OSGetKMAppHintUINT64(state, name, appHintDefault, value) -@Description Interface for retrieval of uint64 km app hint. +@def OSGetAppHintUINT64(state, name, appHintDefault, value) +@Description Interface for retrieval of uint64 server app hint. For non-linux operating systems, this macro implements a call from server code to PVRSRVGetAppHint() declared in services_client_porting.h, effectively making it 'shared' code. @@ -119,12 +119,12 @@ static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void app hint is found. @Output value Pointer to returned app hint value. */ /**************************************************************************/ -#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \ +#define OSGetAppHintUINT64(device, state, name, appHintDefault, value) \ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) /**************************************************************************/ /*! -@def OSGetKMAppHintBOOL(state, name, appHintDefault, value) -@Description Interface for retrieval of IMG_BOOL km app hint. +@def OSGetAppHintBOOL(state, name, appHintDefault, value) +@Description Interface for retrieval of IMG_BOOL server app hint. For non-linux operating systems, this macro implements a call from server code to PVRSRVGetAppHint() declared in services_client_porting.h, effectively making it 'shared' code. @@ -135,12 +135,12 @@ static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void app hint is found. @Output value Pointer to returned app hint value. */ /**************************************************************************/ -#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \ +#define OSGetAppHintBOOL(device, state, name, appHintDefault, value) \ PVRSRVGetAppHint(state, # name, IMG_BOOL_TYPE, appHintDefault, value) /**************************************************************************/ /*! -@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) -@Description Interface for retrieval of string km app hint. +@def OSGetAppHintSTRING(state, name, appHintDefault, buffer, size) +@Description Interface for retrieval of string server app hint. For non-linux operating systems, this macro implements a call from server code to PVRSRVGetAppHint() declared in services_client_porting.h, effectively making it 'shared' code. @@ -152,35 +152,35 @@ static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void @Output buffer Buffer used to return app hint string. @Input size Size of the buffer. 
*/ /**************************************************************************/ -#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ +#define OSGetAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer)) /**************************************************************************/ /*! -@def OSCreateKMAppHintState(state) +@def OSCreateAppHintState(state) @Description Creates the app hint state. For non-linux operating systems, this macro implements a call from server code to PVRSRVCreateAppHintState() declared in services_client_porting.h, effectively making it 'shared' code. @Output state App hint state */ /**************************************************************************/ -#define OSCreateKMAppHintState(state) \ +#define OSCreateAppHintState(state) \ PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state) /**************************************************************************/ /*! -@def OSFreeKMAppHintState +@def OSFreeAppHintState @Description Free the app hint state. For non-linux operating systems, this macro implements a call from server code to PVRSRVCreateAppHintState() declared in services_client_porting.h, effectively making it 'shared' code. @Output state App hint state */ /**************************************************************************/ -#define OSFreeKMAppHintState(state) \ +#define OSFreeAppHintState(state) \ PVRSRVFreeAppHintState(IMG_SRV_UM, state) #endif /* defined(__linux__) */ -#endif /* OSKM_APPHINT_H */ +#endif /* OS_APPHINT_H */ /****************************************************************************** - End of file (oskm_apphint.h) + End of file (os_apphint.h) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/os_srvinit_param.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/os_srvinit_param.h deleted file mode 100644 index a4d77e381ff7..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/os_srvinit_param.h +++ /dev/null @@ -1,328 +0,0 @@ -/*************************************************************************/ /*! -@File -@Title Services initialisation parameters header -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Services initialisation parameter support for the Linux kernel. -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. 
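The rename from OSGetKMAppHint*/OSCreateKMAppHintState to OSGetAppHint*/OSCreateAppHintState above keeps the calling convention: create a state handle, query each hint against a per-hint default, then free the state. A minimal sketch, assuming os_apphint.h is included (the hint name is only an example of the APPHINT_ID_ naming scheme; APPHINT_NO_DEVICE is used because the hint is treated as server-wide here):

/* Hypothetical server-side query of a UINT32 AppHint via the renamed
 * interface. ui32Value is pre-loaded with the default so the function is
 * well defined even if the hint cannot be retrieved. */
static IMG_UINT32 ExampleReadAppHint(void)
{
	void *pvAppHintState = NULL;
	const IMG_UINT32 ui32Default = 0;
	IMG_UINT32 ui32Value = ui32Default;

	OSCreateAppHintState(&pvAppHintState);
	OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
	                   EnableFWContextSwitch, &ui32Default, &ui32Value);
	OSFreeAppHintState(pvAppHintState);

	return ui32Value;
}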
- -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ - -#ifndef OS_SRVINIT_PARAM_H -#define OS_SRVINIT_PARAM_H - -#if defined(__linux__) && defined(__KERNEL__) -#include "km_apphint.h" -#include "km_apphint_defs.h" - -/* Supplied to SrvInitParamGetXXX() functions when the param/AppHint is - * applicable to all devices and not a specific device. Typically used - * for server-wide build and module AppHints. - */ -#define INITPARAM_NO_DEVICE (NULL) - -#define SrvInitParamOpen() NULL -#define SrvInitParamClose(pvState) ((void)(pvState)) - -#define SrvInitParamGetBOOL(device, state, name, value) \ - ((void) pvr_apphint_get_bool(device, APPHINT_ID_ ## name, &value)) - -#define SrvInitParamGetUINT32(device, state, name, value) \ - ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) - -#define SrvInitParamGetUINT64(device, state, name, value) \ - ((void) pvr_apphint_get_uint64(device, APPHINT_ID_ ## name, &value)) - -#define SrvInitParamGetSTRING(device, state, name, buffer, size) \ - ((void) pvr_apphint_get_string(device, APPHINT_ID_ ## name, buffer, size)) - -#define SrvInitParamGetUINT32BitField(device, state, name, value) \ - ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) - -#define SrvInitParamGetUINT32List(device, state, name, value) \ - ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) - -#else /* defined(__linux__) && defined(__KERNEL__) */ - -#if defined(__cplusplus) -extern "C" { -#endif - -#include "img_defs.h" -#include "img_types.h" - -/*! Lookup item. */ -typedef struct -{ - const IMG_CHAR *pszValue; /*!< looked up name */ - IMG_UINT32 ui32Value; /*!< looked up value */ -} SRV_INIT_PARAM_UINT32_LOOKUP; - -/*************************************************************************/ /*! -@Brief SrvInitParamOpen - -@Description Establish a connection to the Parameter resource store which is - used to hold configuration information associated with the - server instance. - -@Return (void *) Handle to Parameter resource store to be used for - subsequent parameter value queries - -*/ /**************************************************************************/ -void *SrvInitParamOpen(void); - -/*************************************************************************/ /*! 
-@Brief SrvInitParamClose - -@Description Remove a pre-existing connection to the Parameter resource store - given by 'pvState' and release any temporary storage associated - with the 'pvState' mapping handle - -@Input pvState Handle to Parameter resource store - -*/ /**************************************************************************/ -void SrvInitParamClose(void *pvState); - -/*************************************************************************/ /*! -@Brief _SrvInitParamGetBOOL - -@Description Get the current BOOL value for parameter 'pszName' from the - Parameter resource store attached to 'pvState' - -@Input pvState Handle to Parameter resource store - -@Input pszName Name of parameter to look-up - -@Input pbDefault Value to return if parameter not found - -@Output pbValue Value of parameter 'pszName' or 'pbDefault' - if not found - -*/ /**************************************************************************/ -void _SrvInitParamGetBOOL( - void *pvState, - const IMG_CHAR *pszName, - const IMG_BOOL *pbDefault, - IMG_BOOL *pbValue -); - -/*! Get the BOOL value for parameter 'name' from the parameter resource store - * attached to 'state'. */ -#define SrvInitParamGetBOOL(device, state, name, value) \ - _SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value)) - -/*! Initialise FLAG type parameter identified by 'name'. */ -#define SrvInitParamInitFLAG(name, defval, unused) \ - static const IMG_BOOL __SrvInitParam_ ## name = defval; - -/*! Initialise BOOL type parameter identified by 'name'. */ -#define SrvInitParamInitBOOL(name, defval, unused) \ - static const IMG_BOOL __SrvInitParam_ ## name = defval; - -/*************************************************************************/ /*! -@Brief _SrvInitParamGetUINT32 - -@Description Get the current IMG_UINT32 value for parameter 'pszName' - from the Parameter resource store attached to 'pvState' - -@Input pvState Handle to Parameter resource store - -@Input pszName Name of parameter to look-up - -@Input pui32Default Value to return if parameter not found - -@Output pui32Value Value of parameter 'pszName' or - 'pui32Default' if not found - -*/ /**************************************************************************/ -void _SrvInitParamGetUINT32( - void *pvState, - const IMG_CHAR *pszName, - const IMG_UINT32 *pui32Default, - IMG_UINT32 *pui32Value -); - -/*! Get the UINT32 value for parameter 'name' from the parameter resource store - * attached to 'state'. */ -#define SrvInitParamGetUINT32(device, state, name, value) \ - _SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value)) - -/*! Initialise UINT32 type parameter identified by 'name'. */ -#define SrvInitParamInitUINT32(name, defval, unused) \ - static const IMG_UINT32 __SrvInitParam_ ## name = defval; - -/*! Initialise UINT64 type parameter identified by 'name'. */ -#define SrvInitParamInitUINT64(name, defval, unused) \ - static const IMG_UINT64 __SrvInitParam_ ## name = defval; - -/*! @cond Doxygen_Suppress */ -#define SrvInitParamUnreferenced(name) \ - PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name ) -/*! @endcond */ - -/*************************************************************************/ /*! 
-@Brief _SrvInitParamGetUINT32BitField - -@Description Get the current IMG_UINT32 bitfield value for parameter - 'pszBasename' from the Parameter resource store - attached to 'pvState' - -@Input pvState Handle to Parameter resource store - -@Input pszBaseName Bitfield parameter name to search for - -@Input uiDefault Default return value if parameter not found - -@Input psLookup Bitfield array to traverse - -@Input uiSize number of elements in 'psLookup' - -@Output puiValue Value of bitfield or 'uiDefault' if - parameter not found -*/ /**************************************************************************/ -void _SrvInitParamGetUINT32BitField( - void *pvState, - const IMG_CHAR *pszBaseName, - IMG_UINT32 uiDefault, - const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, - IMG_UINT32 uiSize, - IMG_UINT32 *puiValue -); - -/*! Initialise UINT32 bitfield type parameter identified by 'name' with - * 'inival' value and 'lookup' look up array. */ -#define SrvInitParamInitUINT32Bitfield(name, inival, lookup) \ - static IMG_UINT32 __SrvInitParam_ ## name = inival; \ - static SRV_INIT_PARAM_UINT32_LOOKUP * \ - __SrvInitParamLookup_ ## name = &lookup[0]; \ - static const IMG_UINT32 __SrvInitParamSize_ ## name = \ - ARRAY_SIZE(lookup); - -/*! Get the UINT32 bitfield value for parameter 'name' from the parameter - * resource store attached to 'state'. */ -#define SrvInitParamGetUINT32BitField(device, state, name, value) \ - _SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) - -/*************************************************************************/ /*! -@Brief _SrvInitParamGetUINT32List - -@Description Get the current IMG_UINT32 list value for the specified - parameter 'pszName' from the Parameter resource store - attached to 'pvState' - -@Input pvState Handle to Parameter resource store - -@Input pszName Parameter list name to search for - -@Input uiDefault Default value to return if 'pszName' is - not set within 'pvState' - -@Input psLookup parameter list to traverse - -@Input uiSize number of elements in 'psLookup' list - -@Output puiValue value of located list element or - 'uiDefault' if parameter not found - -*/ /**************************************************************************/ -void _SrvInitParamGetUINT32List( - void *pvState, - const IMG_CHAR *pszName, - IMG_UINT32 uiDefault, - const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, - IMG_UINT32 uiSize, - IMG_UINT32 *puiValue -); - -/*! Get the UINT32 list value for parameter 'name' from the parameter - * resource store attached to 'state'. */ -#define SrvInitParamGetUINT32List(device, state, name, value) \ - _SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) - -/*! Initialise UINT32 list type parameter identified by 'name' with - * 'defval' default value and 'lookup' look up list. */ -#define SrvInitParamInitUINT32List(name, defval, lookup) \ - static IMG_UINT32 __SrvInitParam_ ## name = defval; \ - static SRV_INIT_PARAM_UINT32_LOOKUP * \ - __SrvInitParamLookup_ ## name = &lookup[0]; \ - static const IMG_UINT32 __SrvInitParamSize_ ## name = \ - ARRAY_SIZE(lookup); - -/*************************************************************************/ /*! 
-@Brief _SrvInitParamGetSTRING - -@Description Get the contents of the specified parameter string 'pszName' - from the Parameter resource store attached to 'pvState' - -@Input pvState Handle to Parameter resource store - -@Input pszName Parameter string name to search for - -@Input psDefault Default string to return if 'pszName' is - not set within 'pvState' - -@Input size Size of output 'pBuffer' - -@Output pBuffer Output copy of 'pszName' contents or - copy of 'psDefault' if 'pszName' is not - set within 'pvState' - -*/ /**************************************************************************/ -void _SrvInitParamGetSTRING( - void *pvState, - const IMG_CHAR *pszName, - const IMG_CHAR *psDefault, - IMG_CHAR *pBuffer, - size_t size -); - -/*! Initialise STRING type parameter identified by 'name' with 'defval' default - * value. */ -#define SrvInitParamInitSTRING(name, defval, unused) \ - static const IMG_CHAR *__SrvInitParam_ ## name = defval; - -/*! Get the STRING value for parameter 'name' from the parameter resource store - * attached to 'state'. */ -#define SrvInitParamGetSTRING(device, state, name, buffer, size) \ - _SrvInitParamGetSTRING(state, # name, __SrvInitParam_ ## name, buffer, size) - -#if defined(__cplusplus) -} -#endif - -#endif /* defined(__linux__) && defined(__KERNEL__) */ - -#endif /* OS_SRVINIT_PARAM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/osconnection_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/osconnection_server.h index 28a6dd3825fb..88f7aab29dca 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/osconnection_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/osconnection_server.h @@ -60,6 +60,10 @@ PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection); +struct drm_file; + +struct drm_file* OSGetDRMFile(CONNECTION_DATA *psConnection); + #else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ #ifdef INLINE_IS_PRAGMA #pragma inline(OSConnectionPrivateDataInit) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/osdi_impl.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/osdi_impl.h index 65507feea59b..5be78d6c2b62 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/osdi_impl.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/osdi_impl.h @@ -43,7 +43,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef OSDI_IMPL_H #define OSDI_IMPL_H -#if defined(__linux__) +#if defined(__KERNEL__) && defined(__linux__) #include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) @@ -53,10 +53,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ #else #include -#endif /* __linux__ */ +#endif /* __KERNEL__ && __linux__ */ #include "di_common.h" #include "pvrsrv_error.h" +#include "img_defs.h" /*! Implementation callbacks. Those operations are performed on native * implementation handles. */ @@ -84,6 +85,7 @@ typedef struct OSDI_IMPL_ENTRY_CB * @Input pszFmt NUL-terminated format string * @Input va_list variable length argument list */ + __printf(2, 0) void (*pfnVPrintf)(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs); /*! 
@Function pfnPuts diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/osfunc.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/osfunc.h index 3e3361f19a43..67ed3d24895b 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/osfunc.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/osfunc.h @@ -59,7 +59,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #endif #endif -#if defined(__linux__) +#if defined(__linux__) && defined(__KERNEL__) #include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) @@ -270,41 +270,6 @@ typedef void (*PFN_MISR)(void *pvData); */ /**************************************************************************/ typedef void (*PFN_THREAD)(void *pvData); -/*************************************************************************/ /*! -@Function OSChangeSparseMemCPUAddrMap -@Description This function changes the CPU mapping of the underlying - sparse allocation. It is used by a PMR 'factory' - implementation if that factory supports sparse - allocations. -@Input psPageArray array representing the pages in the - sparse allocation -@Input sCpuVAddrBase the virtual base address of the sparse - allocation ('first' page) -@Input sCpuPAHeapBase the physical address of the virtual - base address 'sCpuVAddrBase' -@Input ui32AllocPageCount the number of pages referenced in - 'pai32AllocIndices' -@Input pai32AllocIndices list of indices of pages within - 'psPageArray' that we now want to - allocate and map -@Input ui32FreePageCount the number of pages referenced in - 'pai32FreeIndices' -@Input pai32FreeIndices list of indices of pages within - 'psPageArray' we now want to - unmap and free -@Input bIsLMA flag indicating if the sparse allocation - is from LMA or UMA memory -@Return PVRSRV_OK on success, a failure code otherwise. -*/ /**************************************************************************/ -PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, - IMG_UINT64 sCpuVAddrBase, - IMG_CPU_PHYADDR sCpuPAHeapBase, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices, - IMG_BOOL bIsLMA); - /*************************************************************************/ /*! @Function OSInstallMISR @Description Installs a Mid-level Interrupt Service Routine (MISR) @@ -350,6 +315,15 @@ PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData); */ /**************************************************************************/ PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData); +/*************************************************************************/ /*! +@Function OSSyncIRQ +@Description Wait for LISR to complete. If you use this function while + holding a resource which the IRQ handler also requires, + you will deadlock. +@Input ui32IRQ IRQ number +*/ /**************************************************************************/ +void OSSyncIRQ(IMG_UINT32 ui32IRQ); + /*************************************************************************/ /*! @Description Pointer to a function implementing debug dump of thread-specific data. @@ -603,9 +577,10 @@ typedef enum This is used to infer whether the virtual or physical address supplied to the OSCPUCacheXXXRangeKM functions can be omitted when called. 
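The new OSSyncIRQ() above waits for the LISR on a given IRQ to finish, with the explicit warning that holding a resource the handler also needs will deadlock. A minimal teardown-ordering sketch (ExampleDisableIrqSource() and the handler-state pointer are hypothetical; OSFreeMem() is assumed to be the usual services free routine):

/* Hypothetical quiesce sequence: stop new interrupts, wait for any LISR
 * already running on ui32IRQ to return, then free data the handler uses.
 * No lock taken by the LISR may be held across the OSSyncIRQ() call. */
static void ExampleQuiesceIrq(IMG_UINT32 ui32IRQ, void *pvHandlerState)
{
	ExampleDisableIrqSource();   /* stand-in for masking/uninstalling the LISR */
	OSSyncIRQ(ui32IRQ);          /* returns once no LISR instance is in flight */
	OSFreeMem(pvHandlerState);   /* now safe: the handler can no longer touch it */
}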
+@Input psDevNode device on which the allocation was made @Return OS_CACHE_OP_ADDR_TYPE */ /**************************************************************************/ -OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void); +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode); /*! CPU Cache attributes available for retrieval, DCache unless specified */ typedef enum _OS_CPU_CACHE_ATTRIBUTE_ @@ -682,6 +657,32 @@ IMG_PID OSGetCurrentClientProcessIDKM(void); *****************************************************************************/ IMG_CHAR *OSGetCurrentClientProcessNameKM(void); +/*************************************************************************/ /*! +@Function OSAcquireCurrentPPIDResourceRefKM +@Description Returns a unique process identifier for the current client + parent process (thread group) and takes a reference on it + (if required) to prevent it being freed/re-allocated. + This value may then be used as a unique reference to the + process rather than using the PID value which might be + reallocated to represent a further process on process + destruction. + Note that the value to be returned is an address relating to + the parent process (thread group) and not to just one thread. + It is the caller's responsibility to ensure the reference is + subsequently dropped (by calling OSReleasePPIDResourceRefKM()) + to allow it to be freed when no longer required. +@Return Address of a kernel resource allocated for the current client + parent process (thread group) +*****************************************************************************/ +uintptr_t OSAcquireCurrentPPIDResourceRefKM(void); + +/*************************************************************************/ /*! +@Function OSReleasePPIDResourceRefKM +@Description Drops a reference on the unique process identifier provided. +@Return None +*****************************************************************************/ +void OSReleasePPIDResourceRefKM(uintptr_t psPPIDResource); + /*************************************************************************/ /*! 
@Function OSGetCurrentClientThreadIDKM @Description Returns ID for current client thread @@ -1083,9 +1084,28 @@ void OSWriteMemoryBarrier(volatile void *hReadback); OSWriteMemoryBarrier(addr); \ } while (0) -#if defined(__linux__) && defined(__KERNEL__) && !defined(NO_HARDWARE) - #define OSReadUncheckedHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off))) - #define OSReadUncheckedHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off))) +#if defined(NO_HARDWARE) + /* OSReadHWReg and OSWriteHWReg operations are skipped to no-op in nohw builds */ + #define OSReadUncheckedHWReg32(addr, off) ((void)(addr), (void)(off), 0x30f73a4eU) +#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8 + /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */ + #define OSReadUncheckedHWReg64(addr, off) ((void)(addr), (void)(off), 0x5b376c9d30f73a4eUL) +#else + #define OSReadUncheckedHWReg64(addr, off) ((void)(addr), (void)(off), 0x5b376c9d30f73a4eULL) +#endif + + #define OSWriteUncheckedHWReg32(addr, off, val) ((void)(addr), (void)(off), (void)(val)) + #define OSWriteUncheckedHWReg64(addr, off, val) ((void)(addr), (void)(off), (void)(val)) + + #define OSReadHWReg32(addr, off) OSReadUncheckedHWReg32(addr, off) + #define OSReadHWReg64(addr, off) OSReadUncheckedHWReg64(addr, off) + + #define OSWriteHWReg32(addr, off, val) OSWriteUncheckedHWReg32(addr, off, val) + #define OSWriteHWReg64(addr, off, val) OSWriteUncheckedHWReg64(addr, off, val) + +#else + +#if defined(__linux__) && defined(__KERNEL__) #define OSReadUncheckedHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off))) /* Little endian support only */ @@ -1100,8 +1120,6 @@ void OSWriteMemoryBarrier(volatile void *hReadback); ); \ }) - #define OSWriteUncheckedHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off)) - #define OSWriteUncheckedHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off)) #define OSWriteUncheckedHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off)) /* Little endian support only */ #define OSWriteUncheckedHWReg64(addr, off, val) do \ @@ -1113,97 +1131,9 @@ void OSWriteMemoryBarrier(volatile void *hReadback); writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \ } while (0) - #define OSReadHWReg8(addr, off) ({PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); OSReadUncheckedHWReg8(addr, off);}) - #define OSReadHWReg16(addr, off) ({PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); OSReadUncheckedHWReg16(addr, off);}) - #define OSReadHWReg32(addr, off) ({PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); OSReadUncheckedHWReg32(addr, off);}) - #define OSReadHWReg64(addr, off) ({PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); OSReadUncheckedHWReg64(addr, off);}) - - #define OSWriteHWReg8(addr, off, val) do \ - { \ - PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); \ - OSWriteUncheckedHWReg8(addr, off, val); \ - } while (0) - - #define OSWriteHWReg16(addr, off, val) do \ - { \ - PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); \ - OSWriteUncheckedHWReg16(addr, off, val); \ - } while (0) - - #define OSWriteHWReg32(addr, off, val) do \ - { \ - PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); \ - OSWriteUncheckedHWReg32(addr, off, val); \ - } while (0) - - #define OSWriteHWReg64(addr, off, val) do \ - { \ - PVR_ASSERT((off) < RGX_HOST_SECURE_REGBANK_OFFSET); \ - OSWriteUncheckedHWReg64(addr, off, val); \ - } while (0) 
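/*
 * [Editorial note - illustrative sketch only, not part of the patch above.]
 * With this rework of osfunc.h, the "unchecked" accessors
 * (OSReadUncheckedHWReg32(), OSWriteUncheckedHWReg32(), ...) become the raw
 * readl()/writel() wrappers, while the checked OSReadHWReg32()/OSWriteHWReg32()
 * forms (re-added later in this hunk as static inlines) assert via
 * _NonSecureRegister() that the offset lies below RGX_HOST_SECURE_REGBANK_OFFSET.
 * The sketch below shows the intended caller split, assuming the usual
 * PVRSRV_RGXDEV_INFO register bases (pvRegsBaseKM for the regular bank and
 * pvSecureRegsBaseKM for the host-secure bank, as named in the diagnostic
 * message in this patch); the register offsets are hypothetical placeholders,
 * not real RGX register names.
 */
static IMG_UINT32 ExampleRegisterAccess(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	/* Regular register: the checked form runs the _NonSecureRegister()
	 * assertion before falling through to the unchecked accessor. */
	IMG_UINT32 ui32Status = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
	                                      EXAMPLE_STATUS_REG_OFFSET);

	/* Host-secure register: must use the unchecked accessor together with
	 * the secure register base; the checked form would trip the assert. */
	OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM,
	                        EXAMPLE_SECURE_CTRL_REG_OFFSET, 0x1U);

	return ui32Status;
}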
- -#elif defined(NO_HARDWARE) - /* OSReadHWReg operations skipped in no hardware builds */ - #define OSReadHWReg8(addr, off) ((void)(addr), 0x4eU) - #define OSReadHWReg16(addr, off) ((void)(addr), 0x3a4eU) - #define OSReadHWReg32(addr, off) ((void)(addr), 0x30f73a4eU) -#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8 - /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */ - #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eUL) -#else - #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eULL) -#endif - - #define OSWriteHWReg8(addr, off, val) - #define OSWriteHWReg16(addr, off, val) - #define OSWriteHWReg32(addr, off, val) - #define OSWriteHWReg64(addr, off, val) ((void)(val)) - - #define OSReadUncheckedHWReg8(addr, off) OSReadHWReg8(addr, off) - #define OSReadUncheckedHWReg16(addr, off) OSReadHWReg16(addr, off) - #define OSReadUncheckedHWReg32(addr, off) OSReadHWReg32(addr, off) - #define OSReadUncheckedHWReg64(addr, off) OSReadHWReg64(addr, off) - - #define OSWriteUncheckedHWReg8(addr, off, val) OSWriteHWReg8(addr, off, val) - #define OSWriteUncheckedHWReg16(addr, off, val) OSWriteHWReg16(addr, off, val) - #define OSWriteUncheckedHWReg32(addr, off, val) OSWriteHWReg32(addr, off, val) - #define OSWriteUncheckedHWReg64(addr, off, val) OSWriteHWReg64(addr, off, val) - -#else +#else /* defined(__linux__) && defined(__KERNEL__) */ /*************************************************************************/ /*! -@Function OSReadHWReg8 -@Description Read from an 8-bit memory-mapped device register. - The implementation should not permit the compiler to - reorder the I/O sequence. - The implementation should ensure that for a NO_HARDWARE - build the code does not attempt to read from a location - but instead returns a constant value. -@Input pvLinRegBaseAddr The virtual base address of the register - block. -@Input ui32Offset The byte offset from the base address of - the register to be read. -@Return The byte read. -*/ /**************************************************************************/ - IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); - -/*************************************************************************/ /*! -@Function OSReadHWReg16 -@Description Read from a 16-bit memory-mapped device register. - The implementation should not permit the compiler to - reorder the I/O sequence. - The implementation should ensure that for a NO_HARDWARE - build the code does not attempt to read from a location - but instead returns a constant value. -@Input pvLinRegBaseAddr The virtual base address of the register - block. -@Input ui32Offset The byte offset from the base address of - the register to be read. -@Return The word read. -*/ /**************************************************************************/ - IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); - -/*************************************************************************/ /*! -@Function OSReadHWReg32 +@Function OSReadUncheckedHWReg32 @Description Read from a 32-bit memory-mapped device register. The implementation should not permit the compiler to reorder the I/O sequence. @@ -1216,10 +1146,11 @@ void OSWriteMemoryBarrier(volatile void *hReadback); the register to be read. @Return The long word read. 
*/ /**************************************************************************/ - IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + IMG_UINT32 OSReadUncheckedHWReg32(volatile void *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset); /*************************************************************************/ /*! -@Function OSReadHWReg64 +@Function OSReadUncheckedHWReg64 @Description Read from a 64-bit memory-mapped device register. The implementation should not permit the compiler to reorder the I/O sequence. @@ -1232,42 +1163,10 @@ void OSWriteMemoryBarrier(volatile void *hReadback); the register to be read. @Return The long long word read. */ /**************************************************************************/ - IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); - -/*************************************************************************/ /*! -@Function OSWriteHWReg8 -@Description Write to an 8-bit memory-mapped device register. - The implementation should not permit the compiler to - reorder the I/O sequence. - The implementation should ensure that for a NO_HARDWARE - build the code does not attempt to write to a location. -@Input pvLinRegBaseAddr The virtual base address of the register - block. -@Input ui32Offset The byte offset from the base address of - the register to be written to. -@Input ui8Value The byte to be written to the register. -@Return None. -*/ /**************************************************************************/ - void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value); - -/*************************************************************************/ /*! -@Function OSWriteHWReg16 -@Description Write to a 16-bit memory-mapped device register. - The implementation should not permit the compiler to - reorder the I/O sequence. - The implementation should ensure that for a NO_HARDWARE - build the code does not attempt to write to a location. -@Input pvLinRegBaseAddr The virtual base address of the register - block. -@Input ui32Offset The byte offset from the base address of - the register to be written to. -@Input ui16Value The word to be written to the register. -@Return None. -*/ /**************************************************************************/ - void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value); - + IMG_UINT64 OSReadUncheckedHWReg64(volatile void *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset); /*************************************************************************/ /*! -@Function OSWriteHWReg32 +@Function OSWriteUncheckedHWReg32 @Description Write to a 32-bit memory-mapped device register. The implementation should not permit the compiler to reorder the I/O sequence. @@ -1280,10 +1179,12 @@ void OSWriteMemoryBarrier(volatile void *hReadback); @Input ui32Value The long word to be written to the register. @Return None. */ /**************************************************************************/ - void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + void OSWriteUncheckedHWReg32(volatile void *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value); /*************************************************************************/ /*! -@Function OSWriteHWReg64 +@Function OSWriteUncheckedHWReg64 @Description Write to a 64-bit memory-mapped device register. The implementation should not permit the compiler to reorder the I/O sequence. 
@@ -1297,21 +1198,73 @@ void OSWriteMemoryBarrier(volatile void *hReadback); register. @Return None. */ /**************************************************************************/ - void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value); + void OSWriteUncheckedHWReg64(volatile void *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset, + IMG_UINT64 ui64Value); + +#endif /* defined(__linux__) && defined(__KERNEL__) */ #if !defined(DOXYGEN) - #define OSReadUncheckedHWReg8(addr, off) OSReadHWReg8(addr, off) - #define OSReadUncheckedHWReg16(addr, off) OSReadHWReg16(addr, off) - #define OSReadUncheckedHWReg32(addr, off) OSReadHWReg32(addr, off) - #define OSReadUncheckedHWReg64(addr, off) OSReadHWReg64(addr, off) - - #define OSWriteUncheckedHWReg8(addr, off, val) OSWriteHWReg8(addr, off, val) - #define OSWriteUncheckedHWReg16(addr, off, val) OSWriteHWReg16(addr, off, val) - #define OSWriteUncheckedHWReg32(addr, off, val) OSWriteHWReg32(addr, off, val) - #define OSWriteUncheckedHWReg64(addr, off, val) OSWriteHWReg64(addr, off, val) -#endif + +#if defined(RGX_HOST_SECURE_REGBANK_OFFSET) && defined(DEBUG) +static INLINE bool _NonSecureRegister(IMG_UINT32 ui32Offset) +{ + const IMG_UINT32 ui32PerCoreRegBankSize = RGX_HOST_SECURE_REGBANK_OFFSET + RGX_HOST_SECURE_REGBANK_SIZE; + const IMG_UINT32 ui32RegOffsetInCoreBank = ui32Offset % ui32PerCoreRegBankSize; + + if (ui32RegOffsetInCoreBank < RGX_HOST_SECURE_REGBANK_OFFSET) + { + return true; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Secure register (0x%X) accessed incorrectly. " + "Call OSUncheckedHWReg instead with " + "psDevInfo->pvSecureRegsBaseKM as a register base.", + ui32RegOffsetInCoreBank)); + return false; + } + +} +#else +#define _NonSecureRegister(ui32Offset) (true) #endif + /* systems using real hardware must check that regular register + * operations don't attempt to access secure registers */ + static INLINE IMG_UINT32 OSReadHWReg32(volatile void __iomem *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset) + { + PVR_ASSERT(_NonSecureRegister(ui32Offset)); + return OSReadUncheckedHWReg32(pvLinRegBaseAddr, ui32Offset); + } + + static INLINE IMG_UINT64 OSReadHWReg64(volatile void __iomem *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset) + { + PVR_ASSERT(_NonSecureRegister(ui32Offset)); + return OSReadUncheckedHWReg64(pvLinRegBaseAddr, ui32Offset); + } + + static INLINE void OSWriteHWReg32(volatile void __iomem *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value) + { + PVR_ASSERT(_NonSecureRegister(ui32Offset)); + OSWriteUncheckedHWReg32(pvLinRegBaseAddr, ui32Offset, ui32Value); + } + + static INLINE void OSWriteHWReg64(volatile void __iomem *pvLinRegBaseAddr, + IMG_UINT32 ui32Offset, + IMG_UINT64 ui64Value) + { + PVR_ASSERT(_NonSecureRegister(ui32Offset)); + OSWriteUncheckedHWReg64(pvLinRegBaseAddr, ui32Offset, ui64Value); + } + +#endif /* !defined(DOXYGEN) */ +#endif /* defined(NO_HARDWARE) */ + /*************************************************************************/ /*! @Description Pointer to a timer callback function. @Input pvData Pointer to timer specific data. @@ -1365,7 +1318,7 @@ PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer); @Description Take action in response to an unrecoverable driver error @Return None */ /**************************************************************************/ -void OSPanic(void); +void __noreturn OSPanic(void); /*************************************************************************/ /*! 
@Function OSCopyToUser diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/ospvr_gputrace.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/ospvr_gputrace.h index fce4cda9add2..6bafc4d6042f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/ospvr_gputrace.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/ospvr_gputrace.h @@ -46,16 +46,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "img_defs.h" #include "rgx_hwperf.h" +#include "rgxdevice.h" #include "device.h" -#if defined(__linux__) - -void PVRGpuTraceEnqueueEvent( - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32FirmwareCtx, - IMG_UINT32 ui32ExternalJobRef, - IMG_UINT32 ui32InternalJobRef, - RGX_HWPERF_KICK_TYPE eKickType); +#if defined(__linux__) && defined(CONFIG_EVENT_TRACING) && \ + (defined(PVRSRV_TRACE_ROGUE_EVENTS) || \ + defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)) /* Early initialisation of GPU Trace events logic. * This function is called on *driver* initialisation. */ @@ -74,6 +70,9 @@ void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); /* Per-device initialisation of the GPU Trace resources */ PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); +/* TL Initialisation of FTrace */ +PVRSRV_ERROR PVRGpuTraceInitStream(PVRSRV_RGXDEV_INFO *psDevInfo); + /* Per-device cleanup for the GPU Trace resources. */ void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); @@ -82,10 +81,6 @@ PVRSRV_ERROR PVRGpuTraceSetEnabled( PVRSRV_DEVICE_NODE *psDeviceNode, IMG_BOOL bNewValue); -/* Returns IMG_TRUE if the gpu trace sub-system has been enabled (but not - * necessarily initialised). */ -IMG_BOOL PVRGpuTraceIsEnabled(void); - /* Performs some initialisation steps if the feature was enabled * on driver startup. 
*/ void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode); @@ -106,20 +101,7 @@ void PVRSRVGpuTraceWorkPeriodEventStatsUnregister( IMG_HANDLE hGpuWorkPeriodEventStats); #endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -#else /* defined(__linux__) */ - -static inline void PVRGpuTraceEnqueueEvent( - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32FirmwareCtx, - IMG_UINT32 ui32ExternalJobRef, - IMG_UINT32 ui32InternalJobRef, - RGX_HWPERF_KICK_TYPE eKickType) -{ - PVR_UNREFERENCED_PARAMETER(psDevNode); - PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef); - PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef); - PVR_UNREFERENCED_PARAMETER(eKickType); -} +#else /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) */ static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) { return PVRSRV_OK; @@ -140,6 +122,12 @@ static inline PVRSRV_ERROR PVRGpuTraceInitDevice( return PVRSRV_OK; } +static inline PVRSRV_ERROR PVRGpuTraceInitStream(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +} + static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) { PVR_UNREFERENCED_PARAMETER(psDeviceNode); @@ -154,11 +142,6 @@ static inline PVRSRV_ERROR PVRGpuTraceSetEnabled( return PVRSRV_OK; } -static inline IMG_BOOL PVRGpuTraceIsEnabled(void) -{ - return IMG_FALSE; -} - static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) { PVR_UNREFERENCED_PARAMETER(psDeviceNode); @@ -170,6 +153,33 @@ static inline void PVRGpuTraceDisableUfoCallback(void) {} static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {} static inline void PVRGpuTraceDisableFirmwareActivityCallback(void) {} -#endif /* defined(__linux__) */ +#endif /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) */ + +#if defined(__linux__) && defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) + +void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExternalJobRef, + IMG_UINT32 ui32InternalJobRef, + RGX_HWPERF_KICK_TYPE eKickType); + +#else + +static inline void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExternalJobRef, + IMG_UINT32 ui32InternalJobRef, + RGX_HWPERF_KICK_TYPE eKickType) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(ui32FirmwareCtx); + PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef); + PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef); + PVR_UNREFERENCED_PARAMETER(eKickType); +} + +#endif /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) */ #endif /* PVR_GPUTRACE_H_ */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_km.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_km.h index b9cb5f654a69..58713cdfa17d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_km.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_km.h @@ -78,11 +78,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#undef PDUMP_TRACE #if defined(PDUMP_TRACE) -#define PDUMP_HERE_VAR IMG_UINT32 here = 0; +#define PDUMP_HERE_VAR __maybe_unused IMG_UINT32 here = 0; #define PDUMP_HERE(a) { here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); } #define PDUMP_HEREA(a) { here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); } #else -#define PDUMP_HERE_VAR IMG_UINT32 here = 0; +#define PDUMP_HERE_VAR __maybe_unused IMG_UINT32 here = 0; #define PDUMP_HERE(a) here = (a); #define PDUMP_HEREA(a) here = (a); #endif @@ -90,6 +90,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 #define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 +/* Defines for CMD:SetParity's state word */ +#define PDUMP_SET_PARITY_STATE_WORD_VERSION_SHIFT (0U) +#define PDUMP_SET_PARITY_STATE_WORD_VERSION_MASK (0x00000007U) + +#define PDUMP_SET_PARITY_STATE_WORD_PARITY_SHIFT_SHIFT (3U) +#define PDUMP_SET_PARITY_STATE_WORD_PARITY_SHIFT_MASK (0x000001F8U) + +#define PDUMP_SET_PARITY_STATE_WORD_VA_PARITY_SHIFT (9U) +#define PDUMP_SET_PARITY_STATE_WORD_VA_PARITY_MASK (0x00000200U) + /* Invalid value for PDump block number */ #define PDUMP_BLOCKNUM_INVALID IMG_UINT32_MAX @@ -102,6 +112,7 @@ typedef enum _PDUMP_TRANSITION_EVENT_ PDUMP_TRANSITION_EVENT_BLOCK_FINISHED, /* Block mode event, current PDump-block has finished */ PDUMP_TRANSITION_EVENT_BLOCK_STARTED, /* Block mode event, new PDump-block has started */ PDUMP_TRANSITION_EVENT_RANGE_ENTERED, /* Transition into capture range */ + PDUMP_TRANSITION_EVENT_RANGE_APPEND, /* Append to an already entered capture range */ PDUMP_TRANSITION_EVENT_RANGE_EXITED, /* Transition out of capture range */ } PDUMP_TRANSITION_EVENT; @@ -213,6 +224,7 @@ typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION_FENCE_SYNC)(void *pvData, PDUMP_TRAN /* Shared across pdump_x files */ PVRSRV_ERROR PDumpInitCommon(void); void PDumpDeInitCommon(void); +PVRSRV_ERROR PDumpValidateUMFlags(PDUMP_FLAGS_T uiFlags); PVRSRV_ERROR PDumpReady(void); void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset, size_t *puiZeroPageSize, @@ -226,8 +238,8 @@ PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32Frame); PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE * psDeviceNode, - IMG_UINT32* pui32Frame); + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32Frame); PVRSRV_ERROR PDumpCommentKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32CommentSize, @@ -240,7 +252,8 @@ PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - IMG_UINT32 ui32MaxParamFileSize); + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout); PVRSRV_ERROR PDumpReg32(PVRSRV_DEVICE_NODE *psDeviceNode, @@ -641,13 +654,15 @@ PDumpWriteSymbAddress(PVRSRV_DEVICE_NODE *psDeviceNode, /* Register the connection with the PDump subsystem */ PVRSRV_ERROR -PDumpRegisterConnection(void *hSyncPrivData, +PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, + void *hSyncPrivData, PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, PDUMP_CONNECTION_DATA **ppsPDumpConnectionData); /* Unregister the connection with the PDump subsystem */ void -PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData); +PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, + PDUMP_CONNECTION_DATA *psPDumpConnectionData); /* Register for notification of PDump 
Transition into/out of capture range */ PVRSRV_ERROR @@ -813,10 +828,11 @@ PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode) #endif static INLINE PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32Frame) { PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(ui32Frame); return PVRSRV_OK; } @@ -827,9 +843,10 @@ PDumpSetFrameKM(CONNECTION_DATA *psConnection, static INLINE PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32* pui32Frame) + IMG_UINT32 *pui32Frame) { PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(pui32Frame); return PVRSRV_OK; } @@ -863,7 +880,8 @@ PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, - IMG_UINT32 ui32MaxParamFileSize) + IMG_UINT32 ui32MaxParamFileSize, + IMG_UINT32 ui32AutoTermTimeout) { PVR_UNREFERENCED_PARAMETER(psConnection); PVR_UNREFERENCED_PARAMETER(psDeviceNode); @@ -872,6 +890,7 @@ PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, PVR_UNREFERENCED_PARAMETER(ui32End); PVR_UNREFERENCED_PARAMETER(ui32Interval); PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize); + PVR_UNREFERENCED_PARAMETER(ui32AutoTermTimeout); return PVRSRV_OK; } @@ -1011,10 +1030,12 @@ PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, #pragma inline(PDumpRegisterConnection) #endif static INLINE PVRSRV_ERROR -PDumpRegisterConnection(void *hSyncPrivData, +PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, + void *hSyncPrivData, PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, PDUMP_CONNECTION_DATA **ppsPDumpConnectionData) { + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(hSyncPrivData); PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks); PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData); @@ -1026,8 +1047,10 @@ PDumpRegisterConnection(void *hSyncPrivData, #pragma inline(PDumpUnregisterConnection) #endif static INLINE void -PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData) +PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, + PDUMP_CONNECTION_DATA *psPDumpConnectionData) { + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); } diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_mmu.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_mmu.h index 4f3b2cc90b94..5d03e430febd 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_mmu.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_mmu.h @@ -99,6 +99,9 @@ PDumpMMUDumpPxEntries(PPVRSRV_DEVICE_NODE psDeviceNode, IMG_UINT64 uiPxEProtMask, IMG_UINT64 uiDataValidEnable, IMG_UINT32 ui32Flags, + IMG_UINT32 ui32VAParity, + IMG_UINT32 ui32ParityShift, + IMG_UINT64 ui64ParityMask, PDUMP_MMU_TYPE eMMUType); PVRSRV_ERROR diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_physmem.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_physmem.h index 94d73ada8e4b..bf38df94f3fa 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_physmem.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pdump_physmem.h @@ -75,10 +75,28 @@ PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_HANDLE *phHandlePtr, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR 
+PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + /* alignment is alignment of start of buffer _and_ + minimum contiguity - i.e. smallest allowable + page-size. */ + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_BOOL bInitialise, + IMG_UINT8 ui8InitValue, + IMG_HANDLE *phHandlePtr, + IMG_UINT32 ui32PDumpFlags); + PVRSRV_ERROR PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_HANDLE hPDumpAllocationInfoHandle); +PVRSRV_ERROR +PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hPDumpAllocationInfoHandle); + void PDumpMakeStringValid(IMG_CHAR *pszString, IMG_UINT32 ui32StrLen); @@ -97,7 +115,8 @@ PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, } static INLINE PVRSRV_ERROR -PDumpMalloc(const IMG_CHAR *pszDevSpace, +PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, + const IMG_CHAR *pszDevSpace, const IMG_CHAR *pszSymbolicAddress, IMG_UINT64 ui64Size, IMG_DEVMEM_ALIGN_T uiAlign, @@ -106,6 +125,7 @@ PDumpMalloc(const IMG_CHAR *pszDevSpace, IMG_HANDLE *phHandlePtr, IMG_UINT32 ui32PDumpFlags) { + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(pszDevSpace); PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress); PVR_UNREFERENCED_PARAMETER(ui64Size); @@ -118,8 +138,42 @@ PDumpMalloc(const IMG_CHAR *pszDevSpace, } static INLINE PVRSRV_ERROR -PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle) +PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_BOOL bInitialise, + IMG_UINT8 ui8InitValue, + IMG_HANDLE *phHandlePtr, + IMG_UINT32 ui32PDumpFlags) { + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(pszDevSpace); + PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress); + PVR_UNREFERENCED_PARAMETER(ui64Size); + PVR_UNREFERENCED_PARAMETER(uiAlign); + PVR_UNREFERENCED_PARAMETER(bInitialise); + PVR_UNREFERENCED_PARAMETER(ui8InitValue); + PVR_UNREFERENCED_PARAMETER(phHandlePtr); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR +PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR +PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); return PVRSRV_OK; } @@ -131,18 +185,6 @@ PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle) #define PMR_MEMSPACE_FMTSPEC "%s" #define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s" -#if defined(PDUMP) -#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui8InitValue, phHandlePtr) \ - PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui8InitValue, phHandlePtr, PDUMP_NONE) -#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \ - PDumpFree(hHandle) -#else -#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui8InitValue, phHandlePtr) \ - ((void)(*phHandlePtr=NULL)) -#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \ - ((void)(0)) -#endif /* PDUMP */ - PVRSRV_ERROR PDumpPMRWRW32(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszDevSpace, diff --git 
a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem.h index cee4ee10ea39..6e2469f73e65 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem.h @@ -75,6 +75,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. allocation is to be done @Input pszSymbolicAddress Symbolic name of the allocation @Input phHandlePtr PDUMP handle to the allocation +@Input uiPid PID of the process owning the allocation + (or PVR_SYS_ALLOC_PID if the allocation + belongs to the driver) @Output hMemHandle Handle to the allocated memory @Output psDevPhysAddr Device Physical address of allocated page @@ -91,6 +94,7 @@ DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, const IMG_CHAR *pszSymbolicAddress, IMG_HANDLE *phHandlePtr, #endif + IMG_PID uiPid, IMG_HANDLE hMemHandle, IMG_DEV_PHYADDR *psDevPhysAddr); @@ -177,37 +181,6 @@ PhysmemNewRamBackedPMR_direct(CONNECTION_DATA * psConnection, IMG_UINT32 ui32PDumpFlags, PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); -/* - * PhysmemNewRamBackedLockedPMR - * - * Same as function above but is additionally locking down the PMR. - * - * Get the physical memory and lock down the PMR directly, we do not want to - * defer the actual allocation to mapping time. - * - * In general the concept of on-demand allocations is not useful for - * allocations where we give the users the freedom to map and unmap memory at - * will. The user is not expecting their memory contents to suddenly vanish - * just because they unmapped the buffer. - * Even if they would know and be ok with it, we do not want to check for - * every page we unmap whether we have to unlock the underlying PMR. -*/ -PVRSRV_ERROR -PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - IMG_UINT32 uiLog2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 uiAnnotationLength, - const IMG_CHAR *pszAnnotation, - IMG_PID uiPid, - PMR **ppsPMRPtr, - IMG_UINT32 ui32PDumpFlags, - PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); - /*************************************************************************/ /*! @Function PhysmemImportPMR @Description Import PMR a previously exported PMR @@ -233,17 +206,6 @@ PhysmemImportPMR(CONNECTION_DATA *psConnection, PMR_LOG2ALIGN_T uiLog2Contig, PMR **ppsPMR); -/*************************************************************************/ /*! -@Function PVRSRVGetMaxPhysHeapCountKM -@Description Get the user accessible physical heap count -@Output puiPhysHeapCount user accessible physical heap count -@Return PVRSRV_OK if successful -*/ /**************************************************************************/ -PVRSRV_ERROR -PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 *puiPhysHeapCount); - /*************************************************************************/ /*! 
@Function PVRSRVGetDefaultPhysicalHeapKM @Description For the specified device, get the physical heap used for @@ -254,22 +216,23 @@ PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection, */ /**************************************************************************/ PVRSRV_ERROR PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_PHYS_HEAP *peHeap); + PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_PHYS_HEAP *peHeap); /*************************************************************************/ /*! -@Function PVRSRVGetHeapPhysMemUsageKM -@Description Get the memory usage statistics for all user accessible - physical heaps -@Input ui32PhysHeapCount Total user accessible physical heaps -@Output apPhysHeapMemStats Buffer to hold the memory statistics +@Function PVRSRVPhysHeapGetMemInfoKM +@Description Get the memory usage statistics for a given physical heap ID +@Input ui32PhysHeapCount Physical Heap count +@Input paePhysHeapID Array of Physical Heap ID's +@Output paPhysHeapMemStats Buffer to hold the memory statistics @Return PVRSRV_OK if successful */ /**************************************************************************/ PVRSRV_ERROR -PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32PhysHeapCount, - PHYS_HEAP_MEM_STATS *apPhysHeapMemStats); +PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP *paePhysHeapID, + PHYS_HEAP_MEM_STATS_V1 *paPhysHeapMemStats); /*************************************************************************/ /*! @Function PVRSRVPhysHeapGetMemInfoKM @@ -280,10 +243,61 @@ PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection, @Return PVRSRV_OK if successful */ /**************************************************************************/ PVRSRV_ERROR -PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32PhysHeapCount, - PVRSRV_PHYS_HEAP *paePhysHeapID, - PHYS_HEAP_MEM_STATS *paPhysHeapMemStats); +PVRSRVPhysHeapGetMemInfo2KM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32PhysHeapCount, + PVRSRV_PHYS_HEAP *paePhysHeapID, + PHYS_HEAP_MEM_STATS_V2 *paPhysHeapMemStats); + +/*************************************************************************/ /*! +@Function PhysMemValidateMappingTable +@Description Checks the PMR mapping table provided is valid (ie has + no entries with an index value outside the valid range + of indices for the allocation and has no repeated indices) + +@Input ui32TotalNumVirtChunks Total number of virtual chunks + the allocation has. +@Input ui32IndexCount Number of entries in the mapping + table. +@Input pui32MappingTable Mapping Table. +@Return PVRSRV_OK if parameters are valid. + PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY if mapping table + contains an index out of range or a + repeated index + PVRSRV_ERROR_OUT_OF_MEMORY if unable to allocate memory + for the index tracking table + (used internally in this function) +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysMemValidateMappingTable(IMG_UINT32 ui32TotalNumVirtChunks, + IMG_UINT32 ui32IndexCount, + const IMG_UINT32 *pui32MappingTable); + +/*************************************************************************/ /*! 
+@Function PhysMemValidateParams +@Description Checks the PMR creation parameters and adjusts them + if possible and necessary + +@Input psDevNode The associated device node. +@Input ui32NumPhysChunks Number of physical chunks. +@Input ui32NumVirtChunks Number of virtual chunks. +@Input pui32MappingTable Mapping Table. +@Input uiFlags Allocation flags. +@Input uiPid PID of current process. +@Inout puiLog2AllocPageSize Log2 of allocation page size. + May be adjusted. +@Inout puiSize Size of the allocation. + May be adjusted. +@Return PVRSRV_OK if parameters are valid. +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysMemValidateParams(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_PID uiPid, + IMG_UINT32 *puiLog2AllocPageSize, + IMG_DEVMEM_SIZE_T *puiSize); #endif /* SRVSRV_PHYSMEM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dlm.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dlm.h new file mode 100644 index 000000000000..7d18fd781f2f --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dlm.h @@ -0,0 +1,88 @@ +/*************************************************************************/ /*! +@File physmem_dlm.h +@Title Dedicated Local Memory allocator for Physical Memory Blocks +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for dedicated local memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PHYSMEM_DLM_H +#define PHYSMEM_DLM_H + +#include "img_types.h" +#include "physheap.h" +#include "physheap_config.h" +#include "device.h" + +/* PMBDestroy + * + * Destroys a given PMB used to represent a block of memory + * obtained from a DLM heap. + */ +void +PMBDestroy(PMB *psPMB); + +/* PMBGetAnnotation + * + * Obtain the annotation of the allocation represented + * by this PMB + */ +const IMG_CHAR * +PMBGetAnnotation(PMB *psPMB); + +/* DLM */ + +/*************************************************************************/ /*! +@Function PhysmemCreateHeapDLM +@Description Create and register new DLM heap. +@Input psDevNode Pointer to device node struct. +@Input uiPolicy Heap allocation policy flags +@Input psConfig Heap configuration. +@Input pszLabel Debug identifier label +@Output ppsPhysHeap Pointer to the created heap. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemCreateHeapDLM(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_POLICY uiPolicy, + PHYS_HEAP_CONFIG *psConfig, + IMG_CHAR *pszLabel, + PHYS_HEAP **ppsPhysHeap); + +#endif /* #ifndef PHYSMEM_DLM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dmabuf.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dmabuf.h index 99b5c3365fb0..1896204d3f6e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dmabuf.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_dmabuf.h @@ -47,6 +47,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include +#include "pvr_dma_resv.h" + #if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) #define __pvrsrv_defined_struct_enum__ #include @@ -59,14 +61,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pmr.h" -typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap, - struct dma_buf_attachment *psAttachment); +typedef void (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment); PVRSRV_ERROR PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, struct dma_buf_attachment *psAttachment, PFN_DESTROY_DMABUF_PMR pfnDestroy, PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_PID uiPid, IMG_DEVMEM_SIZE_T uiChunkSize, IMG_UINT32 ui32NumPhysChunks, IMG_UINT32 ui32NumVirtChunks, @@ -78,12 +81,29 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, struct dma_buf * PhysmemGetDmaBuf(PMR *psPMR); +struct dma_resv * +PhysmemGetDmaResv(PMR *psPMR); + +#if defined(SUPPORT_SECURE_ALLOC_KM) && defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) +struct dma_heap * +PhysmemGetDmaHeap(PMR *psPMR); + +void +PhysmemSetDmaHeap(PMR *psPMR, struct dma_heap *psDmaHeap); +#endif /* #if defined(SUPPORT_SECURE_ALLOC_KM) && defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) */ + PVRSRV_ERROR PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, PMR *psPMR, IMG_INT *piFd); +PVRSRV_ERROR +PhysmemExportGemHandle(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_UINT32 *puHandle); + PVRSRV_ERROR PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, @@ -95,17 +115,6 @@ PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, IMG_DEVMEM_SIZE_T *puiSize, IMG_DEVMEM_ALIGN_T *puiAlign); -PVRSRV_ERROR -PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_INT fd, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32NameSize, - const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], - PMR **ppsPMRPtr, - IMG_DEVMEM_SIZE_T *puiSize, - IMG_DEVMEM_ALIGN_T *puiAlign); - PVRSRV_ERROR PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ima.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ima.h new file mode 100644 index 000000000000..4744dacacb31 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ima.h @@ -0,0 +1,77 @@ +/**************************************************************************/ /*! +@File +@Title Header import memory allocator +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of memory management. This module is responsible for + implementing the function callbacks for local card memory when + used under a shared heap system. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef PHYSMEM_IMA_H +#define PHYSMEM_IMA_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "physheap.h" +#include "physheap_config.h" +#include "device.h" + +/*************************************************************************/ /*! +@Function PhysmemCreateHeapIMA +@Description Create and register new IMA heap with IMA specific details and + a DLM heap backing. +@Input psDevNode Pointer to device node struct. +@Input uiPolicy Heap allocation policy flags +@Input psConfig Heap configuration. +@Input pszLabel Debug identifier label +@Input psDLMHeap DLM heap backing this LMA heap. +@Input uiLog2PMBSize Log 2 of PMB Size in bytes supported by DLM heap. +@Output ppsPhysHeap Pointer to the created heap. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemCreateHeapIMA(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_POLICY uiPolicy, + PHYS_HEAP_CONFIG *psConfig, + IMG_CHAR *pszLabel, + PHYS_HEAP *psDLMHeap, + IMG_UINT32 uiLog2PMBSize, + PHYS_HEAP **ppsPhysHeap); + +#endif /* PHYSMEM_IMA_H_ */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_lma.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_lma.h index 8bf8b4e620e8..af42d8826dac 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_lma.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_lma.h @@ -49,23 +49,17 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "pvrsrv_error.h" #include "pvrsrv_memallocflags.h" +#include "device.h" /* services/server/include/ */ #include "pmr.h" #include "pmr_impl.h" -typedef IMG_UINT32 PHYSMEM_LMA_POLICY; - -#define PHYSMEM_LMA_POLICY_DEFAULT (0U) - -#define PHYSMEM_LMA_POLICY_ALLOC_ALLOW_NONCONTIG (1U) -#define PHYSMEM_LMA_POLOCY_ALLOC_ALLOW_NONCONTIG_MASK (1U) - /*************************************************************************/ /*! @Function PhysmemCreateHeapLMA @Description Create and register new LMA heap with LMA specific details. @Input psDevNode Pointer to device node struct. -@Input uiLMAPolicy LMA allocation policy flags +@Input uiPolicy Heap allocation policy flags @Input psConfig Heap configuration. 
@Input pszLabel Debug identifier label @Output ppsPhysHeap Pointer to the created heap. @@ -73,29 +67,9 @@ typedef IMG_UINT32 PHYSMEM_LMA_POLICY; */ /**************************************************************************/ PVRSRV_ERROR PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, - PHYSMEM_LMA_POLICY uiLMAPolicy, + PHYS_HEAP_POLICY uiPolicy, PHYS_HEAP_CONFIG *psConfig, IMG_CHAR *pszLabel, PHYS_HEAP **ppsPhysHeap); -/* - * PhysmemNewLocalRamBackedPMR - * - * This function will create a PMR using the local card memory and is OS - * agnostic. - */ -PVRSRV_ERROR -PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, - CONNECTION_DATA *psConnection, - IMG_DEVMEM_SIZE_T uiSize, - IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, - IMG_UINT32 *pui32MappingTable, - IMG_UINT32 uiLog2PageSize, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - const IMG_CHAR *pszAnnotation, - IMG_PID uiPid, - PMR **ppsPMRPtr, - IMG_UINT32 ui32PDumpFlags); - #endif /* #ifndef SRVSRV_PHYSMEM_LMA_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_osmem.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_osmem.h index fcdad2c00be0..5e76513477b6 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_osmem.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_osmem.h @@ -62,6 +62,21 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "connection_server.h" #include "physheap.h" +/*************************************************************************/ /*! +@Function PhysmemCreateHeapOSMEM +@Description Create and register new OSMEM heap with OSMEM specific details. +@Input psDevNode Pointer to device node struct. +@Input uiPolicy Heap allocation policy flags +@Input psConfig Heap configuration. +@Output ppsPhysHeap Pointer to the created heap. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemCreateHeapOSMEM(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_POLICY uiPolicy, + PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP **ppsPhysHeap); + /*************************************************************************/ /*! @Function PhysmemNewOSRamBackedPMR @Description Rogue Services will call this function to allocate GPU device @@ -78,33 +93,33 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. system memory is not to be used in the OS port then the implementation must return PVRSRV_ERROR_NOT_SUPPORTED. -@Input psPhysHeap the phys heap -@Input psConnection the connection to the originator process -@Input uiSize the size of the allocation +@Input psPhysHeap The phys heap +@Input psConnection The connection to the originator process +@Input uiSize The size of the allocation (must be a multiple of page size) -@Input ui32NumPhysChunks when sparse allocations are requested, - this is the number of physical chunks - to be allocated. - For regular allocations, this will be 1. -@Input ui32NumVirtChunks when sparse allocations are requested, - this is the number of virtual chunks - covering the sparse allocation. - For regular allocations, this will be 1. -@Input pui32MappingTable when sparse allocations are requested, +@Input ui32NumPhysChunks When sparse allocations are requested, + this is the number of physical chunks + to be allocated. + For regular allocations, this will be 1. 
+@Input ui32NumLogicalChunks When sparse allocations are requested, + this is the number of logical chunks + covering the sparse allocation. + For regular allocations, this will be 1. +@Input pui32MappingTable When sparse allocations are requested, this is the list of the indices of each physically-backed virtual chunk For regular allocations, this will be NULL. -@Input uiLog2PageSize the physical pagesize in log2(bytes). -@Input uiFlags the allocation flags. -@Input pszAnnotation string describing the PMR (for debug). - This should be passed into the function - PMRCreatePMR(). -@Input uiPid The process ID that this allocation should - be associated with. -@Output ppsPMROut pointer to the PMR created for the - new allocation -@Input ui32PDumpFlags the pdump flags. +@Input uiLog2DevPageSize The physical dev pagesize in log2(bytes). +@Input uiFlags The allocation flags. +@Input pszAnnotation String describing the PMR (for debug). + This should be passed into the function + PMRCreatePMR(). +@Input uiPid The process ID that this allocation should + be associated with. +@Output ppsPMROut Pointer to the PMR created for the + new allocation +@Input ui32PDumpFlags The pdump flags. @Return PVRSRV_OK on success, a failure code otherwise. */ /**************************************************************************/ PVRSRV_ERROR @@ -112,9 +127,9 @@ PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, CONNECTION_DATA *psConnection, IMG_DEVMEM_SIZE_T uiSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *pui32MappingTable, - IMG_UINT32 uiLog2PageSize, + IMG_UINT32 uiLog2DevPageSize, PVRSRV_MEMALLOCFLAGS_T uiFlags, const IMG_CHAR *pszAnnotation, IMG_PID uiPid, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ramem.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ramem.h new file mode 100644 index 000000000000..9dbf668e21b9 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/physmem_ramem.h @@ -0,0 +1,109 @@ +/*************************************************************************/ /*! +@File physmem_ramem.h +@Title Resource allocator managed PMR Factory common definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Part of Services memory management. This file defines the + RA managed memory PMR factory API that is shared between local + physheap implementations (LMA & IMA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PHYSMEM_RAMEM_H +#define PHYSMEM_RAMEM_H + +#include "img_types.h" +#include "pvrsrv_memallocflags.h" +#include "physheap.h" +#include "ra.h" +#include "connection_server.h" +#include "pmr_impl.h" + +IMG_UINT32 +RAMemGetPageShift(void); + +PVRSRV_ERROR +RAMemDoPhyContigPagesAlloc(RA_ARENA *pArena, + size_t uiSize, + PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + +void +RAMemDoPhyContigPagesFree(RA_ARENA *pArena, + PG_HANDLE *psMemHandle); + +PVRSRV_ERROR +RAMemPhyContigPagesMap(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + +void +RAMemPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle, + void *pvPtr); + +PVRSRV_ERROR +RAMemPhyContigPagesClean(PHYS_HEAP *psPhysHeap, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + +/* + * PhysmemNewRAMemRamBackedPMR + * + * This function will create a PMR using the memory managed by + * the PMR factory and is OS agnostic. + */ +PVRSRV_ERROR +PhysmemNewRAMemRamBackedPMR(PHYS_HEAP *psPhysHeap, + RA_ARENA *pArena, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +#endif /* PHYSMEM_RAMEM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr.h index e5fe4ec7fa83..c932f983ba5c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr.h @@ -74,8 +74,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
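The RA-managed PMR factory helpers declared in physmem_ramem.h above are shared between the local physheap implementations (LMA and IMA). The fragment below is a minimal sketch of how an owning physheap might use them to carve out, map and, on failure, release one physically contiguous block; the arena, device node, heap handle and PID are assumed to be supplied by the caller, and error handling is reduced to the essentials.

    #include "physmem_ramem.h"

    /* Sketch only: psArena, psDevNode, psPhysHeap and uiPid are assumed to come
     * from the owning physheap; creating the RA_ARENA itself is out of scope. */
    static PVRSRV_ERROR AllocContigBlockFromArena(RA_ARENA *psArena,
                                                  PVRSRV_DEVICE_NODE *psDevNode,
                                                  PHYS_HEAP *psPhysHeap,
                                                  IMG_PID uiPid,
                                                  PG_HANDLE *psMemHandle,
                                                  IMG_DEV_PHYADDR *psDevPAddr,
                                                  void **ppvCpuPtr)
    {
        const size_t uiSize = (size_t)1 << RAMemGetPageShift();
        PVRSRV_ERROR eError;

        /* Carve one contiguous block out of the arena. */
        eError = RAMemDoPhyContigPagesAlloc(psArena, uiSize, psDevNode,
                                            psMemHandle, psDevPAddr, uiPid);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* Map it into the kernel so the caller can initialise it. */
        eError = RAMemPhyContigPagesMap(psPhysHeap, psMemHandle, uiSize,
                                        psDevPAddr, ppvCpuPtr);
        if (eError != PVRSRV_OK)
        {
            /* Undo the allocation if the mapping failed. */
            RAMemDoPhyContigPagesFree(psArena, psMemHandle);
        }
        return eError;
    }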
#define PMR_MAX_TRANSLATION_STACK_ALLOC (32) -/* Maximum size PMR can have is 1G of memory */ -#define PMR_MAX_SUPPORTED_SIZE (0x40000000ULL) +/* Maximum size PMR can have is 8G of memory */ +#define PMR_MAX_SUPPORTED_SIZE IMG_UINT64_C(0x200000000) /* Max number of pages in a PMR at 4k page size */ #define PMR_MAX_SUPPORTED_4K_PAGE_COUNT (PMR_MAX_SUPPORTED_SIZE >> 12ULL) @@ -89,11 +89,11 @@ typedef IMG_UINT64 PMR_PASSWORD_T; struct _PMR_MAPPING_TABLE_ { - PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */ - IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */ - IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */ + PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */ + IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */ + IMG_UINT32 ui32NumLogicalChunks; /*!< Number of logical chunks in the mapping */ /* Must be last */ - IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */ + IMG_UINT32 aui32Translation[IMG_FLEX_ARRAY_MEMBER]; /*!< Translation mapping for "logical" to physical */ }; #define TRANSLATION_INVALID 0xFFFFFFFFUL @@ -102,6 +102,8 @@ typedef struct _PMR_EXPORT_ PMR_EXPORT; typedef struct _PMR_PAGELIST_ PMR_PAGELIST; +IMG_INT32 PMRGetLiveCount(void); + /* * PMRValidateSize * @@ -190,7 +192,7 @@ PVRSRV_ERROR PMRCreatePMR(PHYS_HEAP *psPhysHeap, PMR_SIZE_T uiLogicalSize, IMG_UINT32 ui32NumPhysChunks, - IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumLogicalChunks, IMG_UINT32 *pui32MappingTable, PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, PMR_FLAGS_T uiFlags, @@ -233,9 +235,13 @@ PMRCreatePMR(PHYS_HEAP *psPhysHeap, PVRSRV_ERROR PMRLockSysPhysAddresses(PMR *psPMR); +PVRSRV_ERROR +PMRLockSysPhysAddressesN(PMR *psPMR, IMG_UINT32 uiLockCount); + PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, - IMG_UINT32 ui32NestingLevel); + IMG_UINT32 uiLockCount, + IMG_UINT32 ui32NestingLevel); /* * PMRUnlockSysPhysAddresses() @@ -246,7 +252,12 @@ PVRSRV_ERROR PMRUnlockSysPhysAddresses(PMR *psPMR); PVRSRV_ERROR -PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel); +PMRUnlockSysPhysAddressesN(PMR *psPMR, IMG_UINT32 uiLockCount); + +PVRSRV_ERROR +PMRUnlockSysPhysAddressesNested(PMR *psPMR, + IMG_UINT32 uiLockCount, + IMG_UINT32 ui32NestingLevel); /* * PhysmemPMRExport() @@ -318,7 +329,7 @@ PVRSRV_ERROR PMRUnmakeLocalImportHandle(PMR *psPMR); /* - * PMRUnexporPMRt() + * PMRUnexportPMR() * * The reverse of PMRExportPMR(). This causes the PMR to no longer be * exported. If the PMR has already been imported, the imported PMR @@ -461,7 +472,7 @@ PMRMMapPMR(PMR *psPMR, * * Take a reference on the passed in PMR */ -void +PVRSRV_ERROR PMRRefPMR(PMR *psPMR); /* @@ -478,24 +489,152 @@ PVRSRV_ERROR PMRUnrefPMR(PMR *psPMR); /* - * PMRUnrefUnlockPMR() + * PMRRefPMRN() * - * Same as above but also unlocks the PMR. + * Take a reference N-times on the passed in PMR. + * + * This function does not perform address locking as opposed to PMRRefPMR(). */ PVRSRV_ERROR -PMRUnrefUnlockPMR(PMR *psPMR); +PMRRefPMRN(PMR *psPMR, IMG_UINT32 uiRefCount); -PPVRSRV_DEVICE_NODE -PMR_DeviceNode(const PMR *psPMR); +/* + * PMRUnrefPMRN() + * + * This undoes a call to any of the PhysmemNew* family of APIs + * (i.e. any PMR factory "constructor"). 
+ * + * This relinquishes N references to the PMR, and, where the refcount + * reaches 0, causes the PMR to be destroyed (calling the finalizer + * callback on the PMR, if there is one) + */ +PVRSRV_ERROR +PMRUnrefPMRN(PMR *psPMR, IMG_UINT32 uiRefCount); + +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +/* + * PMRTryRefPMR() + * + * This attempts to take a reference on the PMR but only succeeds if + * the PMR is not at refcount 0. Other Ref functions would class this + * attempt as a logical error. This function is free to attempt and return + * an error if PMR is in a free in progress state. + */ +PVRSRV_ERROR +PMRTryRefPMR(PMR *psPMR); + +/* + * PMRKernelCpuMapCountIncr() + * + * Increment count of the number of current kernel CPU mappings of the PMR. + */ +void +PMRKernelCpuMapCountIncr(PMR *psPMR); + +/* + * PMRKernelCpuMapCountDecr() + * + * Decrement count of the number of current kernel CPU mappings of the PMR. + */ +void +PMRKernelCpuMapCountDecr(PMR *psPMR); + +IMG_BOOL +PMR_IsKernelCpuMapped(PMR *psPMR); +#endif /* #if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) */ + +/* + * PMRClientCpuMapCountIncr() + * + * Increment count of the number of current client CPU mappings of the PMR. + */ +void +PMRClientCpuMapCountIncr(PMR *psPMR); + +/* + * PMRClientCpuMapCountDecr() + * + * Decrement count of the number of current client CPU mappings of the PMR. + */ +void +PMRClientCpuMapCountDecr(PMR *psPMR); + +IMG_BOOL +PMR_IsClientCpuMapped(PMR *psPMR); + + +/* + * PMRLinkGPUMapping() + * + * Link a GPU mapping with a PMR creating an association. + * Must be protected by PMR lock. + */ +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +void +PMRLinkGPUMapping(PMR *psPMR, DLLIST_NODE *psMappingNode); +#else +void +PMRLinkGPUMapping(PMR *psPMR); +#endif + +/* + * PMRUnlinkGPUMapping() + * + * Unlink a GPU mapping with a PMR destroying an association. + * Must be protected by PMR lock. + */ +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) +void +PMRUnlinkGPUMapping(PMR *psPMR, DLLIST_NODE *psMappingNode); +#else +void +PMRUnlinkGPUMapping(PMR *psPMR); +#endif +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) /* - * PMRIsPMRLive() + * PMRNotifyMigrateInProgress() * - * This function returns true if the PMR is in use and false otherwise. - * This function is not thread safe and hence the caller needs to ensure the - * thread safety by explicitly taking PMR or through other means. + * Used to notify the PMR that the pages backing the PMR are + * in the process of migration. */ -IMG_BOOL PMRIsPMRLive(PMR *psPMR); +void +PMRNotifyMigrateInProgress(PMR *psPMR); + +/* + * PMRNotifyMigrateComplete() + * + * Used to notify the PMR that migration of backing + * pages has completed. + */ +void +PMRNotifyMigrateComplete(PMR *psPMR); + +/* + * PMRRemapGPUPMR() + * + * Trigger Remap of a PMR page with each associated mapping. + * This is called for UMA page migration and will overwrite + * an existing page mapping with the new one at logical + * pg offset in the PMR. + * PVRSRV_ERROR_DEVICEMEM_REJECT_REMAP_REQUEST can be returned + * if remap is not possible for the given page offset. + */ +PVRSRV_ERROR +PMRRemapGPUPMR(PMR *psPMR, IMG_UINT32 ui32LogicalPgOffset); +#endif + +/* + * PMR_IsGpuMultiMapped() + * + * Must be protected by PMR lock. 
+ * + */ +IMG_BOOL +PMR_IsGpuMultiMapped(PMR *psPMR); + +PPVRSRV_DEVICE_NODE +PMR_DeviceNode(const PMR *psPMR); /* * PMR_Flags() @@ -512,13 +651,21 @@ PMR_Flags(const PMR *psPMR); IMG_BOOL PMR_IsSparse(const PMR *psPMR); -void -PMR_LogicalSize(const PMR *psPMR, - IMG_DEVMEM_SIZE_T *puiLogicalSize); +IMG_DEVMEM_SIZE_T +PMR_PhysicalSize(const PMR *psPMR); +/* + * PMR_IsOffsetValid() + * + * Returns if an address offset inside a PMR has a valid + * physical backing. + */ PVRSRV_ERROR -PMR_PhysicalSize(const PMR *psPMR, - IMG_DEVMEM_SIZE_T *puiPhysicalSize); +PMR_IsOffsetValid(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_BOOL *pbValid); PHYS_HEAP * PMR_PhysHeap(const PMR *psPMR); @@ -529,36 +676,36 @@ PMR_GetMappingTable(const PMR *psPMR); IMG_UINT32 PMR_GetLog2Contiguity(const PMR *psPMR); +IMG_DEVMEM_SIZE_T +PMR_LogicalSize(const PMR *psPMR); + /* - * PMRGetMaxChunkCount + * PMR_GetLogicalChunkCount * - * Given a PMR, calculate the maximum number of chunks supported by - * the PMR from the contiguity and return it. + * Retrieve the maximum number of chunks supported by the PMR. + * This property is fixed at creation time. */ -IMG_UINT32 PMRGetMaxChunkCount(PMR *psPMR); +IMG_UINT32 PMR_GetLogicalChunkCount(const PMR *psPMR); const IMG_CHAR * PMR_GetAnnotation(const PMR *psPMR); -/* - * PMR_IsOffsetValid() - * - * Returns if an address offset inside a PMR has a valid - * physical backing. - */ -PVRSRV_ERROR -PMR_IsOffsetValid(const PMR *psPMR, - IMG_UINT32 ui32Log2PageSize, - IMG_UINT32 ui32NumOfPages, - IMG_DEVMEM_OFFSET_T uiLogicalOffset, - IMG_BOOL *pbValid); - PMR_IMPL_TYPE PMR_GetType(const PMR *psPMR); +IMG_CHAR * +PMR_GetTypeStr(const PMR *psPMR); + IMG_INT32 PMR_GetRefCount(const PMR *psPMR); +/* PMR usage type for callers of PMR_DevPhysAddr() */ +typedef IMG_UINT32 PMR_PHYSADDRMODE_TYPE; + +#define CPU_USE BIT(0) /* CPU use, disable IPA policy */ +#define DEVICE_USE BIT(1) /* Device use, enable IPA policy */ +#define MAPPING_USE BIT(2) /* Mapping use, dev phys addrs obtained in a mapping path */ + /* * PMR_DevPhysAddr() * @@ -576,6 +723,16 @@ PMR_GetRefCount(const PMR *psPMR); * * If caller only wants one physical address it is sufficient to pass in: * ui32Log2PageSize==0 and ui32NumOfPages==1 + * + * Returns PVRSRV_OK if successful. + * PVRSRV_ERROR_RETRY if SUPPORT_LINUX_OSPAGE_MIGRATION is + * enabled and migrate is in progress. If we support page migration this + * call may return PVRSRV_ERROR_RETRY back to the requester. This is + * because the function is used to create device mappings and these mappings + * need to be synchronised across both reservations and PMRs. To do this we + * must take both locks in any mapping path. In order for migrate to complete, + * a UM requested Device mapping must temporarily back off the locks. + * Retry signifies this should happen. 
*/ PVRSRV_ERROR PMR_DevPhysAddr(const PMR *psPMR, @@ -583,7 +740,8 @@ PMR_DevPhysAddr(const PMR *psPMR, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T uiLogicalOffset, IMG_DEV_PHYADDR *psDevAddr, - IMG_BOOL *pbValid); + IMG_BOOL *pbValid, + PMR_PHYSADDRMODE_TYPE uiPMRUsage); /* * PMR_CpuPhysAddr() @@ -601,41 +759,90 @@ PMR_CpuPhysAddr(const PMR *psPMR, IMG_UINT32 ui32NumOfPages, IMG_DEVMEM_OFFSET_T uiLogicalOffset, IMG_CPU_PHYADDR *psCpuAddrPtr, - IMG_BOOL *pbValid); + IMG_BOOL *pbValid, + PMR_PHYSADDRMODE_TYPE uiPMRUsage); +/* PMRGetUID() + * + * Used for bridge calls that expect a PVRSRV_ERROR returned + * */ PVRSRV_ERROR PMRGetUID(PMR *psPMR, IMG_UINT64 *pui64UID); + +IMG_UINT64 +PMRInternalGetUID(PMR *psPMR); + +#if defined(SUPPORT_PMR_DEFERRED_FREE) /* - * PMR_ChangeSparseMem() + * PMR_IsZombie() + * + * Indicates if a PMR is a "zombie" PMR. This function **must** be called + * inside a PMR factory lock. + */ +IMG_BOOL +PMR_IsZombie(const PMR *psPMR); + +/* + * PMRMarkForDeferFree + * + * Sets sync value required for this PMR to be freed. + */ +void +PMRMarkForDeferFree(PMR *psPMR); + +/* + * PMRQueueZombiesForCleanup + * + * Defers cleanup of all zombie PMRs to the CleanupThread. + * + * Returns IMG_TRUE if any PMRs were queued for free and IMG_FALSE if no PMRs + * were queued. + */ +IMG_BOOL +PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode); + +/* + * PMRReviveZombieAndRef + * + * Removed the PMR either form zombie list or cleanup item's list + * and references it. + */ +void +PMRReviveZombieAndRef(PMR *psPMR); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + +/* + * PMR_ChangeSparseMemUnlocked() * * See note above about Lock/Unlock semantics. * * This function alters the memory map of the given PMR in device space by - * adding/deleting the pages as requested. + * adding/deleting the pages as requested. PMR lock must be taken + * before calling this function. * */ -PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, +PVRSRV_ERROR PMR_ChangeSparseMemUnlocked(PMR *psPMR, IMG_UINT32 ui32AllocPageCount, IMG_UINT32 *pai32AllocIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 *pai32FreeIndices, - IMG_UINT32 uiSparseFlags); - + IMG_UINT32 uiSparseFlags); /* - * PMR_ChangeSparseMemCPUMap() + * PMR_ChangeSparseMem() * * See note above about Lock/Unlock semantics. * - * This function alters the memory map of the given PMR in CPU space by - * adding/deleting the pages as requested. + * This function alters the memory map of the given PMR in device space by + * adding/deleting the pages as requested. + * */ -PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, - IMG_UINT64 sCpuVAddrBase, - IMG_UINT32 ui32AllocPageCount, - IMG_UINT32 *pai32AllocIndices, - IMG_UINT32 ui32FreePageCount, - IMG_UINT32 *pai32FreeIndices); +PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiSparseFlags); #if defined(PDUMP) @@ -998,9 +1205,105 @@ PMRInit(void); PVRSRV_ERROR PMRDeInit(void); +#if defined(SUPPORT_PMR_DEFERRED_FREE) +/* + * PMRInitDevice() + * + * Initialised device specific PMR data. + */ +PVRSRV_ERROR +PMRInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode); + +/* + * PMRFreeZombies() + * + * Free deferred PMRs. + */ +void +PMRFreeZombies(PPVRSRV_DEVICE_NODE psDeviceNode); + +/* + * PMRFreeZombies() + * + * Print all zombies to the log. + */ +void +PMRDumpZombies(PPVRSRV_DEVICE_NODE psDeviceNode); + +/* + * PMRDeInitDevice() + * + * Cleans up device specific PMR data. 
+ */ +void +PMRDeInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode); +#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ + #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) PVRSRV_ERROR PMRStoreRIHandle(PMR *psPMR, void *hRIHandle); #endif +#if defined(DEBUG) +void PMRLockHeldAssert(const PMR *psPMR); +#else +#define PMRLockHeldAssert(x) +#endif + +/* + * PMRLockPMR() + * + * To be called when the PMR must not be modified by any other call-stack. + * Acquires the mutex on the passed in PMR. + */ +void +PMRLockPMR(const PMR *psPMR); + +/* + * PMRUnlockPMR() + * + * To be called when the PMR is no longer being modified. + * Releases the per-PMR mutex. + */ +void +PMRUnlockPMR(const PMR *psPMR); + +#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) +PVRSRV_ERROR +PMRModifyIPAPolicy(PMR *psPMR, IMG_UINT8 ui8NewIPAPolicy); + +PVRSRV_ERROR +PMRGetIPAPolicy(PMR *psPMR, IMG_UINT8 *pui8IPAPolicy); + +PVRSRV_ERROR +PMRGetIPAInfo(PMR *psPMR, IMG_UINT32 *pui32IPAPolicy, IMG_UINT32 *pui32IPAShift, + IMG_UINT32 *pui32IPAMask, IMG_UINT32 *pui32IPAFlagsValue); +#endif + +#if defined(PVRSRV_ENABLE_XD_MEM) +/* + * PMR_ImportedDevicesMask() + * + * Returns a bitmask of devices this PMR is imported to. + * Explicitly does NOT include the PMR_DeviceNode's device. + * For each device registered with the PMR: 1 << psDevNode->sDevId.ui32InternalID + */ +IMG_UINT64 +PMR_ImportedDevicesMask(const PMR* psPMR); +static_assert((sizeof(IMG_UINT64) * 8) >= PVRSRV_MAX_DEVICES, "PMR_ImportedDevicesMask() needs to be updated"); +#endif /* defined(PVRSRV_ENABLE_XD_MEM) */ + +#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) +/* + * + * PMR_RegisterDeviceImport() + * + * Register the PMR with the device node. + * This is required for the PMR to be marked as a XD PMR. + * Silently handles if the PMR is already registered with the device. + */ +PVRSRV_ERROR +PMR_RegisterDeviceImport(PMR* psPMR, PPVRSRV_DEVICE_NODE psDevNode); +#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) || defined(PVRSRV_ENABLE_XD_MEM) */ + #endif /* #ifdef SRVSRV_PMR_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_impl.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_impl.h index e75b84d15c09..3c290ca3a6e7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_impl.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_impl.h @@ -79,18 +79,28 @@ typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE; */ typedef void *PMR_MMAP_DATA; +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +typedef void *PMR_IMPL_ZOMBIEPAGES; +#endif + +#define PMR_IMPL_TYPES \ + X(NONE), \ + X(OSMEM), \ + X(LMA), \ + X(DMABUF), \ + X(EXTMEM), \ + X(DC), \ + X(TDFWMEM), \ + X(TDSECBUF), \ + X(LAST) + /*! PMR factory type. */ typedef enum _PMR_IMPL_TYPE_ { - PMR_TYPE_NONE = 0, - PMR_TYPE_OSMEM, - PMR_TYPE_LMA, - PMR_TYPE_DMABUF, - PMR_TYPE_EXTMEM, - PMR_TYPE_DC, - PMR_TYPE_TDFWMEM, - PMR_TYPE_TDSECBUF +#define X(type) PMR_TYPE_##type + PMR_IMPL_TYPES +#undef X } PMR_IMPL_TYPE; /*************************************************************************/ /*! @@ -129,12 +139,63 @@ typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); @Input pvPriv Private data (which was generated by the PMR factory when PMR was created) +@Output ppvZombiePages Zombie pages object. If non-null is returned + caller is obligated to call pfnFreeZombiePages + at an appropriate time to prevent memory leaks. 
+ If support for deferred freeing of pages is not + provided, the implementation must set + *ppvZombiePages to NULL. @Return PVRSRV_OK if the operation was successful, an error code otherwise. */ /**************************************************************************/ +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv, + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages); +#else typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); +#endif + +#if defined(SUPPORT_STATIC_IPA) +/*************************************************************************/ /*! +@Brief Callback function type PFN_DEV_PHYS_ADDR_FN + +@Description Called to obtain one or more physical addresses for given + offsets within a PMR. + + The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is + guaranteed to have been called prior to calling the + PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to + rely on the physical address thus obtained after the + PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called. + + Implementation of this callback is mandatory. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input ui32Log2PageSize The log2 page size. +@Input ui32NumOfAddr The number of addresses to be returned +@Input puiOffset The offset from the start of the PMR + (in bytes) for which the physical + address is required. Where multiple + addresses are requested, this will + contain a list of offsets. +@Input ui64IPAPolicyValue The Intermediate Physical Address (IPA) + Policy value to be applied to + the physical address +@Input ui64IPAClearMask The IPA Clear mask to be applied to + the physical address when setting policy. +@Output pbValid List of boolean flags indicating which + addresses in the returned list + (psDevAddrPtr) are valid (for sparse + allocations, not all pages may have a + physical backing) +@Output psDevAddrPtr Returned list of physical addresses +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. +*/ /**************************************************************************/ +#else /*************************************************************************/ /*! @Brief Callback function type PFN_DEV_PHYS_ADDR_FN @@ -168,10 +229,15 @@ typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); @Return PVRSRV_OK if the operation was successful, an error code otherwise. */ /**************************************************************************/ +#endif typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv, IMG_UINT32 ui32Log2PageSize, IMG_UINT32 ui32NumOfAddr, IMG_DEVMEM_OFFSET_T *puiOffset, +#if defined(SUPPORT_STATIC_IPA) + IMG_UINT64 ui64IPAPolicyValue, + IMG_UINT64 ui64IPAClearMask, +#endif IMG_BOOL *pbValid, IMG_DEV_PHYADDR *psDevAddrPtr); @@ -314,6 +380,9 @@ typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, allocation that do not require a physical allocation. @Input ui32Flags Allocation flags +@Output ppvZombiePages Zombie pages object. If non-null is returned + caller is obligated to call pfnFreeZombiePages + at an appropriate time to prevent memory leaks @Return PVRSRV_OK if the sparse allocation physical backing was updated successfully, an error code otherwise. 
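With SUPPORT_PMR_PAGES_DEFERRED_FREE, the unlock and sparse-change callbacks hand ownership of detached pages back to the PMR layer as a PMR_IMPL_ZOMBIEPAGES object, and the factory's pfnFreeZombiePages callback releases them later, typically from the cleanup path. The sketch below shows the two callback shapes under that assumption; EXAMPLE_ZOMBIE_PAGES and the Example* functions are illustrative only, and OSAllocMem/OSFreeMem and PVR_UNREFERENCED_PARAMETER are assumed to be the usual Services helpers.

    #include "img_defs.h"
    #include "allocmem.h"     /* OSAllocMem/OSFreeMem (assumed location) */
    #include "pmr_impl.h"

    #if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
    /* Illustrative zombie-pages object: nothing more than a heap block that
     * records which pages were detached from the PMR. */
    typedef struct _EXAMPLE_ZOMBIE_PAGES_
    {
        IMG_UINT32 ui32PageCount;
    } EXAMPLE_ZOMBIE_PAGES;

    /* PFN_UNLOCK_PHYS_ADDRESSES_FN shape: detach the pages, do not free them. */
    static PVRSRV_ERROR ExampleUnlockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv,
                                                   PMR_IMPL_ZOMBIEPAGES *ppvZombiePages)
    {
        EXAMPLE_ZOMBIE_PAGES *psZombie = OSAllocMem(sizeof(*psZombie));

        PVR_UNREFERENCED_PARAMETER(pvPriv);

        if (psZombie == NULL)
        {
            return PVRSRV_ERROR_OUT_OF_MEMORY;
        }

        psZombie->ui32PageCount = 0;   /* a real factory records its pages here */
        *ppvZombiePages = psZombie;    /* or NULL when nothing needs deferring  */
        return PVRSRV_OK;
    }

    /* PFN_FREE_ZOMBIE_PAGES_FN shape: called later, e.g. from the cleanup
     * thread, to actually release what was detached above. */
    static PVRSRV_ERROR ExampleFreeZombiePages(PMR_IMPL_ZOMBIEPAGES pvZombiePages)
    {
        /* A real factory would return the recorded pages to its pool or the OS
         * before discarding the bookkeeping object. */
        OSFreeMem(pvZombiePages);
        return PVRSRV_OK;
    }
    #endif /* SUPPORT_PMR_PAGES_DEFERRED_FREE */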
@@ -324,6 +393,9 @@ typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, IMG_UINT32 *pai32AllocIndices, IMG_UINT32 ui32FreePageCount, IMG_UINT32 *pai32FreeIndices, +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) + PMR_IMPL_ZOMBIEPAGES *ppvZombiePages, +#endif IMG_UINT32 uiFlags); /*************************************************************************/ /*! @@ -403,14 +475,9 @@ typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pPriv, @Input pvPriv Private data (which was generated by the PMR factory when PMR was created) -@Return PVRSRV_OK if the PMR destruction was successful, an error - code otherwise. - Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only - error returned from physmem_dmabuf.c layer and on this - error, destroying of the PMR is aborted without disturbing - the PMR state. +@Return None */ /**************************************************************************/ -typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv); +typedef void (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv); /*************************************************************************/ /*! @Brief Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN @@ -440,6 +507,50 @@ typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void); */ /**************************************************************************/ typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void); +#ifdef SUPPORT_PMR_DEFERRED_FREE +/*************************************************************************/ /*! +@Brief Callback function type PFN_ZOMBIFY_FN + +@Description Called to perform factory actions necessary when PMR becomes + a zombie PMR. + + This function should at least adjust the driver/process memory + stats to reflect the amount of memory is occupied by the zombie + PMRs and at the same time subtract the memory from the main + memory stat the pages are accounted under. + + Implementation of this callback is required when SUPPORT_PMR_DEFERRED_FREE=1. + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_ZOMBIFY_FN)(PMR_IMPL_PRIVDATA pvPriv, + PMR *psPMR); +#endif + +#ifdef SUPPORT_PMR_PAGES_DEFERRED_FREE +/*************************************************************************/ /*! +@Brief Callback function type PFN_FREE_ZOMBIE_PAGES_FN + +@Description Called to perform factory actions to free zombie pages object + previously returned by PFN_CHANGE_SPARSE_MEM_FN. + + This function should free the pages described in the + pvZombiePages parameter and do any associated actions related + to freeing such as poisoning or returning to the page pool. + + Implementation of this callback is required when + SUPPORT_PMR_PAGES_DEFERRED_FREE=1. + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. If error is returned, the PMR layer might retry. + On error, factory implementations should modify the contents + of the PMR_IMPL_ZOMBIEPAGES object reflecting any changes in + underlying memory as a result of the initial (failed) call. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_FREE_ZOMBIE_PAGES_FN)(PMR_IMPL_ZOMBIEPAGES pvZombiePages); +#endif + /*! PMR factory callback table. */ struct _PMR_IMPL_FUNCTAB_ { @@ -463,8 +574,11 @@ struct _PMR_IMPL_FUNCTAB_ { /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */ PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem; - /*! 
Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */ - PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap; + +#ifdef SUPPORT_PMR_PAGES_DEFERRED_FREE + /*! Callback function pointer, see ::PFN_FREE_ZOMBIE_PAGES_FN */ + PFN_FREE_ZOMBIE_PAGES_FN pfnFreeZombiePages; +#endif /*! Callback function pointer, see ::PFN_MMAP_FN */ PFN_MMAP_FN pfnMMap; @@ -477,6 +591,11 @@ struct _PMR_IMPL_FUNCTAB_ { /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */ PFN_RELEASE_PMR_FACTORY_LOCK_FN pfnReleasePMRFactoryLock; + +#ifdef SUPPORT_PMR_DEFERRED_FREE + /*! Callback function pointer, see ::PFN_ZOMBIFY_FN */ + PFN_ZOMBIFY_FN pfnZombify; +#endif }; /*! PMR factory callback table. diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_os.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_os.h index 7b4a2117091c..84a3a6c400ee 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_os.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pmr_os.h @@ -59,4 +59,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PVRSRV_ERROR OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); +#if defined(SUPPORT_LINUX_OSPAGE_MIGRATION) + +/*************************************************************************/ /*! + * @Function OSLinuxPMRUnmapPageInPMR + * + * @Description Distributes calls in the CPU mmap module to unmap a given PMR + * page offset into all associated mappings. + * + * @Input psPMR The PMR to be mapped. + * @Input psMappingListHead List of Linux mappings to adjust. + * @Input ui32LogicalPgOffset The logical page offset into the + * PMR. + * + * @Return None +*/ /**************************************************************************/ +void +OSLinuxPMRUnmapPageInPMR(PMR *psPMR, DLLIST_NODE *psMappingListHead, IMG_UINT32 ui32LogicalPgOffset); +#endif + #endif /* !defined(PMR_OS_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/power.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/power.h index e4ef6de844bc..159fa17781a8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/power.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/power.h @@ -66,18 +66,60 @@ typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDe Typedef for a pointer to a Function that will be called before a transition from one power state to another. See also PFN_POST_POWER. */ -typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags); +typedef PVRSRV_ERROR (*PFN_PRE_POWER)(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_POWER_FLAGS ePwrFlags); /*! Typedef for a pointer to a Function that will be called after a transition from one power state to another. See also PFN_PRE_POWER. */ -typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle, - PVRSRV_DEV_POWER_STATE eNewPowerState, - PVRSRV_DEV_POWER_STATE eCurrentPowerState, - PVRSRV_POWER_FLAGS ePwrFlags); +typedef PVRSRV_ERROR (*PFN_POST_POWER)(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_POWER_FLAGS ePwrFlags); + +/* Clock speed handler prototypes */ + +/*! + Typedef for a pointer to a Function that will be called before a transition + from one clock speed to another. 
See also PFN_POST_CLOCKSPEED_CHANGE. + */ +typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE)(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +/*! + Typedef for a pointer to a Function that will be called after a transition + from one clock speed to another. See also PFN_PRE_CLOCKSPEED_CHANGE. + */ +typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE)(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +/*! + Typedef for a pointer to a function that will be called to transition the + device to a forced idle state. Used in unison with (forced) power requests, + DVFS and cluster count changes. + */ +typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST)(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bDeviceOffPermitted); + +/*! + Typedef for a pointer to a function that will be called to cancel a forced + idle state and return the firmware back to a state where the hardware can be + scheduled. + */ +typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST)(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*! + Typedef for a pointer to a function that will be called to cancel a forced + idle state and return the firmware back to a state where the hardware can be + scheduled. This function does not wait for a response from the FW. + */ +typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST_ASYNC)(PPVRSRV_DEVICE_NODE psDeviceNode); + + +typedef PVRSRV_ERROR (*PFN_GPU_UNITS_POWER_CHANGE)(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 ui32SESPowerState); const char *PVRSRVSysPowerStateToString(PVRSRV_SYS_POWER_STATE eState); const char *PVRSRVDevPowerStateToString(PVRSRV_DEV_POWER_STATE eState); @@ -96,7 +138,13 @@ void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode); @Return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK ******************************************************************************/ +#if defined(DEBUG) +PVRSRV_ERROR PVRSRVPowerLock_Debug(PPVRSRV_DEVICE_NODE psDeviceNode, + const char *pszFile, const unsigned int ui32LineNum); +#define PVRSRVPowerLock(DEV_NODE) PVRSRVPowerLock_Debug(DEV_NODE, __FILE__, __LINE__) +#else PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode); +#endif /*! ****************************************************************************** @@ -122,7 +170,32 @@ void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode); PVRSRV_OK ******************************************************************************/ +#if defined(DEBUG) +PVRSRV_ERROR PVRSRVPowerTryLock_Debug(PPVRSRV_DEVICE_NODE psDeviceNode, + const char *pszFile, const unsigned int ui32LineNum); +#define PVRSRVPowerTryLock(DEV_NODE) PVRSRVPowerTryLock_Debug(DEV_NODE, __FILE__, __LINE__) +#else PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode); +#endif + +/*! +****************************************************************************** + + @Function PVRSRVPowerTryLockWaitForTimeout + + @Description Try to obtain the mutex for power transitions. Only allowed when + system power is on. The call blocks until either the lock is acquired, + or the timeout is reached. + + *** Debug only. DO NOT use in core GPU functions which cannot fail. *** + If the power lock cannot be taken the device may be powered down at + any time in another worker thread. + + @Return PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or + PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPowerTryLockWaitForTimeout(PPVRSRV_DEVICE_NODE psDeviceNode); /*! 
****************************************************************************** @@ -230,21 +303,22 @@ PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG * psDeviceConfig, @Input pfnDevicePrePower : regular device pre power callback @Input pfnDevicePostPower : regular device post power callback @Input pfnSystemPrePower : regular system pre power callback - @Input pfnDevicePostPower : regular system post power callback - @Input pfnSystemPrePower : regular device pre power callback @Input pfnSystemPostPower : regular device pre power callback @Input pfnForcedIdleRequest : forced idle request callback @Input pfnForcedIdleCancelRequest : forced idle request cancel callback + @Input pfnForcedIdleCancelRequestAsync : forced idle request cancel callback, + doesn't wait for response. ******************************************************************************/ void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_POWER_DEV *psPowerDevice, PFN_PRE_POWER pfnDevicePrePower, - PFN_POST_POWER pfnDevicePostPower, - PFN_SYS_PRE_POWER pfnSystemPrePower, - PFN_SYS_POST_POWER pfnSystemPostPower, + PFN_POST_POWER pfnDevicePostPower, + PFN_SYS_PRE_POWER pfnSystemPrePower, + PFN_SYS_POST_POWER pfnSystemPostPower, PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, - PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest); + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST_ASYNC pfnForcedIdleCancelRequestAsync); /* Type PFN_DC_REGISTER_POWER */ PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, @@ -256,6 +330,7 @@ PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST_ASYNC pfnForcedIdleCancelRequestAsync, PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, IMG_HANDLE hDevCookie, PVRSRV_DEV_POWER_STATE eCurrentPowerState, @@ -310,6 +385,42 @@ PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, ******************************************************************************/ IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode); +/*! +****************************************************************************** + + @Function PVRSRVGetSystemPowerState + + @Description + + Return the system power state + + @Input psDeviceNode : Device node + @Output peCurrentSysPowerState : Current power state + + @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. + PVRSRV_OK otherwise. + +******************************************************************************/ +PVRSRV_ERROR PVRSRVGetSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, + PPVRSRV_SYS_POWER_STATE peCurrentSysPowerState); + +/*! +****************************************************************************** + + @Function PVRSRVIsSystemPowered + + @Description + + Whether the system layer is powered, for ensuring the RGX regbank is powered + during initial GPU driver configuration. + + @Input psDeviceNode : Device node + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL PVRSRVIsSystemPowered(PPVRSRV_DEVICE_NODE psDeviceNode); + /**************************************************************************/ /*! 
@Function PVRSRVDevicePreClockSpeedChange @@ -345,7 +456,7 @@ PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, @Description This function is called after a voltage/frequency change has been made to the GPU HW following a call to PVRSRVDevicePreClockSpeedChange(). - Before calling this function the caller must ensure the system + Before calling this function, the caller must ensure the system data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has been updated with the new frequency set, measured in Hz. The function informs the host driver that the DVFS change has @@ -425,6 +536,49 @@ PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, ******************************************************************************/ PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode); +/*! +****************************************************************************** + + @Function PVRSRVDeviceIdleLatchedGetKM + + @Description Perform device-specific processing required to force the device + idle. The device power-lock might be temporarily released (and + again re-acquired) during the course of this call, hence to + maintain lock-ordering power-lock should be the last acquired + lock before calling this function. Latched based on power + device internal refcount. Only actions idle on initial reference + increment via calling this function. + + @Input psDeviceNode : Device node + + @Return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED + When re-acquisition of power-lock failed. + Handled internally but still represents error + occurred. + + PVRSRV_OK When idle request succeeded. + PVRSRV_ERROR Other system errors. + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceIdleLatchedGetKM(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*! +****************************************************************************** + + @Function PVRSRVDeviceIdleLatchedPutAsyncKM + + @Description Perform device-specific processing required to cancel the forced idle state + on the device, returning to normal operation. Does not wait for FW response. + Latched based on power device internal refcount. Only actions idle cancel + once final reference is dropped via calling this function. + + @Input psDeviceNode : Device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceIdleLatchedPutAsyncKM(PPVRSRV_DEVICE_NODE psDeviceNode); + /*! 
****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/process_stats.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/process_stats.h index 35a79f619501..c410736e1749 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/process_stats.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/process_stats.h @@ -73,20 +73,27 @@ typedef enum { PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */ PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */ PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */ +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, /* zombie pages allocated from LMA */ + PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES, /* zombie pages allocated from UMA */ +#endif PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */ PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */ PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, /* dma-buf imports */ +#if defined(SUPPORT_PMR_DEFERRED_FREE) + PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, /* dma-buf zombie */ +#endif /* Must be the last enum...*/ PVRSRV_MEM_ALLOC_TYPE_COUNT } PVRSRV_MEM_ALLOC_TYPE; - /* * Functions for managing the processes recorded... */ PVRSRV_ERROR PVRSRVStatsInitialise(void); void PVRSRVStatsDestroy(void); +void PVRSRVStatsDestroyDI(void); PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats); @@ -96,8 +103,6 @@ PVRSRV_ERROR PVRSRVStatsDeviceConnect(PVRSRV_DEVICE_NODE *psDeviceNode); void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode); -#define MAX_POWER_STAT_ENTRIES 51 - /* * Functions for recording the statistics... 
*/ @@ -113,6 +118,14 @@ void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, IMG_UINT64 ui64Key, IMG_PID uiPid); +#if defined(SUPPORT_PMR_DEFERRED_FREE) +void PVRSRVStatsTransferMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eCurrentType, + PVRSRV_MEM_ALLOC_TYPE eTargetType, + IMG_UINT64 ui64Key, + IMG_PID currentPid + DEBUG_MEMSTATS_PARAMS); +#endif + void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, size_t uiBytes, IMG_PID uiPid); @@ -161,6 +174,7 @@ void PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32Total3DStores, IMG_UINT32 ui32TotalCDMStores, IMG_UINT32 ui32TotalTDMStores, + IMG_UINT32 ui32NumRayStores, IMG_PID owner); void PVRSRVStatsUpdateZSBufferStats(PVRSRV_DEVICE_NODE *psDeviceNode, @@ -188,17 +202,19 @@ void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, #endif /* Functions used for calculating the memory usage statistics of a process */ -PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, - IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats); +PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, + IMG_UINT32 ui32ArrSize, + IMG_BOOL bAllProcessStats, + IMG_UINT64 *pui64MemoryStats); typedef struct { IMG_UINT32 ui32Pid; - IMG_UINT32 ui32KernelMemUsage; - IMG_UINT32 ui32GraphicsMemUsage; + IMG_UINT64 ui64KernelMemUsage; + IMG_UINT64 ui64GraphicsMemUsage; } PVRSRV_PER_PROCESS_MEM_USAGE; -PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, - IMG_UINT32 *pui32NumberOfLivePids, - PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData); +PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT64 *pui64TotalMem, + IMG_UINT32 *pui32NumberOfLivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData); #endif /* PROCESS_STATS_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_dvfs.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_dvfs.h index b99b8f5217aa..0529b6d20a10 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_dvfs.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_dvfs.h @@ -43,8 +43,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PVR_DVFS_H #define PVR_DVFS_H -#include - #if defined(SUPPORT_LINUX_DVFS) #include #include @@ -53,17 +51,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
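The per-process memory statistics interface above is widened from 32-bit to 64-bit counters. A usage sketch follows, under the assumptions that the caller releases the returned array (shown with OSFreeMem, which may not match the actual ownership contract) and that PVR_LOG is available for output.

    #include "process_stats.h"
    #include "pvr_debug.h"
    #include "allocmem.h"     /* OSFreeMem (assumed location) */

    static void ExamplePrintPerProcessUsage(void)
    {
        IMG_UINT64 ui64TotalMem = 0;
        IMG_UINT32 ui32NumLivePids = 0;
        PVRSRV_PER_PROCESS_MEM_USAGE *pasUsage = NULL;
        IMG_UINT32 i;

        if (PVRSRVGetProcessMemUsage(&ui64TotalMem, &ui32NumLivePids,
                                     &pasUsage) != PVRSRV_OK)
        {
            return;
        }

        for (i = 0; i < ui32NumLivePids; i++)
        {
            /* Both usage counters are now 64-bit, so large totals no longer wrap. */
            PVR_LOG(("PID %u: kernel %llu bytes, graphics %llu bytes",
                     pasUsage[i].ui32Pid,
                     (unsigned long long)pasUsage[i].ui64KernelMemUsage,
                     (unsigned long long)pasUsage[i].ui64GraphicsMemUsage));
        }

        OSFreeMem(pasUsage);   /* assumption: caller owns the returned array */
    }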
#include #endif - #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - #include - #else - #include - #endif + #include #endif #include "img_types.h" -typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq); -typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt); +typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_HANDLE hSysData, IMG_UINT32 ui32Freq); +typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_HANDLE hSysData, IMG_UINT32 ui32Volt); typedef struct _IMG_OPP_ { @@ -82,6 +76,7 @@ typedef struct _IMG_DVFS_DEVICE_CFG_ IMG_UINT32 ui32PollMs; #endif IMG_BOOL bIdleReq; + IMG_BOOL bDTConfig; PFN_SYS_DEV_DVFS_SET_FREQUENCY pfnSetFrequency; PFN_SYS_DEV_DVFS_SET_VOLTAGE pfnSetVoltage; @@ -100,27 +95,42 @@ typedef struct _IMG_DVFS_GOVERNOR_CFG_ { IMG_UINT32 ui32UpThreshold; IMG_UINT32 ui32DownDifferential; +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + /* custom thresholds */ + IMG_UINT32 uiNumMembus; +#endif } IMG_DVFS_GOVERNOR_CFG; #endif #if defined(__linux__) #if defined(SUPPORT_LINUX_DVFS) +typedef enum +{ + PVR_DVFS_STATE_NONE = 0, + PVR_DVFS_STATE_INIT_PENDING, + PVR_DVFS_STATE_READY, + PVR_DVFS_STATE_OFF, + PVR_DVFS_STATE_DEINIT +} PVR_DVFS_STATE; + typedef struct _IMG_DVFS_DEVICE_ { -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - struct opp *psOPP; -#else struct dev_pm_opp *psOPP; -#endif struct devfreq *psDevFreq; - IMG_BOOL bInitPending; - IMG_BOOL bReady; - IMG_BOOL bEnabled; + PVR_DVFS_STATE eState; IMG_HANDLE hGpuUtilUserDVFS; +#if defined(SUPPORT_PVR_DVFS_GOVERNOR) + IMG_DVFS_GOVERNOR_CFG data; + IMG_BOOL bGovernorReady; +#else struct devfreq_simple_ondemand_data data; +#endif #if defined(CONFIG_DEVFREQ_THERMAL) struct thermal_cooling_device *psDevfreqCoolingDevice; #endif +#if defined(CONFIG_PM_DEVFREQ_EVENT) && defined(SUPPORT_PVR_DVFS_GOVERNOR) + struct pvr_profiling_device *psProfilingDevice; +#endif } IMG_DVFS_DEVICE; #endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_notifier.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_notifier.h index 57172363ca14..b3757494e4b7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_notifier.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvr_notifier.h @@ -104,7 +104,7 @@ PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify); @Function PVRSRVCheckStatus @Description Calls PVRSRVNotifyCommandCompletion() to notify registered command complete handlers of work completion and then calls - PVRSRVSignalGlobalEO() to signal the global event object. + PVRSRVSignalGlobalEO() to signal the driver wide event objects. @Input hCmdCompCallerHandle Used to prevent a handler from being notified. A NULL value results in all handlers being notified. @@ -125,11 +125,11 @@ void PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); /*************************************************************************/ /*! -@Function PVRSRVSignalGlobalEO -@Description Signals the global event object. +@Function PVRSRVSignalDriverWideEO +@Description Signals the driver wide event objects. */ /**************************************************************************/ void -PVRSRVSignalGlobalEO(void); +PVRSRVSignalDriverWideEO(void); /*************************************************************************/ /*! 
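The system-layer DVFS hooks above now take the system data handle as their first argument, so a platform can reach its own clock and regulator state without globals. Below is a sketch of a platform-side implementation; EXAMPLE_SYS_DATA and the wiring helper are placeholders, and only the callback typedefs and the pfnSetFrequency/pfnSetVoltage fields come from pvr_dvfs.h.

    #include "pvr_dvfs.h"

    /* Hypothetical per-platform state normally registered as hSysData. */
    typedef struct _EXAMPLE_SYS_DATA_
    {
        IMG_UINT32 ui32CurrentFreqHz;
        IMG_UINT32 ui32CurrentVoltUv;
    } EXAMPLE_SYS_DATA;

    static void ExampleSetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Freq)
    {
        EXAMPLE_SYS_DATA *psSysData = hSysData;
        /* A real platform would program its GPU clock here (for example via the
         * kernel clk framework); the sketch only records the request. */
        psSysData->ui32CurrentFreqHz = ui32Freq;
    }

    static void ExampleSetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Volt)
    {
        EXAMPLE_SYS_DATA *psSysData = hSysData;
        psSysData->ui32CurrentVoltUv = ui32Volt;
    }

    /* Wiring into the DVFS device config (field names from pvr_dvfs.h). */
    static void ExampleWireDVFSCallbacks(IMG_DVFS_DEVICE_CFG *psDVFSCfg)
    {
        psDVFSCfg->pfnSetFrequency = ExampleSetFrequency;
        psDVFSCfg->pfnSetVoltage   = ExampleSetVoltage;
    }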
diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv.h index 1cd800e6d30d..e9a87ba8ad06 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv.h @@ -108,19 +108,9 @@ typedef struct _DRIVER_INFO_ IMG_BOOL bIsNoMatch; }DRIVER_INFO; -#if defined(SUPPORT_VALIDATION) && defined(__linux__) -typedef struct MEM_LEAK_INTERVALS_TAG -{ - IMG_UINT32 ui32OSAlloc; - IMG_UINT32 ui32GPU; - IMG_UINT32 ui32MMU; -} MEM_LEAK_INTERVALS; -#endif typedef struct PVRSRV_DATA_TAG { - PVRSRV_DRIVER_MODE eDriverMode; /*!< Driver mode (i.e. native, host or guest) */ - IMG_BOOL bForceApphintDriverMode; /*!< Indicate if driver mode is forced via apphint */ DRIVER_INFO sDriverInfo; IMG_UINT32 ui32DPFErrorCount; /*!< Number of Fatal/Error DPFs */ @@ -138,12 +128,12 @@ typedef struct PVRSRV_DATA_TAG IMG_HANDLE hCleanupThread; /*!< Cleanup thread */ IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */ POS_SPINLOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */ - DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread */ IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */ uintptr_t cleanupThreadTid; /*!< Cleanup thread id */ ATOMIC_T i32NumCleanupItemsQueued; /*!< Number of items in cleanup thread work list */ ATOMIC_T i32NumCleanupItemsNotCompleted; /*!< Number of items dropped from cleanup thread work list after retry limit reached */ + ATOMIC_T i32CleanupItemTypes[PVRSRV_CLEANUP_TYPE_LAST]; /*!< Array containing the counts for different cleanup item types. */ IMG_HANDLE hDevicesWatchdogThread; /*!< Devices watchdog thread */ IMG_HANDLE hDevicesWatchdogEvObj; /*! Event object to drive devices watchdog thread */ @@ -151,9 +141,6 @@ typedef struct PVRSRV_DATA_TAG #if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*! Timeout for the Devices watchdog Thread */ #endif -#ifdef PVR_TESTING_UTILS - volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */ -#endif #if defined(SUPPORT_AUTOVZ) IMG_HANDLE hAutoVzWatchdogThread; /*!< Devices watchdog thread */ @@ -166,6 +153,9 @@ typedef struct PVRSRV_DATA_TAG volatile IMG_BOOL bHWPerfHostThreadStop; IMG_UINT32 ui32HWPerfHostThreadTimeout; + POS_LOCK hClientStreamTableLock; /*!< Lock for the client stream hash */ + HASH_TABLE *psClientStreamTable; /*!< Hash table for client streams */ + IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */ POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */ @@ -182,14 +172,16 @@ typedef struct PVRSRV_DATA_TAG DEVMEM_MEMDESC *psInfoPageMemDesc; /*! Memory descriptor of the information page. */ POS_LOCK hInfoPageLock; /*! Lock guarding access to information page. 
*/ -#if defined(SUPPORT_VALIDATION) && defined(__linux__) - MEM_LEAK_INTERVALS sMemLeakIntervals; /*!< How often certain memory leak types will trigger */ -#endif IMG_HANDLE hThreadsDbgReqNotify; + IMG_UINT32 ui32PDumpBoundDevice; /*!< PDump is bound to the device first connected to */ + ATOMIC_T iNumDriverTasksActive; /*!< Number of device-agnostic tasks active in the server */ + PVRSRV_DRIVER_MODE aeModuleParamDriverMode[PVRSRV_MAX_DEVICES]; /*!< Driver Mode for each device requested at launch */ } PVRSRV_DATA; +/* Function pointer used to invalidate cache between loops in wait/poll for value functions */ +typedef PVRSRV_ERROR (*PFN_INVALIDATE_CACHEFUNC)(const volatile void*, IMG_UINT64, PVRSRV_CACHE_OP); /*! ****************************************************************************** @@ -200,32 +192,23 @@ typedef struct PVRSRV_DATA_TAG @Return PVRSRV_DATA * ******************************************************************************/ PVRSRV_DATA *PVRSRVGetPVRSRVData(void); +PVRSRV_DRIVER_MODE PVRSRVGetVzModeByDevNum(IMG_UINT32 ui32DevNum); -#define PVRSRV_KM_ERRORS (PVRSRVGetPVRSRVData()->ui32DPFErrorCount) +#define PVRSRV_KM_ERRORS ( PVRSRVGetPVRSRVData() ? PVRSRVGetPVRSRVData()->ui32DPFErrorCount : IMG_UINT32_MAX) #define PVRSRV_ERROR_LIMIT_REACHED (PVRSRV_KM_ERRORS == IMG_UINT32_MAX) #define PVRSRV_REPORT_ERROR() do { if (!(PVRSRV_ERROR_LIMIT_REACHED)) { PVRSRVGetPVRSRVData()->ui32DPFErrorCount++; } } while (0) -#define PVRSRV_VZ_MODE_IS(_expr) (DRIVER_MODE_##_expr == PVRSRVGetPVRSRVData()->eDriverMode) -#define PVRSRV_VZ_RETN_IF_MODE(_expr) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) -#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) -#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) -#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) +#define PVRSRV_VZ_MODE_FROM_DEVNODE(pnode) (pnode->psDevConfig->eDriverMode) +#define PVRSRV_VZ_MODE_FROM_DEVINFO(pdevinfo) (pdevinfo->psDeviceNode->psDevConfig->eDriverMode) +#define PVRSRV_VZ_MODE_FROM_DEVCFG(pdevcfg) (pdevcfg->eDriverMode) +#define PVRSRV_VZ_MODE_FROM_DEVID(devid) (PVRSRVGetVzModeByDevNum(devid)) -/*! -****************************************************************************** -@Note The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) - can be an override or non-override 32-bit value. An override value - has the MSB bit set & a non-override value has this MSB bit cleared. - Excluding this MSB bit & interpreting the remaining 31-bit as a - signed 31-bit integer, the mode values are: - [-1 native : 0 host : +1 guest ]. -******************************************************************************/ -#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr) ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31)) -#define PVRSRV_VZ_APPHINT_MODE(_expr) \ - ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \ - !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \ - ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT)0x7FFFFFFF)==(IMG_UINT32)0x1) ? 
DRIVER_MODE_GUEST : \ - ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)) +#define PVRSRV_VZ_MODE_IS(_expr, _struct, dev) (DRIVER_MODE_##_expr == PVRSRV_VZ_MODE_FROM_##_struct(dev)) + +#define PVRSRV_VZ_RETN_IF_MODE(_expr, _struct, dev) do { if ( PVRSRV_VZ_MODE_IS(_expr, _struct, dev)) { return; } } while (0) +#define PVRSRV_VZ_RET_IF_MODE(_expr, _struct, dev, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr, _struct, dev)) { return (_rc); } } while (0) + +#define PVRSRV_VZ_TIME_SLICE_MAX (100UL) typedef struct _PHYS_HEAP_ITERATOR_ PHYS_HEAP_ITERATOR; @@ -234,19 +217,19 @@ typedef struct _PHYS_HEAP_ITERATOR_ PHYS_HEAP_ITERATOR; @Function LMA_HeapIteratorCreate @Description - Creates iterator for traversing physical heap requested by ui32Flags. The + Creates iterator for traversing physical heap requested by ePhysHeap. The iterator will go through all of the segments (a segment is physically contiguous) of the physical heap and return their CPU physical address and size. @Input psDevNode: Pointer to device node struct. - @Input ui32Flags: Find heap that matches flags. + @Input ePhysHeap: Find the matching heap. @Output ppsIter: Pointer to the iterator object. @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise. ******************************************************************************/ PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, - PHYS_HEAP_USAGE_FLAGS ui32Flags, + PVRSRV_PHYS_HEAP ePhysHeap, PHYS_HEAP_ITERATOR **ppsIter); /*! @@ -323,6 +306,8 @@ PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, also used by debug-dumping code, this argument MUST be IMG_FALSE otherwise, we might end up requesting debug-dump in recursion and eventually blow-up call stack. + @Input pfnFwInvalidate : Function pointer to invalidation function used + each loop / poll. This is only used for FWmemctx allocations. @Return PVRSRV_ERROR : ******************************************************************************/ @@ -330,7 +315,8 @@ PVRSRV_ERROR PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode, volatile IMG_UINT32 __iomem *pui32LinMemAddr, IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, - POLL_FLAGS ePollFlags); + POLL_FLAGS ePollFlags, + PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate); /*! ****************************************************************************** @@ -343,12 +329,16 @@ PVRSRV_ERROR PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode, @Input ui32Value : Required value @Input ui32Mask : Mask to be applied before checking against ui32Value + @Input pfnFwInvalidate : Function pointer to invalidation function used + each loop / wait. This is only used for + FWmemctx allocations. @Return PVRSRV_ERROR : ******************************************************************************/ PVRSRV_ERROR PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, IMG_UINT32 ui32Value, - IMG_UINT32 ui32Mask); + IMG_UINT32 ui32Mask, + PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate); /*! ****************************************************************************** @@ -380,16 +370,6 @@ IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig); ******************************************************************************/ IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig); -/*! 
-****************************************************************************** - @Function : PVRSRVSystemSnoopingOfDeviceCache - - @Description : Returns whether the system supports snooping of the device cache - - @Return : IMG_TRUE if the system has device cache snooping -******************************************************************************/ -IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig); - /*! ****************************************************************************** @Function : PVRSRVSystemHasNonMappableLocalMemory @@ -453,8 +433,8 @@ static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui #if defined(SUPPORT_GPUVIRT_VALIDATION) #if defined(EMULATOR) - void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); - void SetTrustedDeviceAceEnabled(void); + void SetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState); + void SetTrustedDeviceAceEnabled(IMG_HANDLE hSysData); #endif #endif @@ -483,14 +463,14 @@ PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout); PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void); /*************************************************************************/ /*! -@Function FindPhysHeapConfig +@Function PVRSRVFindPhysHeapConfig @Description Find Phys Heap Config from Device Config. @Input psDevConfig Pointer to device config. @Input ui32Flags Find heap that matches flags. @Return PHYS_HEAP_CONFIG* Return a config, or NULL if not found. */ /**************************************************************************/ -PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, - PHYS_HEAP_USAGE_FLAGS ui32Flags); +PHYS_HEAP_CONFIG* PVRSRVFindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, + PHYS_HEAP_USAGE_FLAGS ui32Flags); /*************************************************************************/ /*! @Function PVRSRVGetDeviceInstance @@ -501,19 +481,41 @@ PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, PVRSRV_DEVICE_NODE* PVRSRVGetDeviceInstance(IMG_UINT32 ui32Instance); /*************************************************************************/ /*! -@Function PVRSRVGetDeviceInstanceByOSId +@Function PVRSRVGetDeviceInstanceByKernelDevID @Description Return the specified device instance by OS Id. @Input i32OSInstance OS device Id to find @Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found. */ /**************************************************************************/ -PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance); +PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByKernelDevID(IMG_INT32 i32OSInstance); + +/*************************************************************************/ /*! +@Function PVRSRVAcquireInternalID +@Description Returns the lowest free device ID. +@Output pui32InternalID The device ID +@Return PVRSRV_ERROR PVRSRV_OK or an error code +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVAcquireInternalID(IMG_UINT32 *pui32InternalID); + +/*************************************************************************/ /*! +@Function PVRSRVDeviceFreeze +@Description Stops further processing from occurring on the device after the + current work ends and blocks user-mode tasks on the specified + device. Device node is put into POWER-DOWN state. 
+@Input PVRSRV_DEVICE_NODE* Device node +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise a + PVRSRV_ERR_ code on failure +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceFreeze(PVRSRV_DEVICE_NODE *psDevNode); /*************************************************************************/ /*! -@Function PVRSRVDefaultDomainPower -@Description Returns psDevNode->eCurrentSysPowerState +@Function PVRSRVDeviceThaw +@Description Unfreezes a previously frozen device. Restarts work on the + device, if work queues are non-empty, and unblocks any + user-mode tasks on the specified device. @Input PVRSRV_DEVICE_NODE* Device node -@Return PVRSRV_SYS_POWER_STATE System power state tracked internally +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise a + PVRSRV_ERR_ code on failure */ /**************************************************************************/ -PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode); +PVRSRV_ERROR PVRSRVDeviceThaw(PVRSRV_DEVICE_NODE *psDevNode); #endif /* PVRSRV_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_bridge_init.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_bridge_init.h index 750c9816c8ac..b225da6d368e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_bridge_init.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_bridge_init.h @@ -50,4 +50,4 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. PVRSRV_ERROR ServerBridgeInit(void); void ServerBridgeDeInit(void); -#endif /* PVRSRV_BRIDGE_INIT_H */ +#endif /* PVRSRV_BRIDGE_INIT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_cleanup.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_cleanup.h index 9eb454f5e1d1..a197ca9ec908 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_cleanup.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_cleanup.h @@ -44,6 +44,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PVRSRV_CLEANUP_H #include "dllist.h" +#include "device.h" /**************************************************************************/ /*! @Brief CLEANUP_THREAD_FN @@ -110,7 +111,7 @@ typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam); /* Indicates if the timeout on a given item has been reached. 
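PVRSRVDeviceFreeze() and PVRSRVDeviceThaw() declared above are intended to be used as a balanced pair. A minimal sketch of quiescing a device around a maintenance step; the maintenance step itself is a placeholder.

    static PVRSRV_ERROR ExampleQuiesceDevice(PVRSRV_DEVICE_NODE *psDevNode)
    {
        PVRSRV_ERROR eError;

        /* Block new work and user-mode tasks; the device node is put into the
         * POWER-DOWN state. */
        eError = PVRSRVDeviceFreeze(psDevNode);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* ... maintenance carried out while the device is quiescent ... */

        /* Restart queued work and unblock user-mode tasks. */
        return PVRSRVDeviceThaw(psDevNode);
    }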
* _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK */ -#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \ +#define CLEANUP_THREAD_RETRY_TIMEOUT_NOT_REACHED(_item) \ ((_item)->ui32TimeEnd - (_item)->ui32TimeStart >= \ OSClockms() - (_item)->ui32TimeStart) @@ -120,28 +121,84 @@ typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam); #define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \ ((_item)->ui32TimeStart != (_item->ui32TimeEnd)) +#define CLEANUP_TYPE_LIST \ + X(UNDEF) /**/ \ + X(CONNECTION) /**/ \ + X(MMU) /**/ \ + X(OSMEM) /**/ \ + X(PMR) /**/ \ + X(LAST) /**/ \ + +#define CLEANUP_TYPE_ITEM_LABEL_MAX_SIZE 11 /* CONNECTION\0 */ +#define CLEANUP_TYPE_ITEM_DPF " %1.11s : %1.5d" +#define CLEANUP_TYPE_ITEM_DPF_MAX_SIZE CLEANUP_TYPE_ITEM_LABEL_MAX_SIZE+sizeof(" : ")+5+1 + +typedef enum _PVRSRV_CLEANUP_TYPE_ +{ +#define X(_name) PVRSRV_CLEANUP_TYPE_ ## _name, + CLEANUP_TYPE_LIST +#undef X + +} PVRSRV_CLEANUP_TYPE; + +#if defined(CLEANUP_TYPE_STRINGS) + +static const char *const _pszCleanupStrings[] = { +#define X(_name) #_name, + CLEANUP_TYPE_LIST +#undef X +}; + +/*************************************************************************/ /*! +@Function PVRSRVGetCleanupName +@Description Returns the name of a Cleanup Type. + +@Input eCleanupType The enum value of the cleanup type. + +@Return const IMG_CHAR pointer. +*/ /**************************************************************************/ +static inline const IMG_CHAR *PVRSRVGetCleanupName(PVRSRV_CLEANUP_TYPE eCleanupType) +{ + if (eCleanupType < 0 || eCleanupType > PVRSRV_CLEANUP_TYPE_LAST) + { + return "Undefined"; + } + + PVR_ASSERT(sizeof(_pszCleanupStrings[eCleanupType]) < CLEANUP_TYPE_ITEM_LABEL_MAX_SIZE); + + return _pszCleanupStrings[eCleanupType]; +} + +#endif /* CLEANUP_TYPE_STRINGS */ + /* Clean up work item specifics so that the task can be managed by the - * pvr_defer_free cleanup thread in the Server. - */ +* pvr_defer_free cleanup thread in the Server. +*/ typedef struct _PVRSRV_CLEANUP_THREAD_WORK_ { - DLLIST_NODE sNode; /*!< List node used internally by the cleanup + DLLIST_NODE sNode; /*!< List node used internally by the cleanup thread */ - CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to + CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to carry out the deferred cleanup */ - void *pvData; /*!< private data for pfnFree, usually a way back + void *pvData; /*!< private data for pfnFree, usually a way back to the original PVRSRV_CLEANUP_THREAD_WORK* pointer supplied in the call to PVRSRVCleanupThreadAddWork(). */ - IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when + IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when cleanup item has been created. */ - IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry + IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry attempts will be made, item discard and error logged when this is reached. */ - IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be + IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be re-tried when it returns error. */ - IMG_BOOL bDependsOnHW; /*!< Retry again after the RGX interrupt signals - the global event object */ + IMG_BOOL bDependsOnHW; /*!< Don't drop the cleanup task if retry limit + is reached, we could depend on event from + device to continue. 
*/ + PVRSRV_CLEANUP_TYPE eCleanupType;/*!< Type of work item added to queue */ +#if defined(DEBUG) + const char *pszFun; + unsigned int ui32LineNum; +#endif } PVRSRV_CLEANUP_THREAD_WORK; @@ -150,11 +207,33 @@ typedef struct _PVRSRV_CLEANUP_THREAD_WORK_ @Description Add a work item to be called from the cleanup thread -@Input psData : The function pointer and private data for the callback +@Input psDevNode : Pointer to the device node +@Input psData : The function pointer and private data for the + callback + +@Return None +*/ /***************************************************************************/ +#if defined(DEBUG) +#define PVRSRVCleanupThreadAddWork(DEV, DATA) PVRSRVCleanupThreadAddWork_Debug(DEV, DATA, __FILE__, __LINE__) +void PVRSRVCleanupThreadAddWork_Debug(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_CLEANUP_THREAD_WORK *psData, + const char *pszFun, const unsigned int ui32LineNum); +#else +#define PVRSRVCleanupThreadAddWork PVRSRVCleanupThreadAddWork_Int +void PVRSRVCleanupThreadAddWork_Int(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_CLEANUP_THREAD_WORK *psData); +#endif + +/**************************************************************************/ /*! +@Function PVRSRVCleanupThreadWaitForDevice + +@Description Blocking wait for all of the device's items to be cleaned. + +@Input psDevNode : Pointer to the device node @Return None */ /***************************************************************************/ -void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData); +void PVRSRVCleanupThreadWaitForDevice(PVRSRV_DEVICE_NODE *psDevNode); /**************************************************************************/ /*! @Function PVRSRVCleanupThreadGetPid diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_device.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_device.h index 66c13c8ebf3a..6e484a2c625b 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_device.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_device.h @@ -46,7 +46,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
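With the device-aware cleanup queue above, a deferred-free producer fills in the new eCleanupType field and passes the owning device node to PVRSRVCleanupThreadAddWork(), which resolves to the _Debug or _Int variant depending on the build. A minimal sketch; MY_OBJECT, pfnFreeMyObject and the retry count are hypothetical, and OSFreeMem() from the DDK allocator is assumed available.

    typedef struct _MY_OBJECT_
    {
        PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;  /* embedded work item */
        /* ... object payload ... */
    } MY_OBJECT;

    /* Deferred-free callback run by the cleanup thread (CLEANUP_THREAD_FN). */
    static PVRSRV_ERROR pfnFreeMyObject(void *pvParam)
    {
        MY_OBJECT *psObj = pvParam;
        OSFreeMem(psObj);
        return PVRSRV_OK;
    }

    static void ExampleDeferFree(PVRSRV_DEVICE_NODE *psDevNode, MY_OBJECT *psObj)
    {
        psObj->sCleanupWork.pfnFree        = pfnFreeMyObject;
        psObj->sCleanupWork.pvData         = psObj;
        psObj->sCleanupWork.ui32RetryCount = 10;        /* illustrative */
        psObj->sCleanupWork.bDependsOnHW   = IMG_FALSE;
        psObj->sCleanupWork.eCleanupType   = PVRSRV_CLEANUP_TYPE_OSMEM;

        PVRSRVCleanupThreadAddWork(psDevNode, &psObj->sCleanupWork);
    }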
#include "physheap_config.h" #include "pvrsrv_error.h" #include "pvrsrv_memalloc_physheap.h" -#include "pvrsrv_firmware_boot.h" +#include "rgx_firmware_boot.h" #include "rgx_fwif_km.h" #include "servicesext.h" #include "cache_ops.h" @@ -66,9 +66,10 @@ typedef enum _DRIVER_MODE_ /* Do not use these enumerations directly, to query the current driver mode, use the PVRSRV_VZ_MODE_IS() macro */ - DRIVER_MODE_NATIVE = -1, - DRIVER_MODE_HOST = 0, - DRIVER_MODE_GUEST + DRIVER_MODE_NATIVE = 0, + DRIVER_MODE_HOST = 1, + DRIVER_MODE_GUEST = 2, + DRIVER_MODE_DEFAULT = 3, } PVRSRV_DRIVER_MODE; typedef enum @@ -170,18 +171,24 @@ typedef void IMG_BOOL); -#if defined(SUPPORT_TRUSTED_DEVICE) - -typedef struct _PVRSRV_TD_FW_PARAMS_ +typedef struct _PVRSRV_FW_PARAMS_ { const void *pvFirmware; IMG_UINT32 ui32FirmwareSize; + const void *pvSignature; + IMG_UINT32 ui32SignatureSize; PVRSRV_FW_BOOT_PARAMS uFWP; -} PVRSRV_TD_FW_PARAMS; +} PVRSRV_FW_PARAMS; + +typedef PVRSRV_ERROR +(*PFN_PREPARE_FW_IMAGE)(IMG_HANDLE hSysData, + PVRSRV_FW_PARAMS *psFWParams); + +#if defined(SUPPORT_TRUSTED_DEVICE) typedef PVRSRV_ERROR (*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData, - PVRSRV_TD_FW_PARAMS *psTDFWParams); + PVRSRV_FW_PARAMS *psTDFWParams); typedef struct _PVRSRV_TD_POWER_PARAMS_ { @@ -207,7 +214,9 @@ typedef PVRSRV_ERROR #endif /* defined(SUPPORT_TRUSTED_DEVICE) */ #if defined(SUPPORT_GPUVIRT_VALIDATION) -typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); +typedef void (*PFN_SYS_INIT_FIREWALL)(IMG_HANDLE hSysData, + IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); #endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG_ @@ -306,12 +315,6 @@ struct _PVRSRV_DEVICE_CONFIG_ PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead; #endif - /*! - *! Callback to handle memory budgeting. Can be used to reject allocations - *! over a certain size (optional). - */ - PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize; - /*! *! Callback to perform host CPU cache maintenance. Might be needed for *! architectures which allow extensions such as RISC-V (optional). @@ -319,6 +322,14 @@ struct _PVRSRV_DEVICE_CONFIG_ PFN_SYS_DEV_HOST_CACHE_MAINTENANCE pfnHostCacheMaintenance; IMG_BOOL bHasPhysicalCacheMaintenance; + /*! + *! Callback to prepare FW image after it has been loaded. This may + *! be used to separate a signature/header from the firmware proper, + *! potentially modifying pvFirmware and ui32FirmwareSize to point to the + *! actual firmware to be loaded. + */ + PFN_PREPARE_FW_IMAGE pfnPrepareFWImage; + #if defined(SUPPORT_TRUSTED_DEVICE) /*! *! Callback to send FW image and FW boot time parameters to the trusted @@ -360,13 +371,13 @@ struct _PVRSRV_DEVICE_CONFIG_ */ IMG_BOOL bDevicePA0IsValid; +#if defined(SUPPORT_GPUVIRT_VALIDATION) /*! - *! Function to initialize System-specific virtualization. If not supported - *! this should be a NULL reference. Only present if - *! SUPPORT_GPUVIRT_VALIDATION is defined. + *! System-specific function to initialise firewall mechanism needed to + *! validate the correctness of memory accesses tagged with OSIDs. + *! Used for hardware validation of virtualization features only. */ -#if defined(SUPPORT_GPUVIRT_VALIDATION) - PFN_SYS_DEV_VIRT_INIT pfnSysDevVirtInit; + PFN_SYS_INIT_FIREWALL pfnSysInitFirewall; #endif /*! 
@@ -403,16 +414,21 @@ struct _PVRSRV_DEVICE_CONFIG_ IMG_BOOL bHasDma; /*! - *! Device clock&reset + *! ES770x clk&reset required + */ + struct clk *aclk; + struct clk *gray_clk; + struct clk *cfg_clk; + struct reset_control *rstc_axi; + struct reset_control *rstc_cfg; + struct reset_control *rstc_gray; + struct reset_control *rstc_jones; + struct reset_control *rstc_spu; + + /*! + *! DriverMode required */ - struct clk *aclk; - struct clk *gray_clk; - struct clk *cfg_clk; - struct reset_control *rstc_axi; - struct reset_control *rstc_cfg; - struct reset_control *rstc_gray; - struct reset_control *rstc_jones; - struct reset_control *rstc_spu; + PVRSRV_DRIVER_MODE eDriverMode; }; #endif /* PVRSRV_DEVICE_H*/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_firmware_boot.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/rgx_firmware_boot.h similarity index 93% rename from drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_firmware_boot.h rename to drivers/gpu/drm/img/img-volcanic/services/server/include/rgx_firmware_boot.h index 14a196d2767c..b0a14683e1a3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/pvrsrv_firmware_boot.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/rgx_firmware_boot.h @@ -39,11 +39,12 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /***************************************************************************/ -#ifndef PVRSRV_FIRMWARE_BOOT_H -#define PVRSRV_FIRMWARE_BOOT_H +#ifndef RGX_FIRMWARE_BOOT_H +#define RGX_FIRMWARE_BOOT_H #include "img_types.h" -#include "rgx_fwif_shared.h" +#include "rgx_common.h" +#include "powervr/mem_types.h" #define TD_MAX_NUM_MIPS_PAGETABLE_PAGES (4U) @@ -72,6 +73,9 @@ typedef union _PVRSRV_FW_BOOT_PARAMS_ struct { + IMG_DEV_VIRTADDR sFWCodeDevVAddr; + IMG_DEV_VIRTADDR sFWDataDevVAddr; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; @@ -84,4 +88,4 @@ typedef union _PVRSRV_FW_BOOT_PARAMS_ } PVRSRV_FW_BOOT_PARAMS; -#endif /* PVRSRV_FIRMWARE_BOOT_H */ +#endif /* RGX_FIRMWARE_BOOT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/ri_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/ri_server.h index 69266b8b5d89..a8bcd9f32039 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/ri_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/ri_server.h @@ -63,16 +63,17 @@ PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR); PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, IMG_PID ui32Owner); -PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, +PVRSRV_ERROR RIWriteMEMDESCEntryKM(void* psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + PMR *psPMR, IMG_UINT32 ui32TextBSize, const IMG_CHAR *psz8TextB, IMG_UINT64 uiOffset, IMG_UINT64 uiSize, - IMG_BOOL bIsImport, - IMG_BOOL bIsSuballoc, + PVRSRV_MEMALLOCFLAGS_T uiFlags, RI_HANDLE *phRIHandle); -PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, +PVRSRV_ERROR RIWriteProcListEntryKM(void* psConnection, PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32TextBSize, const IMG_CHAR *psz8TextB, @@ -101,10 +102,14 @@ PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, IMG_DEV_VIRTADDR *psDevVAddr); #endif +void RIConnectionClosed(void* psConnection); + +PVRSRV_ERROR RIDeleteEntriesForPID(IMG_PID pid); + IMG_BOOL RIGetListEntryKM(IMG_PID pid, IMG_HANDLE **ppHandle, IMG_CHAR 
**ppszEntryString); -IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType); +IMG_INT32 RITotalAllocProcessUnlocked(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType); #endif /* RI_SERVER_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/scp.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/scp.h deleted file mode 100644 index f70afe70949d..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/scp.h +++ /dev/null @@ -1,241 +0,0 @@ -/**************************************************************************/ /*! -@File -@Title Software Command Processor header -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Defines the interface for the software command processor -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /***************************************************************************/ - -#ifndef SCP_H -#define SCP_H - -#include "img_types.h" -#include "img_defs.h" -#include "pvrsrv_error.h" -#include "sync_server.h" - - -typedef struct _SCP_CONTEXT_ SCP_CONTEXT; /*!< Opaque handle to a software command processor context */ - -typedef IMG_BOOL (*SCPReady)(void *pvReadyData); -typedef void (*SCPDo)(void *pvReadyData, void *pvCompleteData); - -/*************************************************************************/ /*! 
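The ES770x clock and reset handles added to PVRSRV_DEVICE_CONFIG above are plain Linux clk/reset-controller references, so the system layer would normally populate them from the device tree at probe time. A minimal sketch using the standard devm_* helpers; the consumer names ("aclk", "cfg_clk", "axi") and the subset of handles shown are assumptions, not taken from this patch.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/reset.h>

    static int SysAcquireClocksAndResets(struct device *dev,
                                         PVRSRV_DEVICE_CONFIG *psDevConfig)
    {
        psDevConfig->aclk = devm_clk_get(dev, "aclk");
        if (IS_ERR(psDevConfig->aclk))
            return PTR_ERR(psDevConfig->aclk);

        psDevConfig->cfg_clk = devm_clk_get(dev, "cfg_clk");
        if (IS_ERR(psDevConfig->cfg_clk))
            return PTR_ERR(psDevConfig->cfg_clk);

        psDevConfig->rstc_axi = devm_reset_control_get_exclusive(dev, "axi");
        if (IS_ERR(psDevConfig->rstc_axi))
            return PTR_ERR(psDevConfig->rstc_axi);

        /* ... gray_clk and the remaining reset lines acquired the same way ... */
        return 0;
    }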
-@Function SCPCreate - -@Description Create a software command processor - -@Input psDevNode Device node pointer - -@Input ui32CCBSizeLog2 Log2 of the CCB size - -@Output ppsContext Pointer to SCP context created - -@Return PVRSRV_OK if the software command processor was created -*/ -/*****************************************************************************/ -PVRSRV_ERROR SCPCreate(PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 ui32CCBSizeLog2, - SCP_CONTEXT **ppsContext); - -/*************************************************************************/ /*! -@Function SCPAllocCommand - -@Description Allocate space in the software command processor and return - the data pointers for the callback data. - - Once any command ready data and command complete have been setup - the command can be submitted for processing by calling - SCPSubmitCommand. - - When any fences the command has have been meet then the command - ready callback will be called with the command ready data. - Once the command has completed the command complete callback will - be called with the command complete data. - -@Input psSCPContext Context to allocate from - -@Input ui32SyncPrimCount Number of Sync Prim operations - -@Input papsSync Pointer to array of pointers to server syncs - -@Input iAcquireFence The fence that must be signalled - before the command will be actioned - -@Input pfnCommandReady Callback to call if the command is ready - -@Input pfnCommandDo Callback to the function to run - -@Input ui32ReadyDataSize Size of command ready data to allocate in bytes - -@Input pfnCommandComplete Callback to call when the command has completed - -@Input ui32CompleteDataSize Size of command complete data to allocate - -@Output ppvReadyData Pointer to memory allocated for command - ready callback data - -@Output ppvCompleteData Pointer to memory allocated for command - complete callback data - -@Input iReleaseFenceTimeline SW Timeline on which the release fence - should be created - -@Output piReleaseFence The release fence that is signalled - when the command has been released - -@Return PVRSRV_OK if the allocate was successful -*/ -/*****************************************************************************/ -PVRSRV_ERROR SCPAllocCommand(SCP_CONTEXT *psSCPContext, - PVRSRV_FENCE iAcquireFence, - SCPReady pfnCommandReady, - SCPDo pfnCommandDo, - size_t ui32ReadyDataByteSize, - size_t ui32CompleteDataByteSize, - void **ppvReadyData, - void **ppvCompleteData, - PVRSRV_TIMELINE iReleaseFenceTimeline, - PVRSRV_FENCE *piReleaseFence); - -/*************************************************************************/ /*! -@Function SCPSubmitCommand - -@Description Submit a command for processing. We don't actually try to - run the command in this call as it might not be valid to do - from the same thread that this function is being called from - -@Input psSCPContext Context to allocate on which to submit - the command - -@Return PVRSRV_OK if the command was submitted -*/ -/*****************************************************************************/ -PVRSRV_ERROR SCPSubmitCommand(SCP_CONTEXT *psContext); - - -/*************************************************************************/ /*! -@Function SCPRun - -@Description Run the software command processor to see if any commands are - now ready. 
- -@Input psSCPContext Context to process - -@Return PVRSRV_OK if the software command processor was run -*/ -/*****************************************************************************/ -PVRSRV_ERROR SCPRun(SCP_CONTEXT *psContext); - -/*************************************************************************/ /*! -@Function SCPCommandComplete - -@Description Complete a command which the software command processor - has previously issued. - Note: Commands _MUST_ be completed in order - -@Input psSCPContext Context to process - -@Input bIgnoreFences Do not respect any fence checks. - -@Return PVRSRV_OK if the software command processor was run -*/ -/*****************************************************************************/ -void SCPCommandComplete(SCP_CONTEXT *psContext, - IMG_BOOL bIgnoreFences); - -/*************************************************************************/ /*! -@Function SCPFlush - -@Description Flush the software command processor. - -@Input psSCPContext Context to process - -@Return PVRSRV_OK if all commands have been completed, otherwise - PVRSRV_ERROR_RETRY -*/ -/*****************************************************************************/ -PVRSRV_ERROR SCPFlush(SCP_CONTEXT *psContext); - -/*************************************************************************/ /*! -@Function SCPHasPendingCommand - -@Description Check the software command processor for pending commands. - -@Input psContext Context to process - -@Return IMG_TRUE if there is at least one pending command - IMG_FALSE if there are no pending commands -*/ -/*****************************************************************************/ -IMG_BOOL SCPHasPendingCommand(SCP_CONTEXT *psContext); - -/*************************************************************************/ /*! -@Function SCPDumpStatus - -@Description Dump the status of the provided software command processor. - -@Input psSCPContext Context to dump - -@Input pfnDumpDebugPrintf Debug print function - -@Return None -*/ -/*****************************************************************************/ -void SCPDumpStatus(SCP_CONTEXT *psContext, - DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, - void *pvDumpDebugFile); - -/*************************************************************************/ /*! -@Function SCPDestroy - -@Description Destroy a software command processor. - -@Input psSCPContext Context to destroy - -@Return None -*/ -/*****************************************************************************/ -void SCPDestroy(SCP_CONTEXT *psContext); - - -#endif /* SCP_H */ - -/****************************************************************************** - End of file (queue.h) -******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/srvcore.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/srvcore.h index 0483b0aff56a..8d1385f618d4 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/srvcore.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/srvcore.h @@ -53,6 +53,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgx_bridge.h" #endif +#if defined(DEBUG_BRIDGE_KM) PVRSRV_ERROR CopyFromUserWrapper(CONNECTION_DATA *psConnection, IMG_UINT32 ui32DispatchTableEntry, @@ -65,6 +66,28 @@ CopyToUserWrapper(CONNECTION_DATA *psConnection, void __user *pvDest, void *pvSrc, IMG_UINT32 ui32Size); +#else +FORCE_INLINE PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); + return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); +} +FORCE_INLINE PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); + return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); +} +#endif IMG_INT DummyBW(IMG_UINT32 ui32DispatchTableEntry, @@ -85,6 +108,10 @@ typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY arguments before calling into srvkm proper */ POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired before calling the above wrapper */ + + IMG_UINT32 ui32InBufferSize; /*!< The expected size of the in buffer given by the client */ + IMG_UINT32 ui32OutBufferSize; /*!< The expected size of the out buffer given by the client */ + #if defined(DEBUG_BRIDGE_KM) const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */ @@ -118,7 +145,9 @@ _SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, BridgeWrapperFunction pfFunction, const IMG_CHAR *pszFunctionName, POS_LOCK hBridgeLock, - const IMG_CHAR* pszBridgeLockName); + const IMG_CHAR* pszBridgeLockName, + IMG_UINT32 ui32InBufferSize, + IMG_UINT32 ui32OutBufferSize); void UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index); @@ -126,9 +155,14 @@ UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, /* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ #define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\ - hBridgeLock) \ - _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\ - (POS_LOCK)hBridgeLock, #hBridgeLock) + hBridgeLock, ui32InBufferSize, ui32OutBufferSize) \ + do \ + { \ + static_assert((ui32InBufferSize) <= PVRSRV_MAX_BRIDGE_IN_SIZE, "Bridge input buffer is too small for bridge function: " #pfFunction); \ + static_assert((ui32OutBufferSize) <= PVRSRV_MAX_BRIDGE_OUT_SIZE, "Bridge output buffer is too small for bridge function: " #pfFunction); \ + _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\ + (POS_LOCK)hBridgeLock, #hBridgeLock, ui32InBufferSize, ui32OutBufferSize); \ + } while (0) #define DISPATCH_TABLE_GAP_THRESHOLD 5 @@ -209,7 +243,7 @@ PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, - IMG_UINT32 *ui32MemoryStats); + IMG_UINT64 *pui64MemoryStats); static INLINE PVRSRV_ERROR DestroyServerResource(const SHARED_DEV_CONNECTION hConnection, @@ -222,6 +256,17 @@ PVRSRV_ERROR DestroyServerResource(const SHARED_DEV_CONNECTION hConnection, return pfnDestroyCall(GetBridgeHandle(hConnection), hResource); } +/*************************************************************************/ /*! 
+@Function PVRSRVBlockIfFrozen +@Description Puts caller into a blocking wait (using event-objects) if the + specified device is in a FROZEN state. Routine completes and + returns control to the caller once the underlying device state + clears its FROZEN state. +@Input psDeviceNode Device Node reference +@Return Nothing. Execution blocks if device is frozen. +*/ /**************************************************************************/ +void PVRSRVBlockIfFrozen(PVRSRV_DEVICE_NODE *psDeviceNode); + #endif /* SRVCORE_H */ /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/srvkm.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/srvkm.h index 1ca4ee807a0a..29418764daa5 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/srvkm.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/srvkm.h @@ -72,13 +72,13 @@ void PVRSRVCommonDriverDeInit(void); @Description Creates and initialises a common layer Services device node for an OS native device. First stage device discovery. @Input pvOSDevice OS native device -@Input i32OsDeviceID A unique identifier which helps recognise this +@Input i32KernelDeviceID A unique identifier which helps recognise this Device in the UM space provided by the OS. @Output ppsDeviceNode Points to the new device node on success @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise */ /**************************************************************************/ PVRSRV_ERROR -PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32OsDeviceID, +PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32KernelDeviceID, struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); /*************************************************************************/ /*! @@ -96,9 +96,8 @@ PVRSRV_ERROR PVRSRVCommonDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceN @Function PVRSRVCommonDeviceDestroy @Description Destroys a PVR Services device node. @Input psDeviceNode Device node to destroy -@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise */ /**************************************************************************/ -PVRSRV_ERROR +void PVRSRVCommonDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); /****************** @@ -111,7 +110,7 @@ exits by its own means (break, return, goto, etc.) Example of usage: -LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) +LOOP_UNTIL_TIMEOUT_US(MAX_HW_TIME_US) { if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) { @@ -120,7 +119,7 @@ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -} END_LOOP_UNTIL_TIMEOUT(); +} END_LOOP_UNTIL_TIMEOUT_US(); -----------------------------------------------------------------------------*/ @@ -129,17 +128,17 @@ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) * necessary when preemption is enabled. */ /* PRQA S 3411,3431 12 */ /* critical format, leave alone */ -#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ +#define LOOP_UNTIL_TIMEOUT_US(TIMEOUT_US) \ {\ IMG_UINT32 uiOffset, uiStart, uiCurrent; \ IMG_INT32 iNotLastLoop; \ for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ - ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ + ((uiCurrent - uiStart + uiOffset) < (TIMEOUT_US)) || iNotLastLoop--; \ uiCurrent = OSClockus(), \ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \ uiStart = uiCurrent < uiStart ? 
0 : uiStart) -#define END_LOOP_UNTIL_TIMEOUT() \ +#define END_LOOP_UNTIL_TIMEOUT_US() \ } #endif /* SRVKM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/sync_checkpoint.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/sync_checkpoint.h index 33c26f420862..3c3846d7f0cd 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/sync_checkpoint.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/sync_checkpoint.h @@ -84,6 +84,7 @@ typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(PPVRSRV_DEVICE_NODE typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); +typedef void (*PFN_SYNC_CHECKPOINT_NOHW_SIGNAL_EXPORT_FENCE_FN)(PVRSRV_FENCE fence_to_signal); typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs); #if defined(PDUMP) @@ -91,6 +92,10 @@ typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE IMG_UINT32 *puiNumCheckpoints, PSYNC_CHECKPOINT **papsCheckpoints); #endif +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_EXPORT_FENCE_RESOLVE_FN)(PVRSRV_FENCE iExportFence, + PSYNC_CHECKPOINT_CONTEXT checkpoint_context, + PSYNC_CHECKPOINT *checkpoint_handle); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_EXPORT_FENCE_ROLLBACK_FN)(PVRSRV_FENCE iExportFence); #define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 @@ -101,12 +106,15 @@ typedef struct PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; + PFN_SYNC_CHECKPOINT_NOHW_SIGNAL_EXPORT_FENCE_FN pfnNoHWSignalExpFence; PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; IMG_CHAR pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; #if defined(PDUMP) PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; #endif + PFN_SYNC_CHECKPOINT_EXPORT_FENCE_RESOLVE_FN pfnExportFenceResolve; + PFN_SYNC_CHECKPOINT_EXPORT_FENCE_ROLLBACK_FN pfnExportFenceRollback; } PFN_SYNC_CHECKPOINT_STRUCT; PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); @@ -243,7 +251,7 @@ SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSync /*************************************************************************/ /*! @Function SyncCheckpointSignalNoHW -@Description Signal the synchronisation checkpoint in NO_HARWARE build +@Description Signal the synchronisation checkpoint in NO_HARDWARE build @Input psSyncCheckpoint The synchronisation checkpoint to signal @@ -425,6 +433,51 @@ SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, IMG_UINT64 *puiFenceUID, PDUMP_FLAGS_T ui32PDumpFlags); +/*************************************************************************/ /*! +@Function SyncCheckpointResolveExportFence + +@Description Resolve an export fence, returning the sync checkpoint + that fence contains. + This function in turn calls a function provided by the + sync implementation. 
+ +@Input hExportFence The export fence to be resolved + +@Input psSyncCheckpointContext The context in which to create the + new sync checkpoint for the export fence + +@Output ppsSyncCheckpoint The sync checkpoint the fence + contains + +@Return PVRSRV_OK if a valid fence was provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointResolveExportFence(PVRSRV_FENCE hExportFence, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PSYNC_CHECKPOINT *ppsSyncCheckpoint, + PDUMP_FLAGS_T ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointRollbackExportFence + +@Description Rollback an export fence, freeing the sync checkpoint + that fence was assigned. + This function in turn calls a function provided by the + sync implementation. + +@Input hExportFence The export fence to be rolled back + +@Return PVRSRV_OK if a valid fence was provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointRollbackExportFence(PVRSRV_FENCE hExportFence); + /*************************************************************************/ /*! @Function SyncCheckpointCreateFence @@ -570,6 +623,27 @@ SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem); PVRSRV_ERROR SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData); +/*************************************************************************/ /*! +@Function SyncCheckpointNoHWSignalExportFence + +@Description Called by the DDK in a NO_HARDWARE build only. + After syncs have been manually signalled by the DDK, this + function is called to allow the OS native sync implementation + to signal the export fence (as the usual callback notification + of signalled checkpoints is not supported for NO_HARDWARE). + This function in turn calls a function provided by the + OS native sync implementation. + +@Input iExportFenceToSignal The export fence to signal. + +@Return PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function, otherwise + PVRSRV_OK. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointNoHWSignalExportFence(PVRSRV_FENCE iExportFenceToSignal); + /*************************************************************************/ /*! @Function SyncCheckpointDumpInfoOnStalledUFOs @@ -663,4 +737,47 @@ PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence); #endif -#endif /* SYNC_CHECKPOINT_H */ +/*************************************************************************/ /*! +@Function SyncCheckpointCommonDeviceIDs + +@Description Determine if the DeviceIDs referenced by the checkpoint context + and device reference are to the same physical device. + +@Input psSyncContext Handle to the synchronisation checkpoint context + +@Input hDevRef Handle to the device reference to check against + +@Return IMG_TRUE if devices refer to the same device, or if one of the + parameters is NULL. + IMG_FALSE if the two references are to different physical + devices. 
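SyncCheckpointResolveExportFence() and SyncCheckpointRollbackExportFence() above are meant to be used as a pair: resolve to obtain the checkpoint to attach to a submission, then roll back if that submission later fails so the checkpoint assigned to the export fence is not leaked. A minimal sketch of that pattern; ExampleSubmit() is hypothetical and the error-path structure is an assumption based on the descriptions above.

    /* Hypothetical submission step that consumes the resolved checkpoint. */
    static PVRSRV_ERROR ExampleSubmit(PSYNC_CHECKPOINT psCheckpoint);

    static PVRSRV_ERROR ExampleAttachExportFence(PVRSRV_FENCE hExportFence,
                                                 PSYNC_CHECKPOINT_CONTEXT psSyncCtx)
    {
        PSYNC_CHECKPOINT psCheckpoint = NULL;
        PVRSRV_ERROR eError;

        eError = SyncCheckpointResolveExportFence(hExportFence, psSyncCtx,
                                                  &psCheckpoint, PDUMP_FLAGS_NONE);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        eError = ExampleSubmit(psCheckpoint);
        if (eError != PVRSRV_OK)
        {
            /* Free the checkpoint assigned to the export fence. */
            (void) SyncCheckpointRollbackExportFence(hExportFence);
        }

        return eError;
    }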
+*/ +/*****************************************************************************/ +IMG_BOOL SyncCheckpointCommonDeviceIDs(PSYNC_CHECKPOINT_CONTEXT psSyncContext, + IMG_HANDLE hDevRef); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetCounters + +@Description Return the Current In Use and Max In Use Sync Checkpoint + counters for the specified sync context. + +@Input psDevNode Handle to the device-node + +@Output puiInUse Number of Sync CPs currently in-use + +@Output puiMax Maximum number of Sync CPs allocated + +@Output puiXDInUse Number of X-D Sync CPs currently in-use + +@Output puiXDMax Maximum number of X-D Sync CPs used + +@Return PVRSRV_OK if a valid sync checkpoint context is passed. +*/ +/*****************************************************************************/ +PVRSRV_ERROR SyncCheckpointGetCounters(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 *puiInUse, + IMG_UINT32 *puiMax, + IMG_UINT32 *puiXDInUse, + IMG_UINT32 *puiXDMax); +#endif /* SYNC_CHECKPOINT_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/tlintern.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/tlintern.h index c3edce6b8cd1..114f90f98548 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/tlintern.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/tlintern.h @@ -94,6 +94,8 @@ typedef struct _TL_STREAM_ void (*pfOnReaderOpenCallback)(void *pvArg); /*!< Optional on reader connect callback */ void *pvOnReaderOpenUserData; /*!< On reader connect user data */ + void (*pfOnReaderCloseCallback)(void *pvArg);/*!< Optional on reader disconnect callback */ + void *pvOnReaderCloseUserData; /*!< On reader disconnect user data */ void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */ void *pvProducerUserData; /*!< Producer callback user data */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/tlstream.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/tlstream.h index 911e720e7cdd..aa237bba4d79 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/tlstream.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/tlstream.h @@ -181,6 +181,7 @@ typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream, IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser); typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg); +typedef void (*TL_STREAM_ONREADERCLOSECB)(void *pvArg); /*************************************************************************/ /*! @Function TLAllocSharedMemIfNull @@ -220,6 +221,10 @@ TLFreeSharedMem(IMG_HANDLE hStream); opens this stream, may be null. @Input pvOnReaderOpenUD Optional user data for pfOnReaderOpenCB, may be null. + @Input pfOnReaderCloseCB Optional callback called when a client + closes this stream, may be null. + @Input pvOnReaderCloseUD Optional user data for pfOnReaderCloseCB, + may be null. @Input pfProducerCB Optional callback, may be null. @Input pvProducerUD Optional user data for callback, may be null. 
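SyncCheckpointGetCounters() declared above exposes the per-device in-use and maximum sync-checkpoint counts, which is mainly of interest to debug output. A minimal sketch of a caller printing them, assuming the PVR_LOG() debug macro is available in the caller's context.

    static void ExampleDumpCheckpointCounters(PPVRSRV_DEVICE_NODE psDevNode)
    {
        IMG_UINT32 uiInUse = 0, uiMax = 0, uiXDInUse = 0, uiXDMax = 0;

        if (SyncCheckpointGetCounters(psDevNode, &uiInUse, &uiMax,
                                      &uiXDInUse, &uiXDMax) == PVRSRV_OK)
        {
            PVR_LOG(("Sync CPs: %u in use (max %u), X-D: %u in use (max %u)",
                     uiInUse, uiMax, uiXDInUse, uiXDMax));
        }
    }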
@Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or string name @@ -239,6 +244,8 @@ TLStreamCreate(IMG_HANDLE *phStream, IMG_UINT32 ui32StreamFlags, TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, void *pvOnReaderOpenUD, + TL_STREAM_ONREADERCLOSECB pfOnReaderCloseCB, + void *pvOnReaderCloseUD, TL_STREAM_SOURCECB pfProducerCB, void *pvProducerUD); @@ -584,7 +591,7 @@ IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream); @Function TLStreamResetProducerByteCount @Description Reset the producer byte counter on the specified stream. @Input hStream Stream handle. - @Input IMG_UINT32 Value to reset counter to, often 0. + @Input ui32Value Value to reset counter to, often 0. @Return PVRSRV_OK Success. @Return PVRSRV_ERROR_STREAM_MISUSE Success but the read and write positions did not match, @@ -594,6 +601,18 @@ IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream); PVRSRV_ERROR TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value); +/*************************************************************************/ /*! + @Function TLStreamGetMaxTransfer + @Description Obtain the maximum number of bytes that can be submitted to + a given stream. + @Input uiXferSize Requested transfer size from producer. + @Input hConsumerStream Stream handle for consumer. + @Return IMG_UINT32 Amount of data that can be submitted to + the consumer stream. +*/ /**************************************************************************/ +IMG_UINT32 +TLStreamGetMaxTransfer(IMG_UINT32 uiXferSize, IMG_HANDLE hConsumerStream); + #endif /* TLSTREAM_H */ /***************************************************************************** End of file (tlstream.h) diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/tutils_km.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/tutils_km.h deleted file mode 100644 index a5f4356681bd..000000000000 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/tutils_km.h +++ /dev/null @@ -1,174 +0,0 @@ -/*************************************************************************/ /*! -@File tutils_km.h -@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Kernel services functions for calls to tutils (testing utils) - layer in the server -@License Dual MIT/GPLv2 - -The contents of this file are subject to the MIT license as set out below. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -Alternatively, the contents of this file may be used under the terms of -the GNU General Public License Version 2 ("GPL") in which case the provisions -of GPL are applicable instead of those above. - -If you wish to allow use of your version of this file only under the terms of -GPL, and not to allow others to use your version of this file under the terms -of the MIT license, indicate your decision by deleting the provisions above -and replace them with the notice and other provisions required by GPL as set -out in the file called "GPL-COPYING" included in this distribution. 
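TLStreamGetMaxTransfer() added below lets a producer clamp a pending write to what the consumer stream can currently accept rather than failing the whole transfer. A minimal sketch of that clamping step; how the remainder is retried is left to the caller.

    /* Returns the number of bytes the caller should submit now; any remainder
     * would be resubmitted on a later pass. */
    static IMG_UINT32 ExampleClampTransfer(IMG_UINT32 uiWantedBytes,
                                           IMG_HANDLE hConsumerStream)
    {
        return TLStreamGetMaxTransfer(uiWantedBytes, hConsumerStream);
    }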
If you do -not delete the provisions above, a recipient may use your version of this file -under the terms of either the MIT license or GPL. - -This License is also included in this distribution in the file called -"MIT-COPYING". - -EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ /**************************************************************************/ -#ifndef TUTILS_KM_H -#define TUTILS_KM_H - -#include "img_defs.h" -#include "img_types.h" -#include "pvrsrv_error.h" -#include "connection_server.h" -#include "device.h" -#include "pvrsrv_sync_km.h" - - -PVRSRV_ERROR ServerTestIoctlKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 uiCmd, - IMG_PBYTE uiIn1, - IMG_UINT32 uiIn2, - IMG_UINT32* puiOut1, - IMG_UINT32* puiOut2); - -PVRSRV_ERROR PowMonTestIoctlKM(IMG_UINT32 uiCmd, - IMG_UINT32 uiIn1, - IMG_UINT32 uiIn2, - IMG_UINT32 *puiOut1, - IMG_UINT32 *puiOut2); - -PVRSRV_ERROR SyncCheckpointTestIoctlKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 uiCmd, - IMG_UINT32 uiIn1, - IMG_UINT32 uiIn2, - const IMG_CHAR *pszInName, - IMG_UINT32 *puiOut1, - IMG_UINT32 *puiOut2, - IMG_UINT8 *puiOut3); - -IMG_EXPORT -PVRSRV_ERROR DevmemIntAllocHostMemKM(IMG_DEVMEM_SIZE_T ui32Size, - PVRSRV_MEMALLOCFLAGS_T uiFlags, - IMG_UINT32 ui32LableLength, - const IMG_CHAR *pszAllocLabel, - PMR **ppsPMR); - -PVRSRV_ERROR DevmemIntFreeHostMemKM(PMR *psPMR); - -IMG_EXPORT -PVRSRV_ERROR PowerTestIoctlKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - IMG_UINT32 uiCmd, - IMG_UINT32 uiIn1, - IMG_UINT32 uiIn2, - IMG_UINT32 *puiOut1, - IMG_UINT32 *puiOut2); - -PVRSRV_ERROR TestIOCTLSyncFbFenceSignalPVR(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - void *psFence); - -PVRSRV_ERROR TestIOCTLSyncFbFenceCreatePVR(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_UINT32 uiNameLength, - const IMG_CHAR *pszName, - PVRSRV_TIMELINE iTL, - PVRSRV_FENCE *piOutFence); - -PVRSRV_ERROR TestIOCTLSyncFbFenceResolvePVR(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_FENCE iFence); -PVRSRV_ERROR TestIOCTLSyncFbSWTimelineAdvance(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_TIMELINE iSWTl); - -PVRSRV_ERROR TestIOCTLSyncFbSWFenceCreate(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_TIMELINE iTl, - IMG_UINT32 uiFenceNameLength, - const IMG_CHAR *pszFenceName, - PVRSRV_FENCE *piFence); - - - -PVRSRV_ERROR TestIOCTLSyncSWTimelineFenceCreateKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_TIMELINE sTimeline, - IMG_UINT32 uiNameLength, - const IMG_CHAR *pszFenceName, - PVRSRV_FENCE *psOutFence); - -PVRSRV_ERROR TestIOCTLSyncSWTimelineAdvanceKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_TIMELINE sTimeline); - -PVRSRV_ERROR TestIOCTLIsTimelineValidKM(PVRSRV_TIMELINE sTimeline, - IMG_BOOL *bResult); - -PVRSRV_ERROR TestIOCTLIsFenceValidKM(PVRSRV_FENCE sFence, - IMG_BOOL *bResult); - -PVRSRV_ERROR 
TestIOCTLSyncCheckpointResolveFenceKM(CONNECTION_DATA * psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - PVRSRV_FENCE hFence, - IMG_UINT32 *pui32NumSyncCheckpoints); - -PVRSRV_ERROR TestIOCTLSyncCheckpointCreateFenceKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDevNode, - IMG_CHAR *pszFenceName, - PVRSRV_TIMELINE hTimeline, - PVRSRV_FENCE *phOutFence, - IMG_UINT64 *puiUpdateFenceUID); - -PVRSRV_ERROR TestIOCTLWriteByteKM(IMG_BYTE ui8WriteData); - -PVRSRV_ERROR TestIOCTLReadByteKM(IMG_BYTE *pui8ReadData); - -typedef IMG_UINT32 DI_CONTEXT; -PVRSRV_ERROR TestIOCTLHandleArray2CreateKM(DI_CONTEXT **ppsTestResources); -PVRSRV_ERROR TestIOCTLHandleArray10CreateKM(DI_CONTEXT **ppsTestResources); -PVRSRV_ERROR TestIOCTLHandleCleanupDestroy(DI_CONTEXT *psTestResource); -PVRSRV_ERROR TestIOCTLHandleArray2CreateCPKM(DI_CONTEXT **ppsTestResources); -PVRSRV_ERROR TestIOCTLHandleCleanupDestroyCP(DI_CONTEXT *psTestResource); -PVRSRV_ERROR TestIOCTLHandleArray2CreatePPKM(CONNECTION_DATA *psConnection, - PVRSRV_DEVICE_NODE *psDeviceNode, - DI_CONTEXT **ppsTestResources); -PVRSRV_ERROR TestIOCTLHandleArray2CreateLUKM(DI_CONTEXT *psLookedup, - DI_CONTEXT **ppsTestResources); -PVRSRV_ERROR TestIOCTLHandleArrayNCreate(IMG_UINT32 ui32NumResourcesRequested, - IMG_UINT32 *pui32NumResourcesCreated, - DI_CONTEXT **ppsTestResources); -PVRSRV_ERROR TestIOCTLHandleArrayNCreateCP(IMG_UINT32 ui32NumResourcesRequested, - IMG_UINT32 *pui32NumResourcesCreated, - DI_CONTEXT **ppsTestResources); - -#endif /* TUTILS_KM_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_impl.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_impl.h index 45ef3fc86c12..928a3f6bc9ac 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_impl.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_impl.h @@ -48,18 +48,37 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "img_types.h" #include "pvrsrv_error.h" +#include "pvrsrv_device.h" typedef enum _VMM_CONF_PARAM_ { - VMM_CONF_PRIO_OSID0 = 0, - VMM_CONF_PRIO_OSID1 = 1, - VMM_CONF_PRIO_OSID2 = 2, - VMM_CONF_PRIO_OSID3 = 3, - VMM_CONF_PRIO_OSID4 = 4, - VMM_CONF_PRIO_OSID5 = 5, - VMM_CONF_PRIO_OSID6 = 6, - VMM_CONF_PRIO_OSID7 = 7, - VMM_CONF_HCS_DEADLINE = 8 + VMM_CONF_PRIO_DRV0 = 0, + VMM_CONF_PRIO_DRV1 = 1, + VMM_CONF_PRIO_DRV2 = 2, + VMM_CONF_PRIO_DRV3 = 3, + VMM_CONF_PRIO_DRV4 = 4, + VMM_CONF_PRIO_DRV5 = 5, + VMM_CONF_PRIO_DRV6 = 6, + VMM_CONF_PRIO_DRV7 = 7, + VMM_CONF_HCS_DEADLINE = 8, + VMM_CONF_ISOLATION_GROUP_DRV0 = 9, + VMM_CONF_ISOLATION_GROUP_DRV1 = 10, + VMM_CONF_ISOLATION_GROUP_DRV2 = 11, + VMM_CONF_ISOLATION_GROUP_DRV3 = 12, + VMM_CONF_ISOLATION_GROUP_DRV4 = 13, + VMM_CONF_ISOLATION_GROUP_DRV5 = 14, + VMM_CONF_ISOLATION_GROUP_DRV6 = 15, + VMM_CONF_ISOLATION_GROUP_DRV7 = 16, + VMM_CONF_TIME_SLICE_DRV0 = 17, + VMM_CONF_TIME_SLICE_DRV1 = 18, + VMM_CONF_TIME_SLICE_DRV2 = 19, + VMM_CONF_TIME_SLICE_DRV3 = 20, + VMM_CONF_TIME_SLICE_DRV4 = 21, + VMM_CONF_TIME_SLICE_DRV5 = 22, + VMM_CONF_TIME_SLICE_DRV6 = 23, + VMM_CONF_TIME_SLICE_DRV7 = 24, + VMM_CONF_TIME_SLICE_INTERVAL = 25, + VMM_CONF_VZ_CONNECTION_COOLDOWN_PERIOD = 26, } VMM_CONF_PARAM; /* @@ -135,19 +154,19 @@ typedef struct _VMM_PVZ_CONNECTION_ /* Corresponding server side entries to handle guest PVZ calls NOTE: - - Additional PVZ function ui32OSID parameter - - OSID determination is responsibility of VM manager - - Actual OSID value must be supplied by VM manager + - Additional PVZ function ui32DriverID parameter + - Driver ID determination is responsibility of VM manager + - Actual Driver ID value must be supplied by VM manager - This can be done either in client/VMM/host side - Must be done before host pvz function(s) are called - - Host pvz function validates incoming OSID values + - Host pvz function validates incoming Driver ID values */ - PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID, + PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID, IMG_UINT64 ui64Size, IMG_UINT64 ui64PAddr); - PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID, + PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); } sServerFuncTab; @@ -157,10 +176,10 @@ typedef struct _VMM_PVZ_CONNECTION_ information to the host; these events may in turn be forwarded to the firmware */ - PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID, + PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); - PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID, + PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, @@ -179,7 +198,9 @@ typedef struct _VMM_PVZ_CONNECTION_ connection to the host. @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code ******************************************************************************/ -PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection); -void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection); +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig); +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig); #endif /* VMM_IMPL_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_pvz_server.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_pvz_server.h index be5e3f4f4bdb..02e2683c0c19 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_pvz_server.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/vmm_pvz_server.h @@ -57,7 +57,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ******************************************************************************/ PVRSRV_ERROR -PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, +PvzServerMapDevPhysHeap(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID, IMG_UINT64 ui64Size, IMG_UINT64 ui64PAddr); @@ -70,7 +70,7 @@ PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ******************************************************************************/ PVRSRV_ERROR -PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, +PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); /*! @@ -82,7 +82,7 @@ PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ******************************************************************************/ PVRSRV_ERROR -PvzServerOnVmOnline(IMG_UINT32 ui32OSID, +PvzServerOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); /*! @@ -96,10 +96,10 @@ PvzServerOnVmOnline(IMG_UINT32 ui32OSID, the GPU and it is safe to remove the memory for such VM. @Return PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason the FW is taking too long to clean-up the resources of the - OSID. Otherwise, a PVRSRV_ERROR code. + DriverID. Otherwise, a PVRSRV_ERROR code. ******************************************************************************/ PVRSRV_ERROR -PvzServerOnVmOffline(IMG_UINT32 ui32OSID, +PvzServerOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); /*! diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vm.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vm.h index 80e82d94ee30..ee4a7fb5b3a7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vm.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vm.h @@ -46,11 +46,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
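The PvzServer* entry points above now carry a DriverID rather than an OSID; the VM manager is responsible for determining that ID for the calling guest before forwarding the request, and the host side validates it. A minimal sketch of VM-manager glue for two of the calls; both handler names and the surrounding dispatch are hypothetical.

    static PVRSRV_ERROR ExampleOnGuestOnline(IMG_UINT32 ui32DriverID,
                                             IMG_UINT32 ui32DevID)
    {
        /* ui32DriverID was assigned to this guest by the VM manager. */
        return PvzServerOnVmOnline(ui32DriverID, ui32DevID);
    }

    static PVRSRV_ERROR ExampleOnGuestMapHeap(IMG_UINT32 ui32DriverID,
                                              IMG_UINT32 ui32DevID,
                                              IMG_UINT64 ui64Size,
                                              IMG_UINT64 ui64PAddr)
    {
        return PvzServerMapDevPhysHeap(ui32DriverID, ui32DevID, ui64Size, ui64PAddr);
    }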
#include "vmm_impl.h" -bool IsVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID); +bool IsVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); -PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID); +PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); -PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID); +PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue, diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vmm_pvz.h b/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vmm_pvz.h index 0e548d232326..03d5525936b8 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vmm_pvz.h +++ b/drivers/gpu/drm/img/img-volcanic/services/server/include/vz_vmm_pvz.h @@ -56,15 +56,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. be a hyper-call or cross-VM call @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ******************************************************************************/ -PVRSRV_ERROR PvzConnectionInit(void); -void PvzConnectionDeInit(void); +PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig); +void PvzConnectionDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig); /*! ******************************************************************************* @Function PvzConnectionAcquire() and PvzConnectionRelease() @Description These are to acquire/release a handle to the VM manager para-virtz connection to make a pvz call; on the client, use it - it to make the actual pvz call and on the server handler / + to make the actual pvz call and on the server handler / VM manager, use it to complete the processing for the pvz call or make a VM manager to host pvzbridge call @Return VMM_PVZ_CONNECTION* on success. Otherwise NULL diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem.c index 64dd12f26572..c032094ff0b3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem.c @@ -55,7 +55,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "client_mm_bridge.h" #include "client_cache_bridge.h" #include "services_km.h" -#include "pvrsrv_memallocflags_internal.h" #if defined(PDUMP) #if defined(__KERNEL__) @@ -80,7 +79,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "rgxdevice.h" #include "pvr_ricommon.h" #include "pvrsrv_apphint.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "srvcore.h" #if defined(__linux__) #include "linux/kernel.h" @@ -110,7 +109,7 @@ IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) IMG_DEV_PHYADDR sDevAddr; IMG_BOOL bValid; - PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; + PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]; IMG_DEV_PHYADDR sHeapAddr; eError = PhysHeapGetDevPAddr(psPhysHeap, &sHeapAddr); @@ -132,7 +131,7 @@ IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) eError = PMRLockSysPhysAddresses(psPMR); PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddr", fail); - eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid); + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid, DEVICE_USE); if (eError != PVRSRV_OK) { PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr"); @@ -159,15 +158,11 @@ IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) static INLINE PVRSRV_MEMALLOCFLAGS_T DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, PVRSRV_MEMALLOCFLAGS_T uiFlags) { -#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY) +#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK) /* * Override the requested memory flags of FW allocations only, * non-FW allocations pass-through unmodified. * - * On fully coherent platforms: - * - We upgrade uncached, CPU-only cached or GPU-only cached to - * full coherency. This gives caching improvements for free. - * * On ace-lite platforms: * - If the allocation is not CPU cached, then there is nothing * for the GPU to snoop regardless of the GPU cache setting. @@ -179,25 +174,22 @@ DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, PVRSRV_ME * All other platforms: * - Do not modify the allocation flags. */ - if (PVRSRV_CHECK_FW_MAIN(uiFlags)) - { - PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection; - if (PVRSRVSystemSnoopingOfDeviceCache(psDevNode->psDevConfig) && - PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) - { - /* Clear existing flags, mark the allocation as fully coherent. */ - uiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK); - uiFlags |= PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; - } - else if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) && - (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) && - PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && - psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE) + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection; + + if (psDevNode->pvDevice != NULL && PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, GPU_CPU_COHERENCY)) + { + if (PVRSRV_CHECK_FW_MAIN(uiFlags)) { - /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. */ - uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK; - uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT; + if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) && + (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) && + PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && + psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE) + { + /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. 
*/ + uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK; + uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT; + } } } #else @@ -308,126 +300,72 @@ DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc, IMG_UINT32 *pauiFreePageIndices, SPARSE_MEM_RESIZE_FLAGS uiSparseFlags) { - PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; - DEVMEM_IMPORT *psImport = psMemDesc->psImport; + PVRSRV_ERROR eError; + DEVMEM_IMPORT *psImport; SHARED_DEV_CONNECTION hDevConnection; IMG_HANDLE hPMR; - IMG_HANDLE hSrvDevMemHeap; - POS_LOCK hLock; - IMG_DEV_VIRTADDR sDevVAddr; - IMG_CPU_VIRTADDR pvCpuVAddr; + DEVMEM_HEAP *psHeap; + IMG_HANDLE hReservation; +#ifdef PVRSRV_NEED_PVR_ASSERT DEVMEM_PROPERTIES_T uiProperties; +#endif /* PVRSRV_NEED_PVR_ASSERT */ - if (NULL == psImport) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__)); - goto e0; - } + PVR_ASSERT(psMemDesc != NULL); + PVR_ASSERT(psMemDesc->psImport != NULL); + psImport = psMemDesc->psImport; hDevConnection = psImport->hDevConnection; hPMR = psImport->hPMR; - hLock = psImport->hLock; - sDevVAddr = psImport->sDeviceImport.sDevVAddr; - pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr; - - if (NULL == hDevConnection) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__)); - goto e0; - } - - if (NULL == hPMR) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__)); - goto e0; - } + psHeap = psImport->sDeviceImport.psHeap; + hReservation = psImport->sDeviceImport.hReservation; - if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__)); - goto e0; - } - - if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr)) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__)); - goto e0; - } + PVR_ASSERT(hDevConnection != NULL); + PVR_ASSERT(hPMR != NULL); + PVR_ASSERT(psHeap != NULL); + PVR_ASSERT(BITMASK_ANY(uiSparseFlags, SPARSE_RESIZE_BOTH)); + PVR_ASSERT(hReservation != LACK_OF_RESERVATION_POISON); +#ifdef PVRSRV_NEED_PVR_ASSERT uiProperties = GetImportProperties(psMemDesc->psImport); - if (uiProperties & DEVMEM_PROPERTIES_SECURE) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Secure buffers currently do not support sparse changes", - __func__)); - eError = PVRSRV_ERROR_INVALID_PARAMS; - goto e0; - } - - if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: This memory descriptor doesn't support sparse changes", - __func__)); - eError = PVRSRV_ERROR_INVALID_REQUEST; - goto e0; - } - -#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE - if (psMemDesc->sCPUMemDesc.ui32RefCount > 0) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: This memory descriptor is mapped more than once (refcnt: %u)into " - "CPU Address space.\nRelease all CPU maps of this object and retry...", - __func__, psMemDesc->sCPUMemDesc.ui32RefCount)); - eError = PVRSRV_ERROR_OBJECT_STILL_REFERENCED; - goto e0; - } -#endif + PVR_ASSERT(!BITMASK_HAS(uiProperties, DEVMEM_PROPERTIES_SECURE)); + PVR_ASSERT(!BITMASK_HAS(uiProperties, DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)); +#endif /* PVRSRV_NEED_PVR_ASSERT */ - hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap; + PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount == 0); - OSLockAcquire(hLock); + OSLockAcquire(psImport->hLock); eError = BridgeChangeSparseMem(GetBridgeHandle(hDevConnection), - hSrvDevMemHeap, - hPMR, - ui32AllocPageCount, - paui32AllocPageIndices, - ui32FreePageCount, - pauiFreePageIndices, - uiSparseFlags, - psImport->uiFlags, - sDevVAddr, - 
(IMG_UINT64)((uintptr_t)pvCpuVAddr)); + ui32AllocPageCount, + paui32AllocPageIndices, + ui32FreePageCount, + pauiFreePageIndices, + uiSparseFlags, + hReservation); - OSLockRelease(hLock); + OSLockRelease(psImport->hLock); - if (eError != PVRSRV_OK) - { - goto e0; - } + PVR_RETURN_IF_ERROR(eError); - if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) { - BridgeDevicememHistorySparseChange(GetBridgeHandle(psMemDesc->psImport->hDevConnection), - psMemDesc->psImport->hPMR, - psMemDesc->uiOffset, - psMemDesc->sDeviceMemDesc.sDevVAddr, - psMemDesc->uiAllocSize, - psMemDesc->szText, - DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap), - ui32AllocPageCount, - paui32AllocPageIndices, - ui32FreePageCount, - pauiFreePageIndices, - psMemDesc->ui32AllocationIndex, - &psMemDesc->ui32AllocationIndex); + BridgeDevicememHistorySparseChange(GetBridgeHandle(hDevConnection), + hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psHeap), + ui32AllocPageCount, + paui32AllocPageIndices, + ui32FreePageCount, + pauiFreePageIndices, + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); } -e0: - return eError; + return PVRSRV_OK; } static void @@ -443,9 +381,7 @@ SubAllocImportAlloc(RA_PERARENA_HANDLE hArena, RA_LENGTH_T uBaseAlignment, const IMG_CHAR *pszAnnotation, /* returned data */ - RA_BASE_T *puiBase, - RA_LENGTH_T *puiActualSize, - RA_PERISPAN_HANDLE *phImport) + RA_IMPORT *psRAImport) { /* When suballocations need a new lump of memory, the RA calls back here. Later, in the kernel, we must construct a new PMR @@ -553,9 +489,9 @@ SubAllocImportAlloc(RA_PERARENA_HANDLE hArena, psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN; OSLockRelease(psImport->hLock); - *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr; - *puiActualSize = uiSize; - *phImport = psImport; + psRAImport->base = psImport->sDeviceImport.sDevVAddr.uiAddr; + psRAImport->uSize = uiSize; + psRAImport->hPriv = psImport; return PVRSRV_OK; @@ -1010,13 +946,13 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, PVR_ASSERT(uiReservedRegionLength + DEVMEM_HEAP_MINIMUM_SIZE <= uiLength); psHeap = OSAllocMem(sizeof(*psHeap)); - PVR_GOTO_IF_NOMEM(psHeap, eError, e0); + PVR_GOTO_IF_NOMEM(psHeap, eError, HeapAllocError); /* Need to keep local copy of heap name, so caller may free theirs */ ui32pszStrSize = OSStringLength(pszName) + 1; pszStr = OSAllocMem(ui32pszStrSize); - PVR_GOTO_IF_NOMEM(pszStr, eError, e1); - OSStringLCopy(pszStr, pszName, ui32pszStrSize); + PVR_GOTO_IF_NOMEM(pszStr, eError, SubAllocRANameCopyError); + OSStringSafeCopy(pszStr, pszName, ui32pszStrSize); psHeap->pszName = pszStr; psHeap->uiSize = uiLength; @@ -1030,20 +966,30 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, pszName, psCtx); ui32pszStrSize = OSStringLength(aszBuf) + 1; pszStr = OSAllocMem(ui32pszStrSize); - PVR_GOTO_IF_NOMEM(pszStr, eError, e2); - OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); + PVR_GOTO_IF_NOMEM(pszStr, eError, SubAllocRANameError); + OSStringSafeCopy(pszStr, aszBuf, ui32pszStrSize); psHeap->pszSubAllocRAName = pszStr; #if defined(__KERNEL__) if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW) { void *pvAppHintState = NULL; - IMG_UINT32 ui32FirmwarePolicydefault = 0, ui32FirmwarePolicy=0; - OSCreateKMAppHintState(&pvAppHintState); - OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DevMemFWHeapPolicy, + 
IMG_UINT32 ui32FirmwarePolicydefault = PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY; + IMG_UINT32 ui32FirmwarePolicy = PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY; + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DevMemFWHeapPolicy, &ui32FirmwarePolicydefault, &ui32FirmwarePolicy); ui32PolicyVMRA = ui32Policy = ui32FirmwarePolicy; - OSFreeKMAppHintState(pvAppHintState); + OSFreeAppHintState(pvAppHintState); + + /* Flag the change from default setting */ + if (ui32FirmwarePolicy != ui32FirmwarePolicydefault) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: %s: DevMemFWHeapPolicy set to %u, default %u", + __func__, pszStr, + ui32FirmwarePolicy, ui32FirmwarePolicydefault)); + } } #endif @@ -1071,7 +1017,7 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName, /* Subsequent imports: */ ui32Log2Quantum, - RA_LOCKCLASS_2, + RA_LOCKCLASS_3, SubAllocImportAlloc, SubAllocImportFree, (RA_PERARENA_HANDLE) psHeap, @@ -1079,7 +1025,7 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, if (psHeap->psSubAllocRA == NULL) { eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA; - goto e3; + goto SubAllocRACreateError; } psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment; @@ -1122,9 +1068,9 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, if (pszStr == NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; - goto e4; + goto VMRANameError; } - OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); + OSStringSafeCopy(pszStr, aszBuf, ui32pszStrSize); psHeap->pszQuantizedVMRAName = pszStr; psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName, @@ -1134,7 +1080,7 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, ui32PolicyVMRA); if (psHeap->psQuantizedVMRA == NULL) { - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, VMRACreateError); } if (!RA_Add(psHeap->psQuantizedVMRA, @@ -1145,7 +1091,7 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, NULL /* per ispan handle */)) { RA_Delete(psHeap->psQuantizedVMRA); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, VMRACreateError); } psHeap->psCtx = psCtx; @@ -1153,49 +1099,40 @@ DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, /* Create server-side counterpart of Device Memory heap */ eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection), - psCtx->hDevMemServerContext, - uiHeapBlueprintID, - uiHeapIndex, - sBaseAddress, - uiLength, - ui32Log2Quantum, - &hDevMemServerHeap); - PVR_GOTO_IF_ERROR(eError, e6); + psCtx->hDevMemServerContext, + uiHeapBlueprintID, + uiHeapIndex, + &hDevMemServerHeap); + PVR_GOTO_IF_ERROR(eError, ServerHeapCreateError); psHeap->hDevMemServerHeap = hDevMemServerHeap; eError = OSLockCreate(&psHeap->hLock); - PVR_GOTO_IF_ERROR(eError, e7); + PVR_GOTO_IF_ERROR(eError, LockCreateErr); psHeap->psCtx->uiNumHeaps++; *ppsHeapPtr = psHeap; -#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING) - psHeap->psMemDescList = NULL; -#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */ - return PVRSRV_OK; /* error exit paths */ -e7: +LockCreateErr: eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection), psHeap->hDevMemServerHeap); PVR_ASSERT (eError2 == PVRSRV_OK); -e6: - if (psHeap->psQuantizedVMRA) - RA_Delete(psHeap->psQuantizedVMRA); -e5: - if (psHeap->pszQuantizedVMRAName) - OSFreeMem(psHeap->pszQuantizedVMRAName); -e4: +ServerHeapCreateError: + 
RA_Delete(psHeap->psQuantizedVMRA); +VMRACreateError: + OSFreeMem(psHeap->pszQuantizedVMRAName); +VMRANameError: RA_Delete(psHeap->psSubAllocRA); -e3: +SubAllocRACreateError: OSFreeMem(psHeap->pszSubAllocRAName); -e2: +SubAllocRANameError: OSFreeMem(psHeap->pszName); -e1: +SubAllocRANameCopyError: OSFreeMem(psHeap); -e0: +HeapAllocError: PVR_ASSERT(eError != PVRSRV_OK); return eError; } @@ -1211,6 +1148,14 @@ DevmemGetHeapBaseDevVAddr(struct DEVMEM_HEAP_TAG *psHeap, return PVRSRV_OK; } +IMG_INTERNAL DEVMEM_SIZE_T +DevmemGetHeapSize(struct DEVMEM_HEAP_TAG *psHeap) +{ + PVR_RETURN_IF_INVALID_PARAM(psHeap != NULL); + + return psHeap->uiSize; +} + IMG_INTERNAL PVRSRV_ERROR DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, IMG_DEVMEM_SIZE_T *puiSize, @@ -1234,7 +1179,7 @@ DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, { uiAlign = 1ULL << uiLog2Quantum; } - uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1); + uiSize = PVR_ALIGN(uiSize, uiAlign); *puiSize = uiSize; *puiAlign = uiAlign; @@ -1401,7 +1346,7 @@ DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, } #endif - if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + if (uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) { /* Deferred Allocation not supported on SubAllocs*/ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams); @@ -1480,7 +1425,7 @@ DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, * if it is not. * */ OSLockAcquire(psImport->hLock); - if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) && + if (!(uiSize & (IMG_PAGE2BYTES64(psHeap->uiLog2Quantum) - 1)) && (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER)) { psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE; @@ -1585,7 +1530,7 @@ DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, * the allocation gets mapped/unmapped */ CheckAnnotationLength(pszText); - OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + OSStringSafeCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) @@ -1597,10 +1542,12 @@ DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, psMemDesc->szText, psMemDesc->uiOffset, uiAllocatedSize, - IMG_FALSE, - IMG_TRUE, + uiFlags | PVRSRV_MEMALLOCFLAG_RI_SUBALLOC, &(psMemDesc->hRIHandle)); - PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + } } #else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ PVR_UNREFERENCED_PARAMETER (pszText); @@ -1703,7 +1650,7 @@ DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, * the allocation gets mapped/unmapped */ CheckAnnotationLength(pszText); - OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + OSStringSafeCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) @@ -1719,12 +1666,11 @@ DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, "^", psMemDesc->uiOffset, uiSize, - IMG_FALSE, - IMG_FALSE, + uiFlags, &psMemDesc->hRIHandle); PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); } -#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ PVR_UNREFERENCED_PARAMETER (pszText); #endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ @@ -1813,7 +1759,7 @@ DevmemAllocateSparse(SHARED_DEV_CONNECTION 
hDevConnection, * the allocation gets mapped/unmapped */ CheckAnnotationLength(pszText); - OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + OSStringSafeCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); #if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) @@ -1829,12 +1775,11 @@ DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, "^", psMemDesc->uiOffset, uiSize, - IMG_FALSE, - IMG_FALSE, + uiFlags, &psMemDesc->hRIHandle); PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); } -#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ PVR_UNREFERENCED_PARAMETER (pszText); #endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ @@ -2064,8 +2009,7 @@ DevmemImport(SHARED_DEV_CONNECTION hDevConnection, "^", psMemDesc->uiOffset, psMemDesc->psImport->uiSize, - IMG_TRUE, - IMG_TRUE, + uiFlags | PVRSRV_MEMALLOCFLAG_RI_IMPORT | PVRSRV_MEMALLOCFLAG_RI_SUBALLOC, &psMemDesc->hRIHandle); PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); } @@ -2158,7 +2102,7 @@ DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, } /* Don't map memory for deferred allocations */ - if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) { PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); bMap = IMG_FALSE; @@ -2257,7 +2201,7 @@ DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, } /* Don't map memory for deferred allocations */ - if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) { PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); bMap = IMG_FALSE; @@ -2692,8 +2636,7 @@ DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, "^", psMemDesc->uiOffset, psMemDesc->psImport->uiSize, - IMG_TRUE, - IMG_FALSE, + uiFlags | PVRSRV_MEMALLOCFLAG_RI_IMPORT, &(psMemDesc->hRIHandle)); PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); } @@ -2704,7 +2647,7 @@ DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, * to DevicememHistory when the allocation gets mapped/unmapped */ CheckAnnotationLength(pszAnnotation); - OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); + OSStringSafeCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); return PVRSRV_OK; @@ -2738,19 +2681,6 @@ DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, psContext->hDevMemServerContext, psFaultAddress); } -IMG_INTERNAL PVRSRV_ERROR -DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate) -{ - DEVMEM_IMPORT *psImport = psMemDesc->psImport; - return BridgeDevmemFlushDevSLCRange(GetBridgeHandle(psImport->hDevConnection), - psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, - sDevVAddr, - uiSize, - bInvalidate); -} #if defined(RGX_FEATURE_FBCDC) IMG_INTERNAL PVRSRV_ERROR @@ -2791,21 +2721,18 @@ DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap) @Input psContext Memory context the process that would like to be notified about. -@Input ui32PID The PID of the calling process. @Input bRegister If true, register. If false, de-register. @Return PVRSRV_ERROR: PVRSRV_OK on success. 
Otherwise, a PVRSRV_ error code */ /***************************************************************************/ IMG_INTERNAL PVRSRV_ERROR RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, - IMG_UINT32 ui32PID, IMG_BOOL bRegister) { PVRSRV_ERROR eError; eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection), psContext->hDevMemServerContext, - ui32PID, bRegister); if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED) { @@ -2821,3 +2748,7 @@ DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped) { psHeap->bPremapped = IsPremapped; } + +/****************************************************************************** + End of file (devicemem.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_pdump.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_pdump.c index 639f93a17455..c86441217b2a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_pdump.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_pdump.c @@ -76,11 +76,6 @@ DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, IMG_FALSE); PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMem"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL void @@ -101,11 +96,6 @@ DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc, IMG_TRUE); PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMem"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL void @@ -123,11 +113,6 @@ DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, uiPDumpFlags); PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMemValue32"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL void @@ -144,11 +129,6 @@ DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, ui64Value, uiPDumpFlags); PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMemValue64"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL PVRSRV_ERROR @@ -175,11 +155,6 @@ DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, &uiNextSymName); PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpSymbolicAddr"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]); return eError; @@ -203,11 +178,6 @@ DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, uiFileOffset); PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpSaveToFile"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL void @@ -235,11 +205,6 @@ DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, ui32PdumpFlags); PVR_LOG_IF_ERROR(eError, "BridgeDevmemIntPDumpSaveToFileVirtual"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - 
PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL void @@ -271,11 +236,6 @@ DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc, ui32PdumpFlags); PVR_LOG_IF_ERROR(eError, "BridgePDumpDataDescriptor"); - /* If PDump was rejected for this device, suppress silently */ - if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) - { - PVR_ASSERT(eError == PVRSRV_OK); - } } IMG_INTERNAL PVRSRV_ERROR diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_utils.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_utils.c index 2c8d6c142fa2..52bb9c838b52 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_utils.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/devicemem_utils.c @@ -213,10 +213,10 @@ DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap, if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) { PVRSRV_ERROR eErr; - eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection), PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT, OSGetCurrentProcessID()); - PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat"); } #endif goto failSVM; @@ -778,10 +778,10 @@ static PVRSRV_ERROR DevmemReserveVARange(DEVMEM_HEAP *psHeap, (eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)) { PVRSRV_ERROR eErr; - eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection), PVRSRV_DEVICE_STAT_TYPE_INVALID_VIRTMEM, OSGetCurrentProcessID()); - PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat"); } #endif return eError; @@ -807,14 +807,13 @@ PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, RA_BASE_T uiAllocatedAddr; RA_LENGTH_T uiAllocatedSize; IMG_DEV_VIRTADDR sBase; - IMG_HANDLE hReservation; PVRSRV_ERROR eError; IMG_UINT uiAlign; IMG_BOOL bDestroyed = IMG_FALSE; /* Round the provided import alignment to the configured heap alignment */ uiAlign = 1ULL << psHeap->uiLog2ImportAlignment; - uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1); + uiAlign = PVR_ALIGN(psImport->uiAlign, uiAlign); psDeviceImport = &psImport->sDeviceImport; @@ -885,10 +884,10 @@ PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) { PVRSRV_ERROR eErr; - eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection), PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT, OSGetCurrentProcessID()); - PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat"); } #endif PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc); @@ -1014,33 +1013,34 @@ PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, } else { - /* Setup page tables for the allocated VM space */ - eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), - psHeap->hDevMemServerHeap, - sBase, - uiAllocatedSize, - &hReservation); - PVR_GOTO_IF_ERROR(eError, failReserve); - + PVRSRV_MEMALLOCFLAGS_T uiFlags; + uiFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK; if (bMap) { - PVRSRV_MEMALLOCFLAGS_T uiMapFlags; - - uiMapFlags = psImport->uiFlags & 
PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK; + eError = BridgeDevmemIntReserveRangeAndMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap, + sBase, + uiAllocatedSize, + psImport->hPMR, + uiFlags, + &psDeviceImport->hReservation); - /* Actually map the PMR to allocated VM space */ - eError = BridgeDevmemIntMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection), - psHeap->hDevMemServerHeap, - hReservation, - psImport->hPMR, - uiMapFlags, - &psDeviceImport->hMapping); - PVR_GOTO_IF_ERROR(eError, failMap); + psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; + PVR_GOTO_IF_ERROR(eError, failReserve); psDeviceImport->bMapped = IMG_TRUE; } - - psDeviceImport->hReservation = hReservation; + else + { + /* Setup page tables for the allocated VM space */ + eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap, + sBase, + uiAllocatedSize, + uiFlags, + &psDeviceImport->hReservation); + PVR_GOTO_IF_ERROR(eError, failReserve); + } } /* Setup device mapping specific parts of the mapping info */ @@ -1062,12 +1062,6 @@ PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, return PVRSRV_OK; -failMap: - if (!psHeap->bPremapped) - { - BridgeDevmemIntUnreserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), - hReservation); - } failReserve: if (ui64OptionalMapAddress == 0) { @@ -1115,15 +1109,6 @@ IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport) if (!psHeap->bPremapped) { - if (psDeviceImport->bMapped) - { - eError = DestroyServerResource(psImport->hDevConnection, - NULL, - BridgeDevmemIntUnmapPMR, - psDeviceImport->hMapping); - PVR_ASSERT(eError == PVRSRV_OK); - } - eError = DestroyServerResource(psImport->hDevConnection, NULL, BridgeDevmemIntUnreserveRange, diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash.c index 994ae5871f5f..434c6e967bc4 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash.c @@ -696,6 +696,19 @@ HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args) return PVRSRV_OK; } +/*************************************************************************/ /*! +@Function HASH_Count +@Description Retrieve the number of entries in the hash table. +@Input pHash The hash table. +@Return The number of entries. +*/ /**************************************************************************/ +IMG_INTERNAL IMG_UINT32 +HASH_Count(HASH_TABLE *pHash) +{ + return pHash->uCount; +} + + #ifdef HASH_TRACE /*************************************************************************/ /*! @Function HASH_Dump diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash_functions.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash_functions.c new file mode 100644 index 000000000000..517dae297ff8 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/hash_functions.c @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@File +@Title Reusable hash functions for hash.c. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements common hash functions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "hash_functions.h" +#include "img_defs.h" + +/* Declaring function here to avoid dependencies that are introduced by + * including osfunc.h. */ +IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, + size_t uiSize); + +IMG_UINT32 HASH_Djb2_Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + IMG_CHAR *pszStr = pKey; + IMG_UINT32 ui32Hash = 5381, ui32Char; + + PVR_UNREFERENCED_PARAMETER(uKeySize); + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + while ((ui32Char = *pszStr++) != '\0') + { + ui32Hash = ((ui32Hash << 5) + ui32Hash) + ui32Char; + } + + return ui32Hash; +} + +IMG_BOOL HASH_Djb2_Compare(size_t uKeySize, void *pKey1, void *pKey2) +{ + IMG_CHAR *pszKey1 = pKey1, *pszKey2 = pKey2; + + return OSStringNCompare(pszKey1, pszKey2, uKeySize) == 0; +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/htbuffer.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/htbuffer.c index 73822a875229..5f83127a5462 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/htbuffer.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/htbuffer.c @@ -103,95 +103,4 @@ HTBControl( ); } - -/*************************************************************************/ /*! 
-*/ /**************************************************************************/ -static PVRSRV_ERROR -_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, - HTB_LOG_SFids SF, va_list args) -{ -#if defined(__KERNEL__) - IMG_UINT32 i; - IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF); -#if defined(__KLOCWORK__) - IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1]; // Prevent KW False-positive -#else - IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS]; -#endif - - PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS); - ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ? - HTB_LOG_MAX_PARAMS : ui32NumArgs; - - /* unpack var args before sending over bridge */ - for (i=0; i 8 typedef __uint128_t uint128_t; @@ -115,7 +111,7 @@ uint256_t; #error No support for architectures where void* and long are sized differently #endif -#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES +#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES /* Meaningless, and harder to do correctly */ # error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long) typedef unsigned long block_t; @@ -385,65 +381,65 @@ void DeviceMemSetBytes(void *pvDst, unsigned char ui8Value, size_t uSize) } } -#if !defined(__QNXNTO__) /* Ignore Neutrino as it uses strlcpy */ - -#if defined(__KERNEL__) && defined(__linux__) +#if defined(__linux__) && defined(__KERNEL__) +#if defined(DEBUG) /* - * In case of Linux kernel-mode in a debug build, choose the variant - * of StringLCopy that uses strlcpy and logs truncation via a stack dump. - * For Linux kernel-mode in a release build, strlcpy alone is used. + * In case of Linux kernel-mode in a debug build, choose the variant of + * OSStringSafeCopy that uses strscpy and logs truncation via a stack dump. For + * Linux kernel-mode in a release build, strscpy alone is used. */ -#if defined(DEBUG) IMG_INTERNAL -size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +ssize_t OSStringSafeCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) { /* - * Let strlcpy handle any truncation cases correctly. + * Let strscpy handle any truncation cases correctly. * We will definitely get a NUL-terminated string set in pszDest */ - size_t uSrcSize = strlcpy(pszDest, pszSrc, uDataSize); + ssize_t sCopiedCnt = strscpy(pszDest, pszSrc, uDataSize); #if defined(PVR_DEBUG_STRLCPY) /* Handle truncation by dumping calling stack if debug allows */ - if (uSrcSize >= uDataSize) + if (sCopiedCnt < 0) { PVR_DPF((PVR_DBG_WARNING, - "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", - __func__, pszSrc, (long)uDataSize, pszDest)); + "%s: String truncated Src = '<%s>' %zu bytes, Dest = '%s'", + __func__, pszSrc, uDataSize, pszDest)); OSDumpStack(); } #endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ - return uSrcSize; + return sCopiedCnt; } #endif /* defined(DEBUG) */ - -#else /* defined(__KERNEL__) && defined(__linux__) */ +#else /* defined(__linux__) && defined(__KERNEL__) */ /* - * For every other platform, make use of the strnlen and strncpy - * implementation of StringLCopy. + * For every other platform, make use of the strnlen and strncpy implementation + * of OSStringSafeCopy. + * * NOTE: It is crucial to avoid memcpy as this has a hidden side-effect of * dragging in whatever the build-environment flavour of GLIBC is which can * cause unexpected failures for host-side command execution. 
*/ IMG_INTERNAL -size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +ssize_t OSStringSafeCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) { - size_t uSrcSize = strnlen(pszSrc, uDataSize); - - (void)strncpy(pszDest, pszSrc, uSrcSize); - if (uSrcSize == uDataSize) - { - pszDest[uSrcSize-1] = '\0'; - } - else + /* Match the specification of Linux strscpy - if destination size is 0, + * return negative value (-E2BIG to be precise, but it's probably best + * not to use Linux-specific return values, so -1 will do). */ + if (uDataSize && (uDataSize <= SSIZE_MAX)) { - pszDest[uSrcSize] = '\0'; - } + size_t uSrcSize = strnlen(pszSrc, uDataSize); - return uSrcSize; -} + (void)strncpy(pszDest, pszSrc, MIN(uSrcSize + 1, uDataSize)); + if (uSrcSize == uDataSize) + { + pszDest[uSrcSize-1] = '\0'; + return -1; + } -#endif /* defined(__KERNEL__) && defined(__linux__) */ + return (ssize_t)uSrcSize; + } -#endif /* !defined(__QNXNTO__) */ + return -1; +} +#endif /* defined(__linux__) && defined(__KERNEL__) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/ra.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/ra.c index cc7cd9d26ade..d1646b309f6d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/ra.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/ra.c @@ -172,9 +172,17 @@ struct _RA_ARENA_ RA_LENGTH_T uQuantum; /* import interface, if provided */ - PFN_RA_ALLOC pImportAlloc; - - PFN_RA_FREE pImportFree; + union { + void* pImportAlloc; + PFN_RA_IMPORT_ALLOC_SINGLE pImportAllocSingle; + PFN_RA_IMPORT_ALLOC_MULTI pImportAllocMulti; + }; + enum RA_IMPORT_ALLOC_METHOD { + RA_IMPORT_ALLOC_METHOD_SINGLE, + RA_IMPORT_ALLOC_METHOD_MULTI, + } eImportAllocMethod; + + PFN_RA_IMPORT_FREE pImportFree; /* Arbitrary handle provided by arena owner to be passed into the * import alloc and free hooks @@ -208,6 +216,9 @@ struct _RA_ARENA_ }; +static_assert(sizeof(void*) == sizeof(PFN_RA_IMPORT_ALLOC_SINGLE), "pImportAlloc == pImportAllocSingle. Import callbacks must be pointers of the same size."); +static_assert(sizeof(void*) == sizeof(PFN_RA_IMPORT_ALLOC_MULTI), "pImportAlloc == pImportAllocMulti. Import callbacks must be pointers of the same size."); + struct _RA_ARENA_ITERATOR_ { RA_ARENA *pArena; @@ -215,6 +226,12 @@ struct _RA_ARENA_ITERATOR_ IMG_BOOL bIncludeFreeSegments; }; +/* Enum for selecting behaviour of _ConvertAndFree* functions*/ +typedef enum { + CONVERT_AND_FREE = 0, + CONVERT_DONT_FREE = 1 +} RA_CONVERT_AND_FREE_BEHAVIOUR; + static PVRSRV_ERROR _RA_FreeMultiUnlocked(RA_ARENA *pArena, RA_BASE_ARRAY_T aBaseArray, RA_BASE_ARRAY_SIZE_T uiBaseArraySize); @@ -234,9 +251,9 @@ _RA_FreeMultiUnlockedSparse(RA_ARENA *pArena, @Input _uSize - requested allocation size @Input _uflags - allocation flags @Input _uBaseAlignment - Alignment for the returned allocated base -@Input _pBase - receives allocated base -@Output _pActualSize - actual allocation size -@Input _pRef - user reference + + +@Output _psImport - The base and size of the span to insert. @Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails to allocate. 
*/ /**************************************************************************/ @@ -246,18 +263,14 @@ _RequestAllocFail(RA_PERARENA_HANDLE _h, RA_FLAGS_T _uFlags, RA_LENGTH_T _uBaseAlignment, const IMG_CHAR *_pszAnnotation, - RA_BASE_T *_pBase, - RA_LENGTH_T *_pActualSize, - RA_PERISPAN_HANDLE *_phPriv) + RA_IMPORT *_psImport) { PVR_UNREFERENCED_PARAMETER(_h); PVR_UNREFERENCED_PARAMETER(_uSize); - PVR_UNREFERENCED_PARAMETER(_pActualSize); - PVR_UNREFERENCED_PARAMETER(_phPriv); PVR_UNREFERENCED_PARAMETER(_uFlags); PVR_UNREFERENCED_PARAMETER(_uBaseAlignment); - PVR_UNREFERENCED_PARAMETER(_pBase); PVR_UNREFERENCED_PARAMETER(_pszAnnotation); + PVR_UNREFERENCED_PARAMETER(_psImport); return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL; } @@ -316,6 +329,16 @@ pvr_log2(RA_LENGTH_T n) } #endif +static INLINE void _FreeTableLimitBoundsCheck(IMG_UINT32 *uiIndex) +{ + if (*uiIndex >= FREE_TABLE_LIMIT) + { + PVR_DPF((PVR_DBG_ERROR, "Index exceeds FREE_TABLE_LIMIT (1TB), " + "Clamping Index to FREE_TABLE_LIMIT")); + *uiIndex = FREE_TABLE_LIMIT - 1; + } +} + #if defined(RA_VALIDATE) /*************************************************************************/ /*! @@ -367,7 +390,7 @@ _IsInFreeList(RA_ARENA *pArena, BT *pBT) PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); - if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags)) + if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != pBT->uFlags)) { return 0; } @@ -599,7 +622,8 @@ _FreeListInsert(RA_ARENA *pArena, BT *pBT) BT *pBTTemp = NULL; uIndex = pvr_log2(pBT->uSize); - PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + _FreeTableLimitBoundsCheck(&uIndex); + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); pBT->type = btt_free; @@ -692,7 +716,8 @@ _FreeListRemove(RA_ARENA *pArena, BT *pBT) IMG_UINT32 uIndex; uIndex = pvr_log2(pBT->uSize); - PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + _FreeTableLimitBoundsCheck(&uIndex); + PVR_ASSERT(_IsInFreeList(pArena, pBT)); if (pBT->next_free != NULL) @@ -812,6 +837,8 @@ _RemoveResourceSpan(RA_ARENA *pArena, BT *pBT) pBT->is_rightmost) { _SegmentListRemove(pArena, pBT); + pArena->ui64TotalArenaSize -= pBT->uSize; + pArena->ui64FreeArenaSize -= pBT->uSize; pArena->pImportFree(pArena->pImportHandle, pBT->base, pBT->hPriv); OSFreeMem(pBT); @@ -1134,28 +1161,32 @@ _ConvertGhostBaseToReal(RA_ARENA *pArena, } /*************************************************************************/ /*! - * @Function _FreeGhostBasesFromReal + * @Function _ConvertAndFreeStartFromGhost * - * @Description Given a ghost base and size, free the contiguous ghost bases from the - * real base. This has the effect of shrinking the size of the real base. - * If ghost pages remain after the free region, a new Real base will be + * @Description Given a ghost base and size, convert and free contiguous + * ghost bases. This has the effect of shrinking the size of + * the real base holding the range to free. If ghost pages + * remain after the free region, a new Real base will be * created to host them. + * * @Input pArena - The RA Arena to free the Ghost Bases from. * @Input aBaseArray - The array to remove bases from * @Input uiBaseArraySize - The size of the Base array to free from. * @Input uiChunkSize - The chunk size used to generate the Ghost Bases. * @Input ui32GhostBaseIndex - The index into the array of the initial Ghost base to free * @Input ui32FreeCount - The number of Ghost bases to free from the Real base. 
+ * @Input eBehaviour - Specifies if the function should convert and free or only convert. * * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. */ /**************************************************************************/ static PVRSRV_ERROR -_FreeGhostBasesFromReal(RA_ARENA *pArena, - RA_BASE_ARRAY_T aBaseArray, - RA_BASE_ARRAY_SIZE_T uiBaseArraySize, - RA_LENGTH_T uiChunkSize, - IMG_UINT32 ui32GhostBaseIndex, - IMG_UINT32 ui32FreeCount) +_ConvertAndFreeStartFromGhost(RA_ARENA *pArena, + RA_BASE_ARRAY_T aBaseArray, + RA_BASE_ARRAY_SIZE_T uiBaseArraySize, + RA_LENGTH_T uiChunkSize, + IMG_UINT32 ui32GhostBaseIndex, + IMG_UINT32 ui32FreeCount, + RA_CONVERT_AND_FREE_BEHAVIOUR eBehaviour) { PVRSRV_ERROR eError; RA_BASE_T uiRealBase; @@ -1195,17 +1226,20 @@ _FreeGhostBasesFromReal(RA_ARENA *pArena, PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); } - /* Free the region calculated */ - eError = _FreeSingleBaseArray(pArena, - &aBaseArray[ui32GhostBaseIndex], - ui32FreeCount); - PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); + if (eBehaviour == CONVERT_AND_FREE) + { + /* Free the region calculated */ + eError = _FreeSingleBaseArray(pArena, + &aBaseArray[ui32GhostBaseIndex], + ui32FreeCount); + PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); + } return eError; } /*************************************************************************/ /*! - * @Function _ConvertGhostBaseFreeReal + * @Function _ConvertAndFreeStartFromReal * * @Description Used in the case that we want to keep some indices that are ghost pages * but the indices to free start with the real base. In this case we can @@ -1216,15 +1250,17 @@ _FreeGhostBasesFromReal(RA_ARENA *pArena, * @Input aBaseArray - The Base array to free from. * @Input uiChunkSize - The chunk size used to generate the Ghost bases. * @Input uiGhostBaseIndex - The index into the array of the Ghost base to convert. + * @Input eBehaviour - Specifies if the function should convert and free or only convert. * * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. */ /**************************************************************************/ static PVRSRV_ERROR -_ConvertGhostBaseFreeReal(RA_ARENA *pArena, - RA_BASE_ARRAY_T aBaseArray, - RA_LENGTH_T uiChunkSize, - IMG_UINT32 uiRealBaseIndex, - IMG_UINT32 uiGhostBaseIndex) +_ConvertAndFreeStartFromReal(RA_ARENA *pArena, + RA_BASE_ARRAY_T aBaseArray, + RA_LENGTH_T uiChunkSize, + IMG_UINT32 uiRealBaseIndex, + IMG_UINT32 uiGhostBaseIndex, + RA_CONVERT_AND_FREE_BEHAVIOUR eBehaviour) { PVRSRV_ERROR eError; RA_BASE_T uiRealBase = aBaseArray[uiRealBaseIndex]; @@ -1237,18 +1273,22 @@ _ConvertGhostBaseFreeReal(RA_ARENA *pArena, uiChunkSize); PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); - eError = _FreeSingleBaseArray(pArena, - &aBaseArray[uiRealBaseIndex], - uiGhostBaseIndex - uiRealBaseIndex); - PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray"); + if (eBehaviour == CONVERT_AND_FREE) + { + eError = _FreeSingleBaseArray(pArena, + &aBaseArray[uiRealBaseIndex], + uiGhostBaseIndex - uiRealBaseIndex); + PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray"); + } return eError; } /*************************************************************************/ /*! - * @Function _FreeBaseArraySlice + * @Function _ConvertAndFreeBaseArraySlice * - * @Description Free Bases in an Array Slice. + * @Description Convert and maybe free Bases in an Array Slice. + * This function might convert some ghosts into real bases. 
* This function assumes that the slice is within a single Real base alloc. * i.e the uiFreeStartIndex and uiFreeCount remain fully within a single real * base alloc and do not cross into another Real base region. @@ -1259,16 +1299,18 @@ _ConvertGhostBaseFreeReal(RA_ARENA *pArena, * @Input uiChunkSize - The base chunk size used to generate the Ghost bases. * @Input uiFreeStartIndex - The index in the array to start freeing from * @Input uiFreeCount - The number of bases to free. + * @Input eBehaviour - Specifies if the function should convert and free or only convert. * * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. */ /**************************************************************************/ static PVRSRV_ERROR -_FreeBaseArraySlice(RA_ARENA *pArena, - RA_BASE_ARRAY_T aBaseArray, - RA_BASE_ARRAY_SIZE_T uiBaseArraySize, - RA_LENGTH_T uiChunkSize, - IMG_UINT32 uiFreeStartIndex, - IMG_UINT32 uiFreeCount) +_ConvertAndFreeBaseArraySlice(RA_ARENA *pArena, + RA_BASE_ARRAY_T aBaseArray, + RA_BASE_ARRAY_SIZE_T uiBaseArraySize, + RA_LENGTH_T uiChunkSize, + IMG_UINT32 uiFreeStartIndex, + IMG_UINT32 uiFreeCount, + RA_CONVERT_AND_FREE_BEHAVIOUR eBehaviour) { /*3 cases: * Key: () = Region to Free @@ -1310,32 +1352,37 @@ _FreeBaseArraySlice(RA_ARENA *pArena, RA_BASE_IS_REAL(aBaseArray[uiFreeStartIndex + uiFreeCount]) || RA_BASE_IS_INVALID(aBaseArray[uiFreeStartIndex + uiFreeCount])) { - eError = _FreeSingleBaseArray(pArena, - &aBaseArray[uiFreeStartIndex], - uiFreeCount); - PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray"); + if (eBehaviour == CONVERT_AND_FREE) + { + eError = _FreeSingleBaseArray(pArena, + &aBaseArray[uiFreeStartIndex], + uiFreeCount); + PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray"); + } } /* Case 2*/ else { - eError = _ConvertGhostBaseFreeReal(pArena, - aBaseArray, - uiChunkSize, - uiFreeStartIndex, - uiFreeStartIndex + uiFreeCount); - PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); + eError = _ConvertAndFreeStartFromReal(pArena, + aBaseArray, + uiChunkSize, + uiFreeStartIndex, + uiFreeStartIndex + uiFreeCount, + eBehaviour); + PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeStartFromReal"); } } /* Case 3 */ else if (RA_BASE_IS_GHOST(aBaseArray[uiFreeStartIndex])) { - eError = _FreeGhostBasesFromReal(pArena, - aBaseArray, - uiBaseArraySize, - uiChunkSize, - uiFreeStartIndex, - uiFreeCount); - PVR_LOG_RETURN_IF_ERROR(eError, "_FreeGhostBasesFromReal"); + eError = _ConvertAndFreeStartFromGhost(pArena, + aBaseArray, + uiBaseArraySize, + uiChunkSize, + uiFreeStartIndex, + uiFreeCount, + eBehaviour); + PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeStartFromGhost"); } /* Attempt to free an invalid base, this could be a duplicated * value in the free sparse index array */ @@ -1343,7 +1390,7 @@ _FreeBaseArraySlice(RA_ARENA *pArena, { PVR_DPF((PVR_DBG_ERROR, "Attempt to free already free base Index %u", uiFreeStartIndex)); - PVR_ASSERT(!"Attempted double free.") + PVR_ASSERT(!"Attempted double free."); return PVRSRV_ERROR_RA_FREE_INVALID_CHUNK; } @@ -1487,8 +1534,9 @@ _AttemptAllocAligned(RA_ARENA *pArena, index_high = index_low; } - PVR_ASSERT(index_low < FREE_TABLE_LIMIT); - PVR_ASSERT(index_high < FREE_TABLE_LIMIT); + _FreeTableLimitBoundsCheck(&index_high); + _FreeTableLimitBoundsCheck(&index_low); + PVR_ASSERT(index_low <= index_high); if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_BUCKET_MASK) == RA_POLICY_BUCKET_BEST_FIT)) @@ -1754,57 +1802,73 @@ _AttemptAllocAlignedAssured(RA_ARENA *pArena, return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; } +/* 
Used as an inout parameter to reduce required memory allocations. + * The import callbacks return one or more RA_IMPORT which is then converted into a BT*. + * Once a RA_IMPORT is converted to a BT*, it is no longer needed. Therefore, + * the space taken up by the RA_IMPORT can be reused. */ +typedef union { + RA_IMPORT import; + BT *pBT; +} RA_IMPORT_INTERNAL; + + +static_assert(sizeof(RA_IMPORT_INTERNAL) == sizeof(RA_IMPORT), + "An RA_IMPORT[] array must be equivalent to an RA_IMPORT_INTERNAL[] array. " + "This allows RA_IMPORT[] returned by callbacks to be reused internally " + "as BT*[] without leaking scope to the user API."); + +static_assert(offsetof(RA_IMPORT_INTERNAL, pBT) == 0, + "The pBT member must be at offset 0 to match a BT*[]."); + +/* Support importing 16 spans at once before allocating more memory. + * Almost all imports will only require a single span. + * However, if importing multiple spans, shouldn't always require memory allocations. + * Nominal use of multi imports shouldn't require many spans. */ +#define RA_DEFAULT_IMPORTS_BUFFER_SIZE (16 * (sizeof(RA_IMPORT) / sizeof(BT*))) + +static_assert((RA_DEFAULT_IMPORTS_BUFFER_SIZE * sizeof(BT*)) >= sizeof(RA_IMPORT), + "The default must be able to hold at least a single RA_IMPORT."); + +typedef struct { + BT **apsBTs; + IMG_UINT32 uiBTCount; +} BT_PTR_ARRAY; + /*************************************************************************/ /*! -@Function _AttemptImportSpanAlloc -@Description Attempt to Import more memory and create a new span. +@Function _AttemptImportSpanAllocSingle +@Description Attempt to import more memory and create a new span. Function attempts to import more memory from the callback provided at RA creation time, if successful the memory will form a new span in the RA. @Input pArena The arena. @Input uRequestSize The requested allocation size. -@Input uImportMultiplier Import x-times more for future requests if - we have to import new memory. @Input uImportFlags Flags influencing allocation policy. @Input uAlignment The alignment requirements of the allocation Required uAlignment, or 0. Must be a power of 2 if not 0 -@Input pszAnnotation String to describe the allocation -@Output pImportBase Allocated import base - (non-optional, must not be NULL) -@Output pImportSize Allocated import size -@Output pImportBT Allocated import BT -@Return PVRSRV_OK - success +@Input pszAnnotation String to describe the allocation. +@Output psImportInternal The pBT member must be set to the new span. + Unless an error is returned. 
+@Return PVRSRV_OK - Success */ /**************************************************************************/ static PVRSRV_ERROR -_AttemptImportSpanAlloc(RA_ARENA *pArena, - RA_LENGTH_T uRequestSize, - IMG_UINT8 uImportMultiplier, - RA_FLAGS_T uImportFlags, - RA_LENGTH_T uAlignment, - const IMG_CHAR *pszAnnotation, - RA_BASE_T *pImportBase, - RA_LENGTH_T *pImportSize, - BT **pImportBT) +_AttemptImportSpanAllocSingle(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + const IMG_CHAR *pszAnnotation, + RA_IMPORT_INTERNAL *psImportInternal) { - IMG_HANDLE hPriv; + PVRSRV_ERROR eError; RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); + RA_IMPORT *psImport = &psImportInternal->import; BT *pBT; - PVRSRV_ERROR eError; - - *pImportSize = uRequestSize; - - /* apply over-allocation multiplier after all alignment adjustments */ - *pImportSize *= uImportMultiplier; - /* ensure that we import according to the quanta of this arena */ - *pImportSize = PVR_ALIGN(*pImportSize, pArena->uQuantum); - - eError = pArena->pImportAlloc(pArena->pImportHandle, - *pImportSize, uImportFlags, - uAlignment, - pszAnnotation, - pImportBase, pImportSize, - &hPriv); + eError = pArena->pImportAllocSingle(pArena->pImportHandle, + uRequestSize, uImportFlags, + uAlignment, + pszAnnotation, + psImport); if (PVRSRV_OK != eError) { return eError; @@ -1813,12 +1877,12 @@ _AttemptImportSpanAlloc(RA_ARENA *pArena, /* If we successfully import more resource, create a span to * represent it else free the resource we imported. */ - pBT = _InsertResourceSpan(pArena, *pImportBase, *pImportSize, uFlags); + pBT = _InsertResourceSpan(pArena, psImport->base, psImport->uSize, uFlags); if (pBT == NULL) { /* insufficient resources to insert the newly acquired span, so free it back again */ - pArena->pImportFree(pArena->pImportHandle, *pImportBase, hPriv); + pArena->pImportFree(pArena->pImportHandle, psImport->base, psImport->hPriv); PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " "size=0x%llx failed!", __func__, pArena->name, @@ -1828,20 +1892,233 @@ _AttemptImportSpanAlloc(RA_ARENA *pArena, return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED; } - pBT->hPriv = hPriv; - *pImportBT = pBT; + pBT->hPriv = psImport->hPriv; + /* Replace the RA_IMPORT with a BT*. + * To free an import we only need the BT so we can reuse the elements slot in ppsImports. */ + psImportInternal->pBT = pBT; + + pArena->ui64TotalArenaSize += pBT->uSize; + pArena->ui64FreeArenaSize += pBT->uSize; return eError; } -IMG_INTERNAL RA_ARENA * -RA_Create(IMG_CHAR *name, - RA_LOG2QUANTUM_T uLog2Quantum, - IMG_UINT32 ui32LockClass, - PFN_RA_ALLOC imp_alloc, - PFN_RA_FREE imp_free, - RA_PERARENA_HANDLE arena_handle, - RA_POLICY_T ui32PolicyFlags) +/*************************************************************************/ /*! +@Function _AttemptImportSpanAllocMulti +@Description Attempt to import more memory and create new span(s). + Function attempts to import more memory from the callback + provided at RA creation time, if successful the memory + will form at least one new span in the RA. +@Input pArena The arena. +@Input uRequestSize The requested allocation size. +@Input uImportFlags Flags influencing allocation policy. +@Input uAlignment The alignment requirements of the allocation + Required uAlignment, or 0. + Must be a power of 2 if not 0 +@Input pszAnnotation String to describe the allocation. +@Inout puiBTCount The number of elements in *ppsImports. 
The + count will be modified to reflect how many + BT*s have been created. +@Inout ppsImports Pointer to an array of RA_IMPORT_INTERNAL. + If the must be array resized, this will point + to the new array. It will be freed higher up + the callstack if necessary. If successful, + the pBT member must be set to a new span for + every element in the array of length puiBTCount. +@Return PVRSRV_OK - Success +*/ /**************************************************************************/ +static PVRSRV_ERROR +_AttemptImportSpanAllocMulti(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + const IMG_CHAR *pszAnnotation, + IMG_UINT32 *puiImportCount, + RA_IMPORT_INTERNAL **ppsImports) +{ + PVRSRV_ERROR eError; + RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); + IMG_UINT32 i; + + eError = pArena->pImportAllocMulti(pArena->pImportHandle, + uRequestSize, uImportFlags, + uAlignment, + pszAnnotation, + puiImportCount, + (RA_IMPORT**) ppsImports); + PVR_RETURN_IF_ERROR(eError); + + /* An error should be returned by the callback if the count == 0. */ + PVR_ASSERT(*puiImportCount >= 1); + /* An error should be returned by the callback if *ppsImports == NULL. */ + PVR_ASSERT(*ppsImports != NULL); + + /* If we successfully import more resource, create spans to represent it. */ + for (i = 0; i < *puiImportCount; i++) + { + BT *pBT; + RA_IMPORT *psImport = &(*ppsImports)[i].import; + + pBT = _InsertResourceSpan(pArena, psImport->base, psImport->uSize, uFlags); + PVR_GOTO_IF_FALSE(pBT != NULL, release_bts); + + pBT->hPriv = psImport->hPriv; + pArena->ui64TotalArenaSize += pBT->uSize; + pArena->ui64FreeArenaSize += pBT->uSize; + + /* Replace the RA_IMPORT with a BT*. + * To free an import we only need the BT so we can reuse the elements slot in ppsImports. */ + (*ppsImports)[i].pBT = pBT; + } + + return PVRSRV_OK; + +release_bts: + { + /* Free the just imported span that failed to allocate a BT. */ + IMG_UINT32 j; + RA_IMPORT *psImport = &(*ppsImports)[i].import; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " + "request=0x%llx size=0x%llx failed!", __func__, pArena->name, + (unsigned long long)uRequestSize, (unsigned long long)psImport->uSize)); + + /* Insufficient resources to insert the newly acquired span so free it back again. */ + pArena->pImportFree(pArena->pImportHandle, psImport->base, psImport->hPriv); + + /* Remove the previously inserted BTs. */ + for (j = 0; j < i; j++) + { + BT *pBT = (*ppsImports)[j].pBT; + pArena->ui64TotalArenaSize -= pBT->uSize; + pArena->ui64FreeArenaSize -= pBT->uSize; + _FreeBT(pArena, pBT); + } + + return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED; + } +} + +/*************************************************************************/ /*! +@Function _AttemptImportSpanAlloc +@Description Attempt to import more memory and create new span(s). + Function attempts to import more memory from the callback + provided at RA creation time, if successful the memory + will form a new span in the RA. +@Input pArena The arena. +@Input uRequestSize The requested allocation size. +@Input uImportMultiplier Import x-times more for future requests if + we have to import new memory. +@Input uImportFlags Flags influencing allocation policy. +@Input uAlignment The alignment requirements of the allocation + Required uAlignment, or 0. + Must be a power of 2 if not 0 +@Input pszAnnotation String to describe the allocation +@Inout psBTArray A container to a BT*[] and it's length. 
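
The multi-import path above leans on a contract with the arena's import callback: the callback is handed the capacity of the caller's RA_IMPORT array via *puiImportCount, must return at least one populated entry (or an error), and may replace the array with a larger allocation that is freed higher up the callstack. As a rough sketch only, a callback honouring that contract could look like the following; the prototype is inferred from the pImportAllocMulti call site in this patch, and ExampleBackingAlloc is a hypothetical stand-in for a real backing allocator.

/* Sketch of a PFN_RA_IMPORT_ALLOC_MULTI callback. Illustrative only: a real
 * callback may split the request across several RA_IMPORT entries and may
 * grow *ppsImports with OSAllocMem when the capacity passed in through
 * *puiImportCount is too small. */
static PVRSRV_ERROR
ExampleImportMulti(RA_PERARENA_HANDLE hHandle,
                   RA_LENGTH_T uRequestSize,
                   RA_FLAGS_T uFlags,
                   RA_LENGTH_T uAlignment,
                   const IMG_CHAR *pszAnnotation,
                   IMG_UINT32 *puiImportCount,  /* in: array capacity, out: entries used */
                   RA_IMPORT **ppsImports)
{
    RA_IMPORT *psImport = &(*ppsImports)[0];
    PVRSRV_ERROR eError;

    /* ExampleBackingAlloc is hypothetical; it fills base/uSize/hPriv for one span. */
    eError = ExampleBackingAlloc(hHandle, uRequestSize, uFlags, uAlignment,
                                 pszAnnotation, &psImport->base,
                                 &psImport->uSize, &psImport->hPriv);
    if (eError != PVRSRV_OK)
    {
        /* On failure return an error; never return success with a count of 0. */
        return eError;
    }

    *puiImportCount = 1;
    return PVRSRV_OK;
}
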
+ If the array is not large enough, a new array + will be returned and must be freed with OSFreeMem + regardless of the return error code. + On error, the array may be NULL! +@Return PVRSRV_OK - Success +*/ /**************************************************************************/ +static PVRSRV_ERROR +_AttemptImportSpanAlloc(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + IMG_UINT8 uImportMultiplier, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + const IMG_CHAR *pszAnnotation, + BT_PTR_ARRAY *psBTArray) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED; + RA_IMPORT_INTERNAL *psImports; + + /* Interpret the BT*[] as a RA_IMPORT_INTERNAL[]. + * Any extra space is ignored. + * Example: + * |---BT*--|---BT*--|---BT*--|---BT*--|---BT*--|---BT*--|---BT*--| + * |----RA_IMPORT_INTERNAL----|----RA_IMPORT_INTERNAL----|........| + * + * It must be able to hold at least 1 RA_IMPORT_INTERNAL. + * This should never fail if RA_DEFAULT_IMPORTS_BUFFER_SIZE is used. */ + PVR_ASSERT(psBTArray->uiBTCount * sizeof(BT*) >= sizeof(RA_IMPORT_INTERNAL)); + PVR_ASSERT(psBTArray != NULL && psBTArray->apsBTs != NULL); + psImports = (RA_IMPORT_INTERNAL*) psBTArray->apsBTs; + + /* apply over-allocation multiplier after all alignment adjustments */ + uRequestSize *= uImportMultiplier; + /* ensure that we import according to the quanta of this arena */ + uRequestSize = PVR_ALIGN(uRequestSize, pArena->uQuantum); + + switch (pArena->eImportAllocMethod) + { + /* Exactly 1 BT will be created. */ + case RA_IMPORT_ALLOC_METHOD_SINGLE: + { + eError = _AttemptImportSpanAllocSingle(pArena, uRequestSize, uImportFlags, uAlignment, pszAnnotation, &psImports[0]); + PVR_GOTO_IF_ERROR(eError, err_); + psBTArray->uiBTCount = 1; + + /* Reinterpret the RA_IMPORT_INTERNAL[] back into a BT*[]. + * We only care about psBTArray->apsBTs[0] being valid as we return a count of 1. */ + psBTArray->apsBTs = (BT**) psImports; + break; + } + /* 1 or more BTs will be created. */ + case RA_IMPORT_ALLOC_METHOD_MULTI: + { + /* The number of RA_IMPORT_INTERNAL that can be stored using the same space. */ + IMG_UINT32 uiImportCount = (psBTArray->uiBTCount) / (sizeof(RA_IMPORT_INTERNAL) / sizeof(BT*)); + + eError = _AttemptImportSpanAllocMulti(pArena, uRequestSize, uImportFlags, uAlignment, pszAnnotation, &uiImportCount, &psImports); + PVR_LOG_RETURN_IF_ERROR(eError, "_AttemptImportSpanAllocMulti"); + + /* Reinterpret the RA_IMPORT_INTERNAL[] back into a BT*[]. */ + psBTArray->apsBTs = (BT**) psImports; + + /* If a BT* is smaller than an RA_IMPORT_INTERNAL, the array of BT*s will not be tightly packed. + * Therefore, it is not a valid BT*[] and the elements need to be compressed. + * Example: Given the array below + * |----RA_IMPORT_INTERNAL-0--|----RA_IMPORT_INTERNAL-1--|........| + * |--BT*-0-|.................|--BT*-1-|..........................| + * It must be packed into this: + * |--BT*-0-|--BT*-1-|............................................| */ + if (sizeof(BT*) < sizeof(RA_IMPORT_INTERNAL)) + { + IMG_UINT32 i; + /* The first element will always be in the correct place. Skip redundant move. 
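 * (Packing in place is safe here: for every index i the packed slot
 *  apsBTs[i] starts no later in the buffer than the wider RA_IMPORT_INTERNAL
 *  slot it is read from, so the forward copy never overwrites an element
 *  that has not been read yet.)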
*/ + for (i = 1; i < uiImportCount; i++) + { + psBTArray->apsBTs[i] = psImports[i].pBT; + } + } + psBTArray->uiBTCount = uiImportCount; + break; + } + default: + PVR_ASSERT(!"unreachable"); + break; + } + + return eError; + +err_: + if ((void*) psImports != (void*) psBTArray->apsBTs && psImports != NULL) + { + OSFreeMem(psImports); + } + return eError; +} + +static RA_ARENA* +RA_CreateCommon(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + enum RA_IMPORT_ALLOC_METHOD eImportAllocMethod, + void* imp_alloc, + PFN_RA_IMPORT_FREE imp_free, + RA_PERARENA_HANDLE arena_handle, + RA_POLICY_T ui32PolicyFlags) { RA_ARENA *pArena; PVRSRV_ERROR eError; @@ -1873,12 +2150,18 @@ RA_Create(IMG_CHAR *name, goto hash_fail; } - OSStringLCopy(pArena->name, name, RA_MAX_NAME_LENGTH); - pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail; + if (imp_alloc == NULL) { + eImportAllocMethod = RA_IMPORT_ALLOC_METHOD_SINGLE; + imp_alloc = &_RequestAllocFail; + } + + OSStringSafeCopy(pArena->name, name, RA_MAX_NAME_LENGTH); + pArena->eImportAllocMethod = eImportAllocMethod; + pArena->pImportAlloc = imp_alloc; /* import callback union */ pArena->pImportFree = imp_free; pArena->pImportHandle = arena_handle; pArena->pHeadSegment = NULL; - pArena->uQuantum = 1ULL << uLog2Quantum; + pArena->uQuantum = IMG_PAGE2BYTES64(uLog2Quantum); pArena->per_flags_buckets = NULL; pArena->ui32LockClass = ui32LockClass; pArena->ui32PolicyFlags = ui32PolicyFlags; @@ -1897,6 +2180,46 @@ RA_Create(IMG_CHAR *name, return NULL; } +IMG_INTERNAL RA_ARENA * +RA_Create(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PFN_RA_IMPORT_ALLOC_SINGLE imp_alloc, + PFN_RA_IMPORT_FREE imp_free, + RA_PERARENA_HANDLE arena_handle, + RA_POLICY_T ui32PolicyFlags) +{ + return RA_CreateCommon(name, + uLog2Quantum, + ui32LockClass, + RA_IMPORT_ALLOC_METHOD_SINGLE, + imp_alloc, + imp_free, + arena_handle, + ui32PolicyFlags + ); +} + +IMG_INTERNAL RA_ARENA * +RA_CreateMulti(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PFN_RA_IMPORT_ALLOC_MULTI imp_alloc, + PFN_RA_IMPORT_FREE imp_free, + RA_PERARENA_HANDLE arena_handle, + RA_POLICY_T ui32PolicyFlags) +{ + return RA_CreateCommon(name, + uLog2Quantum, + ui32LockClass, + RA_IMPORT_ALLOC_METHOD_MULTI, + imp_alloc, + imp_free, + arena_handle, + ui32PolicyFlags + ); +} + static void _LogRegionCreation(const char *pszMemType, IMG_UINT64 ui64CpuPA, IMG_UINT64 ui64DevPA, @@ -1971,7 +2294,7 @@ IMG_INTERNAL void RA_Delete(RA_ARENA *pArena) { IMG_UINT32 uIndex; - IMG_BOOL bWarn = IMG_TRUE; + IMG_BOOL bPrintWarn = IMG_TRUE; PVR_ASSERT(pArena != NULL); @@ -1992,15 +2315,27 @@ RA_Delete(RA_ARENA *pArena) if (pBT->type != btt_free) { - if (bWarn) +#if !defined(DEBUG) + if (bPrintWarn) { - PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed. 
Arena Name: %s", __func__, pArena->name)); PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__, (unsigned long long)pBT->base, (unsigned long long)pBT->uSize)); PVR_DPF((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__)); - bWarn = IMG_FALSE; + bPrintWarn = IMG_FALSE; + } +#else + if (bPrintWarn) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed. Arena Name: %s", __func__, pArena->name)); + PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); + bPrintWarn = IMG_FALSE; } + PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx flags=0x%llx", __func__, + (unsigned long long)pBT->base, (unsigned long long)pBT->uSize, + (unsigned long long)pBT->uFlags)); +#endif } else { @@ -2119,9 +2454,11 @@ RA_Alloc(RA_ARENA *pArena, bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv); if (!bResult) { - RA_BASE_T uImportBase; - RA_LENGTH_T uImportSize; - BT *pBT = NULL; + BT *apsBTsStatic[RA_DEFAULT_IMPORTS_BUFFER_SIZE]; + BT_PTR_ARRAY sBTs = { + .uiBTCount = ARRAY_SIZE(apsBTsStatic), + .apsBTs = apsBTsStatic, + }; eError = _AttemptImportSpanAlloc(pArena, uSize, @@ -2129,11 +2466,13 @@ RA_Alloc(RA_ARENA *pArena, uImportFlags, uAlignment, pszAnnotation, - &uImportBase, - &uImportSize, - &pBT); + &sBTs); if (eError != PVRSRV_OK) { + if (sBTs.apsBTs != apsBTsStatic) + { + OSFreeMem(sBTs.apsBTs); + } OSLockRelease(pArena->hLock); return eError; } @@ -2141,6 +2480,7 @@ RA_Alloc(RA_ARENA *pArena, bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv); if (!bResult) { + IMG_UINT32 i; PVR_DPF((PVR_DBG_ERROR, "%s: name='%s' second alloc failed!", __func__, pArena->name)); @@ -2150,32 +2490,50 @@ RA_Alloc(RA_ARENA *pArena, of failure, the imported segment may have been used and freed, or left untouched. If the later, we need to return it. */ - _FreeBT(pArena, pBT); + for (i = 0; i < sBTs.uiBTCount; i++) + { + _FreeBT(pArena, sBTs.apsBTs[i]); + } + if (sBTs.apsBTs != apsBTsStatic) + { + OSFreeMem(sBTs.apsBTs); + } OSLockRelease(pArena->hLock); return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; } else { - /* Check if the new allocation was in the span we just added... */ - if (*base < uImportBase || *base > (uImportBase + uImportSize)) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: name='%s' alloc did not occur in the imported span!", - __func__, pArena->name)); + IMG_UINT32 i; + IMG_BOOL bBaseInNewSpans = IMG_FALSE; - /* - Remove the imported span which should not be in use (if it is then - that is okay, but essentially no span should exist that is not used). - */ - _FreeBT(pArena, pBT); + for (i = 0; i < sBTs.uiBTCount; i++) + { + BT *pBT = sBTs.apsBTs[i]; + /* Check if the new allocation was in a span we just added... */ + if (*base >= pBT->base && *base < (pBT->base + pBT->uSize)) + { + bBaseInNewSpans = IMG_TRUE; + break; + } } - else + if (!bBaseInNewSpans) { - pArena->ui64FreeArenaSize += uImportSize; - pArena->ui64TotalArenaSize += uImportSize; + PVR_DPF((PVR_DBG_ERROR, + "%s: name='%s' alloc did not occur in the imported span!", + __func__, pArena->name)); + /* Remove the imported spans which should not be in use (if it is then that is okay). + * No spans should exist and not be used unless added through the import multiplier. 
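
For reference, RA_Create and RA_CreateMulti above are thin wrappers around RA_CreateCommon that differ only in the callback type they accept and the RA_IMPORT_ALLOC_METHOD they record in the arena. A minimal sketch of creating an arena on the multi-import path, assuming the ExampleImportMulti callback sketched earlier; ExampleImportFree and the other Example* names are hypothetical.

/* Illustrative only: create an arena whose spans come from a multi-import
 * callback. RA_CreateMulti, RA_LOCKCLASS_0 and the RA_* types are from this
 * patch; the Example* identifiers are not. */
static RA_ARENA *
ExampleCreateMultiArena(IMG_CHAR *pszName,
                        RA_PERARENA_HANDLE hBackingHandle,
                        RA_POLICY_T uiPolicy)
{
    return RA_CreateMulti(pszName,
                          12,                  /* log2 quantum, here 4KiB */
                          RA_LOCKCLASS_0,
                          ExampleImportMulti,  /* PFN_RA_IMPORT_ALLOC_MULTI */
                          ExampleImportFree,   /* PFN_RA_IMPORT_FREE, hypothetical */
                          hBackingHandle,
                          uiPolicy);
}
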
*/ + for (i = 0; i < sBTs.uiBTCount; i++) + { + _FreeBT(pArena, sBTs.apsBTs[i]); + } } } + if (sBTs.apsBTs != apsBTsStatic) + { + OSFreeMem(sBTs.apsBTs); + } } PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', size=0x%llx, " @@ -2210,8 +2568,8 @@ _RA_AllocMultiUnlocked(RA_ARENA *pArena, "One of the necessary parameters is 0", PVRSRV_ERROR_INVALID_PARAMS); - PVR_ASSERT((uRequestSize & ((1 << uiLog2ChunkSize) - 1)) == 0) - PVR_LOG_RETURN_IF_FALSE((uRequestSize & ((1 << uiLog2ChunkSize) - 1)) == 0, + PVR_ASSERT((uRequestSize & (IMG_PAGE2BYTES64(uiLog2ChunkSize) - 1)) == 0); + PVR_LOG_RETURN_IF_FALSE((uRequestSize & (IMG_PAGE2BYTES64(uiLog2ChunkSize) - 1)) == 0, "Require uiLog2ChunkSize pow 2 & multiple of uRequestSize", PVRSRV_ERROR_INVALID_PARAMS); @@ -2222,15 +2580,12 @@ _RA_AllocMultiUnlocked(RA_ARENA *pArena, PVRSRV_ERROR_INVALID_PARAMS); /* Ensure Base Array is large enough for intended allocation */ - PVR_LOG_RETURN_IF_FALSE(uiBaseArraySize * (1 << uiLog2ChunkSize) >= uRequestSize, + PVR_LOG_RETURN_IF_FALSE(((RA_LENGTH_T)uiBaseArraySize * (IMG_PAGE2BYTES64(uiLog2ChunkSize)) >= uRequestSize), "Not enough array space to store alloc bases", PVRSRV_ERROR_INVALID_PARAMS); PVR_ASSERT(is_arena_valid(pArena)); - /* Must be a power of 2 */ - PVR_ASSERT((uAlignment & (uAlignment - 1)) == 0); - PVR_DPF((PVR_DBG_MESSAGE, "%s: arena='%s', size=0x%llx(0x%llx), " "log2ChunkSize=0x%llx", __func__, pArena->name, @@ -2244,15 +2599,17 @@ _RA_AllocMultiUnlocked(RA_ARENA *pArena, uSize, uiLog2ChunkSize, uFlags, - 1ULL << uiLog2ChunkSize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), aBaseArray, bSparseAlloc, bPhysContig); if (eError) { - RA_BASE_T uImportBase; - RA_LENGTH_T uImportSize; - BT *pBT; + BT *apsBTsStatic[RA_DEFAULT_IMPORTS_BUFFER_SIZE]; + BT_PTR_ARRAY sBTs = { + .uiBTCount = ARRAY_SIZE(apsBTsStatic), + .apsBTs = apsBTsStatic, + }; if (eError == PVRSRV_ERROR_RA_OUT_OF_RESOURCE) { @@ -2280,29 +2637,29 @@ _RA_AllocMultiUnlocked(RA_ARENA *pArena, uSize, uImportMultiplier, uFlags, - 1ULL << uiLog2ChunkSize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), pszAnnotation, - &uImportBase, - &uImportSize, - &pBT); + &sBTs); if (eError != PVRSRV_OK) { + if (sBTs.apsBTs != apsBTsStatic) + { + OSFreeMem(sBTs.apsBTs); + } return eError; } - pArena->ui64FreeArenaSize += uImportSize; - pArena->ui64TotalArenaSize += uImportSize; - eError = _AttemptAllocAlignedAssured(pArena, uSize, uiLog2ChunkSize, uFlags, - 1Ull << uiLog2ChunkSize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), aBaseArray, bSparseAlloc, bPhysContig); if (eError) { + IMG_UINT32 i; PVR_DPF((PVR_DBG_ERROR, "%s: name='%s' second alloc failed!", __func__, pArena->name)); @@ -2311,8 +2668,15 @@ _RA_AllocMultiUnlocked(RA_ARENA *pArena, of failure, the imported segment may have been used and freed, or left untouched. If the later, we need to return it. */ - _FreeBT(pArena, pBT); + for (i = 0; i < sBTs.uiBTCount; i++) + { + _FreeBT(pArena, sBTs.apsBTs[i]); + } + if (sBTs.apsBTs != apsBTsStatic) + { + OSFreeMem(sBTs.apsBTs); + } return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; } #if defined(DEBUG) @@ -2320,47 +2684,54 @@ _RA_AllocMultiUnlocked(RA_ARENA *pArena, * This block of code checks to see if the extra memory we just imported was * used for the second allocation. If we imported memory but did not use it, * it indicates there is a bug in the allocation logic. We can still recover by - * freeing the imported span but we emit an error to signal that there is an + * freeing the imported span(s) but we emit an error to signal that there is an * issue. 
* */ else { - IMG_UINT32 i; + IMG_UINT32 bt_index; + IMG_UINT32 base_index; IMG_BOOL bBasesInNewSpan = IMG_FALSE; - for (i = 0; i < uiBaseArraySize; i++) + for (bt_index = 0; bt_index < sBTs.uiBTCount && !bBasesInNewSpan; bt_index++) { - RA_BASE_T uiBase = RA_BASE_STRIP_GHOST_BIT(aBaseArray[i]); - - /* If the base hasn't been allocated then skip it */ - if (aBaseArray[i] == INVALID_BASE_ADDR) + BT *pBT = sBTs.apsBTs[bt_index]; + for (base_index = 0; base_index < uiBaseArraySize; base_index++) { - continue; - } - - if (uiBase >= uImportBase && - uiBase <= uImportBase + uImportSize) - { - bBasesInNewSpan = IMG_TRUE; + RA_BASE_T uiBase = RA_BASE_STRIP_GHOST_BIT(aBaseArray[base_index]); + + /* If the base hasn't been allocated then skip it */ + if (aBaseArray[base_index] == INVALID_BASE_ADDR) + { + continue; + } + + if (uiBase >= pBT->base && + uiBase <= pBT->base + pBT->uSize) + { + bBasesInNewSpan = IMG_TRUE; + break; + } } } - if (!bBasesInNewSpan) { PVR_DPF((PVR_DBG_ERROR, "%s: name='%s' alloc did not occur in the imported span!", __func__, pArena->name)); - /* - Remove the imported span which should not be in use (if it is then - that is okay, but essentially no span should exist that is not used). - */ - _FreeBT(pArena, pBT); - - pArena->ui64FreeArenaSize -= uImportSize; - pArena->ui64TotalArenaSize -= uImportSize; + /* Remove the imported spans which should not be in use (if it is then that is okay). + * No spans should exist and not be used unless added through the import multiplier. */ + for (bt_index = 0; bt_index < sBTs.uiBTCount; bt_index++) + { + _FreeBT(pArena, sBTs.apsBTs[bt_index]); + } } } #endif + if (sBTs.apsBTs != apsBTsStatic) + { + OSFreeMem(sBTs.apsBTs); + } } PVR_ASSERT(is_arena_valid(pArena)); @@ -2445,7 +2816,7 @@ RA_AllocMultiSparse(RA_ARENA *pArena, if (uiAllocCount == 1) { eError = _RA_AllocMultiUnlocked(pArena, - 1ULL << uiLog2ChunkSize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), uiLog2ChunkSize, uImportMultiplier, uImportFlags, @@ -2479,7 +2850,7 @@ RA_AllocMultiSparse(RA_ARENA *pArena, } eError = _RA_AllocMultiUnlocked(pArena, - (IMG_UINT64) uiConsolidate << uiLog2ChunkSize, + IMG_PAGES2BYTES64(uiConsolidate, uiLog2ChunkSize), uiLog2ChunkSize, uImportMultiplier, uImportFlags, @@ -2502,7 +2873,7 @@ RA_AllocMultiSparse(RA_ARENA *pArena, eFreeError = _RA_FreeMultiUnlockedSparse(pArena, aBaseArray, uiBaseArraySize, - 1ULL << uiLog2ChunkSize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), puiAllocIndices, &i); PVR_LOG_IF_ERROR(eFreeError, "_RA_FreeMultiUnlockedSparse"); @@ -2528,11 +2899,8 @@ static BT *RA_Find_BT_VARange(RA_ARENA *pArena, RA_FLAGS_T uImportFlags) { IMG_PSPLAY_TREE psSplaynode; - BT *pBT = pArena->pHeadSegment; IMG_UINT32 uIndex; - uIndex = pvr_log2 (uRequestSize); - /* Find the splay node associated with these import flags */ psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets); @@ -2541,10 +2909,12 @@ static BT *RA_Find_BT_VARange(RA_ARENA *pArena, return NULL; } + uIndex = pvr_log2(uRequestSize); + /* Find the free Boundary Tag from the bucket that holds the requested range */ while (uIndex < FREE_TABLE_LIMIT) { - pBT = psSplaynode->buckets[uIndex]; + BT *pBT = psSplaynode->buckets[uIndex]; while (pBT) { @@ -2702,8 +3072,6 @@ RA_Free(RA_ARENA *pArena, RA_BASE_T base) { BT *pBT; - PVR_ASSERT(pArena != NULL); - if (pArena == NULL) { PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); @@ -2790,13 +3158,14 @@ _RA_FreeMultiUnlockedSparse(RA_ARENA *pArena, /* Handle case where we only have 1 base to free. 
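
Several 1ULL << uiLog2ChunkSize expressions in this file are replaced by IMG_PAGE2BYTES64(uiLog2ChunkSize), and the uiConsolidate << uiLog2ChunkSize case by IMG_PAGES2BYTES64(uiConsolidate, uiLog2ChunkSize). Judging purely by the expressions they replace, the macros are simple shift helpers along the lines of the sketch below; this is an assumption, and the real definitions live in the driver's shared headers.

/* Assumed shape of the helpers, consistent with the expressions they replace
 * in this patch. Not the actual definitions. */
#define IMG_PAGE2BYTES64(ui32Log2)              ((IMG_UINT64)1 << (ui32Log2))
#define IMG_PAGES2BYTES64(ui64Pages, ui32Log2)  ((IMG_UINT64)(ui64Pages) << (ui32Log2))
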
*/ if (uiFreeCount == 1) { - eError = _FreeBaseArraySlice(pArena, - aBaseArray, - uiBaseArraySize, - uiChunkSize, - puiFreeIndices[0], - 1); - PVR_LOG_IF_ERROR(eError, "_FreeBaseArraySlice"); + eError = _ConvertAndFreeBaseArraySlice(pArena, + aBaseArray, + uiBaseArraySize, + uiChunkSize, + puiFreeIndices[0], + 1, + CONVERT_AND_FREE); + PVR_LOG_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice"); if (eError == PVRSRV_OK) { *puiFreeCount = uiFreeCount; @@ -2813,19 +3182,21 @@ _RA_FreeMultiUnlockedSparse(RA_ARENA *pArena, for (j = i; puiFreeIndices[j + 1] == puiFreeIndices[j] + 1 && - RA_BASE_IS_GHOST(aBaseArray[puiFreeIndices[j + 1]]); + RA_BASE_IS_GHOST(aBaseArray[puiFreeIndices[j + 1]]) && + j + 1 < uiFreeCount; j++) { uiConsolidate++; } - eError = _FreeBaseArraySlice(pArena, - aBaseArray, - uiBaseArraySize, - uiChunkSize, - puiFreeIndices[i], - uiConsolidate); - PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArraySlice"); + eError = _ConvertAndFreeBaseArraySlice(pArena, + aBaseArray, + uiBaseArraySize, + uiChunkSize, + puiFreeIndices[i], + uiConsolidate, + CONVERT_AND_FREE); + PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice"); i += uiConsolidate; *puiFreeCount += uiConsolidate; @@ -2866,7 +3237,7 @@ RA_FreeMultiSparse(RA_ARENA *pArena, eError = _RA_FreeMultiUnlockedSparse(pArena, aBaseArray, uiBaseArraySize, - 1ULL << uiLog2ChunkSize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), puiFreeIndices, puiFreeCount); OSLockRelease(pArena->hLock); @@ -2874,6 +3245,151 @@ RA_FreeMultiSparse(RA_ARENA *pArena, return eError; } +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +/*************************************************************************/ /*! + * @Function _RA_RealiseMultiSparseIndicesUnlocked + * + * @Description Given an array of indices to extract, prepares the base + * array so that the indices in puiExtractIndices can be + * moved to another base array. + * Called when some pages of the base array need to be + * transferred to another base array. As a result of this call, + * some ghost addresses in aBaseArray might be converted to + * real addresses. + * + * @Input pArena - The RA Arena to free the bases on. + * @Input aBaseArray - The Base array to free from + * @Input uiBaseArraySize - The Size of the base array to free. + * @Input uiChunkSize - The Base chunk size used to generate ghost entries + * @Input puiExtractIndices - Array of indices to extract + * @Input puiExtractCount - Number of indices to extract + * + * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise. +*/ /**************************************************************************/ +static PVRSRV_ERROR +_RA_RealiseMultiSparseIndicesUnlocked(RA_ARENA *pArena, + RA_BASE_ARRAY_T aBaseArray, + RA_BASE_ARRAY_SIZE_T uiBaseArraySize, + RA_LENGTH_T uiChunkSize, + IMG_UINT32 *puiExtractIndices, + IMG_UINT32 *puiExtractCount) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError; + IMG_UINT32 uiExtractCount = *puiExtractCount; + *puiExtractCount = 0; + + /* Handle case where we only have 1 base to extract. 
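 * (For context: in these sparse base arrays only the first chunk of a
 *  contiguous run holds a real address; the entries that follow are ghost
 *  bases that refer back to it. That is why the loops below consolidate runs
 *  of ghost entries after a real base, and why extracting or freeing part of
 *  a run may first convert a ghost entry back into a real base.)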
*/ + if (uiExtractCount == 1) + { + eError = _ConvertAndFreeBaseArraySlice(pArena, + aBaseArray, + uiBaseArraySize, + uiChunkSize, + puiExtractIndices[0], + 1, + CONVERT_DONT_FREE); + PVR_LOG_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice"); + if (eError == PVRSRV_OK) + { + *puiExtractCount = uiExtractCount; + } + return eError; + } + + for (i = 0; i < uiExtractCount;) + { + IMG_UINT32 j; + IMG_UINT32 uiConsolidate = 1; + + PVR_ASSERT(RA_BASE_IS_REAL(aBaseArray[i])); + + for (j = i; + puiExtractIndices[j + 1] == puiExtractIndices[j] + 1 && + RA_BASE_IS_GHOST(aBaseArray[puiExtractIndices[j + 1]]) && + j + 1 < uiExtractCount; + j++) + { + uiConsolidate++; + } + + eError = _ConvertAndFreeBaseArraySlice(pArena, + aBaseArray, + uiBaseArraySize, + uiChunkSize, + puiExtractIndices[i], + uiConsolidate, + CONVERT_DONT_FREE); + PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice"); + + i += uiConsolidate; + *puiExtractCount += uiConsolidate; + } + + return PVRSRV_OK; +} + +IMG_INTERNAL PVRSRV_ERROR +RA_TransferMultiSparseIndices(RA_ARENA *pArena, + RA_BASE_ARRAY_T aSrcBaseArray, + RA_BASE_ARRAY_SIZE_T uiSrcBaseArraySize, + RA_BASE_ARRAY_T aDstBaseArray, + RA_BASE_ARRAY_SIZE_T uiDstBaseArraySize, + IMG_UINT32 uiLog2ChunkSize, + IMG_UINT32 *puiTransferIndices, + IMG_UINT32 *puiTransferCount) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + + PVR_LOG_RETURN_IF_FALSE(puiTransferCount != NULL, + "puiTransferCount Required", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Ensure source Base Array is large enough for intended extract */ + PVR_LOG_RETURN_IF_FALSE(uiSrcBaseArraySize >= *puiTransferCount, + "Attempt to transfer more bases than src array holds", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Ensure dst Base Array is large enough for intended extract */ + PVR_LOG_RETURN_IF_FALSE(uiDstBaseArraySize <= *puiTransferCount, + "Attempt to transfer more bases than dst array holds", + PVRSRV_ERROR_INVALID_PARAMS); + + PVR_LOG_RETURN_IF_FALSE(puiTransferIndices != NULL, + "puiTransferIndices Required", + PVRSRV_ERROR_INVALID_PARAMS); + + PVR_LOG_RETURN_IF_FALSE(uiLog2ChunkSize >= RA_BASE_FLAGS_LOG2 && + uiLog2ChunkSize <= RA_BASE_CHUNK_LOG2_MAX, + "Log2 chunk size must be 12-64", + PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + /* First prepare the base array for subsequent transfer */ + eError = _RA_RealiseMultiSparseIndicesUnlocked(pArena, + aSrcBaseArray, + uiSrcBaseArraySize, + IMG_PAGE2BYTES64(uiLog2ChunkSize), + puiTransferIndices, + puiTransferCount); + OSLockRelease(pArena->hLock); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Now do the transfer */ + for (i=0; i<*puiTransferCount; i++) + { + IMG_UINT32 idxSrc = puiTransferIndices[i]; + aDstBaseArray[i] = aSrcBaseArray[idxSrc]; + aSrcBaseArray[idxSrc] = INVALID_BASE_ADDR; + } + + return PVRSRV_OK; +e0: + return eError; +} +#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */ + static PVRSRV_ERROR _TrimBlockMakeReal(RA_ARENA *pArena, RA_BASE_ARRAY_T aBaseArray, @@ -2905,7 +3421,7 @@ _TrimBlockMakeReal(RA_ARENA *pArena, sRealBase, uiRealBaseIndex, uiStartIndex, - 1ULL << uiLog2ChunkSize); + IMG_PAGE2BYTES64(uiLog2ChunkSize)); PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); } @@ -2923,7 +3439,7 @@ _TrimBlockMakeReal(RA_ARENA *pArena, sRealBase, uiRealBaseIndex, uiEndIndex + 1, - 1ULL << uiLog2ChunkSize); + IMG_PAGE2BYTES64(uiLog2ChunkSize)); PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); } @@ -3044,6 +3560,12 @@ RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats) 
psRAStats->ui64FreeArenaSize = pArena->ui64FreeArenaSize; } +IMG_INTERNAL IMG_CHAR * +RA_GetArenaName(RA_ARENA *pArena) +{ + return pArena->name; +} + /* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */ #define _DBG(...) @@ -3177,6 +3699,70 @@ RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData) return IMG_TRUE; } +IMG_INTERNAL IMG_BOOL +RA_IteratorNextSpan(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData) +{ + BT *pNext; + + PVR_ASSERT(pIter != NULL); + + if (pIter == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "pIter in %s() is NULL", __func__)); + return IMG_FALSE; + } + + /* We have no more iterations to perform */ + if (pIter->pCurrent == NULL) + { + return IMG_FALSE; + } + + pNext = pIter->pCurrent; + + _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " + "type=%u", __func__, (void *) pNext->base, pNext->uSize, + pNext->type); + + /* Extract details of current span */ + pData->uiAddr = pIter->pCurrent->base; + pData->uiSize = pIter->pCurrent->uSize; + pData->bFree = pIter->pCurrent->free_import; + + /* If this BT is a full span, can occur if we have no allocations + * on a span. + */ + if (pNext->is_leftmost == 1 && + pNext->is_rightmost == 1) + { + pNext = pNext->pNextSegment; + } + else + { + /* combine contiguous segments of span, these are allocations + * and free segments in a span. + */ + while ((pNext = pNext->pNextSegment) != NULL && + pNext->is_leftmost == 0) + { + _DBG("(%s()) combining segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " + "type=%u", __func__, (void *) pNext->base, pNext->uSize, + pNext->type); + pData->uiSize += pNext->uSize; + } + } + + _DBG("(%s()) next segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " + "type=%u", __func__, + pNext != NULL ? (void *) pNext->base : NULL, + pNext != NULL ? pNext->uSize : 0, + pNext != NULL ? 
pNext->type : 0); + + pIter->pCurrent = pNext; + + return IMG_TRUE; +} + IMG_INTERNAL PVRSRV_ERROR RA_BlockDump(RA_ARENA *pArena, void (*pfnLogDump)(void*, IMG_CHAR*, ...), void *pPrivData) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/sync.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/sync.c index e3eb07b71d17..3b6e8914d446 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/sync.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/sync.c @@ -200,9 +200,7 @@ SyncPrimBlockImport(RA_PERARENA_HANDLE hArena, RA_FLAGS_T uFlags, RA_LENGTH_T uBaseAlignment, const IMG_CHAR *pszAnnotation, - RA_BASE_T *puiBase, - RA_LENGTH_T *puiActualSize, - RA_PERISPAN_HANDLE *phImport) + RA_IMPORT *psImport) { SYNC_PRIM_CONTEXT *psContext = hArena; SYNC_PRIM_BLOCK *psSyncBlock = NULL; @@ -243,9 +241,9 @@ SyncPrimBlockImport(RA_PERARENA_HANDLE hArena, */ PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize); - *puiBase = psSyncBlock->uiSpanBase; - *puiActualSize = psSyncBlock->ui32SyncBlockSize; - *phImport = psSyncBlock; + psImport->base = psSyncBlock->uiSpanBase; + psImport->uSize = psSyncBlock->ui32SyncBlockSize; + psImport->hPriv = psSyncBlock; return PVRSRV_OK; fail_spanalloc: @@ -282,18 +280,16 @@ SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena, static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt) { - IMG_UINT64 ui64Temp; + IMG_UINT64 ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncBlock->uiSpanBase; - PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL); - - ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; PVR_ASSERT(ui64Tempu.sLocal.psSyncBlock; + SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->psSyncBlock; psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr + (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32)); @@ -304,7 +300,7 @@ static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) SYNC_PRIM_BLOCK *psSyncBlock; SYNC_PRIM_CONTEXT *psContext; - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psSyncBlock = psSyncInt->psSyncBlock; psContext = psSyncBlock->psContext; #if !defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) @@ -316,17 +312,17 @@ static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) { PVRSRV_ERROR eError; SHARED_DEV_CONNECTION hDevConnection = - psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection; + psSyncInt->psSyncBlock->psContext->hDevConnection; if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) { - if (psSyncInt->u.sLocal.hRecord) + if (psSyncInt->hRecord) { /* remove this sync record */ eError = DestroyServerResource(hDevConnection, NULL, BridgeSyncRecordRemoveByHandle, - psSyncInt->u.sLocal.hRecord); + psSyncInt->hRecord); PVR_LOG_IF_ERROR(eError, "BridgeSyncRecordRemoveByHandle"); } } @@ -351,7 +347,7 @@ static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE); #endif - RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr); + RA_Free(psContext->psSubAllocRA, psSyncInt->uiSpanAddr); OSFreeMem(psSyncInt); _SyncPrimContextUnref(psContext); } @@ -359,11 +355,11 @@ static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt) { - if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount)) + if (!OSAtomicRead(&psSyncInt->hRefCount)) { PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed")); } - else if (0 == 
OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount)) + else if (0 == OSAtomicDecrement(&psSyncInt->hRefCount)) { SyncPrimLocalFree(psSyncInt, IMG_FALSE); } @@ -373,7 +369,7 @@ static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt) { SYNC_PRIM_BLOCK *psSyncBlock; - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psSyncBlock = psSyncInt->psSyncBlock; return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt); } @@ -414,7 +410,7 @@ SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, psContext->psSubAllocRA = RA_Create(psContext->azName, /* Params for imports */ _Log2(sizeof(IMG_UINT32)), - RA_LOCKCLASS_2, + RA_LOCKCLASS_3, SyncPrimBlockImport, SyncPrimBlockUnimport, psContext, @@ -434,7 +430,7 @@ SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, psContext->psSpanRA = RA_Create(psContext->azSpanName, /* Params for imports */ 0, - RA_LOCKCLASS_1, + RA_LOCKCLASS_0, NULL, NULL, NULL, @@ -519,10 +515,9 @@ static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, (RA_PERISPAN_HANDLE *) &psSyncBlock); PVR_GOTO_IF_ERROR(eError, fail_raalloc); - psNewSync->eType = SYNC_PRIM_TYPE_LOCAL; - OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1); - psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr; - psNewSync->u.sLocal.psSyncBlock = psSyncBlock; + OSAtomicWrite(&psNewSync->hRefCount, 1); + psNewSync->uiSpanAddr = uiSpanAddr; + psNewSync->psSyncBlock = psSyncBlock; SyncPrimGetCPULinAddr(psNewSync); *ppsSync = &psNewSync->sCommon; _SyncPrimContextRef(psContext); @@ -532,7 +527,7 @@ static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, #if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) /* If this is the first sync prim allocated in the context, keep a handle to it */ - if (psSyncBlock->uiSpanBase == 0 && psNewSync->u.sLocal.uiSpanAddr == 0) + if (psSyncBlock->uiSpanBase == 0 && psNewSync->uiSpanAddr == 0) { psContext->hFirstSyncPrim = psNewSync; } @@ -563,7 +558,7 @@ static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, /* record this sync */ eError = BridgeSyncRecordAdd( GetBridgeHandle(psSyncBlock->psContext->hDevConnection), - &psNewSync->u.sLocal.hRecord, + &psNewSync->hRecord, psSyncBlock->hServerSyncPrimBlock, psSyncBlock->ui32FirmwareAddr, SyncPrimGetOffset(psNewSync), @@ -576,7 +571,7 @@ static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, __func__, szClassName, PVRSRVGETERRORSTRING(eError))); - psNewSync->u.sLocal.hRecord = NULL; + psNewSync->hRecord = NULL; } } else @@ -620,24 +615,17 @@ _SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value) { PVRSRV_ERROR eError; - if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) - { - SYNC_PRIM_BLOCK *psSyncBlock; - SYNC_PRIM_CONTEXT *psContext; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; - psContext = psSyncBlock->psContext; + psSyncBlock = psSyncInt->psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32), + ui32Value); - eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection), - psSyncBlock->hServerSyncPrimBlock, - SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32), - ui32Value); - } - else - { - /* Server sync not supported, attempted use of server sync */ - return PVRSRV_ERROR_NOT_SUPPORTED; - } return eError; } @@ -649,23 +637,9 @@ IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync) 
PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) - { - SyncPrimLocalUnref(psSyncInt); - } - else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) - { - /* Server sync not supported, attempted use of server sync */ - return PVRSRV_ERROR_NOT_SUPPORTED; - } - else - { - /* - Either the client has given us a bad pointer or there is an - error in this module - */ - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); - } + + SyncPrimLocalUnref(psSyncInt); + err_out: return eError; @@ -682,10 +656,6 @@ SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - /* There is no check for the psSyncInt to be LOCAL as this call - substitutes the Firmware updating a sync and that sync could - be a server one */ - eError = _SyncPrimSetValue(psSyncInt, ui32Value); err_out: @@ -702,11 +672,6 @@ SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) - { - /* Invalid sync type */ - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); - } eError = _SyncPrimSetValue(psSyncInt, ui32Value); @@ -730,17 +695,8 @@ IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRI psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)) - { - *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock; - *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; - } - else - { - PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)", - __func__, psSyncInt->eType)); - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, err_out); - } + *phBlock = psSyncInt->psSyncBlock->hServerSyncPrimBlock; + *pui32Offset = psSyncInt->uiSpanAddr - psSyncInt->psSyncBlock->uiSpanBase; err_out: return eError; @@ -756,22 +712,7 @@ SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) - { - *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt); - } - else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) - { - /* Server sync not supported, attempted use of server sync */ - return PVRSRV_ERROR_NOT_SUPPORTED; - } - else - { - /* Either the client has given us a bad pointer or there is an - * error in this module - */ - PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); - } + *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt); err_out: return eError; @@ -788,14 +729,7 @@ IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) PVR_ASSERT(psSync != NULL); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) - { - /* Invalid sync type */ - PVR_ASSERT(IMG_FALSE); - return; - } - - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psSyncBlock = psSyncInt->psSyncBlock; psContext = psSyncBlock->psContext; eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection), @@ -815,14 +749,7 @@ IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 PVR_ASSERT(psSync != NULL); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if 
(psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) - { - /* Invalid sync type */ - PVR_ASSERT(IMG_FALSE); - return; - } - - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psSyncBlock = psSyncInt->psSyncBlock; psContext = psSyncBlock->psContext; eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection), @@ -847,14 +774,7 @@ IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, PVR_ASSERT(psSync != NULL); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) - { - /* Invalid sync type */ - PVR_ASSERT(IMG_FALSE); - return; - } - - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psSyncBlock = psSyncInt->psSyncBlock; psContext = psSyncBlock->psContext; eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection), @@ -881,14 +801,7 @@ IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, PVR_ASSERT(psSync != NULL); psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); - if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) - { - /* Invalid sync type */ - PVR_ASSERT(IMG_FALSE); - return; - } - - psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psSyncBlock = psSyncInt->psSyncBlock; psContext = psSyncBlock->psContext; #if defined(__linux__) && defined(__i386__) diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/tlclient.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/tlclient.c index 76983002c4f1..a72f8118483b 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/tlclient.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/tlclient.c @@ -174,7 +174,7 @@ PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, hTLImportHandle); /* Copy stream name */ - OSStringLCopy(psSD->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE); + OSStringSafeCopy(psSD->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE); /* Return client descriptor handle to caller */ *phSD = psSD; diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/common/uniq_key_splay_tree.c b/drivers/gpu/drm/img/img-volcanic/services/shared/common/uniq_key_splay_tree.c index 8adf200cdcef..2d4f2fa8ce66 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/common/uniq_key_splay_tree.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/common/uniq_key_splay_tree.c @@ -47,11 +47,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "uniq_key_splay_tree.h" /** - * This function performs a simple top down splay + * PVRSRVSplay - perform a simple top down splay + * @ui32Flags: flags that must splayed to the root (if possible) + * @psTree: psTree The tree to splay. * - * @param uiFlags the flags that must splayed to the root (if possible). - * @param psTree The tree to splay. - * @return the resulting tree after the splay operation. + * Return the resulting tree after the splay operation. 
*/ IMG_INTERNAL IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) @@ -143,12 +143,12 @@ IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) /** - * This function inserts a node into the Tree (unless it is already present, in + * PVRSRVInsert - insert a node into the Tree (unless it is already present, in * which case it is equivalent to performing only a splay operation + * @ui32Flags: the key of the new node + * @psTree: tree into which one wants to add a new node * - * @param uiFlags the key of the new node - * @param psTree The tree into which one wants to add a new node - * @return The resulting with the node in it + * Return the resulting tree after the splay operation. */ IMG_INTERNAL IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) @@ -203,12 +203,12 @@ IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) /** - * Deletes a node from the tree (unless it is not there, in which case it is - * equivalent to a splay operation) + * PVRSRVDelete - delete a node from the tree (unless it is not there, in which + * case it is equivalent to a splay operation) + * @ui32Flags: value of the node to remove + * @psTree: tree into which the node must be removed * - * @param uiFlags the value of the node to remove - * @param psTree the tree into which the node must be removed - * @return the resulting tree + * Return the resulting tree. */ IMG_INTERNAL IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) @@ -242,11 +242,11 @@ IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) } /** - * This function picks up the appropriate node for the given flags + * PVRSRVFindNode - pick up the appropriate node for the given flags + * @ui32Flags: flags that must associated with the node + * @psTree: current splay tree node * - * @param uiFlags the flags that must associated with the node. - * @param psTree current splay tree node. - * @return the resulting tree node after the search operation. + * Return the resulting tree node after the search operation. 
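
The splay-tree helpers documented above all follow the same top-down splay convention, and the reworked comments keep it: PVRSRVInsert and PVRSRVDelete return the new root, which the caller must store back, while PVRSRVFindNode returns the matching node or NULL. A small usage sketch under that reading; the Example* wrapper and its names are illustrative.

/* Illustrative only: typical caller-side pattern for the splay helpers. */
static IMG_PSPLAY_TREE
ExampleSplayUsage(IMG_PSPLAY_TREE psTree, IMG_PSPLAY_FLAGS_T uiFlags)
{
    IMG_PSPLAY_TREE psNode;

    psTree = PVRSRVInsert(uiFlags, psTree);     /* add the key, or just splay it to the root */
    psNode = PVRSRVFindNode(uiFlags, psTree);   /* fetch the node carrying the key, if any */
    if (psNode != NULL)
    {
        psTree = PVRSRVDelete(uiFlags, psTree); /* remove it, keeping the returned root */
    }
    return psTree;
}
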
*/ IMG_INTERNAL IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/devices/rogue/rgx_hwperf_table.c b/drivers/gpu/drm/img/img-volcanic/services/shared/devices/rogue/rgx_hwperf_table.c index 268ba65207ae..78d731c98eb6 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/devices/rogue/rgx_hwperf_table.c +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/devices/rogue/rgx_hwperf_table.c @@ -83,15 +83,12 @@ static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 PVR_UNREFERENCED_PARAMETER(eBlkType); PVR_UNREFERENCED_PARAMETER(ui8UnitId); -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) - /* S7XT: JONES */ - return (eBlkType == RGX_CNTBLK_ID_JONES); -#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) /* S6XT: TA, TORNADO */ return true; #else /* S6 : TA, HUB, RASTER (RASCAL) */ - return (gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U; + return rgxfw_pow_is_rd_on(); #endif } @@ -102,8 +99,7 @@ static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT { IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); - if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && - (ui32NumDustsEnabled > 0U)) + if (rgxfw_pow_is_rd_on() && (ui32NumDustsEnabled > 0U)) { #if defined(RGX_FEATURE_DYNAMIC_DUST_POWER) IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U; @@ -111,9 +107,6 @@ static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT switch (eBlkType) { case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */ -#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) - case RGX_CNTBLK_ID_TEXAS0: /* S7 */ -#endif if (ui8UnitId >= ui32NumDustsEnabled) { return false; @@ -156,8 +149,8 @@ static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT #else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ -# define rgxfw_hwperf_pow_st_direct ((void*)NULL) -# define rgxfw_hwperf_pow_st_indirect ((void*)NULL) +# define rgxfw_hwperf_pow_st_direct ((PFN_RGXFW_HWPERF_CNTBLK_POWERED)NULL) +# define rgxfw_hwperf_pow_st_indirect ((PFN_RGXFW_HWPERF_CNTBLK_POWERED)NULL) #endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ @@ -309,7 +302,7 @@ static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODE return IMG_FALSE; } -/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPERL, PBE */ +/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */ static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) { DBG_ASSERT(psBlkTypeDesc != NULL); @@ -320,6 +313,7 @@ static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODE (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); #if defined(__KERNEL__) /* Server context */ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_IDX) PVR_ASSERT(pvDev_km != NULL); PVR_ASSERT(pvRtInfo != NULL); { @@ -359,6 +353,7 @@ static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODE } } } +#endif #else /* FW context */ PVR_UNREFERENCED_PARAMETER(pvDev_km); PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); @@ -385,8 +380,7 @@ static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_ { RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; - if 
(!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) && - RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) { if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) { diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem.h index 028ddc36ec36..0a7d550bf194 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem.h @@ -584,11 +584,18 @@ DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx, * * returns the device virtual address of the base of the heap. */ - PVRSRV_ERROR DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap, IMG_DEV_VIRTADDR *pDevVAddr); +/* + * DevmemGetHeapSize() + * + * returns the size of the heap. + */ +IMG_INTERNAL DEVMEM_SIZE_T +DevmemGetHeapSize(struct DEVMEM_HEAP_TAG *psHeap); + PVRSRV_ERROR DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, IMG_HANDLE *phImport); @@ -629,12 +636,6 @@ IMG_INTERNAL PVRSRV_ERROR DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, IMG_DEV_VIRTADDR *psFaultAddress); -IMG_INTERNAL PVRSRV_ERROR -DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, - IMG_DEV_VIRTADDR sDevVAddr, - IMG_DEVMEM_SIZE_T uiSize, - IMG_BOOL bInvalidate); - IMG_INTERNAL PVRSRV_ERROR DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, IMG_UINT64 ui64FBSCEntries); @@ -667,14 +668,12 @@ DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap); @Input psContext Memory context the process that would like to be notified about. -@Input ui32PID The PID of the calling process. @Input bRegister If true, register. If false, de-register. @Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ error code */ /**************************************************************************/ IMG_INTERNAL PVRSRV_ERROR RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, - IMG_UINT32 ui32PID, IMG_BOOL bRegister); /*************************************************************************/ /*! diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem_utils.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem_utils.h index ec1c89637a14..9e24c1dceca7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem_utils.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicemem_utils.h @@ -122,7 +122,7 @@ struct DEVMEM_CONTEXT_TAG #define DEVMEM_HEAP_MANAGER_USER (1U << 0) /* Heap VAs managed by the OSs kernel, VA from CPU mapping call used */ #define DEVMEM_HEAP_MANAGER_KERNEL (1U << 1) -/* Heap VAs managed by the heap's own RA */ +/* Heap VAs managed by the heap's own RA */ #define DEVMEM_HEAP_MANAGER_RA (1U << 2) /* Heap VAs managed jointly by Services and the client of Services. * The reserved region of the heap is managed explicitly by the client of Services diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicememx.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicememx.h index 96622edadbdc..86049c4a705c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicememx.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/devicememx.h @@ -127,6 +127,16 @@ DevmemXUnmapVirtualRange(IMG_UINT32 ui32PageCount, DEVMEMX_VIRTDESC *psVirtDesc, IMG_UINT32 ui32VirtPgOffset); +/* DevmemXMapVirtualRangeToBackingPage() + * + * Map a virtual range to kernel backing page. 
+ */ +PVRSRV_ERROR +DevmemXMapVirtualRangeToBackingPage(DEVMEMX_VIRTDESC *psVirtDesc, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags, + IMG_UINT32 ui32VirtPgOffset, + IMG_UINT32 ui32PageCount); + /* DevmemXMapPhysicalToCPU() * * Map a full physical descriptor to CPU space. @@ -157,18 +167,6 @@ DevmemXReacquireCpuVirtAddr(DEVMEMX_PHYSDESC *psPhysDesc, void DevmemXReleaseCpuVirtAddr(DEVMEMX_PHYSDESC *psPhysDesc); -/* DevmemXCreateDevmemMemDescVA() - * - * (Deprecated) - * - * Create a devmem memdesc from a virtual address. - * Always destroy with DevmemXFreeDevmemMemDesc(). - */ - -PVRSRV_ERROR -DevmemXCreateDevmemMemDescVA(const IMG_DEV_VIRTADDR sVirtualAddress, - DEVMEM_MEMDESC **ppsMemDesc); - /* DevmemXCreateDevmemMemDesc() * * Create a devmem memdesc from a physical and diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash.h index 92d48990af82..feb879c1489a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash.h @@ -227,6 +227,14 @@ uintptr_t HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k); */ /**************************************************************************/ PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args); +/*************************************************************************/ /*! +@Function HASH_Count +@Description Retrieve the number of entries in the hash table. +@Input pHash The hash table. +@Return The number of entries. +*/ /**************************************************************************/ +IMG_UINT32 HASH_Count(HASH_TABLE *pHash); + #ifdef HASH_TRACE /*************************************************************************/ /*! @Function HASH_Dump diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash_functions.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash_functions.h new file mode 100644 index 000000000000..154770848be4 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/hash_functions.h @@ -0,0 +1,66 @@ +/*************************************************************************/ /*! +@File +@Title Reusable hash functions for hash.c. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements common hash functions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" + +/*************************************************************************/ /*! +@Function HASH_Djb2_Hash +@Description Hash function intended for hashing string keys. This function + implements DJB2 algorithm. +@Input uKeySize The size of the string hash key, in bytes. +@Input pKey A pointer to the string key to hash. +@Input uHashTabLen The length of the hash table. +@Return The hash value. +*/ /**************************************************************************/ +IMG_UINT32 HASH_Djb2_Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +/*************************************************************************/ /*! +@Function HASH_Key_Comp_Default +@Description Compares string keys. +@Input uKeySize The size of the string key. +@Input pKey1 Pointer to first string hash key to compare. +@Input pKey2 Pointer to second string hash key to compare. +@Return IMG_TRUE - The keys match. + IMG_FALSE - The keys don't match. +*/ /**************************************************************************/ +IMG_BOOL HASH_Djb2_Compare(size_t uKeySize, void *pKey1, void *pKey2); diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/htbuffer.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/htbuffer.h index 4080275ebc2d..96daa9727c47 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/htbuffer.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/htbuffer.h @@ -56,73 +56,30 @@ extern "C" { #include "pvrsrv_error.h" #include "htbuffer_sf.h" #include "htbuffer_types.h" -#include "htbuffer_init.h" - -#if defined(__KERNEL__) -#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple((IMG_HANDLE) NULL, SF, ## args); } while (0) - -/* Host Trace Buffer name */ -#define HTB_STREAM_NAME "PVRHTBuffer" - -#else -#define HTBLOG(handle, SF, args...) 
do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0) -#endif - -/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */ -#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff)) -#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff)) - -/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */ -#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff)) -#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff)) - -/*************************************************************************/ /*! - @Function HTBLog - @Description Record a Host Trace Buffer log event - - @Input PID The PID of the process the event is associated - with. This is provided as an argument rather - than querying internally so that events associated - with a particular process, but performed by - another can be logged correctly. - - @Input TID The TID (Thread ID) of the thread the event is - associated with. - - @Input ui64TimeStamp The timestamp for this event - - @Input SF The log event ID - - @Input ... Log parameters - - @Return PVRSRV_OK Success. - -*/ /**************************************************************************/ -IMG_INTERNAL PVRSRV_ERROR -HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, IMG_UINT32 SF, ...); - /*************************************************************************/ /*! - @Function HTBLogSimple - @Description Record a Host Trace Buffer log event with implicit PID and Timestamp - - @Input SF The log event ID - - @Input ... Log parameters - - @Return PVRSRV_OK Success. - + @Function HTBControl + @Description Update the configuration of the Host Trace Buffer + @Input hSrvHandle Server Handle + @Input ui32NumFlagGroups Number of group enable flags words + @Input aui32GroupEnable Flags words controlling groups to be logged + @Input ui32LogLevel Log level to record + @Input ui32EnablePID PID to enable logging for a specific process + @Input eLogPidMode Enable logging for all or specific processes, + @Input eOpMode Control what trace data is dropped if the TL + buffer is full + @Return eError Internal services call returned eError error + number */ /**************************************************************************/ IMG_INTERNAL PVRSRV_ERROR -HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...); - - - -/* DEBUG log group enable */ -#if !defined(HTB_DEBUG_LOG_GROUP) -#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */ -#define HTB_LOG_TYPE_DBG __BUILDERROR__ -#endif +HTBControl( + IMG_HANDLE hSrvHandle, + IMG_UINT32 ui32NumFlagGroups, + IMG_UINT32 * aui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 ui32EnablePID, + HTB_LOGMODE_CTRL eLogPidMode, + HTB_OPMODE_CTRL eOpMode); #if defined(__cplusplus) diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/lock.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/lock.h index 3ef78215f624..386e3b2f88a2 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/lock.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/lock.h @@ -48,11 +48,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * with macros. This allows us to use the kernel lockdep feature for lock * debugging. 
*/ #include "lock_types.h" +#include "pvr_debug.h" #if defined(__linux__) && defined(__KERNEL__) #include "allocmem.h" #include +#include #define OSLockCreateNoStats(phLock) ({ \ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ @@ -74,6 +76,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE) #define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE) +#if defined(DEBUG) +#if defined(CONFIG_LOCKDEP) +#define OSLockHeldAssert(hLock) ({ lockdep_assert_held((hLock)); }) +#else +#define OSLockHeldAssert(hLock) ({ PVR_ASSERT(OSLockIsLocked((hLock)) == IMG_TRUE); }) +#endif +#else +#define OSLockHeldAssert(hLock) +#endif + #define OSSpinLockCreate(_ppsLock) ({ \ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ *(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \ @@ -90,7 +102,7 @@ typedef unsigned long OS_SPINLOCK_FLAGS; #define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i) /* The following atomic operations, in addition to being SMP-safe, also - imply a memory barrier around the operation */ + imply a memory barrier around the operation */ #define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter) #define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter) #define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv) @@ -114,7 +126,11 @@ static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal) } #define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter) -#define OSAtomicAddUnless(pCounter, incr, test) atomic_add_unless(pCounter, (incr), (test)) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) +#define OSAtomicAddUnless(pCounter, incr, test) atomic_fetch_add_unless(pCounter, (incr), (test)) +#else +#define OSAtomicAddUnless(pCounter, incr, test) __atomic_add_unless(pCounter,incr,test) +#endif #define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter) #define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), (test)) @@ -136,7 +152,7 @@ static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal) */ /**************************************************************************/ IMG_INTERNAL PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock); -#if defined(INTEGRITY_OS) +#if defined(INTEGRITY_OS) || defined(__QNXNTO__) #define OSLockCreateNoStats OSLockCreate #endif @@ -149,7 +165,7 @@ PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock); IMG_INTERNAL void OSLockDestroy(POS_LOCK hLock); -#if defined(INTEGRITY_OS) +#if defined(INTEGRITY_OS) || defined(__QNXNTO__) #define OSLockDestroyNoStats OSLockDestroy #endif /**************************************************************************/ /*! @@ -211,6 +227,8 @@ void OSLockRelease(POS_LOCK hLock); IMG_INTERNAL IMG_BOOL OSLockIsLocked(POS_LOCK hLock); +#define OSLockHeldAssert(hLock) PVR_ASSERT(OSLockIsLocked((hLock)) == IMG_TRUE) + #if defined(__linux__) /* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */ @@ -280,7 +298,7 @@ IMG_INTERNAL void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v); /* For the following atomic operations, in addition to being SMP-safe, - should also have a memory barrier around each operation */ + should also have a memory barrier around each operation */ /*************************************************************************/ /*! @Function OSAtomicIncrement @Description Increment the value of a variable atomically. 
@@ -428,4 +446,82 @@ typedef unsigned long OS_SPINLOCK_FLAGS; #endif /* defined(__linux__) */ #endif /* defined(__linux__) && defined(__KERNEL__) */ -#endif /* LOCK_H */ +static inline PVRSRV_ERROR OSAtomicAddUnlessOverflow(ATOMIC_T *pCounter, IMG_INT iAdd, IMG_INT iMax) +{ + IMG_INT iCurrent; + IMG_INT iOld, iNew; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Should be using subtract function */ + if (iAdd <= 0) + { + PVR_ASSERT(!"Attempt to add negative number, rejecting."); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + iCurrent = OSAtomicRead(pCounter); + + do + { + iOld = iCurrent; +#if defined(__GNUC__) || defined(__clang__) + if (iAdd > (iMax - iCurrent) || __builtin_sadd_overflow(iCurrent, iAdd, &iNew)) + { + /* We would overflow so return error and do nothing */ + return PVRSRV_ERROR_ATOMIC_OVERFLOW; + } +#else + if (iAdd > (iMax - iCurrent) || iAdd > (IMG_INT_MAX - iCurrent)) + { + /* We would overflow so return error and do nothing */ + return PVRSRV_ERROR_ATOMIC_OVERFLOW; + } + iNew = iCurrent + iAdd; +#endif + iCurrent = OSAtomicCompareExchange(pCounter, iOld, iNew); + } + while (iOld != iCurrent); + + return eError; +} + +static inline PVRSRV_ERROR OSAtomicSubtractUnlessUnderflow(ATOMIC_T *pCounter, IMG_INT iSub, IMG_INT iMin) +{ + IMG_INT iCurrent; + IMG_INT iOld, iNew; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Should be using add function */ + if (iSub <= 0) + { + PVR_ASSERT(!"Attempt to subtract negative number, rejecting."); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + iCurrent = OSAtomicRead(pCounter); + + do + { + iOld = iCurrent; +#if defined(__GNUC__) || defined(__clang__) + if (iSub < (iMin + iCurrent) || __builtin_ssub_overflow(iCurrent, iSub, &iNew)) + { + /* We would underflow so return error and do nothing */ + return PVRSRV_ERROR_ATOMIC_UNDERFLOW; + } +#else + if (iCurrent < (iMin + iSub) || iCurrent < (IMG_INT_MIN + iSub)) + { + /* We would underflow so return error and do nothing */ + return PVRSRV_ERROR_ATOMIC_UNDERFLOW; + } + iNew = iCurrent - iSub; +#endif + iCurrent = OSAtomicCompareExchange(pCounter, iOld, iNew); + } + while (iOld != iCurrent); + + return eError; +} + +#endif /* LOCK_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/proc_stats.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/proc_stats.h index 8c1f5e951837..72c14c5f8e4d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/proc_stats.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/proc_stats.h @@ -43,30 +43,140 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef PROC_STATS_H #define PROC_STATS_H -/* X-Macro for Process stat keys */ +#define DKP_HIDDEN "hidden" + +#define PROCESS_STAT_KMALLOC X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc", "host_kmalloc") +#define PROCESS_STAT_KMALLOC_MAX X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax", DKP_HIDDEN) + +#define PROCESS_STAT_VMALLOC X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc", "host_vmalloc") +#define PROCESS_STAT_VMALLOC_MAX X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax", DKP_HIDDEN) + +#define PROCESS_STAT_ALLOC_PAGES_PT_UMA X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA", "host_mem_dev_pt") +#define PROCESS_STAT_ALLOC_PAGES_PT_UMA_MAX X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax", DKP_HIDDEN) + +#define PROCESS_STAT_VMAP_PT_UMA X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA", DKP_HIDDEN) +#define PROCESS_STAT_VMAP_PT_UMA_MAX X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax", DKP_HIDDEN) + +#define PROCESS_STAT_ALLOC_PAGES_PT_LMA X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA", "local_mem_dev_pt") +#define PROCESS_STAT_ALLOC_PAGES_PT_LMA_MAX X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax", DKP_HIDDEN) + +#define PROCESS_STAT_IOREMAP_PT_LMA X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA", DKP_HIDDEN) +#define PROCESS_STAT_IOREMAP_PT_LMA_MAX X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax", DKP_HIDDEN) + +#define PROCESS_STAT_ALLOC_LMA_PAGES X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA", "local_mem_dev_buf") +#define PROCESS_STAT_ALLOC_LMA_PAGES_MAX X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax", DKP_HIDDEN) + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +#define PROCESS_STAT_ZOMBIE_LMA_PAGES X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, "MemoryUsageZombieGPUMemLMA", "local_mem_dev_buf_purgeable") +#define PROCESS_STAT_ZOMBIE_LMA_PAGES_MAX X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES_MAX, "MemoryUsageZombieGPUMemLMAMax", DKP_HIDDEN) +#else +#define PROCESS_STAT_ZOMBIE_LMA_PAGES +#define PROCESS_STAT_ZOMBIE_LMA_PAGES_MAX +#endif + +#define PROCESS_STAT_ALLOC_UMA_PAGES X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA", "host_mem_dev_buf") +#define PROCESS_STAT_ALLOC_UMA_PAGES_MAX X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax", DKP_HIDDEN) + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +#define PROCESS_STAT_ZOMBIE_UMA_PAGES X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, "MemoryUsageZombieGPUMemUMA", "host_mem_dev_buf_purgeable") +#define PROCESS_STAT_ZOMBIE_UMA_PAGES_MAX X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES_MAX, "MemoryUsageZombieGPUMemUMAMax", DKP_HIDDEN) +#else +#define PROCESS_STAT_ZOMBIE_UMA_PAGES +#define PROCESS_STAT_ZOMBIE_UMA_PAGES_MAX +#endif + +#define PROCESS_STAT_MAP_UMA_LMA_PAGES X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA", DKP_HIDDEN) +#define PROCESS_STAT_MAP_UMA_LMA_PAGES_MAX X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax", DKP_HIDDEN) + +#define PROCESS_STAT_DMA_BUF_IMPORT X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport", "dma_buf_import") +#define PROCESS_STAT_DMA_BUF_IMPORT_MAX X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax", DKP_HIDDEN) + +#if defined(SUPPORT_PMR_DEFERRED_FREE) +#define 
PROCESS_STAT_DMA_BUF_ZOMBIE X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE, "MemoryUsageDmaBufZombie", "dma_buf_purgeable") +#define PROCESS_STAT_DMA_BUF_ZOMBIE_MAX X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE_MAX, "MemoryUsageDmaBufZombieMax", DKP_HIDDEN) +#else +#define PROCESS_STAT_DMA_BUF_ZOMBIE +#define PROCESS_STAT_DMA_BUF_ZOMBIE_MAX +#endif + +#define PROCESS_STAT_TOTAL X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal", DKP_HIDDEN) +#define PROCESS_STAT_TOTAL_MAX X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax", DKP_HIDDEN) + +/* Process stat keys */ #define PVRSRV_PROCESS_STAT_KEY \ - X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ - X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ - X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \ - X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ - X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \ - X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \ - X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") + PROCESS_STAT_KMALLOC \ + PROCESS_STAT_KMALLOC_MAX \ + PROCESS_STAT_VMALLOC \ + PROCESS_STAT_VMALLOC_MAX \ + PROCESS_STAT_ALLOC_PAGES_PT_UMA \ + PROCESS_STAT_ALLOC_PAGES_PT_UMA_MAX \ + PROCESS_STAT_VMAP_PT_UMA \ + PROCESS_STAT_VMAP_PT_UMA_MAX \ + PROCESS_STAT_ALLOC_PAGES_PT_LMA \ + PROCESS_STAT_ALLOC_PAGES_PT_LMA_MAX \ + PROCESS_STAT_IOREMAP_PT_LMA \ + PROCESS_STAT_IOREMAP_PT_LMA_MAX \ + PROCESS_STAT_ALLOC_LMA_PAGES \ + PROCESS_STAT_ALLOC_LMA_PAGES_MAX \ + PROCESS_STAT_ZOMBIE_LMA_PAGES \ + PROCESS_STAT_ZOMBIE_LMA_PAGES_MAX \ + PROCESS_STAT_ALLOC_UMA_PAGES \ + PROCESS_STAT_ALLOC_UMA_PAGES_MAX \ + PROCESS_STAT_ZOMBIE_UMA_PAGES \ + PROCESS_STAT_ZOMBIE_UMA_PAGES_MAX \ + PROCESS_STAT_MAP_UMA_LMA_PAGES \ + PROCESS_STAT_MAP_UMA_LMA_PAGES_MAX \ + PROCESS_STAT_DMA_BUF_IMPORT \ + PROCESS_STAT_DMA_BUF_IMPORT_MAX \ + PROCESS_STAT_DMA_BUF_ZOMBIE \ + PROCESS_STAT_DMA_BUF_ZOMBIE_MAX \ + PROCESS_STAT_TOTAL \ + PROCESS_STAT_TOTAL_MAX + +#if defined(SUPPORT_LINUX_FDINFO) +/* DKP process stats within the drm-memory- key */ +#define PVRSRV_DKP_MEM_STAT_GROUP_MEMORY \ + PROCESS_STAT_KMALLOC \ + PROCESS_STAT_VMALLOC \ + PROCESS_STAT_ALLOC_PAGES_PT_UMA \ + PROCESS_STAT_ALLOC_UMA_PAGES \ + PROCESS_STAT_ZOMBIE_UMA_PAGES + +/* DKP process stats within the drm-shared- key */ 
+#define PVRSRV_DKP_MEM_STAT_GROUP_SHARED \ + PROCESS_STAT_DMA_BUF_IMPORT \ + PROCESS_STAT_DMA_BUF_ZOMBIE + +/* DKP process stats within the drm-total- key */ +#define PVRSRV_DKP_MEM_STAT_GROUP_TOTAL \ + PROCESS_STAT_KMALLOC \ + PROCESS_STAT_VMALLOC \ + PROCESS_STAT_ALLOC_PAGES_PT_UMA \ + PROCESS_STAT_ALLOC_UMA_PAGES \ + PROCESS_STAT_ALLOC_PAGES_PT_LMA \ + PROCESS_STAT_ALLOC_LMA_PAGES + +/* DKP process stats within the drm-resident- key */ +#define PVRSRV_DKP_MEM_STAT_GROUP_RESIDENT \ + PROCESS_STAT_KMALLOC \ + PROCESS_STAT_VMALLOC \ + PROCESS_STAT_ALLOC_PAGES_PT_UMA \ + PROCESS_STAT_ALLOC_UMA_PAGES \ + PROCESS_STAT_ZOMBIE_UMA_PAGES \ + PROCESS_STAT_ALLOC_PAGES_PT_LMA \ + PROCESS_STAT_ALLOC_LMA_PAGES \ + PROCESS_STAT_ZOMBIE_LMA_PAGES \ + +/* DKP process stats within the drm-purgeable- key */ +#define PVRSRV_DKP_MEM_STAT_GROUP_PURGEABLE \ + PROCESS_STAT_ZOMBIE_LMA_PAGES \ + PROCESS_STAT_ZOMBIE_UMA_PAGES \ + PROCESS_STAT_DMA_BUF_ZOMBIE + +/* DKP process stats within the drm-active- key */ +#define PVRSRV_DKP_MEM_STAT_GROUP_ACTIVE + +#endif /* X-Macro for Device stat keys */ #define PVRSRV_DEVICE_STAT_KEY \ @@ -80,6 +190,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. X(PVRSRV_DEVICE_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \ X(PVRSRV_DEVICE_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \ X(PVRSRV_DEVICE_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \ + X(PVRSRV_DEVICE_STAT_TYPE_RC_RAY_STORES, "RenderContextRayStores") \ X(PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \ X(PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \ @@ -106,18 +217,24 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA, "MemoryUsageZombieGPUMemLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA_MAX, "MemoryUsageZombieGPUMemLMAMax") \ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA, "MemoryUsageZombieGPUMemUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA_MAX, "MemoryUsageZombieGPUMemUMAMax") \ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ - X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE, "MemoryUsageDmaBufZombie") \ + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE_MAX, "MemoryUsageDmaBufZombieMax") typedef enum { -#define X(stat_type, stat_str) stat_type, +#define X(stat_type, stat_str, drm_str) stat_type, PVRSRV_PROCESS_STAT_KEY #undef X PVRSRV_PROCESS_STAT_TYPE_COUNT diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/ra.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/ra.h index db897b0bc744..ee4492afa1ed 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/ra.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/ra.h @@ -61,6 +61,9 @@ typedef struct _RA_ARENA_ITERATOR_ RA_ARENA_ITERATOR; typedef struct _RA_ITERATOR_DATA_ { IMG_UINT64 uiAddr; IMG_UINT64 uiSize; + /* Standard Iteration: is the segment free + * Span Iteration: is the span a free import + */ IMG_BOOL bFree; } RA_ITERATOR_DATA; @@ -70,7 +73,7 @@ typedef struct _RA_ITERATOR_DATA_ { typedef struct _RA_USAGE_STATS { IMG_UINT64 ui64TotalArenaSize; IMG_UINT64 ui64FreeArenaSize; -}RA_USAGE_STATS, *PRA_USAGE_STATS; +} RA_USAGE_STATS, *PRA_USAGE_STATS; /* * Per-Arena handle - this is private data for the caller of the RA. @@ -96,6 +99,8 @@ typedef IMG_UINT32 RA_POLICY_T; typedef struct _RA_BASE_MULTI_ RA_BASE_MULTI_T; typedef IMG_UINT32 RA_BASE_ARRAY_SIZE_T; + + /* * RA_BASE_ARRAY can represent a number of bases of which are packed, * that is, they can be one of two types, a Real Base or a Ghost base. @@ -113,7 +118,8 @@ typedef IMG_UINT32 RA_BASE_ARRAY_SIZE_T; * they appear Real from another perspective but we the RA know they are a ghost of the * Real Base. * */ -typedef RA_BASE_T *RA_BASE_ARRAY_T; +typedef RA_BASE_T RA_BASE_ARRAY_T[IMG_FLEX_ARRAY_MEMBER]; + /* Since 0x0 is a valid BaseAddr, we rely on max 64-bit value to be an invalid * page address. */ @@ -134,10 +140,13 @@ typedef RA_BASE_T *RA_BASE_ARRAY_T; typedef struct _RA_MULTIBASE_ITERATOR_ RA_MULTIBASE_ITERATOR; -/* Lock classes: describes the level of nesting between different arenas. */ +/* Lock classes: describes the level of nesting between different arenas. + * A RA with lockclass x is permitted to obtain locks of a lockclass less than x. 
+ * A RA with lockclass 0 cannot obtain the lock of any other RA. */ #define RA_LOCKCLASS_0 0 #define RA_LOCKCLASS_1 1 #define RA_LOCKCLASS_2 2 +#define RA_LOCKCLASS_3 3 #define RA_NO_IMPORT_MULTIPLIER 1 @@ -213,38 +222,73 @@ typedef struct _RA_MULTIBASE_ITERATOR_ RA_MULTIBASE_ITERATOR; */ typedef IMG_UINT64 RA_FLAGS_T; +typedef struct _RA_IMPORT_ { + RA_BASE_T base; /* Allocation base */ + RA_LENGTH_T uSize; /* Allocation size */ + RA_PERISPAN_HANDLE hPriv; /* Per import private data */ +} RA_IMPORT; + +/*************************************************************************/ /*! +@Function Callback function PFN_RA_IMPORT_ALLOC_SINGLE +@Description RA import allocate function create a single span when + requesting extra resource for the arena. +@Input RA_PERARENA_HANDLE RA handle +@Input RA_LENGTH_T Request size +@Input RA_FLAGS_T RA flags +@Input RA_LENGTH_T Base Alignment +@Input IMG_CHAR Annotation +@Output RA_IMPORT Attributes of an import. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_RA_IMPORT_ALLOC_SINGLE)(RA_PERARENA_HANDLE, + RA_LENGTH_T, + RA_FLAGS_T, + RA_LENGTH_T, + const IMG_CHAR*, + RA_IMPORT*); + /*************************************************************************/ /*! -@Function Callback function PFN_RA_ALLOC -@Description RA import allocate function +@Function Callback function PFN_RA_IMPORT_ALLOC_MULTI +@Description RA import allocate function is able to create multiple spans + for a single import request. + The pointer to the array of inputs will be provided but + can be set to a larger array via OSAllocMem + if the count is not large enough. + The pointer will be freed by OSFreeMem after the callback. + It is permissible for the callback to set: + count == 0 or array pointer == NULL + but the callback MUST return an error in these cases. @Input RA_PERARENA_HANDLE RA handle @Input RA_LENGTH_T Request size @Input RA_FLAGS_T RA flags @Input RA_LENGTH_T Base Alignment @Input IMG_CHAR Annotation -@Input RA_BASE_T Allocation base -@Input RA_LENGTH_T Actual size -@Input RA_PERISPAN_HANDLE Per import private data +@Inout IMG_UINT32 Num of imports = Num of elements in array below. + Will always be >= 1. Must be set to the number of + imports returned. +@Inout RA_IMPORT Pointer to array of imports. Will always point + to an array of equal length as Num of Imports. + Will be freed by OSFreeMem. @Return PVRSRV_ERROR PVRSRV_OK or error code */ /**************************************************************************/ -typedef PVRSRV_ERROR (*PFN_RA_ALLOC)(RA_PERARENA_HANDLE, - RA_LENGTH_T, - RA_FLAGS_T, - RA_LENGTH_T, - const IMG_CHAR*, - RA_BASE_T*, - RA_LENGTH_T*, - RA_PERISPAN_HANDLE*); +typedef PVRSRV_ERROR (*PFN_RA_IMPORT_ALLOC_MULTI)(RA_PERARENA_HANDLE, + RA_LENGTH_T, + RA_FLAGS_T, + RA_LENGTH_T, + const IMG_CHAR*, + IMG_UINT32*, + RA_IMPORT**); /*************************************************************************/ /*! 
-@Function Callback function PFN_RA_FREE +@Function Callback function PFN_RA_IMPORT_FREE @Description RA free imported allocation @Input RA_PERARENA_HANDLE RA handle @Input RA_BASE_T Allocation base -@Output RA_PERISPAN_HANDLE Per import private data +@Input RA_PERISPAN_HANDLE Per import private data */ /**************************************************************************/ -typedef void (*PFN_RA_FREE)(RA_PERARENA_HANDLE, - RA_BASE_T, - RA_PERISPAN_HANDLE); +typedef void (*PFN_RA_IMPORT_FREE)(RA_PERARENA_HANDLE, + RA_BASE_T, + RA_PERISPAN_HANDLE); /** * @Function RA_Create @@ -265,11 +309,35 @@ RA_Create(IMG_CHAR *name, /* subsequent imports: */ RA_LOG2QUANTUM_T uLog2Quantum, IMG_UINT32 ui32LockClass, - PFN_RA_ALLOC imp_alloc, - PFN_RA_FREE imp_free, + PFN_RA_IMPORT_ALLOC_SINGLE imp_alloc, + PFN_RA_IMPORT_FREE imp_free, RA_PERARENA_HANDLE per_arena_handle, RA_POLICY_T ui32PolicyFlags); +/** + * @Function RA_CreateMulti + * + * @Description To create a resource arena using the + * ALLOC_MULTI callback. + * + * @Input name - the name of the arena for diagnostic purposes. + * @Input uLog2Quantum - the arena allocation quantum. + * @Input ui32LockClass - the lock class level this arena uses. + * @Input imp_alloc - a resource allocation callback or 0. + * @Input imp_free - a resource de-allocation callback or 0. + * @Input arena_handle - private handle passed to alloc and free or 0. + * @Input ui32PolicyFlags - Policies that govern the arena. + * @Return pointer to arena, or NULL. + */ +RA_ARENA * +RA_CreateMulti(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PFN_RA_IMPORT_ALLOC_MULTI imp_alloc, + PFN_RA_IMPORT_FREE imp_free, + RA_PERARENA_HANDLE arena_handle, + RA_POLICY_T ui32PolicyFlags); + /** * @Function RA_Create_With_Span * @@ -509,6 +577,39 @@ RA_FreeMultiSparse(RA_ARENA *pArena, IMG_UINT32 *puiFreeIndices, IMG_UINT32 *puiFreeCount); +#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) +/** + * @Function RA_TransferMultiSparseIndices + * + * @Description Transfers a set of indices specified in puiTransferIndices from + * aSrcBaseArray to aDstBaseArray. + * Called when some pages of the base array need to be + * transfered to another base array. As a result of this call, + * some ghost addresses in aBaseArray might be converted to + * real addresses before being transferred.. + * + * @Input pArena - The arena the segment was originally allocated from. + * @Input aSrcBaseArray - The array to transfer bases from. + * @Input uiSrcBaseArraySize - Size of the array to transfer bases from. + * @Input aDstBaseArray - The array to transfer bases to. + * @Input uiDstBaseArraySize - Size of the array to transfer bases to. + * @Input uiLog2ChunkSize - The log2 chunk size used to generate the Ghost bases. + * @Input puiTransferIndices - The indices into the array to be extracted. + * @InOut puiTransferCount - The number of bases to prepare for extraction. 
+ * + * @Return PVRSRV_OK - success + */ +PVRSRV_ERROR +RA_TransferMultiSparseIndices(RA_ARENA *pArena, + RA_BASE_ARRAY_T aSrcBaseArray, + RA_BASE_ARRAY_SIZE_T uiSrcBaseArraySize, + RA_BASE_ARRAY_T aDstBaseArray, + RA_BASE_ARRAY_SIZE_T uiDstBaseArraySize, + IMG_UINT32 uiLog2ChunkSize, + IMG_UINT32 *puiTransferIndices, + IMG_UINT32 *puiTransferCount); +#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */ + /** * @Function RA_Alloc_Range * @@ -590,6 +691,18 @@ RA_SwapSparseMem(RA_ARENA *pArena, IMG_INTERNAL void RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats); +/** + * @Function RA_GetArenaName + * + * @Description To obtain the arena name. + * + * @Input pArena - the arena to acquire the name from. + * + * @Return IMG_CHAR* Arena name. + */ +IMG_INTERNAL IMG_CHAR * +RA_GetArenaName(RA_ARENA *pArena); + IMG_INTERNAL RA_ARENA_ITERATOR * RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments); @@ -599,9 +712,39 @@ RA_IteratorReset(RA_ARENA_ITERATOR *pIter); IMG_INTERNAL void RA_IteratorRelease(RA_ARENA_ITERATOR *pIter); +/*************************************************************************/ /*! +@Function RA_IteratorNext +@Description Function used to iterate over segments in the arena. Segments + can either be allocated or free. Iteration can be configured + to include free segments when creating the iterator via + RA_IteratorAcquire. + +@Input pIter Iteration handle used to keep state between calls +@InOut pData Data about current block iteration, base and size + etc... +@Return IMG_BOOL Boolean value signalling if next element exists +*/ /**************************************************************************/ IMG_INTERNAL IMG_BOOL RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData); +/*************************************************************************/ /* +@Function RA_IteratorNextSpan +@Description Function used to iterate over spans in the arena. Spans + can either be allocated or free. Iteration can be configured + to include free spans when creating the iterator via + RA_IteratorAcquire. Spans are not allocations on the arena but + instead the regions of resource added that allocations can + be made from. + See RA_Add(). + +@Input pIter Iteration handle used to keep state between calls +@InOut pData Data about current block iteration, base and size + etc... +@Return IMG_BOOL Boolean value signalling if next element exists +*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +RA_IteratorNextSpan(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData); + /*************************************************************************/ /*! 
@Function RA_BlockDump @Description Debug dump of all memory allocations within the RA and the space diff --git a/drivers/gpu/drm/img/img-volcanic/services/shared/include/sync_internal.h b/drivers/gpu/drm/img/img-volcanic/services/shared/include/sync_internal.h index 29c836054cae..929b66e0f37c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/shared/include/sync_internal.h +++ b/drivers/gpu/drm/img/img-volcanic/services/shared/include/sync_internal.h @@ -91,28 +91,13 @@ typedef struct SYNC_PRIM_BLOCK_TAG DLLIST_NODE sListNode; /*!< List node for the sync block list */ } SYNC_PRIM_BLOCK; -typedef enum SYNC_PRIM_TYPE_TAG -{ - SYNC_PRIM_TYPE_UNKNOWN = 0, - SYNC_PRIM_TYPE_LOCAL, - SYNC_PRIM_TYPE_SERVER, -} SYNC_PRIM_TYPE; - -typedef struct SYNC_PRIM_LOCAL_TAG +typedef struct SYNC_PRIM_TAG { - ATOMIC_T hRefCount; /*!< Ref count for this sync */ + PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */ + ATOMIC_T hRefCount; /*!< Ref count for this sync */ SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ IMG_HANDLE hRecord; /*!< Sync record handle */ -} SYNC_PRIM_LOCAL; - -typedef struct SYNC_PRIM_TAG -{ - PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */ - SYNC_PRIM_TYPE eType; /*!< Sync primitive type */ - union { - SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */ - } u; } SYNC_PRIM; diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/common/env/linux/pci_support.c b/drivers/gpu/drm/img/img-volcanic/services/system/common/env/linux/pci_support.c index c3bbcc46cb2c..a8321a493e3e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/common/env/linux/pci_support.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/common/env/linux/pci_support.c @@ -54,9 +54,7 @@ typedef struct _PVR_PCI_DEV_TAG struct pci_dev *psPCIDev; HOST_PCI_INIT_FLAGS ePCIFlags; IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) int iMTRR[DEVICE_COUNT_RESOURCE]; -#endif } PVR_PCI_DEV; /*************************************************************************/ /*! 
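The pci_support.c hunks that follow drop the pre-4.1 mtrr_add()/mtrr_del() fallback, leaving the iMTRR[] bookkeeping above unconditional. A condensed sketch of the per-BAR write-combining lifecycle that the reworked OSPCIClearResourceMTRRs()/OSPCIReleaseResourceMTRRs() implement is shown below; it is not part of the patch, the wrapper names pvr_pci_bar_set_wc()/pvr_pci_bar_clear_wc() are hypothetical, and only kernel calls and error codes that appear in the following hunks are used.

#include <linux/pci.h>
#include <linux/io.h>
#include "pvrsrv_error.h"

/* Sketch only: pci_resource_len() equals the (end + 1 - start) size the driver
 * computes by hand; PVR_PCI_DEV and iMTRR[] are the structures defined in the
 * surrounding code. */
static PVRSRV_ERROR pvr_pci_bar_set_wc(PVR_PCI_DEV *psPVRPCI, IMG_UINT32 ui32Index)
{
	resource_size_t start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
	resource_size_t len = pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
	int res;

	if (arch_io_reserve_memtype_wc(start, len) != 0) /* reserve a WC PAT memtype */
	{
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}

	res = arch_phys_wc_add(start, len); /* add a WC MTRR where MTRRs are in use */
	if (res < 0)
	{
		arch_io_free_memtype_wc(start, len);
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}

	psPVRPCI->iMTRR[ui32Index] = res; /* keep the handle for teardown */
	return PVRSRV_OK;
}

static void pvr_pci_bar_clear_wc(PVR_PCI_DEV *psPVRPCI, IMG_UINT32 ui32Index)
{
	if (psPVRPCI->iMTRR[ui32Index] >= 0)
	{
		arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]);
		psPVRPCI->iMTRR[ui32Index] = -1;
		arch_io_free_memtype_wc(pci_resource_start(psPVRPCI->psPCIDev, ui32Index),
		                        pci_resource_len(psPVRPCI->psPCIDev, ui32Index));
	}
}

Keeping the arch_phys_wc_add() handle per BAR is what lets the release path undo both the MTRR and the PAT memtype reservation without re-deriving any state.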
@@ -113,9 +111,7 @@ PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) psPVRPCI->iMTRR[i] = -1; -#endif } return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; @@ -602,91 +598,19 @@ PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 u start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) res = arch_io_reserve_memtype_wc(start, end - start); if (res) { return PVRSRV_ERROR_PCI_CALL_FAILED; } -#endif res = arch_phys_wc_add(start, end - start); if (res < 0) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) arch_io_free_memtype_wc(start, end - start); -#endif return PVRSRV_ERROR_PCI_CALL_FAILED; } psPVRPCI->iMTRR[ui32Index] = res; -#else - - res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0); - if (res < 0) - { - printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); - return PVRSRV_ERROR_PCI_CALL_FAILED; - } - - res = mtrr_del(res, start, end - start); - if (res < 0) - { - printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); - return PVRSRV_ERROR_PCI_CALL_FAILED; - } - - /* Workaround for overlapping MTRRs. */ - { - IMG_BOOL bGotMTRR0 = IMG_FALSE; - - /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning - * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic & - * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour. - * - * WRBACK is incompatible with some PCI devices, so try to split - * the UNCACHABLE regions up and insert a WRCOMB region instead. - */ - res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0); - if (res < 0) - { - /* If this fails, services has probably run before and created - * a write-combined MTRR for the test chip. Assume it has, and - * don't return an error here. 
- */ - return PVRSRV_OK; - } - - if (res == 0) - bGotMTRR0 = IMG_TRUE; - - res = mtrr_del(res, start, end - start); - if (res < 0) - { - printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); - return PVRSRV_ERROR_PCI_CALL_FAILED; - } - - if (bGotMTRR0) - { - /* Replace 0 with a non-overlapping WRBACK MTRR */ - res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0); - if (res < 0) - { - printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); - return PVRSRV_ERROR_PCI_CALL_FAILED; - } - - /* Add a WRCOMB MTRR for the PCI device memory bar */ - res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0); - if (res < 0) - { - printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); - return PVRSRV_ERROR_PCI_CALL_FAILED; - } - } - } -#endif return PVRSRV_OK; } @@ -699,7 +623,6 @@ PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 u */ /**************************************************************************/ void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; if (psPVRPCI->iMTRR[ui32Index] >= 0) @@ -707,7 +630,6 @@ void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Ind arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]); psPVRPCI->iMTRR[ui32Index] = -1; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) { resource_size_t start, end; @@ -716,11 +638,6 @@ void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Ind arch_io_free_memtype_wc(start, end - start); } -#endif } -#else - PVR_UNREFERENCED_PARAMETER(hPVRPCI); - PVR_UNREFERENCED_PARAMETER(ui32Index); -#endif } #endif /* defined(CONFIG_MTRR) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/common/sysconfig_cmn.c b/drivers/gpu/drm/img/img-volcanic/services/system/common/sysconfig_cmn.c index ac878dd8f709..d8a09182dc04 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/common/sysconfig_cmn.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/common/sysconfig_cmn.c @@ -47,6 +47,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "pvrsrv_device.h" #include "syscommon.h" #include "pvr_debug.h" +#include "os_apphint.h" +//#include "physmem.h" void SysRGXErrorNotify(IMG_HANDLE hSysData, PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData) @@ -73,6 +75,8 @@ void SysRGXErrorNotify(IMG_HANDLE hSysData, } case RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR: case RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR: + case RGX_CONTEXT_RESET_REASON_GPU_PARITY_HWR: + case RGX_CONTEXT_RESET_REASON_GPU_LATENT_HWR: { ui32DgbLvl = PVR_DBG_WARNING; break; @@ -80,9 +84,13 @@ void SysRGXErrorNotify(IMG_HANDLE hSysData, case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: case RGX_CONTEXT_RESET_REASON_FW_ECC_ERR: + case RGX_CONTEXT_RESET_REASON_FW_PTE_PARITY_ERR: + case RGX_CONTEXT_RESET_REASON_FW_PARITY_ERR: case RGX_CONTEXT_RESET_REASON_FW_WATCHDOG: case RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT: case RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR: + case RGX_CONTEXT_PVRIC_SIGNATURE_MISMATCH: + case RGX_CONTEXT_RESET_REASON_DCLS_ERR: { ui32DgbLvl = PVR_DBG_ERROR; break; @@ -127,6 +135,33 @@ void SysRGXErrorNotify(IMG_HANDLE hSysData, #endif /* PVRSRV_NEED_PVR_DPF */ } +IMG_UINT64 SysRestrictGpuLocalPhysheap(IMG_UINT64 uiHeapSize) +{ + return uiHeapSize; +} + +IMG_BOOL SysRestrictGpuLocalAddPrivateHeap(void) +{ + return IMG_FALSE; +} + +IMG_BOOL SysDefaultToCpuLocalHeap(void) +{ +//#if (TC_MEMORY_CONFIG == TC_MEMORY_HYBRID) + void *pvAppHintState = NULL; + IMG_BOOL bAppHintDefault = IMG_FALSE; + IMG_BOOL bSetToCPULocal = IMG_FALSE; + + OSCreateAppHintState(&pvAppHintState); + OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, + PhysHeapHybridDefault2CpuLocal, &bAppHintDefault, &bSetToCPULocal); + OSFreeAppHintState(pvAppHintState); + + return bSetToCPULocal; +//#else +// return IMG_FALSE; +//#endif +} /****************************************************************************** End of file (sysconfig_cmn.c) ******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/common/uma_heap_fns.c b/drivers/gpu/drm/img/img-volcanic/services/system/common/uma_heap_fns.c new file mode 100644 index 000000000000..77e481a74e42 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/common/uma_heap_fns.c @@ -0,0 +1,88 @@ +/***************************************************************************** +@File +@Title Boilerplate heap functions for UMA systems +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements generic nop conversions for physical addresses in + UMA systems. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + +#include "uma_heap_fns.h" + +/* + * CPU to Device physical address translation + */ +static +void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; + } +} +/* + * Device to CPU physical address translation + */ +static +void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + /* Optimise common case */ + psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; + } +} + +PHYS_HEAP_FUNCTIONS g_sUmaHeapFns = { + .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr, +}; diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/common/vmm_type_stub.c b/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/common/vmm_type_stub.c index cafe57bbab77..0f52c3447e88 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/common/vmm_type_stub.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/common/vmm_type_stub.c @@ -94,17 +94,21 @@ static VMM_PVZ_CONNECTION gsStubVmmPvz = } }; -PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) { + PVR_UNREFERENCED_PARAMETER(psDevConfig); PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); *psPvzConnection = &gsStubVmmPvz; PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); return PVRSRV_OK; } -void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) { - 
PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); } /****************************************************************************** diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysconfig.c index d94c76cc68b9..d3dec65f39c9 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysconfig.c @@ -55,7 +55,7 @@ IMG_UINT64 *cpu_cache_flush_addr = NULL; extern void eswin_l2_flush64(phys_addr_t addr, size_t size); #else void eswin_l2_flush64(phys_addr_t addr, size_t size) { - arch_sync_dma_for_device(addr, size, DMA_TO_DEVICE); + arch_sync_dma_for_device(addr, size, DMA_TO_DEVICE); }; #endif void riscv_invalidate_addr(phys_addr_t addr, size_t size,IMG_BOOL virtual) { @@ -184,14 +184,15 @@ static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = }; static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut, - IMG_UINT32 *puiPhysHeapCountOut) + IMG_UINT32 *puiPhysHeapCountOut, + PVRSRV_DEVICE_CONFIG *psDevConfig) { PHYS_HEAP_CONFIG *pasPhysHeaps; IMG_UINT32 ui32NextHeapID = 0; IMG_UINT32 uiHeapCount = 1; - uiHeapCount += !PVRSRV_VZ_MODE_IS(NATIVE) ? 1:0; + uiHeapCount += !PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDevConfig) ? 1:0; pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); if (!pasPhysHeaps) @@ -200,17 +201,19 @@ static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut, } pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; - pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM"; + //eswin fixme: not sure if following states are okay! + pasPhysHeaps[ui32NextHeapID].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; - pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs; + pasPhysHeaps[ui32NextHeapID].uConfig.sUMA.psMemFuncs = &gsPhysHeapFuncs; ui32NextHeapID++; - if (! PVRSRV_VZ_MODE_IS(NATIVE)) + if (! PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDevConfig)) { pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; - pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM"; + //eswin fixme: not sure if following states are okay! 
+ pasPhysHeaps[ui32NextHeapID].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; - pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs; + pasPhysHeaps[ui32NextHeapID].uConfig.sUMA.psMemFuncs = &gsPhysHeapFuncs; ui32NextHeapID++; } @@ -229,6 +232,7 @@ void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) { int ret; + printk(KERN_WARNING "eswin zdbg1107 %s\n", __func__); PhysHeapsDestroy(psDevConfig->pasPhysHeaps); /* eswin, assert the reset */ @@ -344,8 +348,8 @@ static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice, PVRSRV_DEVICE_CONFIG ** psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); - - eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount); + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; + eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount, psDevConfig); if (eError) { goto ErrorFreeDevConfig; @@ -373,7 +377,7 @@ static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice, PVRSRV_DEVICE_CONFIG ** // get gpu clk, it is supposed to be 800MHz rgx_freq = clk_get_rate(psDevConfig->aclk); - printk(KERN_ALERT "%s:%d, eswin print : read back aclk %dHZ \n", __func__, __LINE__, rgx_freq); + printk(KERN_ALERT "%s:%d, zdbg1107 eswin print : read back aclk %dHZ \n", __func__, __LINE__, rgx_freq); ret = clk_prepare_enable(psDevConfig->aclk); if (ret) @@ -541,7 +545,9 @@ static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice, PVRSRV_DEVICE_CONFIG ** psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; - if (! PVRSRV_VZ_MODE_IS(NATIVE)) + //eswin fixme: DRIVER_MODE_NATIVE okay? + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; + if (! PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDevConfig)) { /* Virtualization support services needs to know which heap ID corresponds to FW */ // psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PHYS_HEAP_IDX_VIRTFW; diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysinfo.h index 98901c19936e..ec76a48bb4bc 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysinfo.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/eswin_cpu/sysinfo.h @@ -18,6 +18,7 @@ //#define RGX_ESWIN_PROJECT_V91 #define SYS_RGX_DEV_NAME "pvrsrvkm" +#define EVENT_OBJECT_TIMEOUT_US (100000) #define RGX_ESWIN_DISP_NUM_BUFS 2 //pay attention to this val diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/include/syscommon.h b/drivers/gpu/drm/img/img-volcanic/services/system/include/syscommon.h index 934974834e50..b7babda19cab 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/include/syscommon.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/include/syscommon.h @@ -143,4 +143,33 @@ PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData); void SysRGXErrorNotify(IMG_HANDLE hSysData, PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData); +/**************************************************************************/ /*! +@Function SysRestrictGpuLocalPhysheap +@Description If the Restriction apphint has been set, validate the + restriction value and return the new GPU_LOCAL heap size. + +@Input uiHeapSize Current syslayer detected GPU_LOCAL heap size. +@Return IMG_UINT64 New GPU_LOCAL heap size in bytes. 
+*/ /***************************************************************************/ +IMG_UINT64 SysRestrictGpuLocalPhysheap(IMG_UINT64 uiHeapSize); + +/**************************************************************************/ /*! +@Function SysRestrictGpuLocalAddPrivateHeap +@Description Determine if the restriction apphint has been set. + +@Return IMG_BOOL IMG_TRUE if the restriction apphint has been + set. +*/ /***************************************************************************/ +IMG_BOOL SysRestrictGpuLocalAddPrivateHeap(void); + +/**************************************************************************/ /*! +@Function SysDefaultToCpuLocalHeap +@Description Determine if the Default Heap should be CPU_LOCAL + Can only be used on TC_MEMORY_HYBRID systems. + +@Return IMG_BOOL IMG_TRUE if the Default heap apphint has been + set. +*/ /***************************************************************************/ +IMG_BOOL SysDefaultToCpuLocalHeap(void); + #endif /* !defined(SYSCOMMON_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/include/sysvalidation.h b/drivers/gpu/drm/img/img-volcanic/services/system/include/sysvalidation.h index 5f6d5f9c67dc..e3e731e8b810 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/include/sysvalidation.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/include/sysvalidation.h @@ -50,12 +50,13 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "rgxdefs_km.h" #include "virt_validation_defs.h" -void SysInitVirtInitialization(IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], - IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); +void SysInitFirewall(IMG_HANDLE hSysData, + IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); -#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) -void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); -void SysSetTrustedDeviceAceEnabled(void); +#if defined(EMULATOR) +void SysSetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState); +void SysSetTrustedDeviceAceEnabled(IMG_HANDLE hSysData); #endif #endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/include/uma_heap_fns.h b/drivers/gpu/drm/img/img-volcanic/services/system/include/uma_heap_fns.h new file mode 100644 index 000000000000..bfef1e520880 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/include/uma_heap_fns.h @@ -0,0 +1,53 @@ +/***************************************************************************** +@File +@Title Boilerplate heap functions for UMA systems +@Codingstyle IMG +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares generic nop conversions for physical addresses in UMA + systems. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + +#ifndef UMA_HEAP_FNS_H +#define UMA_HEAP_FNS_H + +#include "physheap_config.h" + +extern PHYS_HEAP_FUNCTIONS g_sUmaHeapFns; + +#endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/Kbuild.mk index 2ea1c928f391..14b67465936f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/Kbuild.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/Kbuild.mk @@ -38,15 +38,8 @@ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
### ########################################################################### -PVRSRVKM_NAME = $(PVRSRV_MODNAME) - -$(PVRSRVKM_NAME)-y += \ - services/system/$(PVR_SYSTEM)/sysconfig.o \ - services/server/common/vmm_pvz_client.o \ - services/server/common/vmm_pvz_server.o \ - services/server/common/vz_vmm_pvz.o \ - services/server/common/vz_vmm_vm.o \ - services/system/rogue/common/vmm_type_$(VMM_TYPE).o +$(PVRSRV_MODNAME)-y += \ + services/system/$(PVR_SYSTEM)/sysconfig.o ccflags-y += \ -I$(TOP)/services/system/rogue/common/env/linux diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysconfig.c index c7d4585e8c82..df4613df5fe7 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysconfig.c @@ -62,20 +62,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define PHYS_HEAP_ID_CPU_LOCAL 0 #elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) #define PHYS_HEAP_ID_GPU_LOCAL 0 -#define PHYS_HEAP_ID_CPU_LOCAL 1 +#define PHYS_HEAP_ID_FW_SHARED 1 +#define PHYS_HEAP_ID_CPU_LOCAL 2 #if defined(SUPPORT_PLATO_DISPLAY) -#define PHYS_HEAP_ID_PDP_LOCAL 2 -#define PHYS_HEAP_ID_NON_MAPPABLE 3 +#define PHYS_HEAP_ID_PDP_LOCAL 3 +#define PHYS_HEAP_ID_NON_MAPPABLE 4 #else -#define PHYS_HEAP_ID_NON_MAPPABLE 2 +#define PHYS_HEAP_ID_NON_MAPPABLE 3 #endif + #elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) #define PHYS_HEAP_ID_GPU_LOCAL 0 +#define PHYS_HEAP_ID_FW_SHARED 1 #if defined(SUPPORT_PLATO_DISPLAY) -#define PHYS_HEAP_ID_PDP_LOCAL 1 -#define PHYS_HEAP_ID_NON_MAPPABLE 2 +#define PHYS_HEAP_ID_PDP_LOCAL 2 +#define PHYS_HEAP_ID_NON_MAPPABLE 3 #else -#define PHYS_HEAP_ID_NON_MAPPABLE 1 +#define PHYS_HEAP_ID_NON_MAPPABLE 2 #endif #endif @@ -167,59 +170,82 @@ static PVRSRV_ERROR InitLocalHeaps(SYS_DATA *psSysData, IMG_HANDLE hPhysHeapPrivData) { PHYS_HEAP_CONFIG *psPhysHeap; + IMG_UINT64 ui64FwCarveoutSize = (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL]; psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; - psPhysHeap->pszPDumpMemspaceName = "LMA"; - psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs; - psPhysHeap->hPrivData = hPhysHeapPrivData; psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; + psPhysHeap->uConfig.sLMA.pszPDumpMemspaceName = "LMA"; + psPhysHeap->uConfig.sLMA.psMemFuncs = &gsLocalPhysHeapFuncs; + psPhysHeap->uConfig.sLMA.pszHeapName = "lma_gpu_local"; + psPhysHeap->uConfig.sLMA.hPrivData = hPhysHeapPrivData; /* Configure mappable heap region */ - psPhysHeap->sStartAddr.uiAddr = psSysData->pdata->rogue_heap_mappable.base; - psPhysHeap->sCardBase.uiAddr = psSysData->pdata->rogue_heap_dev_addr; - psPhysHeap->uiSize = psSysData->pdata->rogue_heap_mappable.size; + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr = psSysData->pdata->rogue_heap_mappable.base; + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr = psSysData->pdata->rogue_heap_dev_addr; + psPhysHeap->uConfig.sLMA.uiSize = psSysData->pdata->rogue_heap_mappable.size - ui64FwCarveoutSize; PVR_LOG(("Added mappable local memory heap. 
Base = 0x%016llx, Size=0x%016llx", - psPhysHeap->sCardBase.uiAddr, - psPhysHeap->uiSize)); + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr, + psPhysHeap->uConfig.sLMA.uiSize)); + + psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_FW_SHARED]; + psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; + psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED; + psPhysHeap->uConfig.sLMA.pszPDumpMemspaceName = "LMA"; + psPhysHeap->uConfig.sLMA.psMemFuncs = &gsLocalPhysHeapFuncs; + psPhysHeap->uConfig.sLMA.pszHeapName = "lma_fw_shared"; + psPhysHeap->uConfig.sLMA.hPrivData = hPhysHeapPrivData; + + /* Configure mappable heap region */ + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr = psSysData->pdata->rogue_heap_mappable.base + + pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.uiSize; + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr = psSysData->pdata->rogue_heap_dev_addr + + pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.uiSize; + psPhysHeap->uConfig.sLMA.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + + PVR_LOG(("Added mappable Firmware heap. Base = 0x%016llx, Size=0x%016llx", + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr, + psPhysHeap->uConfig.sLMA.uiSize)); /* Setup non-mappable region if BAR size is less than actual memory size (8GB) */ if (PLATO_HAS_NON_MAPPABLE(psSysData)) { psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_NON_MAPPABLE]; psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; - psPhysHeap->pszPDumpMemspaceName = "LMA"; - psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs; - psPhysHeap->hPrivData = hPhysHeapPrivData; psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_PRIVATE; + psPhysHeap->uConfig.sLMA.pszPDumpMemspaceName = "LMA"; + psPhysHeap->uConfig.sLMA.psMemFuncs = &gsLocalPhysHeapFuncs; + psPhysHeap->uConfig.sLMA.pszHeapName = "lma_gpu_private"; + psPhysHeap->uConfig.sLMA.hPrivData = hPhysHeapPrivData; - psPhysHeap->sCardBase.uiAddr = psSysData->pdata->rogue_heap_nonmappable.base; - psPhysHeap->uiSize = psSysData->pdata->rogue_heap_nonmappable.size; - psPhysHeap->sStartAddr.uiAddr = 0; + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr = psSysData->pdata->rogue_heap_nonmappable.base; + psPhysHeap->uConfig.sLMA.uiSize = psSysData->pdata->rogue_heap_nonmappable.size; + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr = 0; PVR_LOG(("Added non-mappable local memory heap. Base = 0x%016llx, Size=0x%016llx", - psPhysHeap->sCardBase.uiAddr, - psPhysHeap->uiSize)); + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr, + psPhysHeap->uConfig.sLMA.uiSize)); - PVR_ASSERT(psPhysHeap->uiSize < SYS_DEV_MEM_REGION_SIZE); + PVR_ASSERT(psPhysHeap->uConfig.sLMA.uiSize < SYS_DEV_MEM_REGION_SIZE); } #if defined(SUPPORT_PLATO_DISPLAY) psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_PDP_LOCAL]; psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; - psPhysHeap->pszPDumpMemspaceName = "LMA"; - psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs; - psPhysHeap->hPrivData = hPhysHeapPrivData; psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_EXTERNAL; + psPhysHeap->uConfig.sLMA.pszPDumpMemspaceName = "LMA"; + psPhysHeap->uConfig.sLMA.psMemFuncs = &gsLocalPhysHeapFuncs; + psPhysHeap->uConfig.sLMA.pszHeapName = "lma_pdp_local"; + psPhysHeap->uConfig.sLMA.hPrivData = hPhysHeapPrivData; - psPhysHeap->sCardBase.uiAddr = PLATO_DDR_DEV_PHYSICAL_BASE; - psPhysHeap->sStartAddr.uiAddr = psSysData->pdata->pdp_heap.base; - psPhysHeap->uiSize = psSysData->pdata->pdp_heap.size; + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr = PLATO_DDR_DEV_PHYSICAL_BASE; + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr = psSysData->pdata->pdp_heap.base; + psPhysHeap->uConfig.sLMA.uiSize = psSysData->pdata->pdp_heap.size; PVR_LOG(("Added PDP heap. 
Base = 0x%016llx, Size=0x%016llx", - psPhysHeap->sStartAddr.uiAddr, - psPhysHeap->uiSize)); + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr, + psPhysHeap->uConfig.sLMA.uiSize)); #endif return PVRSRV_OK; @@ -238,19 +264,21 @@ static PVRSRV_ERROR InitHostHeaps(SYS_DATA *psSysData, psPhysHeap = &pasPhysHeaps[0]; psPhysHeap->eType = PHYS_HEAP_TYPE_UMA; - psPhysHeap->pszPDumpMemspaceName = "SYSMEM"; - psPhysHeap->psMemFuncs = &gsHostPhysHeapFuncs; - psPhysHeap->hPrivData = hPhysHeapPrivData; + psPhysHeap->uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; + psPhysHeap->uConfig.sUMA.psMemFuncs = &gsHostPhysHeapFuncs; #if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; + psPhysHeap->uConfig.sUMA.pszHeapName = "uma_gpu_local"; #elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; + psPhysHeap->uConfig.sUMA.pszHeapName = "uma_cpu_local"; PVR_DPF((PVR_DBG_WARNING, "Initialising CPU_LOCAL UMA Host PhysHeaps")); #if !defined(SUPPORT_PLATO_DISPLAY) psPhysHeap->ui32UsageFlags |= PHYS_HEAP_USAGE_EXTERNAL; #endif #endif - psPhysHeap->sCardBase.uiAddr = PLATO_HOSTRAM_DEV_PHYSICAL_BASE; + psPhysHeap->uConfig.sUMA.sCardBase.uiAddr = PLATO_HOSTRAM_DEV_PHYSICAL_BASE; + psPhysHeap->uConfig.sUMA.hPrivData = hPhysHeapPrivData; return PVRSRV_OK; } @@ -283,9 +311,13 @@ static PVRSRV_ERROR InitHybridHeaps(SYS_DATA *psSysData, } /* Adjust the pdump memory space names */ - pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].pszPDumpMemspaceName = "LMA0"; + pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.pszPDumpMemspaceName = "LMA0"; + pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.pszHeapName = "lma0_gpu_local"; + pasPhysHeaps[PHYS_HEAP_ID_FW_SHARED].uConfig.sLMA.pszPDumpMemspaceName = "LMA0"; + pasPhysHeaps[PHYS_HEAP_ID_FW_SHARED].uConfig.sLMA.pszHeapName = "lma0_fw_shared"; #if defined(SUPPORT_PLATO_DISPLAY) - pasPhysHeaps[PHYS_HEAP_ID_PDP_LOCAL].pszPDumpMemspaceName = "LMA1"; + pasPhysHeaps[PHYS_HEAP_ID_PDP_LOCAL].uConfig.sLMA.pszPDumpMemspaceName = "LMA1"; + pasPhysHeaps[PHYS_HEAP_ID_PDP_LOCAL].uConfig.sLMA.pszHeapName = "lma1_pdp_local"; #endif return PVRSRV_OK; } @@ -296,7 +328,7 @@ static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psSysData, PHYS_HEAP_CONFIG **ppasPhysHeapsOut, IMG_UINT32 *puiPhysHeapCountOut) { - IMG_UINT32 uiHeapCount = 1; + IMG_UINT32 uiHeapCount = 2; /* GPU_LOCAL + FW_SHARED*/ PHYS_HEAP_CONFIG *pasPhysHeaps; PVRSRV_ERROR eError; @@ -349,16 +381,16 @@ static void PlatoLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, /* Optimise common case */ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr - - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sStartAddr.uiAddr + - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sCardBase.uiAddr; + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sStartAddr.uiAddr + + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sCardBase.uiAddr; if (ui32NumOfAddr > 1) { IMG_UINT32 ui32Idx; for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) { psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr - - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sStartAddr.uiAddr + - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sCardBase.uiAddr; + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sStartAddr.uiAddr + + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sCardBase.uiAddr; } } } @@ -372,16 +404,16 @@ static void PlatoLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, /* Optimise common case */ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr - 
- psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sCardBase.uiAddr + - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sStartAddr.uiAddr; + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sCardBase.uiAddr + + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sStartAddr.uiAddr; if (ui32NumOfAddr > 1) { IMG_UINT32 ui32Idx; for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) { psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr - - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sCardBase.uiAddr + - psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sStartAddr.uiAddr; + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sCardBase.uiAddr + + psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sStartAddr.uiAddr; } } } @@ -503,8 +535,8 @@ static PVRSRV_ERROR PlatoLocalMemoryTest(PVRSRV_DEVICE_CONFIG *psDevConfig) IMG_UINT32 tmp = 0; IMG_UINT32 chunk = sizeof(IMG_UINT32) * 10; - IMG_UINT64 ui64TestMemoryBase = psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sStartAddr.uiAddr; - IMG_UINT64 ui64TestMemorySize = psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uiSize; + IMG_UINT64 ui64TestMemoryBase = psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.sStartAddr.uiAddr; + IMG_UINT64 ui64TestMemorySize = psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uConfig.sLMA.uiSize; PVR_LOG(("%s: Starting Local memory test from 0x%llx to 0x%llx (in CPU space)", __func__, ui64TestMemoryBase, ui64TestMemoryBase + ui64TestMemorySize)); diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysinfo.h index eb4554fca5a1..fd796a75797f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysinfo.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_linux_plato/sysinfo.h @@ -44,18 +44,40 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#if !defined(SYSINFO_H) #define SYSINFO_H +#if defined(__KERNEL__) +#include "plato_drv.h" +#endif + #define SYS_RGX_DEV_VENDOR_ID (0x1AEE) #define SYS_RGX_DEV_DEVICE_ID (0x0003) -#define SYS_RGX_DEV_NAME "plato_rogue" + +#if defined(__KERNEL__) +#if defined(PLATO_MULTI_DEVICE) +#define SYS_RGX_DEV_NAME_0 PLATO_MAKE_DEVICE_NAME_ROGUE(0) +#define SYS_RGX_DEV_NAME_1 PLATO_MAKE_DEVICE_NAME_ROGUE(1) +#define SYS_RGX_DEV_NAME_2 PLATO_MAKE_DEVICE_NAME_ROGUE(2) +#define SYS_RGX_DEV_NAME_3 PLATO_MAKE_DEVICE_NAME_ROGUE(3) +#else +#define SYS_RGX_DEV_NAME PLATO_DEVICE_NAME_ROGUE +#endif +#endif /*!< System specific poll/timeout details */ #if defined(VIRTUAL_PLATFORM) || defined(EMULATOR) /* Emulator clock ~600 times slower than HW */ #define MAX_HW_TIME_US (300000000) #define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1000000) + +#if defined(VIRTUAL_PLATFORM) +#define EVENT_OBJECT_TIMEOUT_US (120000000) +#elif defined(EMULATOR) +#define EVENT_OBJECT_TIMEOUT_US (2000000) +#endif + #else #define MAX_HW_TIME_US (500000) #define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(100000) +#define EVENT_OBJECT_TIMEOUT_US (100000) #endif #define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/Kbuild.mk new file mode 100644 index 000000000000..41b532383198 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/Kbuild.mk @@ -0,0 +1,52 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### +$(PVRSRV_MODNAME)-y += \ + services/system/$(PVR_SYSTEM)/sysconfig.o \ + services/system/common/sysconfig_cmn.o \ + services/system/common/uma_heap_fns.o + +ifeq ($(SUPPORT_ION),1) +$(PVRSRV_MODNAME)-y += services/system/common/env/linux/ion_support_generic.o +endif + +ifeq ($(PVR_ARCH),volcanic) + ccflags-y += -DVOLCANIC_FBCDCV31 +endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.c new file mode 100644 index 000000000000..4543f8e7d25f --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.c @@ -0,0 +1,330 @@ +/*************************************************************************/ /*! +@File +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "syscommon.h" +#include "vz_vmm_pvz.h" +#include "allocmem.h" +#include "sysinfo.h" +#include "sysconfig.h" +#include "physheap.h" +#include "pvr_debug.h" +#if defined(SUPPORT_ION) +#include "ion_support.h" +#endif +#include "uma_heap_fns.h" + +#if defined(__linux__) +#include +#endif + + +/* + * In systems that support trusted device address protection, there are three + * physical heaps from which pages should be allocated: + * - one heap for normal allocations + * - one heap for allocations holding META code memory + * - one heap for allocations holding secured DRM data + */ + +#define PHYS_HEAP_IDX_GENERAL 0 +#define PHYS_HEAP_IDX_FW 1 + +#if defined(SUPPORT_TRUSTED_DEVICE) +#define PHYS_HEAP_IDX_TDFWMEM 2 +#define PHYS_HEAP_IDX_TDSECUREBUF 3 +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) +#define PHYS_HEAP_IDX_FW_MEMORY 2 +#endif + +/* Change to test CPU_LOCAL sys layers*/ +#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_GPU_LOCAL +//#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_CPU_LOCAL + +#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_GPU_LOCAL +//#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_CPU_LOCAL + +#if defined(PVRSRV_ENABLE_XD_MEM) +static DECLARE_PHYS_HEAP_SPAS_REGION(sSpasSystemMemory); +#endif + +static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + /* + * This function is called during device initialisation, which on Linux, + * means it won't be called concurrently. As such, there's no need to + * protect it with a lock or use an atomic variable. + */ + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiHeapCount = 2; + +#if defined(SUPPORT_TRUSTED_DEVICE) + uiHeapCount += 2; +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + uiHeapCount += 1; +#endif + + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); + if (!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32UsageFlags = UMA_HEAP_USAGE_FLAG; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].uConfig.sUMA.psMemFuncs = &g_sUmaHeapFns; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].uConfig.sUMA.pszHeapName = "uma_local"; +#if defined(PVRSRV_ENABLE_XD_MEM) + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].uConfig.sUMA.psSpasRegion = &sSpasSystemMemory; +#endif +#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) + /* Provide a specific set of Intermediate Physical Address modifiers + * for the default heap. Also name this as "SYSMEM_IPA" to aid PDump + * processing. + * Modifiers are: + * IPAPolicyDefault = 3 + * IPAPolicyMask = 7 + * IPAPolicyShift = 52 + * This will result in the PhysAddr values having bits 53..55 set with '3' + * by default. 
+ */ + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM_IPA"; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].sIPAConfig.ui8IPAPolicyDefault = 3; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].sIPAConfig.ui8IPAPolicyMask = 7; + pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].sIPAConfig.ui8IPAPolicyShift = 52; +#endif + + pasPhysHeaps[PHYS_HEAP_IDX_FW].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_FW].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED; + pasPhysHeaps[PHYS_HEAP_IDX_FW].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM_FW"; + pasPhysHeaps[PHYS_HEAP_IDX_FW].uConfig.sUMA.psMemFuncs = &g_sUmaHeapFns; + pasPhysHeaps[PHYS_HEAP_IDX_FW].uConfig.sUMA.pszHeapName = "uma_fw_shared"; + + +#if defined(SUPPORT_TRUSTED_DEVICE) + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].ui32UsageFlags = + PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].uConfig.sUMA.pszPDumpMemspaceName = "TDFWMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].uConfig.sUMA.psMemFuncs = &g_sUmaHeapFns; + pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].uConfig.sUMA.pszHeapName = "uma_tdfwmem"; + + pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_SECURE; + pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].uConfig.sUMA.pszPDumpMemspaceName = "TDSECBUFMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].uConfig.sUMA.psMemFuncs = &g_sUmaHeapFns; + pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].uConfig.sUMA.pszHeapName = "uma_tdsecbufmem"; + +#elif defined(SUPPORT_DEDICATED_FW_MEMORY) + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].ui32UsageFlags = + PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].uConfig.sUMA.pszPDumpMemspaceName = "DEDICATEDFWMEM"; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].uConfig.sUMA.psMemFuncs = &g_sUmaHeapFns; + pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].uConfig.sUMA.pszHeapName = "uma_dedicatedfwmem"; +#endif + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = uiHeapCount; + + return PVRSRV_OK; +} + +static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + OSFreeMem(pasPhysHeaps); +} + +static void SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features) +{ +#if defined(SUPPORT_AXI_ACE_TEST) + if ( ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK) + { + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + }else +#endif + { + psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + } +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiPhysHeapCount; + PVRSRV_ERROR eError; + +#if defined(__linux__) + dma_set_mask(pvOSDevice, DMA_BIT_MASK(40)); +#endif + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo)); + if (!psDevConfig) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + + eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount); + if (eError) + { + goto ErrorFreeDevConfig; + } + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = RGX_NOHW_CORE_CLOCK_SPEED; + 
psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + + /* Setup the device config */ + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = "nohw"; + psDevConfig->pszVersion = NULL; + + /* Device setup information */ + psDevConfig->sRegsCpuPBase.uiAddr = 0x00f00000; + psDevConfig->ui32RegsSize = 0x100000; + psDevConfig->ui32IRQ = 0x00000bad; + + psDevConfig->pasPhysHeaps = pasPhysHeaps; + psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; + psDevConfig->eDefaultHeap = UMA_DEFAULT_HEAP; + + /* No power management on no HW system */ + psDevConfig->pfnPrePowerState = NULL; + psDevConfig->pfnPostPowerState = NULL; + + /* No clock frequency either */ + psDevConfig->pfnClockFreqGet = NULL; + + psDevConfig->hDevData = psRGXData; + + psDevConfig->bDevicePA0IsValid = IMG_FALSE; + psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit; + + /* Setup other system specific stuff */ +#if defined(SUPPORT_ION) + IonInit(NULL); +#endif + + /* Pdump validation system registers */ + +#if !defined(HW_ERN_66622) && !defined(VOLCANIC_FBCDCV31) + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; +#else + psDevConfig->bHasFBCDCVersion31 = IMG_TRUE; +#endif + + /* Set psDevConfig->pfnSysDevErrorNotify callback */ + psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify; + + *ppsDevConfig = psDevConfig; + + return PVRSRV_OK; + +ErrorFreeDevConfig: + OSFreeMem(psDevConfig); + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ +#if defined(SUPPORT_ION) + IonDeinit(); +#endif + + PhysHeapsDestroy(psDevConfig->pasPhysHeaps); + OSFreeMem(psDevConfig); +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_UNREFERENCED_PARAMETER(ui32IRQ); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(pfnLISR); + PVR_UNREFERENCED_PARAMETER(pvData); + PVR_UNREFERENCED_PARAMETER(phLISRData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + PVR_UNREFERENCED_PARAMETER(hLISRData); + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (sysconfig.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmmuinit.h b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.h similarity index 74% rename from drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmmuinit.h rename to drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.h index 6e0c52edb46a..acbadc22e6a4 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/server/devices/volcanic/rgxmmuinit.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysconfig.h @@ -1,8 +1,8 @@ /*************************************************************************/ /*! 
@File -@Title Device specific initialisation routines +@Title System Description Header @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -@Description Device specific MMU initialisation +@Description This header provides system-specific declarations and macros @License Dual MIT/GPLv2 The contents of this file are subject to the MIT license as set out below. @@ -41,21 +41,18 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ -/* NB: this file is not to be included arbitrarily. It exists solely - for the linkage between rgxinit.c and rgxmmuinit.c, the former - being otherwise cluttered by the contents of the latter */ +#include "pvrsrv_device.h" +#include "rgxdevice.h" -#ifndef SRVKM_RGXMMUINIT_H -#define SRVKM_RGXMMUINIT_H +#if !defined(SYSCCONFIG_H) +#define SYSCCONFIG_H -#include "device.h" -#include "img_types.h" -#include "mmu_common.h" -#include "img_defs.h" -PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); -PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); +#define RGX_NOHW_CORE_CLOCK_SPEED 100000000 +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (0) -IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize); +/***************************************************************************** + * system specific data structures + *****************************************************************************/ -#endif /* #ifndef SRVKM_RGXMMUINIT_H */ +#endif /* SYSCCONFIG_H */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysinfo.h new file mode 100644 index 000000000000..709b54c3bd0b --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rgx_nohw/sysinfo.h @@ -0,0 +1,58 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(SYSINFO_H) +#define SYSINFO_H + +/*!< System specific poll/timeout details */ +#define MAX_HW_TIME_US (500000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) +#define EVENT_OBJECT_TIMEOUT_US (100000) + +#if defined(__linux__) +#define SYS_RGX_DEV_NAME "rgxnohw" +#endif + +#endif /* !defined(SYSINFO_H) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/env/linux/dma_support.c b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/env/linux/dma_support.c index f7a4f68bccc4..c561d0622d5f 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/env/linux/dma_support.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/env/linux/dma_support.c @@ -139,7 +139,7 @@ SysDmaAcquireKernelAddress(struct page *psPage, IMG_UINT64 ui64Size, DMA_ALLOC * } /* Remap pages into VMALLOC space */ - pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot); + pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_MAP, prot); psDmaAlloc->PageProps = prot; /* Clean-up tmp buffers */ @@ -380,7 +380,7 @@ PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc) ******************************************************************************/ void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc) { - size_t uiSize; + __maybe_unused size_t uiSize; IMG_UINT32 ui32Idx; if (psDmaAlloc == NULL || diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_stub.c b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_stub.c index cafe57bbab77..a2237281d703 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_stub.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_stub.c @@ -94,16 +94,20 @@ static VMM_PVZ_CONNECTION gsStubVmmPvz = } }; -PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) { + PVR_UNREFERENCED_PARAMETER(psDevConfig); PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); *psPvzConnection = &gsStubVmmPvz; - PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); + PVR_DPF((PVR_DBG_MESSAGE, "Using a stub VM manager type, no hypercall support")); return PVRSRV_OK; } -void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) { + 
PVR_UNREFERENCED_PARAMETER(psDevConfig); PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); } diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_vzfpga.c b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_vzfpga.c new file mode 100644 index 000000000000..d379fa061cc1 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/common/vmm_type_vzfpga.c @@ -0,0 +1,170 @@ +/*************************************************************************/ /*! +@File vmm_type_vzfpga.c +@Title VZFPGA manager type +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description VM manager implementation to support vzfpga platform +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxheapconfig.h" +#include "rgxdevice.h" + +#include "vz_vm.h" +#include "vmm_impl.h" +#include "vmm_pvz_server.h" + +static PVRSRV_ERROR +GetDriverIDFromHeapBase(IMG_UINT64 ui64Addr, IMG_UINT32 *pui32DriverID) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_DEV_PHYADDR sHostFwHeapBaseDevPA = {0}; + PHYS_HEAP *psHostFwHeap = NULL; + PVRSRV_DEVICE_NODE *psHostDevNode = PVRSRVGetDeviceInstance(0); + + PVR_LOG_RETURN_IF_FALSE((psHostDevNode != NULL), + "Host Device Node not initialised.", + PVRSRV_ERROR_NO_DEVICENODE_FOUND); + + psHostFwHeap = psHostDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]; + PVR_LOG_RETURN_IF_FALSE((psHostFwHeap != NULL), + "Host Fw heap not initialised.", + PVRSRV_ERROR_PHYSHEAP_ID_INVALID); + + eErr = PhysHeapGetDevPAddr(psHostFwHeap, &sHostFwHeapBaseDevPA); + PVR_LOG_RETURN_IF_ERROR(eErr, "PhysHeapGetDevPAddr"); + + *pui32DriverID = (ui64Addr - sHostFwHeapBaseDevPA.uiAddr) / PVRSRV_APPHINT_GUESTFWHEAPSTRIDE; + PVR_LOG_RETURN_IF_FALSE((*pui32DriverID >= RGXFW_GUEST_DRIVER_ID_START) && + (*pui32DriverID < RGX_NUM_DRIVERS_SUPPORTED), + "Invalid Guest DriverID", + PVRSRV_ERROR_INVALID_PVZ_OSID); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +VZFPGAMapDevPhysHeap(IMG_UINT64 ui64Size, + IMG_UINT64 ui64Addr) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_UINT32 ui32GuestDriverID; + + eErr = GetDriverIDFromHeapBase(ui64Addr, &ui32GuestDriverID); + PVR_LOG_RETURN_IF_ERROR(eErr, "GetDriverIDFromHeapBase"); + + eErr = PvzServerOnVmOnline(ui32GuestDriverID, 0); + PVR_LOG_RETURN_IF_ERROR(eErr, "PvzServerOnVmOnline"); + + eErr = PvzServerMapDevPhysHeap(ui32GuestDriverID, 0, ui64Size, ui64Addr); + PVR_LOG_RETURN_IF_ERROR(eErr, "PvzServerMapDevPhysHeap"); + + return eErr; +} + +static PVRSRV_ERROR +VZFPGAUnmapDevPhysHeap(void) +{ + IMG_UINT32 ui32ID; + + /* During shutdown, the Guests will be deinitialised in reverse order */ + for (ui32ID=(RGX_NUM_DRIVERS_SUPPORTED-1); + ui32ID >= RGXFW_HOST_DRIVER_ID; ui32ID--) + { + if (IsVmOnline(ui32ID, 0)) + { + PvzServerUnmapDevPhysHeap(ui32ID, 0); + PvzServerOnVmOffline(ui32ID, 0); + break; + } + } + + return PVRSRV_OK; +} + +static VMM_PVZ_CONNECTION gsVZFPGAPvz = +{ + .sClientFuncTab = { + /* pfnMapDevPhysHeap */ + &VZFPGAMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &VZFPGAUnmapDevPhysHeap + }, + + .sServerFuncTab = { + /* pfnMapDevPhysHeap */ + &PvzServerMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &PvzServerUnmapDevPhysHeap + }, + + .sVmmFuncTab = { + /* pfnOnVmOnline */ + &PvzServerOnVmOnline, + + /* pfnOnVmOffline */ + &PvzServerOnVmOffline, + + /* pfnVMMConfigure */ + &PvzServerVMMConfigure + } +}; + +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); + *psPvzConnection = &gsVZFPGAPvz; + return PVRSRV_OK; +} + +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); +} + +/****************************************************************************** + End of file (vmm_type_vzfpga.c) +******************************************************************************/ 
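The vzfpga VM manager above recovers a guest driver ID purely from the physical address of the firmware heap that the guest registers: guest heaps sit at a fixed stride above the host firmware heap base, so the ID falls out of one subtraction and one division, followed by a range check. Below is a minimal standalone sketch of that arithmetic; the stride, driver count and addresses are illustrative placeholders (driver_id_from_heap_base and the EXAMPLE_* constants are not DDK symbols), whereas the real GetDriverIDFromHeapBase() takes the stride from PVRSRV_APPHINT_GUESTFWHEAPSTRIDE and the base from the host firmware physheap.

/* Minimal sketch of the driver-ID lookup performed by GetDriverIDFromHeapBase().
 * All EXAMPLE_* values are assumptions for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FW_HEAP_STRIDE   0x2000000ULL  /* assumed 32MB per-guest heap stride */
#define EXAMPLE_NUM_DRIVERS      8U            /* assumed number of supported drivers */
#define EXAMPLE_GUEST_ID_START   1U            /* driver 0 is the host */

static int driver_id_from_heap_base(uint64_t addr, uint64_t host_fw_base,
                                    uint32_t *id_out)
{
    /* Each guest heap starts an integer number of strides above the host base. */
    uint64_t id = (addr - host_fw_base) / EXAMPLE_FW_HEAP_STRIDE;

    /* Reject addresses that do not map onto a valid guest driver slot. */
    if (id < EXAMPLE_GUEST_ID_START || id >= EXAMPLE_NUM_DRIVERS)
        return -1;

    *id_out = (uint32_t)id;
    return 0;
}

int main(void)
{
    uint32_t id;

    /* A heap base two strides above the host base resolves to guest driver 2. */
    if (driver_id_from_heap_base(0x84000000ULL, 0x80000000ULL, &id) == 0)
        printf("guest driver id = %u\n", id);

    return 0;
}
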
diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/Kbuild.mk index a51b83894e48..e5594c7feabe 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/Kbuild.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/Kbuild.mk @@ -38,25 +38,18 @@ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### ########################################################################### - -PVRSRVKM_NAME = $(PVRSRV_MODNAME) - ifeq ($(KERNEL_DRIVER_DIR),) SYSTEM_BASEDIR := services/system/rogue/$(PVR_SYSTEM) else SYSTEM_BASEDIR := external/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM) endif -$(PVRSRVKM_NAME)-y += \ +$(PVRSRV_MODNAME)-y += \ $(SYSTEM_BASEDIR)/mt8173_mfgsys.o \ $(SYSTEM_BASEDIR)/mt8173_sysconfig.o \ - services/server/common/vmm_pvz_client.o \ - services/server/common/vmm_pvz_server.o \ - services/server/common/vz_vmm_pvz.o \ - services/server/common/vz_vmm_vm.o \ - services/system/rogue/common/vmm_type_$(VMM_TYPE).o + services/system/common/uma_heap_fns.o ifeq ($(SUPPORT_ION),1) - $(PVRSRVKM_NAME)-y += \ + $(PVRSRV_MODNAME)-y += \ services/system/common/env/linux/ion_support_generic.o endif diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.c b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.c index 5080acc8c767..8aace9dae30a 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.c @@ -129,7 +129,7 @@ int mtk_mfg_enable(struct mtk_mfg *mfg) int ret; ret = pm_runtime_get_sync(mfg->dev); - if (ret) + if (ret < 0) return ret; ret = mtk_mfg_enable_clock(mfg); diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.h b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.h index 3c1e9cad1284..c8f07e6f7a26 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_mfgsys.h @@ -48,10 +48,7 @@ struct mtk_mfg { struct clk *top_mmpll; struct clk *clk26m; -#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) struct thermal_zone_device *tz; -#endif }; struct mtk_mfg *mtk_mfg_create(struct device *dev); diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_sysconfig.c index 24776190e095..58e4543670d9 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_sysconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/mt8173_sysconfig.c @@ -60,25 +60,15 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#if defined(SUPPORT_ION) #include "ion_support.h" #endif +#include "uma_heap_fns.h" #include "mt8173_mfgsys.h" #define SYS_RGX_ACTIVE_POWER_LATENCY_MS 10 #define RGX_HW_CORE_CLOCK_SPEED 395000000 +#define MT8173_SYSTEM_NAME "mt8173" -/* Setup RGX specific timing data */ -static RGX_TIMING_INFORMATION gsRGXTimingInfo = { - .ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED, - .bEnableActivePM = IMG_TRUE, - .ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS, - .bEnableRDPowIsland = IMG_TRUE, -}; - -static RGX_DATA gsRGXData = { - .psRGXTimingInfo = &gsRGXTimingInfo, -}; - -static PVRSRV_DEVICE_CONFIG gsDevice; +static IMG_HANDLE ghSysData; typedef struct { @@ -99,59 +89,6 @@ static irqreturn_t MTKLISRWrapper(int iIrq, void *pvData) return IRQ_NONE; } -/* - * CPU to Device physical address translation - */ -static -void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, - IMG_UINT32 ui32NumOfAddr, - IMG_DEV_PHYADDR *psDevPAddr, - IMG_CPU_PHYADDR *psCpuPAddr) -{ - PVR_UNREFERENCED_PARAMETER(hPrivData); - - /* Optimise common case */ - psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; - if (ui32NumOfAddr > 1) { - IMG_UINT32 ui32Idx; - for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) - psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; - } -} - -/* - * Device to CPU physical address translation - */ -static -void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, - IMG_UINT32 ui32NumOfAddr, - IMG_CPU_PHYADDR *psCpuPAddr, - IMG_DEV_PHYADDR *psDevPAddr) -{ - PVR_UNREFERENCED_PARAMETER(hPrivData); - - /* Optimise common case */ - psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr; - if (ui32NumOfAddr > 1) { - IMG_UINT32 ui32Idx; - for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) - psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; - } -} - -static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = { - .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr, - .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr, -}; - -static PHYS_HEAP_CONFIG gsPhysHeapConfig = { - .pszPDumpMemspaceName = "SYSMEM", - .eType = PHYS_HEAP_TYPE_UMA, - .psMemFuncs = &gsPhysHeapFuncs, - .hPrivData = NULL, - .ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL, -}; - static PVRSRV_ERROR MTKSysDevPrePowerState( IMG_HANDLE hSysData, PVRSRV_SYS_POWER_STATE eNewPowerState, @@ -267,14 +204,10 @@ static unsigned long mtk_mfg_get_static_power(struct devfreq *df, static unsigned long mtk_mfg_get_static_power(unsigned long voltage) #endif { - struct mtk_mfg *mfg = gsDevice.hSysData; + struct mtk_mfg *mfg = ghSysData; struct thermal_zone_device *tz = mfg->tz; unsigned long power; -#if !defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) - unsigned long temperature = FALLBACK_STATIC_TEMPERATURE; -#else int temperature = FALLBACK_STATIC_TEMPERATURE; -#endif int low_idx = 0, high_idx = POWER_TABLE_NUM_VOLT - 1; int i; @@ -393,90 +326,146 @@ static struct devfreq_cooling_power sPowerOps = { }; #endif -static void SetFrequency(IMG_UINT32 freq) +static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 freq) { - struct mtk_mfg *mfg = gsDevice.hSysData; + struct mtk_mfg *mfg = hSysData; /* freq is in Hz */ mtk_mfg_freq_set(mfg, freq); } -static void SetVoltage(IMG_UINT32 volt) +static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 volt) { - struct mtk_mfg *mfg = gsDevice.hSysData; + struct mtk_mfg *mfg = hSysData; mtk_mfg_volt_set(mfg, volt); } #endif -PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice, + struct mtk_mfg *mfg, + 
PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) { - struct device *dev = pvOSDevice; - struct mtk_mfg *mfg; - - if (gsDevice.pvOSDevice) + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *psPhysHeapConfig; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo) + + sizeof(*psPhysHeapConfig)); + if (!psDevConfig) { - return PVRSRV_ERROR_INVALID_DEVICE; + return PVRSRV_ERROR_OUT_OF_MEMORY; } - mfg = mtk_mfg_create(dev); - if (IS_ERR(mfg)) { - if (PTR_ERR(mfg) == -EPROBE_DEFER) - return PVRSRV_ERROR_PROBE_DEFER; - else - return PVRSRV_ERROR_INIT_FAILURE; - } + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + psPhysHeapConfig = (PHYS_HEAP_CONFIG *)((IMG_CHAR *)psRGXTimingInfo + sizeof(*psRGXTimingInfo)); - dma_set_mask(dev, DMA_BIT_MASK(33)); + /* Set up the RGX timing information */ + psRGXTimingInfo->ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED; + psRGXTimingInfo->bEnableActivePM = IMG_TRUE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_TRUE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; - /* Make sure everything we don't care about is set to 0 */ - memset(&gsDevice, 0, sizeof(gsDevice)); + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; - /* Setup RGX device */ - gsDevice.pvOSDevice = pvOSDevice; - gsDevice.pszName = "mt8173"; - gsDevice.pszVersion = NULL; + psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA; + psPhysHeapConfig->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; + psPhysHeapConfig->uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; + psPhysHeapConfig->uConfig.sUMA.psMemFuncs = &g_sUmaHeapFns; + psPhysHeapConfig->uConfig.sUMA.pszHeapName = "uma_gpu_local"; + psPhysHeapConfig->uConfig.sUMA.hPrivData = NULL; - /* Device's physical heaps */ - gsDevice.pasPhysHeaps = &gsPhysHeapConfig; - gsDevice.ui32PhysHeapCount = 1; - gsDevice.eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + psDevConfig->pasPhysHeaps = psPhysHeapConfig; + psDevConfig->ui32PhysHeapCount = 1U; - gsDevice.ui32IRQ = mfg->rgx_irq; + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = MT8173_SYSTEM_NAME; + psDevConfig->pszVersion = NULL; - gsDevice.sRegsCpuPBase.uiAddr = mfg->rgx_start; - gsDevice.ui32RegsSize = mfg->rgx_size; + psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; + psDevConfig->bDevicePA0IsValid = IMG_FALSE; + + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = (IMG_HANDLE) mfg; + ghSysData = psDevConfig->hSysData; + + psDevConfig->pfnSysDevFeatureDepInit = NULL; + + psDevConfig->ui32IRQ = mfg->rgx_irq; + + psDevConfig->sRegsCpuPBase.uiAddr = mfg->rgx_start; + psDevConfig->ui32RegsSize = mfg->rgx_size; #ifdef SUPPORT_LINUX_DVFS - gsDevice.sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; - gsDevice.sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; - gsDevice.sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; - gsDevice.sDVFS.sDVFSDeviceCfg.ui32PollMs = MTK_DVFS_SWITCH_INTERVAL; + psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = MTK_DVFS_SWITCH_INTERVAL; #if defined(CONFIG_DEVFREQ_THERMAL) - gsDevice.sDVFS.sDVFSDeviceCfg.psPowerOps = &sPowerOps; + psDevConfig->sDVFS.sDVFSDeviceCfg.psPowerOps 
= &sPowerOps; #endif - gsDevice.sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; - gsDevice.sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; #endif /* power management on HW system */ - gsDevice.pfnPrePowerState = MTKSysDevPrePowerState; - gsDevice.pfnPostPowerState = MTKSysDevPostPowerState; + psDevConfig->pfnPrePowerState = MTKSysDevPrePowerState; + psDevConfig->pfnPostPowerState = MTKSysDevPostPowerState; /* clock frequency */ - gsDevice.pfnClockFreqGet = NULL; + psDevConfig->pfnClockFreqGet = NULL; - gsDevice.hDevData = &gsRGXData; - gsDevice.hSysData = mfg; + /* device error notify callback function */ + psDevConfig->pfnSysDevErrorNotify = NULL; - gsDevice.bHasFBCDCVersion31 = IMG_FALSE; - gsDevice.bDevicePA0IsValid = IMG_FALSE; + *ppsDevConfigOut = psDevConfig; - /* device error notify callback function */ - gsDevice.pfnSysDevErrorNotify = NULL; + return PVRSRV_OK; +} - *ppsDevConfig = &gsDevice; +static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + /* + * The device config, RGX data and RGX timing info are part of the same + * allocation so do only one free. + */ + OSFreeMem(psDevConfig); +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + struct device *dev = pvOSDevice; + struct mtk_mfg *mfg; + PVRSRV_ERROR eError; + + mfg = mtk_mfg_create(dev); + if (IS_ERR(mfg)) { + if (PTR_ERR(mfg) == -EPROBE_DEFER) + return PVRSRV_ERROR_PROBE_DEFER; + else + return PVRSRV_ERROR_INIT_FAILURE; + } + + dma_set_mask(dev, DMA_BIT_MASK(33)); + + eError = DeviceConfigCreate(pvOSDevice, mfg, &psDevConfig); + if (eError != PVRSRV_OK) + { + mtk_mfg_destroy(mfg); + + return eError; + } + + *ppsDevConfig = psDevConfig; #if defined(SUPPORT_ION) IonInit(NULL); @@ -493,9 +482,9 @@ void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) IonDeinit(); #endif - mtk_mfg_destroy(mfg); + DeviceConfigDestroy(psDevConfig); - psDevConfig->pvOSDevice = NULL; + mtk_mfg_destroy(mfg); } PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, @@ -509,7 +498,7 @@ PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, PVR_UNREFERENCED_PARAMETER(hSysData); - psWrapperData = kmalloc(sizeof(*psWrapperData), GFP_KERNEL); + psWrapperData = OSAllocMem(sizeof(*psWrapperData)); if (!psWrapperData) { return PVRSRV_ERROR_OUT_OF_MEMORY; @@ -522,7 +511,7 @@ PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, if (request_irq(ui32IRQ, MTKLISRWrapper, IRQF_TRIGGER_LOW, pszName, psWrapperData)) { - kfree(psWrapperData); + OSFreeMem(psWrapperData); return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER; } diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/sysinfo.h index 1ecf1ca4cb6f..bd7bc38e3412 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/sysinfo.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/mt8173/sysinfo.h @@ -51,6 +51,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) #define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) #define WAIT_TRY_COUNT (20000) +#define EVENT_OBJECT_TIMEOUT_US (100000) #define SYS_RGX_OF_COMPATIBLE "mediatek,mt8173-gpu" diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/Kbuild.mk index 4d43f6310860..55d0369bcc6d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/Kbuild.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/Kbuild.mk @@ -38,16 +38,9 @@ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### ########################################################################### -PVRSRVKM_NAME = $(PVRSRV_MODNAME) - -$(PVRSRVKM_NAME)-y += \ +$(PVRSRV_MODNAME)-y += \ services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \ - services/system/common/sysconfig_cmn.o \ - services/server/common/vmm_pvz_client.o \ - services/server/common/vmm_pvz_server.o \ - services/server/common/vz_vmm_pvz.o \ - services/server/common/vz_vmm_vm.o \ - services/system/rogue/common/vmm_type_$(VMM_TYPE).o + services/system/common/sysconfig_cmn.o ccflags-y += \ -I$(TOP)/services/system/rogue/common/env/linux -I$(TOP)/services/system/rogue/common/env/linux \ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysconfig.c index c7886e7f4968..16191d7de281 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysconfig.c @@ -52,6 +52,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "syscommon.h" #include "allocmem.h" #include "pvr_debug.h" +#include "rgxfwutils.h" #if defined(SUPPORT_ION) #include PVR_ANDROID_ION_HEADER @@ -59,11 +60,85 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "ion_sys.h" #endif +#include "vmm_pvz_server.h" +#include "pvr_bridge_k.h" +#include "pvr_drv.h" #include "tc_drv.h" #include #include +#define SECURE_FW_MEM_SIZE (0x400000) /* 4MB */ +#define SECURE_MEM_SIZE (0x4000000) /* 64MB */ + +typedef struct +{ + PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; + IMG_UINT64 uiSize; + IMG_BOOL bUsed; +} CARD_PHYS_HEAP_CONFIG_SPEC; + +#define HEAP_SPEC_IDX_GPU_PRIVATE (0U) +#define HEAP_SPEC_IDX_GPU_LOCAL (1U) + +static const CARD_PHYS_HEAP_CONFIG_SPEC gasCardHeapTemplate[] = +{ + { + PHYS_HEAP_USAGE_GPU_PRIVATE, + 0, /* determined at runtime by apphints */ + false /* determined at runtime by apphints */ + }, + { + PHYS_HEAP_USAGE_GPU_LOCAL, + 0, /* determined at runtime */ + true + }, + { + PHYS_HEAP_USAGE_GPU_SECURE, + SECURE_MEM_SIZE, +#if defined(SUPPORT_SECURITY_VALIDATION) + true +#else + false +#endif + }, + { + PHYS_HEAP_USAGE_FW_PRIVATE, + SECURE_FW_MEM_SIZE, +#if defined(SUPPORT_SECURITY_VALIDATION) + true +#else + false +#endif + }, + { + PHYS_HEAP_USAGE_FW_SHARED, +#if defined(SUPPORT_SECURITY_VALIDATION) && defined(RGX_PREMAP_FW_HEAPS) + /* simultaneous virtualisation and security support requires premapped heaps, + * i.e. 
FW_PRIVATE and FW_SHARED must fit contiguously into Fw's VA heap (RGX_FIRMWARE_RAW_HEAP_SIZE) */ + RGX_FIRMWARE_RAW_HEAP_SIZE - SECURE_FW_MEM_SIZE, +#else + RGX_FIRMWARE_RAW_HEAP_SIZE, +#endif +#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) + true /* VZ drivers need dedicated Fw heaps */ +#else + false /* Native drivers can fallback on GPU_LOCAL for Fw mem */ +#endif + }, + { + PHYS_HEAP_USAGE_FW_PREMAP_PT, + RGX_FIRMWARE_MAX_PAGETABLE_SIZE, +#if defined(RGX_PREMAP_FW_HEAPS) + true +#else + false +#endif + } +}; + +#define ODIN_MEMORY_HYBRID_DEVICE_BASE 0x400000000 + #define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) #define UI64_TOPWORD_IS_ZERO(ui64) ((ui64 >> 32) == 0) @@ -82,13 +157,17 @@ static const IMG_OPP asOPPTable[] = #define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP)) -static void SetFrequency(IMG_UINT32 ui32Frequency) +static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency) { + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency)); } -static void SetVoltage(IMG_UINT32 ui32Voltage) +static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Voltage) { + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage)); } @@ -126,6 +205,22 @@ static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr, }; +static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static PHYS_HEAP_FUNCTIONS gsHybridPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = TCHybridCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCHybridDevPAddrToCpuPAddr +}; + typedef struct _SYS_DATA_ SYS_DATA; struct _SYS_DATA_ @@ -345,97 +440,173 @@ static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, } +static inline +IMG_CHAR* GetHeapName(PHYS_HEAP_USAGE_FLAGS ui32Flags) +{ + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_LOCAL)) return "lma_gpu_local"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_SECURE)) return "lma_gpu_secure"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_PRIVATE)) return "lma_gpu_private"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_PRIVATE)) return "lma_fw_private"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_SHARED)) return "lma_fw_shared"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_PREMAP_PT)) return "lma_fw_pagetables"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_CPU_LOCAL)) return "lma_cpu_local"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_DISPLAY)) return "lma_gpu_display"; + else return "Unexpected Heap"; +} + +static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psDevPAddr[ui32Idx].uiAddr = + (psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base) + + ODIN_MEMORY_HYBRID_DEVICE_BASE; + } +} + +static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + 
IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psCpuPAddr[ui32Idx].uiAddr = + (psDevPAddr[ui32Idx].uiAddr - ODIN_MEMORY_HYBRID_DEVICE_BASE) + + psSysData->pdata->tc_memory_base; + } +} + static PVRSRV_ERROR InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap, IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr, IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs, PHYS_HEAP_USAGE_FLAGS ui32Flags) { - psPhysHeap->sCardBase.uiAddr = uiBaseAddr; - psPhysHeap->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); - psPhysHeap->uiSize = uiSize; - psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; - psPhysHeap->pszPDumpMemspaceName = "LMA"; - psPhysHeap->psMemFuncs = psFuncs; psPhysHeap->ui32UsageFlags = ui32Flags; + psPhysHeap->uConfig.sLMA.pszPDumpMemspaceName = "LMA"; + psPhysHeap->uConfig.sLMA.psMemFuncs = psFuncs; + psPhysHeap->uConfig.sLMA.pszHeapName = GetHeapName(ui32Flags); + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr = uiBaseAddr; + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); + psPhysHeap->uConfig.sLMA.uiSize = uiSize; return PVRSRV_OK; } static PVRSRV_ERROR -InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32PhysHeapCount) +CreateCardGPUHeaps(const SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + PHYS_HEAP_CONFIG *pasPhysHeaps, + PHYS_HEAP_FUNCTIONS *psHeapFuncs, + IMG_UINT32 *pui32HeapIdx, + IMG_UINT64 ui64CardAddr) { - struct tc_rogue_platform_data *pdata = psSysData->pdata; - PHYS_HEAP_FUNCTIONS *psHeapFuncs; - IMG_UINT64 uiCardBase; - IMG_UINT64 uiCpuBase = pdata->rogue_heap_memory_base; - IMG_UINT64 uiHeapSize = pdata->rogue_heap_memory_size; PVRSRV_ERROR eError; -#if (RGX_NUM_OS_SUPPORTED > 1) - IMG_UINT64 uiFwCarveoutSize; -#endif + IMG_UINT64 ui64StartAddr = psSysData->pdata->rogue_heap_memory_base; + IMG_UINT32 ui32SpecIdx; - if (pdata->mem_mode == TC_MEMORY_HYBRID) + for (ui32SpecIdx = 0; ui32SpecIdx < ARRAY_SIZE(gasCardHeapTemplate); ui32SpecIdx++) { - psHeapFuncs = &gsHostPhysHeapFuncs; - uiCardBase = pdata->tc_memory_base; + if (pasCardHeapSpec[ui32SpecIdx].bUsed) + { + IMG_UINT64 ui64HeapSize = pasCardHeapSpec[ui32SpecIdx].uiSize; + + eError = InitLocalHeap(&pasPhysHeaps[*pui32HeapIdx], + ui64CardAddr, + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), + ui64HeapSize, + psHeapFuncs, + pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags); + if (eError != PVRSRV_OK) + { + return eError; + } + + ui64CardAddr += pasCardHeapSpec[ui32SpecIdx].uiSize; + ui64StartAddr += pasCardHeapSpec[ui32SpecIdx].uiSize; + (*pui32HeapIdx)++; + } } - else + + return PVRSRV_OK; +} + +#if TC_DISPLAY_MEM_SIZE != 0 +static PVRSRV_ERROR +CreateCardEXTHeap(const SYS_DATA *psSysData, + PHYS_HEAP_CONFIG *pasPhysHeaps, + PHYS_HEAP_FUNCTIONS *psHeapFuncs, + IMG_UINT32 *pui32HeapIdx, + IMG_UINT64 ui64CardBase) +{ + IMG_UINT64 ui64StartAddr = psSysData->pdata->pdp_heap_memory_base; + IMG_UINT64 ui64Size = psSysData->pdata->pdp_heap_memory_size; + PVRSRV_ERROR eError; + + eError = InitLocalHeap(&pasPhysHeaps[*pui32HeapIdx], + ui64CardBase + psSysData->pdata->rogue_heap_memory_size, + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), + ui64Size, psHeapFuncs, + PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY); + if (eError != PVRSRV_OK) { - psHeapFuncs = &gsLocalPhysHeapFuncs; - uiCardBase = 0; + return eError; } -#if (RGX_NUM_OS_SUPPORTED > 1) -#if defined(SUPPORT_AUTOVZ) - /* Carveout out enough LMA memory to hold the heaps of - * all supported OSIDs and the FW page tables */ - uiFwCarveoutSize = 
(RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE) + - RGX_FIRMWARE_MAX_PAGETABLE_SIZE; -#elif defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - /* Carveout out enough LMA memory to hold the heaps of all supported OSIDs */ - uiFwCarveoutSize = (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); -#else - /* Create a memory carveout just for the Host's Firmware heap. - * Guests will allocate their own physical memory. */ - uiFwCarveoutSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + (*pui32HeapIdx)++; + + return PVRSRV_OK; +} #endif - uiHeapSize -= uiFwCarveoutSize; -#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ - - eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++], - uiCardBase, - uiCpuBase, - uiHeapSize, - psHeapFuncs, - PHYS_HEAP_USAGE_GPU_LOCAL); - if (eError != PVRSRV_OK) + +static PVRSRV_ERROR +InitLocalHeaps(const SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + PHYS_HEAP_CONFIG *pasPhysHeaps, + IMG_UINT32 *pui32HeapIdx) +{ + PHYS_HEAP_FUNCTIONS *psHeapFuncs; + PVRSRV_ERROR eError; + IMG_UINT64 ui64CardBase; + + if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && + psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) { - return eError; + psHeapFuncs = &gsHybridPhysHeapFuncs; + ui64CardBase = ODIN_MEMORY_HYBRID_DEVICE_BASE; + } + else if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + psHeapFuncs = &gsHostPhysHeapFuncs; + ui64CardBase = psSysData->pdata->tc_memory_base; + } + else + { + psHeapFuncs = &gsLocalPhysHeapFuncs; + ui64CardBase = psSysData->pdata->rogue_heap_memory_base - psSysData->pdata->tc_memory_base; } -#if (RGX_NUM_OS_SUPPORTED > 1) - /* allocate the Host Driver's Firmware Heap from the reserved carveout */ - eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++], - uiCardBase + uiHeapSize, - uiCpuBase + uiHeapSize, - RGX_FIRMWARE_RAW_HEAP_SIZE, - psHeapFuncs, - PHYS_HEAP_USAGE_FW_MAIN); + eError = CreateCardGPUHeaps(psSysData, pasCardHeapSpec, pasPhysHeaps, psHeapFuncs, pui32HeapIdx, ui64CardBase); if (eError != PVRSRV_OK) { return eError; } -#endif #if TC_DISPLAY_MEM_SIZE != 0 - eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++], - uiCardBase, - pdata->pdp_heap_memory_base, - pdata->pdp_heap_memory_size, - psHeapFuncs, - PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY); + eError = CreateCardEXTHeap(psSysData, pasPhysHeaps, psHeapFuncs, pui32HeapIdx, ui64CardBase); if (eError != PVRSRV_OK) { return eError; @@ -446,16 +617,17 @@ InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UI } static PVRSRV_ERROR -InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32PhysHeapCount) +InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32HeapIdx) { if (psSysData->pdata->mem_mode != TC_MEMORY_LOCAL) { - pasPhysHeaps[*pui32PhysHeapCount].eType = PHYS_HEAP_TYPE_UMA; - pasPhysHeaps[*pui32PhysHeapCount].pszPDumpMemspaceName = "SYSMEM"; - pasPhysHeaps[*pui32PhysHeapCount].psMemFuncs = &gsHostPhysHeapFuncs; - pasPhysHeaps[*pui32PhysHeapCount].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; + pasPhysHeaps[*pui32HeapIdx].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[*pui32HeapIdx].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; + pasPhysHeaps[*pui32HeapIdx].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; + pasPhysHeaps[*pui32HeapIdx].uConfig.sUMA.psMemFuncs = &gsHostPhysHeapFuncs; + pasPhysHeaps[*pui32HeapIdx].uConfig.sUMA.pszHeapName = "uma_cpu_local"; - (*pui32PhysHeapCount)++; + (*pui32HeapIdx)++; PVR_DPF((PVR_DBG_WARNING, "Initialising CPU_LOCAL UMA Host PhysHeaps 
with memory mode: %d", @@ -466,88 +638,159 @@ InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UIN } static PVRSRV_ERROR -PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, - void *pvPrivData, IMG_UINT32 *pui32PhysHeapCount) +PhysHeapsInit(const SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + PHYS_HEAP_CONFIG *pasPhysHeaps, + void *pvPrivData, IMG_UINT32 ui32NumHeaps) { PVRSRV_ERROR eError; IMG_UINT32 i; + IMG_UINT32 ui32HeapCounter = 0; - eError = InitLocalHeaps(psSysData, pasPhysHeaps, pui32PhysHeapCount); + eError = InitLocalHeaps(psSysData, pasCardHeapSpec, pasPhysHeaps, &ui32HeapCounter); if (eError != PVRSRV_OK) { return eError; } - eError = InitHostHeaps(psSysData, pasPhysHeaps, pui32PhysHeapCount); + eError = InitHostHeaps(psSysData, pasPhysHeaps, &ui32HeapCounter); if (eError != PVRSRV_OK) { return eError; } + PVR_LOG_RETURN_IF_FALSE((ui32HeapCounter == ui32NumHeaps), + "Number of PhysHeapConfigs set up doesn't match the initial requirement.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + /* Initialise fields that don't change between memory modes. * Fix up heap IDs. This is needed for multi-testchip systems to * ensure the heap IDs are unique as this is what Services expects. */ - for (i = 0; i < *pui32PhysHeapCount; i++) + for (i = 0; i < ui32NumHeaps; i++) + { + switch (pasPhysHeaps[i].eType) + { + case PHYS_HEAP_TYPE_UMA: + pasPhysHeaps[i].uConfig.sUMA.hPrivData = pvPrivData; + break; + case PHYS_HEAP_TYPE_LMA: + pasPhysHeaps[i].uConfig.sLMA.hPrivData = pvPrivData; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Invalid PHYS_HEAP_TYPE: %u in %s", + pasPhysHeaps[i].eType, + __func__)); + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PhysHeapSetRequirements(const SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + IMG_UINT32 *pui32CardPhysHeapCfgCount) +{ + IMG_UINT32 i; + IMG_UINT64 ui64FreeCardMemory = psSysData->pdata->rogue_heap_memory_size; + + PVR_LOG_RETURN_IF_FALSE( + BITMASK_HAS(pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].ui32UsageFlags, PHYS_HEAP_USAGE_GPU_PRIVATE) && + BITMASK_HAS(pasCardHeapSpec[HEAP_SPEC_IDX_GPU_LOCAL].ui32UsageFlags, PHYS_HEAP_USAGE_GPU_LOCAL), + "PhysHeapConfigs not set correctly in the system layer.", PVRSRV_ERROR_PHYSHEAP_CONFIG); + + for (i = 0; i < ARRAY_SIZE(gasCardHeapTemplate); i++) + { + if (pasCardHeapSpec[i].bUsed) + { + /* Determine the memory requirements of heaps with a fixed size */ + ui64FreeCardMemory -= pasCardHeapSpec[i].uiSize; + + /* Count card physheap configs used by the system */ + (*pui32CardPhysHeapCfgCount)++; + } + } + + if (SysRestrictGpuLocalAddPrivateHeap()) { - pasPhysHeaps[i].hPrivData = pvPrivData; + IMG_UINT64 ui64GpuSharedMem = SysRestrictGpuLocalPhysheap(ui64FreeCardMemory); + + if (ui64GpuSharedMem == ui64FreeCardMemory) + { + /* No memory reserved for GPU private use, special heap not needed */ + } + else + { + /* Set up the GPU private heap */ + pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].bUsed = true; + pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].uiSize = ui64FreeCardMemory - ui64GpuSharedMem; + ui64FreeCardMemory = ui64GpuSharedMem; + (*pui32CardPhysHeapCfgCount)++; + } } + /* all remaining memory card memory goes to GPU_LOCAL */ + pasCardHeapSpec[HEAP_SPEC_IDX_GPU_LOCAL].uiSize = ui64FreeCardMemory; + return PVRSRV_OK; } static PVRSRV_ERROR -PhysHeapsCreate(const SYS_DATA *psSysData, void *pvPrivData, +PhysHeapsCreate(const SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG *psDevConfig, PHYS_HEAP_CONFIG **ppasPhysHeapsOut, 
IMG_UINT32 *puiPhysHeapCountOut) { PHYS_HEAP_CONFIG *pasPhysHeaps; - IMG_UINT32 ui32NumPhysHeaps; - IMG_UINT32 ui32PhysHeapCount = 0; PVRSRV_ERROR eError; + IMG_UINT32 ui32NumHeaps = 0; + CARD_PHYS_HEAP_CONFIG_SPEC asCardHeapSpec[ARRAY_SIZE(gasCardHeapTemplate)]; - switch (psSysData->pdata->mem_mode) + PVR_LOG_RETURN_IF_FALSE((psSysData->pdata->mem_mode == TC_MEMORY_LOCAL) || + (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID), + "Unsupported memory mode", PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Initialise the local heap specs with the build-time template */ + memcpy(asCardHeapSpec, gasCardHeapTemplate, sizeof(gasCardHeapTemplate)); + + eError = PhysHeapSetRequirements(psSysData, asCardHeapSpec, &ui32NumHeaps); + if (eError != PVRSRV_OK) { - case TC_MEMORY_LOCAL: ui32NumPhysHeaps = 1U; break; - case TC_MEMORY_HYBRID: ui32NumPhysHeaps = 2U; break; - default: - { - PVR_DPF((PVR_DBG_ERROR, "%s: unsupported memory mode %d", __func__, psSysData->pdata->mem_mode)); - return PVRSRV_ERROR_NOT_IMPLEMENTED; - } + return eError; + } + + psDevConfig->bHasNonMappableLocalMemory = asCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].bUsed; + + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + /* CPU_LOCAL heap */ + ui32NumHeaps++; } #if TC_DISPLAY_MEM_SIZE != 0 if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL || psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) { - ui32NumPhysHeaps += 1U; + /* EXTERNAL / DISPLAY heap */ + ui32NumHeaps++; } #endif - if (RGX_NUM_OS_SUPPORTED > 1) - { - /* dedicated LMA heap for Firmware */ - ui32NumPhysHeaps += 1U; - } - - pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * ui32NumPhysHeaps); + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * ui32NumHeaps); if (!pasPhysHeaps) { return PVRSRV_ERROR_OUT_OF_MEMORY; } - eError = PhysHeapsInit(psSysData, pasPhysHeaps, pvPrivData, &ui32PhysHeapCount); + eError = PhysHeapsInit(psSysData, asCardHeapSpec, pasPhysHeaps, psDevConfig, ui32NumHeaps); if (eError != PVRSRV_OK) { OSFreeMem(pasPhysHeaps); return eError; } - PVR_ASSERT(ui32PhysHeapCount == ui32NumPhysHeaps); - *ppasPhysHeapsOut = pasPhysHeaps; - *puiPhysHeapCountOut = ui32PhysHeapCount; + *puiPhysHeapCountOut = ui32NumHeaps; return PVRSRV_OK; } @@ -621,6 +864,61 @@ static void odinTCFreeCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, tc_dma_chan_free(psDev->parent, chan); } +static void GetDriverMode(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32DeviceID; + + /* + * Drivers with virtualization support should check if the mode in which the + * driver must control a device has been explicitly specified at load time + * through module parameters. + * Multi-device platforms must find the internal ID of the device currently + * being created when checking for its associated DriverMode parameter. 
+ */ + if (PVRSRVAcquireInternalID(&ui32DeviceID) != PVRSRV_OK) + { + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; + return; + } + + if (psPVRSRVData->aeModuleParamDriverMode[ui32DeviceID] == DRIVER_MODE_DEFAULT) + { +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) + void __iomem *pvRegBase; + + pvRegBase = (void __iomem *) OSMapPhysToLin(psDevConfig->sRegsCpuPBase, psDevConfig->ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + if (pvRegBase == NULL) + { + /* failed to map register bank, default to native mode */ + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; + } + else + { + IMG_UINT64 ui64ClkCtrl; + + /* the CLK_CTRL register is valid only in the Os 0 (Host) register bank + * if it reads 0 then we can conclude this Os is set up to run as Guest */ +#if defined(RGX_CR_CLK_CTRL) + ui64ClkCtrl = OSReadHWReg64(pvRegBase, RGX_CR_CLK_CTRL); +#else + ui64ClkCtrl = OSReadHWReg64(pvRegBase, RGX_CR_CLK_CTRL1); +#endif + OSUnMapPhysToLin((void __force *) pvRegBase, psDevConfig->ui32RegsSize); + + psDevConfig->eDriverMode = (ui64ClkCtrl != 0) ? (DRIVER_MODE_HOST) : (DRIVER_MODE_GUEST); + } +#else + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; +#endif + } + else + { + psDevConfig->eDriverMode = psPVRSRVData->aeModuleParamDriverMode[ui32DeviceID]; + } +} + static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) { @@ -630,6 +928,14 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps; IMG_UINT32 uiPhysHeapCount; PVRSRV_ERROR eError; + IMG_UINT32 ui32DeviceID; + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + PVR_LOG_RETURN_IF_FALSE((psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && + psSysData->pdata->mem_mode == TC_MEMORY_LOCAL), + "Multidevice virtualization setup supported only on Odin device with TC_MEMORY_LOCAL", + PVRSRV_ERROR_INVALID_DEVICE); +#endif psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + sizeof(*psRGXData) + @@ -642,21 +948,15 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, psRGXData = (RGX_DATA *) IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig)); psRGXTimingInfo = (RGX_TIMING_INFORMATION *) IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData)); - eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); - if (eError != PVRSRV_OK) - { - goto ErrorFreeDevConfig; - } - /* Setup RGX specific timing data */ #if defined(TC_APOLLO_BONNIE) /* For BonnieTC there seems to be an additional 5x multiplier that occurs to the clock as measured speed is 540Mhz not 108Mhz. 
*/ - psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6 * 5; + psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(psSysData->pdev->dev.parent) * 6 * 5; #elif defined(TC_APOLLO_ES2) - psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6; + psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(psSysData->pdev->dev.parent) * 6; #else - psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) / - tc_core_clock_multiplex(&psSysData->pdev->dev); + psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(psSysData->pdev->dev.parent) / + tc_core_clock_multiplex(psSysData->pdev->dev.parent); #endif psRGXTimingInfo->bEnableActivePM = IMG_FALSE; psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; @@ -672,9 +972,58 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; psDevConfig->ui32RegsSize = resource_size(psSysData->registers); - psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + PVRSRVAcquireInternalID(&ui32DeviceID); +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* Rogue FPGA images are correctly routing the OSID interrupts + * for cores with the IRQ_PER_OS feature */ + psDevConfig->ui32IRQ = TC_INTERRUPT_OSID0 + ui32DeviceID; +#else psDevConfig->ui32IRQ = TC_INTERRUPT_EXT; +#endif + + GetDriverMode(psDevConfig); + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* If there is device running in native mode, prevent any attempts at + * creating any Guest devices, as there will be no Host to support them. + * Currently the VZFPGA supports only one physical GPU. */ + if (PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig)) + { + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDN; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + for (psDN = psPVRSRVData->psDeviceNodeList; psDN != NULL; psDN = psDN->psNext) + { + if (PVRSRV_VZ_MODE_IS(NATIVE, DEVNODE, psDN)) + { + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + PVR_DPF((PVR_DBG_ERROR, "%s() Device %u is already running in native mode, no other Guests supported in the system.", __func__, psDN->sDevId.ui32InternalID)); + eError = PVRSRV_ERROR_INVALID_DEVICE; + goto ErrorFreeDevConfig; + } + } + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + } +#endif + + eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); + if (eError != PVRSRV_OK) + { + goto ErrorFreeDevConfig; + } + + if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && + psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + psDevConfig->eDefaultHeap = SysDefaultToCpuLocalHeap() ? + PVRSRV_PHYS_HEAP_CPU_LOCAL : PVRSRV_PHYS_HEAP_GPU_LOCAL; + } + else + { + psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + } psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; @@ -694,12 +1043,17 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, "supported only in LMA mode", __func__)); goto ErrorFreeDevConfig; } - - /* Using display memory base as the alternative GPU register base, - * since the display memory range is not used by the firmware. */ - TCLocalCpuPAddrToDevPAddr(psDevConfig, 1, - &psDevConfig->sAltRegsGpuPBase, - &pasPhysHeaps[PHY_HEAP_CARD_EXT].sStartAddr); + else + { + /* Using display memory base as the alternative GPU register base, + * since the display memory range is not used by the firmware. 
*/ + IMG_CPU_PHYADDR sDisplayMemAddr; + + sDisplayMemAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psSysData->pdata->pdp_heap_memory_base); + TCLocalCpuPAddrToDevPAddr(psDevConfig, 1, + &psDevConfig->sAltRegsGpuPBase, + &sDisplayMemAddr); + } #endif #if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) @@ -758,13 +1112,13 @@ static PVRSRV_ERROR PrePower(IMG_HANDLE hSysData, /* The transition might be both from ON or OFF states to OFF state so check * only for the *new* state. Also this is only valid for suspend requests. */ if (eNewPowerState != PVRSRV_SYS_POWER_STATE_OFF || - !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_SUSPEND_REQ)) + !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ)) { return PVRSRV_OK; } eError = LMA_HeapIteratorCreate(psSysData->psDevConfig->psDevNode, - PHYS_HEAP_USAGE_GPU_LOCAL, + PVRSRV_PHYS_HEAP_GPU_LOCAL, &psSysData->psHeapIter); PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorCreate", return_error); @@ -848,7 +1202,7 @@ static PVRSRV_ERROR PostPower(IMG_HANDLE hSysData, /* The transition might be both to ON or OFF states from OFF state so check * only for the *current* state. Also this is only valid for resume requests. */ if (eCurrentPowerState != PVRSRV_SYS_POWER_STATE_OFF || - !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_RESUME_REQ) || + !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ) || psSysData->pvS3Buffer == NULL) { return PVRSRV_OK; @@ -903,6 +1257,7 @@ PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) PVRSRV_DEVICE_CONFIG *psDevConfig; SYS_DATA *psSysData; resource_size_t uiRegistersSize; + IMG_UINT32 ui32MinRegBankSize; PVRSRV_ERROR eError; int err = 0; @@ -952,11 +1307,19 @@ PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) /* Check the address range is large enough. 
 */
 	uiRegistersSize = resource_size(psSysData->registers);
-	if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+	/* each GPU instance gets the minimum 64kb register range */
+	ui32MinRegBankSize = RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE;
+#else
+	/* the GPU gets the entire 64MB IO range */
+	ui32MinRegBankSize = SYS_RGX_REG_REGION_SIZE;
+#endif
+
+	if (uiRegistersSize < ui32MinRegBankSize)
 	{
 		PVR_DPF((PVR_DBG_ERROR,
 			"%s: Rogue register region isn't big enough (was %pa, required 0x%08x)",
-			__func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE));
+			__func__, &uiRegistersSize, ui32MinRegBankSize));
 		eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
 		goto ErrorDevDisable;
@@ -1090,7 +1453,8 @@ PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
 	PVRSRV_ERROR eError;
 	int err;
 
-	if (ui32IRQ != TC_INTERRUPT_EXT)
+	if ((ui32IRQ != TC_INTERRUPT_EXT) &&
+	    ((ui32IRQ < TC_INTERRUPT_OSID0) || (ui32IRQ > TC_INTERRUPT_OSID7)))
 	{
 		PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ));
 		return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
@@ -1163,3 +1527,214 @@ PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
 
 	return PVRSRV_OK;
 }
+
+/****************************************************************************************************/
+/**** VM migration test code ****/
+/****************************************************************************************************/
+static void SwapHyperlanes(PVRSRV_DEVICE_NODE *psSrcNode, PVRSRV_DEVICE_NODE *psDestNode);
+static void PreMigrationDeviceSuspend(struct drm_device *psDev);
+static void PostMigrationDeviceResume(struct drm_device *psDev);
+
+void PVRVMMigration(unsigned int src, unsigned int dest);
+EXPORT_SYMBOL(PVRVMMigration);
+
+#define SWAP_REGSBASE_PTR(a, b) do \
+	{ \
+		a = (void __iomem *)(((uintptr_t)a)^((uintptr_t)b)); \
+		b = (void __iomem *)(((uintptr_t)a)^((uintptr_t)b)); \
+		a = (void __iomem *)(((uintptr_t)a)^((uintptr_t)b)); \
+	} while (0)
+
+static void SwapHyperlanes(PVRSRV_DEVICE_NODE *psSrcNode, PVRSRV_DEVICE_NODE *psDestNode)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE *psHostNode = PVRSRVGetDeviceInstance(0);
+	PVRSRV_RGXDEV_INFO *psSrcInfo = psSrcNode->pvDevice;
+	PVRSRV_RGXDEV_INFO *psDestInfo = psDestNode->pvDevice;
+	PVRSRV_DEVICE_CONFIG *psSrcConfig = psSrcNode->psDevConfig;
+	PVRSRV_DEVICE_CONFIG *psDestConfig = psDestNode->psDevConfig;
+	LISR_DATA *psSrcLISRData = (LISR_DATA *) psSrcInfo->pvLISRData;
+	void *pfnLISR = psSrcLISRData->pfnLISR;
+	IMG_UINT32 ui32SrcHyperLane, ui32DestHyperLane;
+
+	PVR_LOG_RETURN_VOID_IF_FALSE(((psHostNode != NULL) &&
+	                              (psHostNode->psDevConfig != NULL)),
+	                             "Device 0 (expected Host) not initialised.");
+
+	/* Determine the HyperLane ID used by a Guest Device from the Register Bank Base address used */
+	ui32SrcHyperLane = (psSrcConfig->sRegsCpuPBase.uiAddr - psHostNode->psDevConfig->sRegsCpuPBase.uiAddr) / psSrcConfig->ui32RegsSize;
+	ui32DestHyperLane = (psDestConfig->sRegsCpuPBase.uiAddr - psHostNode->psDevConfig->sRegsCpuPBase.uiAddr) / psDestConfig->ui32RegsSize;
+
+	PVR_DPF((PVR_DBG_WARNING, "%s: Swapping hyperlanes between Dev%u (hyperlane%u) and Dev%u (hyperlane%u)", __func__,
+	         psSrcNode->sDevId.ui32InternalID, ui32SrcHyperLane,
+	         psDestNode->sDevId.ui32InternalID, ui32DestHyperLane));
+	PVR_DPF((PVR_DBG_WARNING, "%s: Resulting configuration: Dev%u (hyperlane%u) and Dev%u (hyperlane%u)", __func__,
+	         psSrcNode->sDevId.ui32InternalID, ui32DestHyperLane,
+	         psDestNode->sDevId.ui32InternalID,
ui32SrcHyperLane)); + + /* swap the register bank details */ + SWAP_REGSBASE_PTR(psSrcInfo->pvRegsBaseKM, psDestInfo->pvRegsBaseKM); + SWAP(psSrcConfig->sRegsCpuPBase.uiAddr, psDestConfig->sRegsCpuPBase.uiAddr); + /* DevConfig->ui32RegsSize remains the same */ + + /* Swap interrupt lines between devices */ + eError = SysUninstallDeviceLISR(psSrcInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysUninstallDeviceLISR(IRQ%u, Device %u)", + psSrcConfig->ui32IRQ, ui32SrcHyperLane); + eError = SysUninstallDeviceLISR(psDestInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysUninstallDeviceLISR(IRQ%u, Device %u)", + psDestConfig->ui32IRQ, ui32DestHyperLane); + + SWAP(psSrcConfig->ui32IRQ, psDestConfig->ui32IRQ); + + eError = SysInstallDeviceLISR(psSrcConfig->hSysData, + psSrcConfig->ui32IRQ, + PVRSRV_MODNAME, + pfnLISR, + psSrcNode, + &psSrcInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysInstallDeviceLISR(IRQ%u, Device %u)", + psSrcConfig->ui32IRQ, ui32SrcHyperLane); + + eError = SysInstallDeviceLISR(psDestConfig->hSysData, + psDestConfig->ui32IRQ, + PVRSRV_MODNAME, + pfnLISR, + psDestNode, + &psDestInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysInstallDeviceLISR(IRQ%u, Device %u)", + psDestConfig->ui32IRQ, ui32DestHyperLane); + + /* Swap contents of LMA carveouts between virtual devices */ + { + /* Guest Raw Fw Heap mapping is done using the Host Devices */ + PHYS_HEAP *psSrcHeap = NULL; + PHYS_HEAP *psDestHeap = NULL; + IMG_DEV_PHYADDR sSrcHeapBase, sDestHeapBase; + + psSrcHeap = psHostNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32SrcHyperLane]; + psDestHeap = psHostNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DestHyperLane]; + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcHeap != NULL) && + (psDestHeap != NULL)), + "Guest firmware heaps not premapped by the Host Device."); + + eError = PhysHeapGetDevPAddr(psSrcHeap, &sSrcHeapBase); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PhysHeapGetDevPAddr(src fw heap)"); + eError = PhysHeapGetDevPAddr(psDestHeap, &sDestHeapBase); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PhysHeapGetDevPAddr(dest fw heap)"); + + eError = PvzServerUnmapDevPhysHeap(ui32SrcHyperLane, 0); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerUnmapDevPhysHeap(src fw heap)"); + eError = PvzServerUnmapDevPhysHeap(ui32DestHyperLane, 0); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerUnmapDevPhysHeap(dest fw heap)"); + + PhysHeapRelease(psHostNode->apsFWPremapPhysHeap[ui32SrcHyperLane]); + PhysHeapRelease(psHostNode->apsFWPremapPhysHeap[ui32DestHyperLane]); + + /* create new heaps with new base addresses */ + eError = PvzServerMapDevPhysHeap(ui32SrcHyperLane, 0, RGX_FIRMWARE_RAW_HEAP_SIZE, sDestHeapBase.uiAddr); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerMapDevPhysHeap(src fw heap)"); + eError = PvzServerMapDevPhysHeap(ui32DestHyperLane, 0, RGX_FIRMWARE_RAW_HEAP_SIZE, sSrcHeapBase.uiAddr); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerMapDevPhysHeap(dest fw heap)"); + } +} + +static void PreMigrationDeviceSuspend(struct drm_device *psDev) +{ + struct pvr_drm_private *psDevPriv = psDev->dev_private; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; + PVRSRV_ERROR eError; + + /* LinuxBridgeBlockClientsAccess prevents processes from using the driver + * while it's suspended (this is needed for Android). 
*/ + eError = LinuxBridgeBlockClientsAccess(psDevPriv, IMG_TRUE); + PVR_LOG_RETURN_VOID_IF_FALSE(eError == PVRSRV_OK, + "LinuxBridgeBlockClientsAccess()"); + +#if defined(SUPPORT_AUTOVZ) + /* To allow the driver to power down the GPU under AutoVz, the firmware must + * be declared as offline, otherwise all power requests will be ignored. */ + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; +#endif + + if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF, + PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ) != PVRSRV_OK) + { + /* Ignore return error as we're already returning an error here. */ + (void) LinuxBridgeUnblockClientsAccess(psDevPriv); + } +} + +static void PostMigrationDeviceResume(struct drm_device *psDev) +{ + struct pvr_drm_private *psDevPriv = psDev->dev_private; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; + + PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ); + + /* Ignore return error. We should proceed even if this fails. */ + (void) LinuxBridgeUnblockClientsAccess(psDevPriv); + + /* + * Reprocess the device queues in case commands were blocked during + * suspend. + */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE) + { + PVRSRVCheckStatus(NULL); + } +} + +void PVRVMMigration(unsigned int src, unsigned int dest) +{ + PVRSRV_DEVICE_NODE *psSrcNode = PVRSRVGetDeviceInstance(src); + PVRSRV_DEVICE_NODE *psDestNode = PVRSRVGetDeviceInstance(dest); + struct device *psSrcDev, *psDestDev; + struct drm_device *psSrcDrmDev, *psDestDrmDev; + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode != NULL) && (psDestNode != NULL) && (psSrcNode != psDestNode)), + "Invalid Device IDs requested for migration."); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE) && + (psDestNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)), + "Devices not fully initialised."); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode->psDevConfig != NULL) && + (psDestNode->psDevConfig != NULL)), + "Device config structure is NULL."); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode->psDevConfig->pvOSDevice != NULL) && + (psDestNode->psDevConfig->pvOSDevice != NULL)), + "Linux kernel device pointer is NULL."); + + psSrcDev = psSrcNode->psDevConfig->pvOSDevice; + psDestDev = psDestNode->psDevConfig->pvOSDevice; + psSrcDrmDev = dev_get_drvdata(psSrcDev); + psDestDrmDev = dev_get_drvdata(psDestDev); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcDrmDev != NULL) && + (psDestDrmDev != NULL)), + "Linux kernel drm_device pointer is NULL."); + + PVR_DPF((PVR_DBG_WARNING, "%s: Suspending device %u before migration", + __func__, psSrcNode->sDevId.ui32InternalID)); + PreMigrationDeviceSuspend(psSrcDrmDev); + + PVR_DPF((PVR_DBG_WARNING, "%s: Suspending device %u before migration", + __func__, psDestNode->sDevId.ui32InternalID)); + PreMigrationDeviceSuspend(psDestDrmDev); + + PVR_DPF((PVR_DBG_WARNING, "%s: Migrating vGPU resources (regbank, irq, osid)", __func__)); + SwapHyperlanes(psSrcNode, psDestNode); + + PVR_DPF((PVR_DBG_WARNING, "%s: Resuming device %u", __func__, + psSrcNode->sDevId.ui32InternalID)); + PostMigrationDeviceResume(psSrcDrmDev); + PVR_DPF((PVR_DBG_WARNING, "%s: Resuming device %u", __func__, + psDestNode->sDevId.ui32InternalID)); + PostMigrationDeviceResume(psDestDrmDev); +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysinfo.h index b71df887b113..6355a98482c3 100644 --- 
a/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysinfo.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/rogue/rgx_linux_tc/sysinfo.h @@ -55,6 +55,14 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) #define WAIT_TRY_COUNT (10000) +#if defined(VIRTUAL_PLATFORM) +#define EVENT_OBJECT_TIMEOUT_US (120000000) +#elif defined(TC_APOLLO_TCF5) +#define EVENT_OBJECT_TIMEOUT_US (2000000) +#else +#define EVENT_OBJECT_TIMEOUT_US (100000) +#endif + #define SYS_RGX_DEV_NAME "tc_rogue" #endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_stub.c b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_stub.c index cafe57bbab77..2dc3fe1e92d3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_stub.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_stub.c @@ -49,6 +49,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "vmm_impl.h" #include "vmm_pvz_server.h" +#if 0 +//eswin: conflict with eswin version! static PVRSRV_ERROR StubVMMMapDevPhysHeap(IMG_UINT64 ui64Size, IMG_UINT64 ui64Addr) @@ -94,18 +96,23 @@ static VMM_PVZ_CONNECTION gsStubVmmPvz = } }; -PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) { + PVR_UNREFERENCED_PARAMETER(psDevConfig); PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); *psPvzConnection = &gsStubVmmPvz; - PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); + PVR_DPF((PVR_DBG_MESSAGE, "Using a stub VM manager type, no hypercall support")); return PVRSRV_OK; } -void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) { + PVR_UNREFERENCED_PARAMETER(psDevConfig); PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); } +#endif /****************************************************************************** End of file (vmm_type_stub.c) diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_vzfpga.c b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_vzfpga.c new file mode 100644 index 000000000000..71f85d715f17 --- /dev/null +++ b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/common/vmm_type_vzfpga.c @@ -0,0 +1,169 @@ +/*************************************************************************/ /*! +@File vmm_type_vzfpga.c +@Title VZFPGA manager type +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description VM manager implementation to support vzfpga platform +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxheapconfig.h" + +#include "vz_vm.h" +#include "vmm_impl.h" +#include "vmm_pvz_server.h" + +static PVRSRV_ERROR +GetDriverIDFromHeapBase(IMG_UINT64 ui64Addr, IMG_UINT32 *pui32DriverID) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_DEV_PHYADDR sHostFwHeapBaseDevPA = {0}; + PHYS_HEAP *psHostFwHeap = NULL; + PVRSRV_DEVICE_NODE *psHostDevNode = PVRSRVGetDeviceInstance(0); + + PVR_LOG_RETURN_IF_FALSE((psHostDevNode != NULL), + "Host Device Node not initialised.", + PVRSRV_ERROR_NO_DEVICENODE_FOUND); + + psHostFwHeap = psHostDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]; + PVR_LOG_RETURN_IF_FALSE((psHostFwHeap != NULL), + "Host Fw heap not initialised.", + PVRSRV_ERROR_PHYSHEAP_ID_INVALID); + + eErr = PhysHeapGetDevPAddr(psHostFwHeap, &sHostFwHeapBaseDevPA); + PVR_LOG_RETURN_IF_ERROR(eErr, "PhysHeapGetDevPAddr"); + + *pui32DriverID = (ui64Addr - sHostFwHeapBaseDevPA.uiAddr) / PVRSRV_APPHINT_GUESTFWHEAPSTRIDE; + PVR_LOG_RETURN_IF_FALSE((*pui32DriverID >= RGXFW_GUEST_DRIVER_ID_START) && + (*pui32DriverID < RGX_NUM_DRIVERS_SUPPORTED), + "Invalid Guest DriverID", + PVRSRV_ERROR_INVALID_PVZ_OSID); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +VZFPGAMapDevPhysHeap(IMG_UINT64 ui64Size, + IMG_UINT64 ui64Addr) +{ + PVRSRV_ERROR eErr = PVRSRV_OK; + IMG_UINT32 ui32GuestDriverID; + + eErr = GetDriverIDFromHeapBase(ui64Addr, &ui32GuestDriverID); + PVR_LOG_RETURN_IF_ERROR(eErr, "GetDriverIDFromHeapBase"); + + eErr = PvzServerOnVmOnline(ui32GuestDriverID, 0); + PVR_LOG_RETURN_IF_ERROR(eErr, "PvzServerOnVmOnline"); + + eErr = PvzServerMapDevPhysHeap(ui32GuestDriverID, 0, ui64Size, ui64Addr); + PVR_LOG_RETURN_IF_ERROR(eErr, "PvzServerMapDevPhysHeap"); + + return eErr; +} + +static PVRSRV_ERROR +VZFPGAUnmapDevPhysHeap(void) +{ + IMG_UINT32 ui32ID; + + /* During shutdown, the Guests will be deinitialised in reverse order */ + for (ui32ID=(RGX_NUM_DRIVERS_SUPPORTED-1); + ui32ID >= RGXFW_HOST_DRIVER_ID; ui32ID--) + { + if (IsVmOnline(ui32ID, 0)) + { + PvzServerUnmapDevPhysHeap(ui32ID, 0); + PvzServerOnVmOffline(ui32ID, 0); + break; + } + } + + return PVRSRV_OK; +} + +static VMM_PVZ_CONNECTION gsVZFPGAPvz = +{ + .sClientFuncTab = { + /* pfnMapDevPhysHeap */ + &VZFPGAMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &VZFPGAUnmapDevPhysHeap + }, + + .sServerFuncTab = { + /* pfnMapDevPhysHeap */ + &PvzServerMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &PvzServerUnmapDevPhysHeap + }, + + .sVmmFuncTab = { + /* pfnOnVmOnline */ + &PvzServerOnVmOnline, + + /* pfnOnVmOffline */ + &PvzServerOnVmOffline, + + /* pfnVMMConfigure */ + &PvzServerVMMConfigure + } +}; + +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); + *psPvzConnection = &gsVZFPGAPvz; + return PVRSRV_OK; +} + +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); +} + +/****************************************************************************** + End of file (vmm_type_vzfpga.c) +******************************************************************************/ diff --git 
a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/Kbuild.mk index 917bdc71d0cc..3a0b79b13c7c 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/Kbuild.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/Kbuild.mk @@ -38,20 +38,12 @@ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### ########################################################################### -PVRSRVKM_NAME = $(PVRSRV_MODNAME) - -$(PVRSRVKM_NAME)-y += \ +$(PVRSRV_MODNAME)-y += \ services/system/volcanic/$(PVR_SYSTEM)/sysconfig.o \ - services/system/common/sysconfig_cmn.o \ - services/server/common/vmm_pvz_client.o \ - services/server/common/vmm_pvz_server.o \ - services/server/common/vz_vmm_pvz.o \ - services/server/common/vz_vmm_vm.o \ - services/system/volcanic/common/vmm_type_$(VMM_TYPE).o - -$(PVRSRVKM_NAME)-y += \ - services/system/volcanic/$(PVR_SYSTEM)/fpga.o + services/system/common/sysconfig_cmn.o +$(PVRSRV_MODNAME)-y += \ + services/system/volcanic/$(PVR_SYSTEM)/fpga.o ccflags-y += \ -I$(TOP)/services/system/volcanic/common/env/linux \ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/fpga.c b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/fpga.c index b9461ba7fee4..2698c12bf534 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/fpga.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/fpga.c @@ -47,7 +47,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "img_types.h" #include "img_defs.h" #include "pci_support.h" -#include "oskm_apphint.h" +#include "os_apphint.h" #include "pdump_km.h" #include "rgxtbdefs_km.h" diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysconfig.c index 5d4776e4f5b5..0dda8636bf8e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysconfig.c @@ -50,6 +50,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "pvrsrv_device.h" #include "rgxdevice.h" #include "syscommon.h" +#include "os_apphint.h" +#include "rgxfwutils.h" #if defined(SUPPORT_ION) #include PVR_ANDROID_ION_HEADER @@ -57,6 +59,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "ion_sys.h" #endif +#include "vmm_pvz_server.h" +#include "pvr_bridge_k.h" +#include "pvr_drv.h" #include "tc_drv.h" #include "fpga.h" @@ -64,42 +69,116 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include #include -/* Must be consecutive and start from 0 */ -#define PHY_HEAP_CARD_GPU 0 -#define PHY_HEAP_CARD_EXT 1 -#if defined(SUPPORT_SECURITY_VALIDATION) -#define PHY_HEAP_SEC_FW_CODE 2 -#define PHY_HEAP_SEC_FW_DATA 3 -#define PHY_HEAP_SEC_MEM 4 -#define PHY_HEAP_SYSTEM 5 -#elif (RGX_NUM_OS_SUPPORTED > 1) -#define PHY_HEAP_CARD_FW 2 -#define PHY_HEAP_SYSTEM 3 -#else -#define PHY_HEAP_SYSTEM 2 +#define SECURE_FW_MEM_SIZE (0x80000) /* 512Kb */ +#define SECURE_MEM_SIZE (0x4000000) /* 64MB */ + +typedef struct +{ + PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; + IMG_UINT64 uiSize; + IMG_BOOL bUsed; + IMG_UINT32 ui32DriverModeMask; +} CARD_PHYS_HEAP_CONFIG_SPEC; + +#define PHYSHEAP_ALL_DRIVERS (1 << DRIVER_MODE_NATIVE | \ + 1 << DRIVER_MODE_HOST | \ + 1 << DRIVER_MODE_GUEST) + +#define PHYSHEAP_NO_GUESTS (1 << DRIVER_MODE_NATIVE | \ + 1 << DRIVER_MODE_HOST) + +#define PHYSHEAP_ONLY_VZ (1 << DRIVER_MODE_GUEST | \ + 1 << DRIVER_MODE_HOST) + +#define HEAP_SPEC_IDX_GPU_PRIVATE (0U) +#define HEAP_SPEC_IDX_GPU_LOCAL (1U) + +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(FPGA) && !defined(SUPPORT_SECURITY_VALIDATION) +static PVRSRV_ERROR TEE_LoadFirmwareWrapper(IMG_HANDLE hSysData, PVRSRV_FW_PARAMS *psTDFWParams); +extern PVRSRV_ERROR TEE_LoadFirmware(IMG_HANDLE hSysData, PVRSRV_FW_PARAMS *psTDFWParams); +extern PVRSRV_ERROR TEE_SetPowerParams(IMG_HANDLE hSysData, PVRSRV_TD_POWER_PARAMS *psTDPowerParams); +extern PVRSRV_ERROR TEE_RGXStart(IMG_HANDLE hSysData); +extern PVRSRV_ERROR TEE_RGXStop(IMG_HANDLE hSysData); #endif -#if defined(SUPPORT_SECURITY_VALIDATION) -#define PHY_HEAP_LMA_NUM 5 -#define PHY_HEAP_HYBRID_NUM 6 -#elif (RGX_NUM_OS_SUPPORTED > 1) -#define PHY_HEAP_LMA_NUM 3 -#define PHY_HEAP_HYBRID_NUM 4 +/* + ----------------------------------------------------------------------- + | Phys Heap definition matrix | + ----------------------------------------------------------------------- + | Phys | GPU | GPU | Fw | Fw | Fw | + | Heap | Private | Secure | Private | Shared | PageTab | + ----------------------------------------------------------------------- + | VzMode | N H G | N H G | N H G | N H G | N H G | + ----------------------------------------------------------------------- + | TEE | 1 1 1 | 1 1 1 | 1 1 0 | 1 1 1 | 1 1 0 | + | PREMAP | 1 1 1 | 0 0 0 | 0 0 0 | 1 1 1 | 1 1 0 | + | DEFAULT | 1 1 1 | 0 0 0 | 0 0 0 | 0 0 1 | 0 0 0 | + ----------------------------------------------------------------------- +*/ +static const CARD_PHYS_HEAP_CONFIG_SPEC gasCardHeapTemplate[] = +{ + { + PHYS_HEAP_USAGE_GPU_PRIVATE, + 0, /* determined at runtime by apphints */ + false, /* determined at runtime by apphints */ + PHYSHEAP_ALL_DRIVERS + }, + { + PHYS_HEAP_USAGE_GPU_LOCAL, + 0, /* determined at runtime */ + true, + PHYSHEAP_ALL_DRIVERS + }, + { + PHYS_HEAP_USAGE_GPU_SECURE, + SECURE_MEM_SIZE, +#if defined(SUPPORT_TRUSTED_DEVICE) + true, #else -#define PHY_HEAP_LMA_NUM 2 -#define PHY_HEAP_HYBRID_NUM 3 + false, #endif - -#if defined(SUPPORT_SECURITY_VALIDATION) && (RGX_NUM_OS_SUPPORTED > 1) -#error "Security support and virtualization are currently mutually exclusive." 
+ PHYSHEAP_ALL_DRIVERS + }, + { + PHYS_HEAP_USAGE_FW_PREMAP_PT, + RGX_FIRMWARE_MAX_PAGETABLE_SIZE, +#if defined(RGX_PREMAP_FW_HEAPS) + true, +#else + false, #endif + PHYSHEAP_NO_GUESTS + }, + { + PHYS_HEAP_USAGE_FW_PRIVATE, + SECURE_FW_MEM_SIZE, +#if defined(SUPPORT_TRUSTED_DEVICE) + true, +#else + false, +#endif + PHYSHEAP_NO_GUESTS + }, + { + PHYS_HEAP_USAGE_FW_SHARED, + RGX_FIRMWARE_RAW_HEAP_SIZE, + true, +#if defined(RGX_PREMAP_FW_HEAPS) + PHYSHEAP_ALL_DRIVERS +#else + PHYSHEAP_ONLY_VZ +#endif + } +}; + +#define ODIN_MEMORY_HYBRID_DEVICE_BASE 0x400000000 + +#define VALI_MEMORY_DEVICE_BASE (0x800000000U) #define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) -#if defined(SUPPORT_SECURITY_VALIDATION) -#define SECURE_FW_CODE_MEM_SIZE (0x200000) /* 2MB (max HMMU page size) */ -#define SECURE_FW_DATA_MEM_SIZE (0x200000) /* 2MB (max HMMU page size) */ -#define SECURE_MEM_SIZE (0x4000000) /* 32MB (multiple of max HMMU page size) */ +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) +static PVRSRV_DEVICE_CONFIG *apsDevCfgs[PVRSRV_MAX_DEVICES] = {0}; #endif #if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) @@ -116,13 +195,17 @@ static const IMG_OPP asOPPTable[] = #define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP)) -static void SetFrequency(IMG_UINT32 ui32Frequency) +static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency) { + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency)); } -static void SetVoltage(IMG_UINT32 ui32Voltage) +static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Voltage) { + PVR_UNREFERENCED_PARAMETER(hSysData); + PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage)); } @@ -144,6 +227,22 @@ static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = .pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr, }; +static void TCValiLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCValiLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static PHYS_HEAP_FUNCTIONS gsValiLocalPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = TCValiLocalCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCValiLocalDevPAddrToCpuPAddr, +}; + static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, IMG_UINT32 ui32NumOfAddr, IMG_DEV_PHYADDR *psDevPAddr, @@ -160,16 +259,44 @@ static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr, }; +static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +static PHYS_HEAP_FUNCTIONS gsHybridPhysHeapFuncs = +{ + .pfnCpuPAddrToDevPAddr = TCHybridCpuPAddrToDevPAddr, + .pfnDevPAddrToCpuPAddr = TCHybridDevPAddrToCpuPAddr +}; + typedef struct _SYS_DATA_ SYS_DATA; struct _SYS_DATA_ { + IMG_UINT32 ui32SysDataSize; + IMG_UINT64 ui64GpuRegisterBase; + IMG_UINT64 ui64FwHeapCpuBase; + IMG_UINT64 ui64FwHeapGpuBase; + IMG_UINT64 ui64FwPrivateHeapSize; + IMG_UINT64 ui64FwTotalHeapSize; + IMG_UINT64 ui64FwHeapStride; + IMG_UINT64 ui64FwPageTableHeapCpuBase; + IMG_UINT64 ui64FwPageTableHeapGpuBase; + IMG_UINT64 ui64FwPageTableHeapSize; + + PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg; + struct platform_device *pdev; struct 
tc_rogue_platform_data *pdata; struct resource *registers; - #if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) struct ion_client *ion_client; struct ion_handle *ion_rogue_allocation; @@ -320,6 +447,38 @@ static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, } } +static void TCValiLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psDevPAddr[ui32Idx].uiAddr = + psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base + VALI_MEMORY_DEVICE_BASE; + } +} + +static void TCValiLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psCpuPAddr[ui32Idx].uiAddr = + psDevPAddr[ui32Idx].uiAddr - VALI_MEMORY_DEVICE_BASE + psSysData->pdata->tc_memory_base; + } +} + static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, IMG_UINT32 uiNumOfAddr, IMG_DEV_PHYADDR *psDevPAddr, @@ -338,156 +497,143 @@ static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr)); } +static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psDevPAddr[ui32Idx].uiAddr = + (psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base) + + ODIN_MEMORY_HYBRID_DEVICE_BASE; + } +} + +static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; + SYS_DATA *psSysData = psDevConfig->hSysData; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) + { + psCpuPAddr[ui32Idx].uiAddr = + (psDevPAddr[ui32Idx].uiAddr - ODIN_MEMORY_HYBRID_DEVICE_BASE) + + psSysData->pdata->tc_memory_base; + } +} + +static inline +IMG_CHAR* GetHeapName(PHYS_HEAP_USAGE_FLAGS ui32Flags) +{ + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_LOCAL)) return "lma_gpu_local"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_SECURE)) return "lma_gpu_secure"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_PRIVATE)) return "lma_gpu_private"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_PRIVATE)) return "lma_fw_private"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_SHARED)) return "lma_fw_shared"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_PREMAP_PT)) return "lma_fw_pagetables"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_CPU_LOCAL)) return "lma_cpu_local"; + if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_DISPLAY)) return "lma_gpu_display"; + else return "Unexpected Heap"; +} + static PVRSRV_ERROR InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap, IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr, IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs, PHYS_HEAP_USAGE_FLAGS ui32Flags) { - psPhysHeap->sCardBase.uiAddr = 
uiBaseAddr; - psPhysHeap->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); - psPhysHeap->uiSize = uiSize; psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; - psPhysHeap->pszPDumpMemspaceName = "LMA"; - psPhysHeap->psMemFuncs = psFuncs; psPhysHeap->ui32UsageFlags = ui32Flags; + psPhysHeap->uConfig.sLMA.pszPDumpMemspaceName = "LMA"; + psPhysHeap->uConfig.sLMA.psMemFuncs = psFuncs; + psPhysHeap->uConfig.sLMA.pszHeapName = GetHeapName(ui32Flags); + psPhysHeap->uConfig.sLMA.sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); + psPhysHeap->uConfig.sLMA.sCardBase.uiAddr = uiBaseAddr; + psPhysHeap->uConfig.sLMA.uiSize = uiSize; return PVRSRV_OK; } static PVRSRV_ERROR -CreateCardGPUHeaps(const SYS_DATA *psSysData, +CreateCardGPUHeaps(SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, PHYS_HEAP_CONFIG *pasPhysHeaps, - PHYS_HEAP_FUNCTIONS *psHeapFuncs) + PHYS_HEAP_FUNCTIONS *psHeapFuncs, + IMG_UINT32 *pui32HeapIdx, + IMG_UINT64 ui64CardAddr) { PVRSRV_ERROR eError; - IMG_UINT64 ui64CardAddr = 0; IMG_UINT64 ui64StartAddr = psSysData->pdata->rogue_heap_memory_base; - IMG_UINT64 ui64RogueHeapSize = psSysData->pdata->rogue_heap_memory_size; -#if defined(SUPPORT_SECURITY_VALIDATION) - IMG_UINT64 uiTDFWCodeSize = SECURE_FW_CODE_MEM_SIZE; - IMG_UINT64 uiTDFWDataSize = SECURE_FW_DATA_MEM_SIZE; - IMG_UINT64 uiTDSecBufSize = SECURE_MEM_SIZE; -#elif (RGX_NUM_OS_SUPPORTED > 1) - IMG_UINT64 uiFwCarveoutSize; -#if defined(SUPPORT_AUTOVZ) - /* Carveout out enough LMA memory to hold the heaps of - * all supported OSIDs and the FW page tables */ - uiFwCarveoutSize = (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE) + - RGX_FIRMWARE_MAX_PAGETABLE_SIZE; -#elif defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - /* Carveout out enough LMA memory to hold the heaps of all supported OSIDs */ - uiFwCarveoutSize = (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); -#else - /* Create a memory carveout just for the Host's Firmware heap. - * Guests will allocate their own physical memory. 
*/ - uiFwCarveoutSize = RGX_FIRMWARE_RAW_HEAP_SIZE; -#endif -#endif + IMG_UINT32 ui32SpecIdx; - if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + for (ui32SpecIdx = 0; ui32SpecIdx < ARRAY_SIZE(gasCardHeapTemplate); ui32SpecIdx++) { - ui64CardAddr = psSysData->pdata->tc_memory_base; - } + if (pasCardHeapSpec[ui32SpecIdx].bUsed) + { + IMG_UINT64 ui64HeapSize = pasCardHeapSpec[ui32SpecIdx].uiSize; + + eError = InitLocalHeap(&pasPhysHeaps[*pui32HeapIdx], + ui64CardAddr, + IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), + ui64HeapSize, + psHeapFuncs, + pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags); + if (eError != PVRSRV_OK) + { + return eError; + } -#if defined(SUPPORT_SECURITY_VALIDATION) - /* Take some space from the main heap region */ - ui64RogueHeapSize -= uiTDFWCodeSize + uiTDFWDataSize + uiTDSecBufSize; -#elif (RGX_NUM_OS_SUPPORTED > 1) - ui64RogueHeapSize -= uiFwCarveoutSize; +#if defined(SUPPORT_TRUSTED_DEVICE) + /* save the heap details in a structure passed to the tee_ddk module */ + if (BITMASK_HAS(pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags, PHYS_HEAP_USAGE_FW_PREMAP_PT)) + { + psSysData->ui64FwPageTableHeapCpuBase = ui64StartAddr; + psSysData->ui64FwPageTableHeapGpuBase = ui64CardAddr; + psSysData->ui64FwPageTableHeapSize = pasCardHeapSpec[ui32SpecIdx].uiSize; + } + else if (BITMASK_HAS(pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags, PHYS_HEAP_USAGE_FW_PRIVATE)) + { + psSysData->ui64FwPrivateHeapSize = pasCardHeapSpec[ui32SpecIdx].uiSize; + psSysData->ui64FwHeapCpuBase = ui64StartAddr; + psSysData->ui64FwHeapGpuBase = ui64CardAddr; + } + else if (BITMASK_HAS(pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags, PHYS_HEAP_USAGE_FW_SHARED)) + { + psSysData->ui64FwTotalHeapSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + } #endif - eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_GPU], - ui64CardAddr, - IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), - ui64RogueHeapSize, - psHeapFuncs, - PHYS_HEAP_USAGE_GPU_LOCAL); - if (eError != PVRSRV_OK) - { - return eError; - } - - ui64CardAddr += ui64RogueHeapSize; - ui64StartAddr += ui64RogueHeapSize; - -#if defined(SUPPORT_SECURITY_VALIDATION) - /* Setup the secure FW code heap */ - eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_SEC_FW_CODE], - ui64CardAddr, - IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), - uiTDFWCodeSize, psHeapFuncs, - PHYS_HEAP_USAGE_FW_CODE); - if (eError != PVRSRV_OK) - { - return eError; - } - - ui64CardAddr += uiTDFWCodeSize; - ui64StartAddr += uiTDFWCodeSize; - - /* Setup the secure FW data heap */ - eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_SEC_FW_DATA], - ui64CardAddr, - IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), - uiTDFWDataSize, psHeapFuncs, - PHYS_HEAP_USAGE_FW_PRIV_DATA); - if (eError != PVRSRV_OK) - { - return eError; - } - - ui64CardAddr += uiTDFWDataSize; - ui64StartAddr += uiTDFWDataSize; - - /* Setup the secure buffers heap */ - eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_SEC_MEM], - ui64CardAddr, - IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), - uiTDSecBufSize, psHeapFuncs, - PHYS_HEAP_USAGE_GPU_SECURE); - if (eError != PVRSRV_OK) - { - return eError; - } - - ui64CardAddr += uiTDSecBufSize; - ui64StartAddr += uiTDSecBufSize; -#elif (RGX_NUM_OS_SUPPORTED > 1) - /* allocate the Host Driver's Firmware Heap from the reserved carveout */ - eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_FW], - ui64CardAddr, - IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), - RGX_FIRMWARE_RAW_HEAP_SIZE, - psHeapFuncs, - PHYS_HEAP_USAGE_FW_MAIN); - if (eError != PVRSRV_OK) - { - return eError; + ui64CardAddr += ui64HeapSize; + ui64StartAddr += 
ui64HeapSize; + (*pui32HeapIdx)++; + } } -#endif - return PVRSRV_OK; } static PVRSRV_ERROR CreateCardEXTHeap(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, - PHYS_HEAP_FUNCTIONS *psHeapFuncs) + PHYS_HEAP_FUNCTIONS *psHeapFuncs, + IMG_UINT32 *pui32HeapIdx, + IMG_UINT64 ui64CardBase) { - IMG_UINT64 ui64CardAddr = 0; IMG_UINT64 ui64StartAddr = psSysData->pdata->pdp_heap_memory_base; IMG_UINT64 ui64Size = psSysData->pdata->pdp_heap_memory_size; PVRSRV_ERROR eError; - if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) - { - ui64CardAddr = psSysData->pdata->tc_memory_base; - } - - eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_EXT], - ui64CardAddr, + eError = InitLocalHeap(&pasPhysHeaps[*pui32HeapIdx], + ui64CardBase + psSysData->pdata->rogue_heap_memory_size, IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), ui64Size, psHeapFuncs, PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY); @@ -496,31 +642,50 @@ CreateCardEXTHeap(const SYS_DATA *psSysData, return eError; } + (*pui32HeapIdx)++; + return PVRSRV_OK; } static PVRSRV_ERROR -InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) +InitLocalHeaps(SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + PHYS_HEAP_CONFIG *pasPhysHeaps, + IMG_UINT32 *pui32HeapIdx) { PHYS_HEAP_FUNCTIONS *psHeapFuncs; PVRSRV_ERROR eError; + IMG_UINT64 ui64CardBase; - if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && + psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + psHeapFuncs = &gsHybridPhysHeapFuncs; + ui64CardBase = ODIN_MEMORY_HYBRID_DEVICE_BASE; + } + else if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) { psHeapFuncs = &gsHostPhysHeapFuncs; + ui64CardBase = 0; + } + else if (psSysData->pdata->baseboard == TC_BASEBOARD_VALI) + { + psHeapFuncs = &gsValiLocalPhysHeapFuncs; + ui64CardBase = VALI_MEMORY_DEVICE_BASE; } else { psHeapFuncs = &gsLocalPhysHeapFuncs; + ui64CardBase = psSysData->pdata->rogue_heap_memory_base - psSysData->pdata->tc_memory_base; } - eError = CreateCardGPUHeaps(psSysData, pasPhysHeaps, psHeapFuncs); + eError = CreateCardGPUHeaps(psSysData, pasCardHeapSpec, pasPhysHeaps, psHeapFuncs, pui32HeapIdx, ui64CardBase); if (eError != PVRSRV_OK) { return eError; } - eError = CreateCardEXTHeap(psSysData, pasPhysHeaps, psHeapFuncs); + eError = CreateCardEXTHeap(psSysData, pasPhysHeaps, psHeapFuncs, pui32HeapIdx, ui64CardBase); if (eError != PVRSRV_OK) { return eError; @@ -530,75 +695,188 @@ InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) } static PVRSRV_ERROR -InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps) +InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32HeapIdx) { PVR_UNREFERENCED_PARAMETER(psSysData); - pasPhysHeaps[PHY_HEAP_SYSTEM].eType = PHYS_HEAP_TYPE_UMA; - pasPhysHeaps[PHY_HEAP_SYSTEM].pszPDumpMemspaceName = "SYSMEM"; - pasPhysHeaps[PHY_HEAP_SYSTEM].psMemFuncs = &gsHostPhysHeapFuncs; - pasPhysHeaps[PHY_HEAP_SYSTEM].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + pasPhysHeaps[*pui32HeapIdx].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[*pui32HeapIdx].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; + pasPhysHeaps[*pui32HeapIdx].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; + pasPhysHeaps[*pui32HeapIdx].uConfig.sUMA.psMemFuncs = &gsHostPhysHeapFuncs; + pasPhysHeaps[*pui32HeapIdx].uConfig.sUMA.pszHeapName = "uma_cpu_local"; + + (*pui32HeapIdx)++; - PVR_DPF((PVR_DBG_WARNING, - "Initialising 
CPU_LOCAL UMA Host PhysHeaps with memory mode: %d", - psSysData->pdata->mem_mode)); + PVR_DPF((PVR_DBG_WARNING, + "Initialising CPU_LOCAL UMA Host PhysHeaps with memory mode: %d", + psSysData->pdata->mem_mode)); + } return PVRSRV_OK; } static PVRSRV_ERROR -PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, - void *pvPrivData) +PhysHeapsInit(SYS_DATA *psSysData, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + PHYS_HEAP_CONFIG *pasPhysHeaps, + void *pvPrivData, IMG_UINT32 ui32NumHeaps) { PVRSRV_ERROR eError; IMG_UINT32 i; - IMG_UINT32 ui32NumHeaps = psSysData->pdata->mem_mode == TC_MEMORY_HYBRID ? - PHY_HEAP_HYBRID_NUM : PHY_HEAP_LMA_NUM; + IMG_UINT32 ui32HeapCounter = 0; - eError = InitLocalHeaps(psSysData, pasPhysHeaps); + eError = InitLocalHeaps(psSysData, pasCardHeapSpec, pasPhysHeaps, &ui32HeapCounter); if (eError != PVRSRV_OK) { return eError; } - if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + eError = InitHostHeaps(psSysData, pasPhysHeaps, &ui32HeapCounter); + if (eError != PVRSRV_OK) { - eError = InitHostHeaps(psSysData, pasPhysHeaps); - if (eError != PVRSRV_OK) - { - return eError; - } + return eError; } + PVR_LOG_RETURN_IF_FALSE((ui32HeapCounter == ui32NumHeaps), + "Number of PhysHeapConfigs set up doesn't match the initial requirement.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + /* Initialise fields that don't change between memory modes. * Fix up heap IDs. This is needed for multi-testchip systems to * ensure the heap IDs are unique as this is what Services expects. */ for (i = 0; i < ui32NumHeaps; i++) { - pasPhysHeaps[i].hPrivData = pvPrivData; + switch (pasPhysHeaps[i].eType) + { + case PHYS_HEAP_TYPE_LMA: + pasPhysHeaps[i].uConfig.sLMA.hPrivData = (void*) pvPrivData; + break; + case PHYS_HEAP_TYPE_UMA: + pasPhysHeaps[i].uConfig.sUMA.hPrivData = (void*) pvPrivData; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Invalid PHYS_HEAP_TYPE: %u in %s", + pasPhysHeaps[i].eType, + __func__)); + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PhysHeapSetRequirements(const SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG *psDevConfig, + CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, + IMG_UINT32 *pui32CardPhysHeapCfgCount) +{ + IMG_UINT32 i; + IMG_UINT64 ui64FreeCardMemory = psSysData->pdata->rogue_heap_memory_size; + + PVR_LOG_RETURN_IF_FALSE( + BITMASK_HAS(pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].ui32UsageFlags, PHYS_HEAP_USAGE_GPU_PRIVATE) && + BITMASK_HAS(pasCardHeapSpec[HEAP_SPEC_IDX_GPU_LOCAL].ui32UsageFlags, PHYS_HEAP_USAGE_GPU_LOCAL), + "PhysHeapConfigs not set correctly in the system layer.", PVRSRV_ERROR_PHYSHEAP_CONFIG); + + for (i = 0; i < ARRAY_SIZE(gasCardHeapTemplate); i++) + { + if (pasCardHeapSpec[i].bUsed) + { + if (BITMASK_HAS(pasCardHeapSpec[i].ui32DriverModeMask, + BIT(psDevConfig->eDriverMode))) + { +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_PREMAP_FW_HEAPS) + IMG_UINT32 ui32UsageFlags = pasCardHeapSpec[i].ui32UsageFlags; + + if (BITMASK_HAS(ui32UsageFlags, PHYS_HEAP_USAGE_FW_SHARED) && + (psDevConfig->eDriverMode != DRIVER_MODE_GUEST)) + { + /* The Firmware private heap in which the TEE loads the Fw binary + * carves some memory out of the Host/Native driver's Fw shared heap. 
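/* Hedged sketch of the template walk done by CreateCardGPUHeaps() above:
 * fixed-size specs are budgeted first (PhysHeapSetRequirements subtracts them
 * from the card memory and leaves the remainder to GPU_LOCAL), then each used
 * spec is placed back-to-back while the card and CPU cursors advance. The
 * heap_spec_t / placed_heap_t types are simplified stand-ins. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    bool used;              /* entry enabled for this driver mode     */
    uint64_t size;          /* fixed size, or the GPU_LOCAL remainder */
} heap_spec_t;

typedef struct {
    uint64_t card_addr;     /* device-visible base */
    uint64_t cpu_addr;      /* CPU physical base   */
    uint64_t size;
} placed_heap_t;

/* Place each used heap contiguously; return the updated output index so the
 * caller can verify it matches the expected count, as PhysHeapsInit() does. */
static size_t place_card_heaps(const heap_spec_t *spec, size_t nspec,
                               uint64_t card_base, uint64_t cpu_base,
                               placed_heap_t *out, size_t idx)
{
    for (size_t i = 0; i < nspec; i++) {
        if (!spec[i].used)
            continue;
        out[idx].card_addr = card_base;
        out[idx].cpu_addr  = cpu_base;
        out[idx].size      = spec[i].size;
        card_base += spec[i].size;   /* next heap starts where this one ends */
        cpu_base  += spec[i].size;
        idx++;
    }
    return idx;
}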
+ * Both Fw Private and Fw Shared heaps are premapped by the TEE as + * one contiguous range of RGX_FIRMWARE_RAW_HEAP_SIZE */ + pasCardHeapSpec[i].uiSize -= SECURE_FW_MEM_SIZE; + } +#endif + + /* Determine the memory requirements of heaps with a fixed size */ + ui64FreeCardMemory -= pasCardHeapSpec[i].uiSize; + + /* Count card physheap configs used by the system */ + (*pui32CardPhysHeapCfgCount)++; + } + else + { + /* Heap not used in this driver mode */ + pasCardHeapSpec[i].bUsed = false; + } + } + } + + if (SysRestrictGpuLocalAddPrivateHeap()) + { + IMG_UINT64 ui64GpuSharedMem = SysRestrictGpuLocalPhysheap(ui64FreeCardMemory); + + if (ui64GpuSharedMem == ui64FreeCardMemory) + { + /* No memory reserved for GPU private use, special heap not needed */ + } + else + { + /* Set up the GPU private heap */ + pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].bUsed = true; + pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].uiSize = ui64FreeCardMemory - ui64GpuSharedMem; + ui64FreeCardMemory = ui64GpuSharedMem; + (*pui32CardPhysHeapCfgCount)++; + } } + /* all remaining memory card memory goes to GPU_LOCAL */ + pasCardHeapSpec[HEAP_SPEC_IDX_GPU_LOCAL].uiSize = ui64FreeCardMemory; + return PVRSRV_OK; } static PVRSRV_ERROR -PhysHeapsCreate(const SYS_DATA *psSysData, void *pvPrivData, +PhysHeapsCreate(SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG *psDevConfig, PHYS_HEAP_CONFIG **ppasPhysHeapsOut, IMG_UINT32 *puiPhysHeapCountOut) { PHYS_HEAP_CONFIG *pasPhysHeaps; PVRSRV_ERROR eError; - IMG_UINT32 ui32NumHeaps = psSysData->pdata->mem_mode == TC_MEMORY_HYBRID ? - PHY_HEAP_HYBRID_NUM : PHY_HEAP_LMA_NUM; + IMG_UINT32 ui32NumHeaps = 0; + CARD_PHYS_HEAP_CONFIG_SPEC asCardHeapSpec[ARRAY_SIZE(gasCardHeapTemplate)]; + + /* Initialise the local heap specs with the build-time template */ + memcpy(asCardHeapSpec, gasCardHeapTemplate, sizeof(gasCardHeapTemplate)); + + eError = PhysHeapSetRequirements(psSysData, psDevConfig, asCardHeapSpec, &ui32NumHeaps); + if (eError != PVRSRV_OK) + { + return eError; + } + + psDevConfig->bHasNonMappableLocalMemory = asCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].bUsed; + + if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + /* CPU_LOCAL heap also required */ + ui32NumHeaps++; + } + + /* DISPLAY heap is always present */ + ui32NumHeaps++; - pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * ui32NumHeaps); + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * ui32NumHeaps); if (!pasPhysHeaps) { return PVRSRV_ERROR_OUT_OF_MEMORY; } - eError = PhysHeapsInit(psSysData, pasPhysHeaps, pvPrivData); + eError = PhysHeapsInit(psSysData, asCardHeapSpec, pasPhysHeaps, psDevConfig, ui32NumHeaps); if (eError != PVRSRV_OK) { OSFreeMem(pasPhysHeaps); @@ -613,6 +891,19 @@ PhysHeapsCreate(const SYS_DATA *psSysData, void *pvPrivData, static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) { +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + IMG_UINT32 ui32DeviceID; + + for (ui32DeviceID=0; ui32DeviceID < PVRSRV_MAX_DEVICES; ui32DeviceID++) + { + PVRSRV_DEVICE_CONFIG *psDC = apsDevCfgs[ui32DeviceID]; + if (psDC == psDevConfig) + { + apsDevCfgs[ui32DeviceID] = NULL; + } + } +#endif + if (psDevConfig->pszVersion) { OSFreeMem(psDevConfig->pszVersion); @@ -679,6 +970,86 @@ static void odinTCFreeCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, tc_dma_chan_free(psDev->parent, chan); } + +static void GetDriverMode(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32DeviceID; + + /* + * Drivers with virtualization support should check if 
the mode in which the + * driver must control a device has been explicitly specified at load time + * through module parameters. + * Multi-device platforms must find the internal ID of the device currently + * being created when checking for its associated DriverMode parameter. + */ + if (PVRSRVAcquireInternalID(&ui32DeviceID) != PVRSRV_OK) + { + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; + return; + } + + if (psPVRSRVData->aeModuleParamDriverMode[ui32DeviceID] == DRIVER_MODE_DEFAULT) + { +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) + void __iomem *pvRegBase; + + pvRegBase = (void __iomem *) OSMapPhysToLin(psDevConfig->sRegsCpuPBase, psDevConfig->ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + if (pvRegBase == NULL) + { + /* failed to map register bank, default to native mode */ + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; + } + else + { + IMG_UINT64 ui64ClkCtrl; + + /* the CLK_CTRL register is valid only in the Os 0 (Host) register bank + * if it reads 0 then we can conclude this Os is set up to run as Guest */ +#if defined(RGX_CR_CLK_CTRL) + ui64ClkCtrl = OSReadHWReg64(pvRegBase, RGX_CR_CLK_CTRL); +#else + ui64ClkCtrl = OSReadHWReg64(pvRegBase, RGX_CR_CLK_CTRL1); +#endif + OSUnMapPhysToLin((void __force *) pvRegBase, psDevConfig->ui32RegsSize); + + psDevConfig->eDriverMode = (ui64ClkCtrl != 0) ? (DRIVER_MODE_HOST) : (DRIVER_MODE_GUEST); + } +#else + psDevConfig->eDriverMode = DRIVER_MODE_NATIVE; +#endif + } + else + { + psDevConfig->eDriverMode = psPVRSRVData->aeModuleParamDriverMode[ui32DeviceID]; + } +} + +static PVRSRV_ERROR PrepareFWImage(IMG_HANDLE hSysData, PVRSRV_FW_PARAMS *psTDFWParams); + +static PVRSRV_ERROR PrepareFWImage(IMG_HANDLE hSysData, PVRSRV_FW_PARAMS *psTDFWParams) +{ + if ((psTDFWParams != NULL) && + (psTDFWParams->pvFirmware != 0) && + (psTDFWParams->ui32FirmwareSize > 0)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware image size = %u;", + __func__, psTDFWParams->ui32FirmwareSize)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Firmware image.", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Firmware binary not signed on this platform */ + psTDFWParams->pvSignature = NULL; + psTDFWParams->ui32SignatureSize = 0; + + return PVRSRV_OK; +} + static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) { @@ -688,6 +1059,14 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps; IMG_UINT32 uiPhysHeapCount; PVRSRV_ERROR eError; + IMG_UINT32 ui32DeviceID; + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + PVR_LOG_RETURN_IF_FALSE((psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && + psSysData->pdata->mem_mode == TC_MEMORY_LOCAL), + "Multidevice virtualization setup supported only on Odin device with TC_MEMORY_LOCAL", + PVRSRV_ERROR_INVALID_DEVICE); +#endif psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + sizeof(*psRGXData) + @@ -697,18 +1076,12 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, return PVRSRV_ERROR_OUT_OF_MEMORY; } - psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); - psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); - - eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); - if (eError != PVRSRV_OK) - { - goto ErrorFreeDevConfig; - } + psRGXData = IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig)); + psRGXTimingInfo = IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData)); /* Setup RGX specific timing data */ - 
psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) / - tc_core_clock_multiplex(&psSysData->pdev->dev); + psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(psSysData->pdev->dev.parent) / + tc_core_clock_multiplex(psSysData->pdev->dev.parent); psRGXTimingInfo->bEnableActivePM = IMG_FALSE; psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; @@ -724,11 +1097,61 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; psDevConfig->ui32RegsSize = resource_size(psSysData->registers); - psDevConfig->ui32IRQ = TC_INTERRUPT_EXT; + PVRSRVAcquireInternalID(&ui32DeviceID); + /* + * Current FPGA images route don't route the GPU's OSID IRQs to their proper card signals. + * All OSID IRQs are ORed together and output on the legacy DUT_IRQ pin. + * To work around this, we pass the legacy DUT IRQ to the Host driver and leave the floating + * OSID IRQs to Guest Drivers. When an IRQ is received via the DUT line, the system layer's + * IRQ handler can query the Firmware's state register to find out which device/OSID emitted + * an interrupt and call the appropriate handler for each device. + */ + psDevConfig->ui32IRQ = (ui32DeviceID == 0) ? TC_INTERRUPT_EXT : (TC_INTERRUPT_OSID0 + ui32DeviceID); + + GetDriverMode(psDevConfig); + +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* If there is device running in native mode, prevent any attempts at + * creating any Guest devices, as there will be no Host to support them. + * Currently the VZFPGA supports only one physical GPU. */ + if (PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig)) + { + IMG_UINT32 i; + + for (i=0; i < PVRSRV_MAX_DEVICES; i++) + { + PVRSRV_DEVICE_CONFIG *psDC = apsDevCfgs[i]; + if ((psDC != NULL) && (PVRSRV_VZ_MODE_IS(NATIVE, DEVCFG, psDC))) + { + PVR_DPF((PVR_DBG_ERROR, "%s() Device %u is already running in native mode, no other Guests supported in the system.", __func__, psDC->psDevNode->sDevId.ui32InternalID)); + eError = PVRSRV_ERROR_INVALID_DEVICE; + goto ErrorFreeDevConfig; + } + } + } + + apsDevCfgs[ui32DeviceID] = psDevConfig; +#endif + + eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); + if (eError != PVRSRV_OK) + { + goto ErrorFreeDevConfig; + } psDevConfig->pasPhysHeaps = pasPhysHeaps; psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; - psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + + if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && + psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) + { + psDevConfig->eDefaultHeap = SysDefaultToCpuLocalHeap() ? 
+ PVRSRV_PHYS_HEAP_CPU_LOCAL : PVRSRV_PHYS_HEAP_GPU_LOCAL; + } + else + { + psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + } /* Only required for LMA but having this always set shouldn't be a problem */ psDevConfig->bDevicePA0IsValid = IMG_TRUE; @@ -760,11 +1183,19 @@ static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, psDevConfig->pfnDevPhysAddr2DmaAddr = odinTCDevPhysAddr2DmaAddr; psDevConfig->pszDmaTxChanName = psSysData->pdata->tc_dma_tx_chan_name; psDevConfig->pszDmaRxChanName = psSysData->pdata->tc_dma_rx_chan_name; - psDevConfig->bHasDma = true; + psDevConfig->bHasDma = IMG_TRUE; /* Following two values are expressed in number of bytes */ psDevConfig->ui32DmaTransferUnit = 1; psDevConfig->ui32DmaAlignment = 1; +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(FPGA) && !defined(SUPPORT_SECURITY_VALIDATION) + psDevConfig->pfnTDSendFWImage = TEE_LoadFirmwareWrapper; + psDevConfig->pfnTDSetPowerParams = TEE_SetPowerParams; + psDevConfig->pfnTDRGXStart = TEE_RGXStart; + psDevConfig->pfnTDRGXStop = TEE_RGXStop; +#endif + psDevConfig->pfnPrepareFWImage = PrepareFWImage; + *ppsDevConfigOut = psDevConfig; return PVRSRV_OK; @@ -779,6 +1210,7 @@ PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) PVRSRV_DEVICE_CONFIG *psDevConfig; SYS_DATA *psSysData; resource_size_t uiRegistersSize; + IMG_UINT32 ui32MinRegBankSize; PVRSRV_ERROR eError; int err = 0; @@ -826,13 +1258,40 @@ PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) goto ErrorDevDisable; } +#if defined(SUPPORT_TRUSTED_DEVICE) + psSysData->ui64FwPrivateHeapSize = SECURE_FW_MEM_SIZE; + psSysData->ui64GpuRegisterBase = psSysData->registers->start; + psSysData->ui32SysDataSize = sizeof(SYS_DATA); + + { + void *pvAppHintState = NULL; + IMG_UINT64 ui64AppHintDefault; + + OSCreateAppHintState(&pvAppHintState); + ui64AppHintDefault = PVRSRV_APPHINT_GUESTFWHEAPSTRIDE; + OSGetAppHintUINT64(APPHINT_NO_DEVICE, pvAppHintState, GuestFWHeapStride, + &ui64AppHintDefault, &psSysData->ui64FwHeapStride); + OSFreeAppHintState(pvAppHintState); + } +#else + psSysData->ui64FwPrivateHeapSize = 0; +#endif + /* Check the address range is large enough. */ uiRegistersSize = resource_size(psSysData->registers); - if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE) +#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) + /* each GPU instance gets the minimum 64kb register range */ + ui32MinRegBankSize = RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE; +#else + /* the GPU gets the entire 64MB IO range */ + ui32MinRegBankSize = SYS_RGX_REG_REGION_SIZE; +#endif + + if (uiRegistersSize < ui32MinRegBankSize) { PVR_DPF((PVR_DBG_ERROR, "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)", - __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE)); + __func__, &uiRegistersSize, ui32MinRegBankSize)); eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; goto ErrorDevDisable; @@ -851,15 +1310,18 @@ PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) goto ErrorDevDisable; } - /* - * Reset the device as required. - */ - eError = DevReset(psSysData, IMG_TRUE); - if (eError != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't reset device", __func__)); - goto ErrorDevDisable; - } + if (psSysData->pdata->baseboard != TC_BASEBOARD_VALI) + { + /* + * Reset the device as required. 
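/* Sketch of the host/guest probe used by GetDriverMode() above: the clock
 * control register is only meaningful in the OS 0 (Host) register bank, so a
 * zero readback implies this OS runs as a Guest, while a failed mapping falls
 * back to native mode. The regs pointer and register offset are placeholders
 * for the real mapped bank and RGX_CR_CLK_CTRL location. */
#include <stddef.h>
#include <stdint.h>

enum driver_mode { MODE_NATIVE, MODE_HOST, MODE_GUEST };

static enum driver_mode probe_driver_mode(const volatile uint64_t *regs,
                                          size_t clk_ctrl_off_qwords)
{
    if (regs == NULL)
        return MODE_NATIVE;   /* could not map the register bank */

    return (regs[clk_ctrl_off_qwords] != 0) ? MODE_HOST : MODE_GUEST;
}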
+ */ + eError = DevReset(psSysData, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't reset device", __func__)); + goto ErrorDevDisable; + } + } eError = DeviceConfigCreate(psSysData, &psDevConfig); if (eError != PVRSRV_OK) @@ -961,8 +1423,35 @@ typedef struct static void TCInterruptHandler(void* pvData) { +#if (RGX_NUM_DRIVERS_SUPPORTED == 1) LISR_DATA *psLISRData = pvData; psLISRData->pfnLISR(psLISRData->pvData); +#else + IMG_UINT32 ui32DeviceID; + + PVR_UNREFERENCED_PARAMETER(pvData); + + for (ui32DeviceID=0; ui32DeviceID < PVRSRV_MAX_DEVICES; ui32DeviceID++) + { + PVRSRV_DEVICE_CONFIG *psDC = apsDevCfgs[ui32DeviceID]; + + if (psDC != NULL) + { + PVRSRV_RGXDEV_INFO *psDI = (PVRSRV_RGXDEV_INFO *) psDC->psDevNode->pvDevice; + + if ((psDI != NULL) && (psDI->pvRegsBaseKM != NULL)) + { + IMG_UINT32 ui32Status = OSReadHWReg32(psDI->pvRegsBaseKM, RGX_CR_IRQ_OS0_EVENT_STATUS); + + if (ui32Status) + { + LISR_DATA *psLISRData = psDI->pvLISRData; + psLISRData->pfnLISR(psLISRData->pvData); + } + } + } + } +#endif } PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, @@ -977,7 +1466,8 @@ PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, PVRSRV_ERROR eError; int err; - if (ui32IRQ != TC_INTERRUPT_EXT) + if ((ui32IRQ != TC_INTERRUPT_EXT) && + ((ui32IRQ < TC_INTERRUPT_OSID0) || (ui32IRQ > TC_INTERRUPT_OSID7))) { PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ)); return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; @@ -1048,3 +1538,235 @@ PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) return PVRSRV_OK; } + +#if defined(SUPPORT_TRUSTED_DEVICE) && defined(FPGA) && !defined(SUPPORT_SECURITY_VALIDATION) +/* The TEE needs to query the device's hardware feature capabilities and ERNs/BRNs. + * On first entry to the TEE, supply the config structure embedded in SYS_DATA */ +static PVRSRV_ERROR TEE_LoadFirmwareWrapper(IMG_HANDLE hSysData, PVRSRV_FW_PARAMS *psTDFWParams) +{ + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); + psDeviceNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDeviceNode->pvDevice; + psSysData->sDevFeatureCfg = psDevInfo->sDevFeatureCfg; + OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); + + return TEE_LoadFirmware(hSysData, psTDFWParams); +} +#endif + +/****************************************************************************************************/ +/**** VM migration test code ****/ +/****************************************************************************************************/ +static void SwapHyperlanes(PVRSRV_DEVICE_NODE *psSrcNode, PVRSRV_DEVICE_NODE *psDestNode); +static void PreMigrationDeviceSuspend(struct drm_device *psDev); +static void PostMigrationDeviceResume(struct drm_device *psDev); + +void PVRVMMigration(unsigned int src, unsigned int dest); +EXPORT_SYMBOL(PVRVMMigration); + +#define SWAP_REGSBASE_PTR(a, b) do \ + { \ + a = (void __iomem *)(((uintptr_t)a)^((uintptr_t)b)); \ + b = (void __iomem *)(((uintptr_t)a)^((uintptr_t)b)); \ + a = (void __iomem *)(((uintptr_t)a)^((uintptr_t)b)); \ + } while (0) + +static void SwapHyperlanes(PVRSRV_DEVICE_NODE *psSrcNode, PVRSRV_DEVICE_NODE *psDestNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psHostNode = PVRSRVGetDeviceInstance(0); + PVRSRV_RGXDEV_INFO *psSrcInfo = psSrcNode->pvDevice; + PVRSRV_RGXDEV_INFO *psDestInfo = psDestNode->pvDevice; + 
PVRSRV_DEVICE_CONFIG *psSrcConfig = psSrcNode->psDevConfig; + PVRSRV_DEVICE_CONFIG *psDestConfig = psDestNode->psDevConfig; + LISR_DATA *psSrcLISRData = (LISR_DATA *) psSrcInfo->pvLISRData; + void *pfnLISR = psSrcLISRData->pfnLISR; + IMG_UINT32 ui32SrcHyperLane, ui32DestHyperLane; + + PVR_LOG_RETURN_VOID_IF_FALSE(((psHostNode != NULL) && + (psHostNode->psDevConfig != NULL)), + "Device 0 (expected Host) not initialised."); + + /* Determine the HyperLane ID used by a Guest Device from the Register Bank Base address used */ + ui32SrcHyperLane = (psSrcConfig->sRegsCpuPBase.uiAddr - psHostNode->psDevConfig->sRegsCpuPBase.uiAddr) / psSrcConfig->ui32RegsSize; + ui32DestHyperLane = (psDestConfig->sRegsCpuPBase.uiAddr - psHostNode->psDevConfig->sRegsCpuPBase.uiAddr) / psDestConfig->ui32RegsSize; + + PVR_DPF((PVR_DBG_WARNING, "%s: Swapping hyperlanes between Dev%u (hyperlane%u) and Dev%u (hyperlane%u)", __func__, + psSrcNode->sDevId.ui32InternalID, ui32SrcHyperLane, + psDestNode->sDevId.ui32InternalID, ui32DestHyperLane)); + PVR_DPF((PVR_DBG_WARNING, "%s: Resulting configuration: Dev%u (hyperlane%u) and Dev%u (hyperlane%u)", __func__, + psSrcNode->sDevId.ui32InternalID, ui32DestHyperLane, + psDestNode->sDevId.ui32InternalID, ui32SrcHyperLane)); + + /* swap the register bank details */ + SWAP_REGSBASE_PTR(psSrcInfo->pvRegsBaseKM, psDestInfo->pvRegsBaseKM); + SWAP(psSrcConfig->sRegsCpuPBase.uiAddr, psDestConfig->sRegsCpuPBase.uiAddr); + /* DevConfig->ui32RegsSize remains the same */ + + /* Swap interrupt lines between devices */ + eError = SysUninstallDeviceLISR(psSrcInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysUninstallDeviceLISR(IRQ%u, Device %u)", + psSrcConfig->ui32IRQ, ui32SrcHyperLane); + eError = SysUninstallDeviceLISR(psDestInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysUninstallDeviceLISR(IRQ%u, Device %u)", + psDestConfig->ui32IRQ, ui32DestHyperLane); + + SWAP(psSrcConfig->ui32IRQ, psDestConfig->ui32IRQ); + + eError = SysInstallDeviceLISR(psSrcConfig->hSysData, + psSrcConfig->ui32IRQ, + PVRSRV_MODNAME, + pfnLISR, + psSrcNode, + &psSrcInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysInstallDeviceLISR(IRQ%u, Device %u)", + psSrcConfig->ui32IRQ, ui32SrcHyperLane); + + eError = SysInstallDeviceLISR(psDestConfig->hSysData, + psDestConfig->ui32IRQ, + PVRSRV_MODNAME, + pfnLISR, + psDestNode, + &psDestInfo->pvLISRData); + PVR_LOG_IF_ERROR_VA(PVR_DBG_ERROR, eError, "SysInstallDeviceLISR(IRQ%u, Device %u)", + psDestConfig->ui32IRQ, ui32DestHyperLane); + + /* Swap contents of LMA carveouts between virtual devices */ + { + /* Guest Raw Fw Heap mapping is done using the Host Devices */ + PHYS_HEAP *psSrcHeap = NULL; + PHYS_HEAP *psDestHeap = NULL; + IMG_DEV_PHYADDR sSrcHeapBase, sDestHeapBase; + + psSrcHeap = psHostNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32SrcHyperLane]; + psDestHeap = psHostNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DestHyperLane]; + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcHeap != NULL) && + (psDestHeap != NULL)), + "Guest firmware heaps not premapped by the Host Device."); + + eError = PhysHeapGetDevPAddr(psSrcHeap, &sSrcHeapBase); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PhysHeapGetDevPAddr(src fw heap)"); + eError = PhysHeapGetDevPAddr(psDestHeap, &sDestHeapBase); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PhysHeapGetDevPAddr(dest fw heap)"); + + eError = PvzServerUnmapDevPhysHeap(ui32SrcHyperLane, 0); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerUnmapDevPhysHeap(src fw heap)"); + eError = 
PvzServerUnmapDevPhysHeap(ui32DestHyperLane, 0); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerUnmapDevPhysHeap(dest fw heap)"); + + PhysHeapRelease(psHostNode->apsFWPremapPhysHeap[ui32SrcHyperLane]); + PhysHeapRelease(psHostNode->apsFWPremapPhysHeap[ui32DestHyperLane]); + + /* create new heaps with new base addresses */ + eError = PvzServerMapDevPhysHeap(ui32SrcHyperLane, 0, RGX_FIRMWARE_RAW_HEAP_SIZE, sDestHeapBase.uiAddr); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerMapDevPhysHeap(src fw heap)"); + eError = PvzServerMapDevPhysHeap(ui32DestHyperLane, 0, RGX_FIRMWARE_RAW_HEAP_SIZE, sSrcHeapBase.uiAddr); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PvzServerMapDevPhysHeap(dest fw heap)"); + } +} + +static void PreMigrationDeviceSuspend(struct drm_device *psDev) +{ + struct pvr_drm_private *psDevPriv = psDev->dev_private; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; + PVRSRV_ERROR eError; + + /* LinuxBridgeBlockClientsAccess prevents processes from using the driver + * while it's suspended (this is needed for Android). */ + eError = LinuxBridgeBlockClientsAccess(psDevPriv, IMG_TRUE); + PVR_LOG_RETURN_VOID_IF_FALSE(eError == PVRSRV_OK, + "LinuxBridgeBlockClientsAccess()"); + +#if defined(SUPPORT_AUTOVZ) + /* To allow the driver to power down the GPU under AutoVz, the firmware must + * be declared as offline, otherwise all power requests will be ignored. */ + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; +#endif + + if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF, + PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ) != PVRSRV_OK) + { + /* Ignore return error as we're already returning an error here. */ + (void) LinuxBridgeUnblockClientsAccess(psDevPriv); + } +} + +static void PostMigrationDeviceResume(struct drm_device *psDev) +{ + struct pvr_drm_private *psDevPriv = psDev->dev_private; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; + + PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON, + PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ); + + /* Ignore return error. We should proceed even if this fails. */ + (void) LinuxBridgeUnblockClientsAccess(psDevPriv); + + /* + * Reprocess the device queues in case commands were blocked during + * suspend. 
+ */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE) + { + PVRSRVCheckStatus(NULL); + } +} + +void PVRVMMigration(unsigned int src, unsigned int dest) +{ + PVRSRV_DEVICE_NODE *psSrcNode = PVRSRVGetDeviceInstance(src); + PVRSRV_DEVICE_NODE *psDestNode = PVRSRVGetDeviceInstance(dest); + struct device *psSrcDev, *psDestDev; + struct drm_device *psSrcDrmDev, *psDestDrmDev; + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode != NULL) && (psDestNode != NULL) && (psSrcNode != psDestNode)), + "Invalid Device IDs requested for migration."); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE) && + (psDestNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)), + "Devices not fully initialised."); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode->psDevConfig != NULL) && + (psDestNode->psDevConfig != NULL)), + "Device config structure is NULL."); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcNode->psDevConfig->pvOSDevice != NULL) && + (psDestNode->psDevConfig->pvOSDevice != NULL)), + "Linux kernel device pointer is NULL."); + + psSrcDev = psSrcNode->psDevConfig->pvOSDevice; + psDestDev = psDestNode->psDevConfig->pvOSDevice; + psSrcDrmDev = dev_get_drvdata(psSrcDev); + psDestDrmDev = dev_get_drvdata(psDestDev); + + PVR_LOG_RETURN_VOID_IF_FALSE(((psSrcDrmDev != NULL) && + (psDestDrmDev != NULL)), + "Linux kernel drm_device pointer is NULL."); + + PVR_DPF((PVR_DBG_WARNING, "%s: Suspending device %u before migration", + __func__, psSrcNode->sDevId.ui32InternalID)); + PreMigrationDeviceSuspend(psSrcDrmDev); + + PVR_DPF((PVR_DBG_WARNING, "%s: Suspending device %u before migration", + __func__, psDestNode->sDevId.ui32InternalID)); + PreMigrationDeviceSuspend(psDestDrmDev); + + PVR_DPF((PVR_DBG_WARNING, "%s: Migrating vGPU resources (regbank, irq, osid)", __func__)); + SwapHyperlanes(psSrcNode, psDestNode); + + PVR_DPF((PVR_DBG_WARNING, "%s: Resuming device %u", __func__, + psSrcNode->sDevId.ui32InternalID)); + PostMigrationDeviceResume(psSrcDrmDev); + PVR_DPF((PVR_DBG_WARNING, "%s: Resuming device %u", __func__, + psDestNode->sDevId.ui32InternalID)); + PostMigrationDeviceResume(psDestDrmDev); +} diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysinfo.h index 7171a525f5e4..91b41bc4b66d 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysinfo.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/volcanic/rgx_linux_tc/sysinfo.h @@ -48,13 +48,23 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
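/* Condensed sketch of the migration sequence implemented by PVRVMMigration()
 * above: quiesce both virtual devices, exchange their per-device resources
 * (register bank base, IRQ line, premapped firmware heap base), then resume
 * both. The vgpu_t type and the suspend/resume hooks are illustrative only,
 * not the driver's real interfaces. */
#include <stdint.h>

typedef struct {
    uint64_t regbank_base;   /* CPU physical base of this device's registers */
    int      irq;            /* interrupt line routed to this device         */
    uint64_t fw_heap_base;   /* premapped firmware heap base address         */
} vgpu_t;

static void suspend_vgpu(vgpu_t *g) { (void)g; /* block clients, power off  */ }
static void resume_vgpu(vgpu_t *g)  { (void)g; /* power on, unblock clients */ }

#define SWAP_U64(a, b) do { uint64_t t_ = (a); (a) = (b); (b) = t_; } while (0)

static void migrate(vgpu_t *src, vgpu_t *dst)
{
    suspend_vgpu(src);
    suspend_vgpu(dst);

    SWAP_U64(src->regbank_base, dst->regbank_base);
    SWAP_U64(src->fw_heap_base, dst->fw_heap_base);
    { int t = src->irq; src->irq = dst->irq; dst->irq = t; }

    resume_vgpu(src);
    resume_vgpu(dst);
}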
#if defined(VIRTUAL_PLATFORM) #define MAX_HW_TIME_US (240000000) #define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000) +#define EVENT_OBJECT_TIMEOUT_US (120000000) #else #define MAX_HW_TIME_US (20000000) #define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000) // (1500) + +#if defined(TC_APOLLO_TCF5) +#define EVENT_OBJECT_TIMEOUT_US (2000000) +#else +#define EVENT_OBJECT_TIMEOUT_US (100000) +#endif + #endif #define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) #define WAIT_TRY_COUNT (10000) + + #define SYS_RGX_DEV_NAME "tc_rogue" #endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/Kbuild.mk b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/Kbuild.mk index 780424d20a6b..fe58742c81a3 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/Kbuild.mk +++ b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/Kbuild.mk @@ -38,24 +38,5 @@ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### ########################################################################### -PVRSRVKM_NAME = $(PVRSRV_MODNAME) -$(PVRSRVKM_NAME)-y += services/system/$(PVR_SYSTEM)/sysconfig.o \ - services/system/$(PVR_SYSTEM)/vz_validation.o \ - services/system/common/env/linux/interrupt_support.o \ - -$(PVRSRVKM_NAME)-y += \ - services/server/common/vmm_pvz_client.o \ - services/server/common/vmm_pvz_server.o \ - services/server/common/vz_vmm_pvz.o \ - services/server/common/vz_vmm_vm.o \ - -ifeq ($(PVR_ARCH),rogue) - $(PVRSRVKM_NAME)-y += \ - services/system/rogue/common/env/linux/dma_support.o \ - services/system/rogue/common/vmm_type_$(VMM_TYPE).o - else - $(PVRSRVKM_NAME)-y += \ - services/system/volcanic/common/env/linux/dma_support.o \ - services/system/volcanic/common/vmm_type_$(VMM_TYPE).o -endif +$(PVRSRV_MODNAME)-y += services/system/$(PVR_SYSTEM)/sysconfig.o diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.c b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.c index ba1fa4ea780b..6b65db8c40fd 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.c +++ b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.c @@ -52,9 +52,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include "interrupt_support.h" #include "sysconfig.h" -#if defined(SUPPORT_GPUVIRT_VALIDATION) -#include "vz_validation.h" -#endif +#define VZ_EXAMPLE_SYSTEM_NAME "vz_example" static void SysCpuPAToDevPA(IMG_HANDLE hPrivData, IMG_UINT32 ui32NumOfAddr, IMG_DEV_PHYADDR *psDevPA, IMG_CPU_PHYADDR *psCpuPA); @@ -66,28 +64,19 @@ typedef struct _SYS_DATA_ IMG_HANDLE hSysLISRData; PFN_LISR pfnDeviceLISR; void *pvDeviceLISRData; + PVRSRV_DRIVER_MODE eDriverMode; } SYS_DATA; typedef enum _PHYS_HEAP_IDX_ { PHYS_HEAP_IDX_SYSMEM, PHYS_HEAP_IDX_FIRMWARE, +#if defined(RGX_PREMAP_FW_HEAPS) + PHYS_HEAP_IDX_FW_PT, +#endif PHYS_HEAP_IDX_COUNT, } PHYS_HEAP_IDX; -static PHYS_HEAP_CONFIG gsPhysHeapConfig[PHYS_HEAP_IDX_COUNT]; -static PVRSRV_DEVICE_CONFIG gsDevCfg; -static SYS_DATA gsSysData = {NULL, NULL, NULL}; - -static RGX_TIMING_INFORMATION gsTimingInfo = { - DEFAULT_CLOCK_RATE, /* ui32CoreClockSpeed */ - IMG_FALSE, /* bEnableActivePM */ - IMG_FALSE, /* bEnableRDPowIsland */ - 0 /* ui32ActivePMLatencyms */ -}; - -static RGX_DATA gsRGXData = {&gsTimingInfo}; - static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = { SysCpuPAToDevPA, /* pfnCpuPAddrToDevPAddr */ SysDevPAToCpuPA, /* pfnDevPAddrToCpuPAddr */ @@ -142,6 +131,29 @@ static IMG_BOOL SystemISRHandler(void *pvData) /* Any special system interrupt handling goes here */ +#if defined(RGX_SINGLE_IRQ_WORKAROUND) + if (psSysData->eDriverMode == DRIVER_MODE_HOST) + { + PVRSRV_DEVICE_NODE *psDeviceNode = psSysData->pvDeviceLISRData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32DriverID; + + /* When hardware support for multiple IRQs is not implemented, the virtualized GPU + * cannot notify drivers of events using dedicated interrupt lines. The single line + * available is routed to Host driver, which in turn broadcasts the IRQ to all other + * active Drivers currently connected to the Firmware. */ + FOREACH_ACTIVE_DRIVER(psDevInfo, ui32DriverID) + { + /* the customer must implement a method which allows the Host driver to trigger + * an interrupt signal into each operating system running a Guest GPU driver. + * This method is platform specific. 
*/ + + /* INJECT_IRQ_INTO_VM(ui32DriverID); */ + } + END_FOREACH_ACTIVE_DRIVER + } +#endif + bHandled = psSysData->pfnDeviceLISR(psSysData->pvDeviceLISRData); return bHandled; } @@ -207,10 +219,15 @@ static PVRSRV_ERROR SysPrePower(IMG_HANDLE hSysData, PVRSRV_SYS_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags) { - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + + if (psSysData->eDriverMode == DRIVER_MODE_GUEST) + { + return PVRSRV_OK; + } + PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", PVRSRV_OK); - PVR_UNREFERENCED_PARAMETER(hSysData); PVR_UNREFERENCED_PARAMETER(ePwrFlags); /* on powering down */ @@ -234,10 +251,15 @@ static PVRSRV_ERROR SysPostPower(IMG_HANDLE hSysData, PVRSRV_SYS_POWER_STATE eCurrentPowerState, PVRSRV_POWER_FLAGS ePwrFlags) { - PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + SYS_DATA *psSysData = (SYS_DATA *)hSysData; + + if (psSysData->eDriverMode == DRIVER_MODE_GUEST) + { + return PVRSRV_OK; + } + PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", PVRSRV_OK); - PVR_UNREFERENCED_PARAMETER(hSysData); PVR_UNREFERENCED_PARAMETER(ePwrFlags); /* on powering up */ @@ -284,7 +306,7 @@ static PVRSRV_DRIVER_MODE GetDriverMode(struct platform_device *psDev) { PVRSRV_DRIVER_MODE eDriverMode; -#if (RGX_NUM_OS_SUPPORTED > 1) +#if (RGX_NUM_DRIVERS_SUPPORTED > 1) if (of_property_read_u32(psDev->dev.of_node, "vz-mode", (IMG_UINT32*) &eDriverMode)) { PVR_DPF((PVR_DBG_WARNING, "%s: Cannot retrieve driver mode from Device Tree. " @@ -299,76 +321,116 @@ static PVRSRV_DRIVER_MODE GetDriverMode(struct platform_device *psDev) } static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice, - PVRSRV_DEVICE_CONFIG *psDevCfg) + PVRSRV_DEVICE_CONFIG **ppsDevConfig) { PVRSRV_ERROR eError = PVRSRV_OK; struct platform_device *psDev; struct resource *dev_res = NULL; int dev_irq; PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + SYS_DATA *psSysData; + PHYS_HEAP_CONFIG *psPhysHeapConfig; + IMG_UINT32 ui32DeviceID; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + + sizeof(*psRGXData) + + sizeof(*psRGXTimingInfo) + + sizeof(*psSysData) + + sizeof(*psPhysHeapConfig) * PHYS_HEAP_IDX_COUNT); + + PVR_LOG_GOTO_IF_NOMEM(psDevConfig, eError, err_sys); + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + psSysData = (SYS_DATA *)((IMG_CHAR *)psRGXTimingInfo + sizeof(*psRGXTimingInfo)); + psPhysHeapConfig = (PHYS_HEAP_CONFIG *)((IMG_CHAR *)psSysData + sizeof(*psSysData)); psDev = to_platform_device((struct device *)pvOSDevice); dma_set_mask(pvOSDevice, DMA_BIT_MASK(40)); + /* + * Drivers with virtualization support should check if the mode in which the + * driver must control a device has been explicitly specified at load time + * through module parameters. + * Multi-device platforms must find the internal ID of the device currently + * being created when checking for its associated DriverMode parameter. 
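/* The DeviceConfigCreate() refactor above allocates one zeroed block and
 * carves the device config, timing, system-data and heap-array structures out
 * of it by offset, so a single free releases everything. Minimal sketch with
 * placeholder struct names; alignment is assumed benign for these members, as
 * in the original. */
#include <stdlib.h>

struct rgx_data { unsigned core_clock_hz; };
struct sys_data { int irq; };
struct heap_cfg { unsigned flags; };
struct dev_cfg  { struct rgx_data *rgx; struct sys_data *sys; struct heap_cfg *heaps; };

static struct dev_cfg *dev_cfg_create(size_t nheaps)
{
    size_t total = sizeof(struct dev_cfg) + sizeof(struct rgx_data) +
                   sizeof(struct sys_data) + sizeof(struct heap_cfg) * nheaps;
    char *blk = calloc(1, total);
    if (blk == NULL)
        return NULL;

    struct dev_cfg *cfg = (struct dev_cfg *)blk;
    cfg->rgx   = (struct rgx_data *)(blk + sizeof(*cfg));
    cfg->sys   = (struct sys_data *)((char *)cfg->rgx + sizeof(*cfg->rgx));
    cfg->heaps = (struct heap_cfg *)((char *)cfg->sys + sizeof(*cfg->sys));
    return cfg;    /* free(cfg) releases the whole carved block */
}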
+ */ + eError = PVRSRVAcquireInternalID(&ui32DeviceID); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireInternalID", err_sys); + + if (psPVRSRVData->aeModuleParamDriverMode[ui32DeviceID] == DRIVER_MODE_DEFAULT) + { + psDevConfig->eDriverMode = GetDriverMode(psDev); + } + else + { + psDevConfig->eDriverMode = psPVRSRVData->aeModuleParamDriverMode[ui32DeviceID]; + } + dev_irq = platform_get_irq(psDev, 0); if (dev_irq < 0) { - PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __func__, -dev_irq)); - eError = PVRSRV_ERROR_INVALID_DEVICE; - return eError; + PVR_LOG_GOTO_WITH_ERROR("platform_get_irq", eError, PVRSRV_ERROR_INVALID_DEVICE, err_sys); } dev_res = platform_get_resource(psDev, IORESOURCE_MEM, 0); if (dev_res == NULL) { - PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __func__)); - eError = PVRSRV_ERROR_INVALID_DEVICE; - return eError; + PVR_LOG_GOTO_WITH_ERROR("platform_get_resource", eError, PVRSRV_ERROR_INVALID_DEVICE, err_sys); } /* Device Setup */ - psDevCfg->pvOSDevice = pvOSDevice; - psDevCfg->pszName = "pvrsrvkm"; - psDevCfg->pszVersion = NULL; + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = VZ_EXAMPLE_SYSTEM_NAME; + psDevConfig->pszVersion = NULL; /* Device setup information */ - psDevCfg->sRegsCpuPBase.uiAddr = dev_res->start; - psDevCfg->ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start); - psDevCfg->ui32IRQ = dev_irq; + psDevConfig->sRegsCpuPBase.uiAddr = dev_res->start; + psDevConfig->ui32IRQ = dev_irq; + + psDevConfig->ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start); + + PVR_ASSERT(psDevConfig->ui32RegsSize >= RGX_VIRTUALISATION_REG_SIZE_PER_OS); /* Power management */ - psDevCfg->pfnPrePowerState = SysPrePower; - psDevCfg->pfnPostPowerState = SysPostPower; - psDevCfg->pfnGpuDomainPower = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXGpuDomainPower; + psDevConfig->pfnPrePowerState = SysPrePower; + psDevConfig->pfnPostPowerState = SysPostPower; + psDevConfig->pfnGpuDomainPower = PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig) ? 
NULL : RGXGpuDomainPower; + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = DEFAULT_CLOCK_RATE; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = 0; + + /* Setup RGX specific data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; /* Minimal configuration */ - psDevCfg->pfnClockFreqGet = NULL; - psDevCfg->hDevData = &gsRGXData; - psDevCfg->hSysData = &gsSysData; - psDevCfg->pfnSysDevFeatureDepInit = SysDevFeatureDepInit; - psDevCfg->bHasFBCDCVersion31 = IMG_FALSE; - - /* If driver mode is not overridden by the apphint, set it here */ - if (!psPVRSRVData->bForceApphintDriverMode) - { - psPVRSRVData->eDriverMode = GetDriverMode(psDev); - } + psDevConfig->pfnClockFreqGet = NULL; + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = psSysData; + psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit; + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; -#if defined(SUPPORT_GPUVIRT_VALIDATION) - psDevCfg->pfnSysDevVirtInit = SysInitValidation; + psDevConfig->pasPhysHeaps = psPhysHeapConfig; - CreateMPUWatchdogThread(); -#endif + return eError; +err_sys: + OSFreeMem(psDevConfig); return eError; } #if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) /* Obtain the IPA of the carveout range reserved for this VM */ -static IMG_UINT64 GetCarveoutBase(PVRSRV_DEVICE_CONFIG *psDevCfg) +static IMG_UINT64 GetFwCarveoutBase(PVRSRV_DEVICE_CONFIG *psDevConfig) { - struct platform_device *psDev = to_platform_device((struct device *)psDevCfg->pvOSDevice); + struct platform_device *psDev = to_platform_device((struct device *)psDevConfig->pvOSDevice); IMG_UINT64 ui64BaseAddress; if (of_property_read_u64(psDev->dev.of_node, "fw-carveout", &ui64BaseAddress)) @@ -381,27 +443,41 @@ static IMG_UINT64 GetCarveoutBase(PVRSRV_DEVICE_CONFIG *psDevCfg) return ui64BaseAddress; } + +#if defined(RGX_PREMAP_FW_HEAPS) +static IMG_UINT64 GetFwPageTableCarveoutBase(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + struct platform_device *psDev = to_platform_device((struct device *)psDevConfig->pvOSDevice); + IMG_UINT64 ui64BaseAddress; + + if (of_property_read_u64(psDev->dev.of_node, "fw-pt-carveout", &ui64BaseAddress)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Cannot retrieve firmware page table carveout address" + " from Device Tree. 
Using default Base Address: 0x%llX", + __func__, FW_PT_CARVEOUT_IPA_BASE)); + ui64BaseAddress = FW_PT_CARVEOUT_IPA_BASE; + } + + return ui64BaseAddress; +} +#endif #endif -static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevCfg) +static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevConfig) { IMG_CPU_PHYADDR sCpuBase; IMG_DEV_PHYADDR sDeviceBase; + PHYS_HEAP_CONFIG *psPhysHeapConfig = psDevConfig->pasPhysHeaps; PVRSRV_ERROR eError = PVRSRV_OK; + /* Virtualization driver */ /* Heap configuration for general use */ - gsPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].pszPDumpMemspaceName = "SYSMEM"; - gsPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].eType = PHYS_HEAP_TYPE_UMA; - gsPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].psMemFuncs = &gsPhysHeapFuncs; - gsPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].hPrivData = NULL; - gsPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; - - /* Heap configuration for memory shared with the firmware */ - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].pszPDumpMemspaceName = "SYSMEM_FW"; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].psMemFuncs = &gsPhysHeapFuncs; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].hPrivData = NULL; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].eType = PHYS_HEAP_TYPE_UMA; + psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; + psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM"; + psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].uConfig.sUMA.psMemFuncs = &gsPhysHeapFuncs; + psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].uConfig.sUMA.pszHeapName = "uma_gpu_local"; + psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].uConfig.sUMA.hPrivData = NULL; #if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) { @@ -424,16 +500,22 @@ static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevCfg) * - Host driver sets up the Firmware before Guests submits work */ - sCpuBase.uiAddr = GetCarveoutBase(psDevCfg); + sCpuBase.uiAddr = GetFwCarveoutBase(psDevConfig); SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase); - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_LMA; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sStartAddr = sCpuBase; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sCardBase = sDeviceBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_LMA; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.pszPDumpMemspaceName = "SYSMEM_FW"; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.psMemFuncs = &gsPhysHeapFuncs; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.pszHeapName = "lma_fw_shared"; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.sStartAddr = sCpuBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.sCardBase = sDeviceBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sLMA.hPrivData = NULL; } #else /* Dynamic Firmware heap allocation */ - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig)) { /* * Guest drivers must provide a physically contiguous memory @@ -448,7 +530,7 @@ static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevCfg) eError = (psDmaAlloc == NULL) ? 
PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_OK; if (eError == PVRSRV_OK) { - psDmaAlloc->pvOSDevice = psDevCfg->pvOSDevice; + psDmaAlloc->pvOSDevice = psDevConfig->pvOSDevice; psDmaAlloc->ui64Size = RGX_FIRMWARE_RAW_HEAP_SIZE; eError = SysDmaAllocMem(psDmaAlloc); @@ -461,10 +543,15 @@ static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevCfg) sCpuBase.uiAddr = psDmaAlloc->sBusAddr.uiAddr; SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase); - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_DMA; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].hPrivData = psDmaAlloc; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sStartAddr = sCpuBase; - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sCardBase = sDeviceBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_DMA; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.pszPDumpMemspaceName = "SYSMEM_FW"; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.psMemFuncs = &gsPhysHeapFuncs; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.pszHeapName = "dma_fw_shared"; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.sStartAddr = sCpuBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.sCardBase = sDeviceBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.hPrivData = psDmaAlloc; } else { @@ -485,45 +572,69 @@ static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevCfg) * the kernel on a page granularity and creates on-demand * mappings into the Firmware's address space. */ - gsPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_UMA; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_UMA; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sUMA.pszPDumpMemspaceName = "SYSMEM_FW"; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sUMA.psMemFuncs = &gsPhysHeapFuncs; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sUMA.pszHeapName = "uma_fw_shared"; + psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uConfig.sUMA.hPrivData = NULL; } -#endif +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) && defined(RGX_PREMAP_FW_HEAPS) + if (!PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig)) + { + sCpuBase.uiAddr = GetFwPageTableCarveoutBase(psDevConfig); + SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase); + + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].eType = PHYS_HEAP_TYPE_LMA; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP_PT; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.pszPDumpMemspaceName = "SYSMEM_FW_PT"; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.psMemFuncs = &gsPhysHeapFuncs; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.pszHeapName = "lma_fw_pt"; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.sStartAddr = sCpuBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.sCardBase = sDeviceBase; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.uiSize = RGX_FIRMWARE_MAX_PAGETABLE_SIZE; + psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uConfig.sLMA.hPrivData = NULL; + } +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ /* Device's physical heaps */ - psDevCfg->pasPhysHeaps = gsPhysHeapConfig; - psDevCfg->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; - psDevCfg->ui32PhysHeapCount = PHYS_HEAP_IDX_COUNT; + 
psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; + psDevConfig->ui32PhysHeapCount = PHYS_HEAP_IDX_COUNT; return eError; } -static void PhysHeapCfgDestroy(PVRSRV_DEVICE_CONFIG *psDevCfg) +static void PhysHeapCfgDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) { #if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) - if (PVRSRV_VZ_MODE_IS(GUEST)) + if (PVRSRV_VZ_MODE_IS(GUEST, DEVCFG, psDevConfig)) { - DMA_ALLOC *psDmaAlloc = psDevCfg->pasPhysHeaps[PHYS_HEAP_IDX_FIRMWARE].hPrivData; + DMA_ALLOC *psDmaAlloc = psDevConfig->pasPhysHeaps[PHYS_HEAP_IDX_FIRMWARE].uConfig.sDMA.hPrivData; SysDmaDeregisterForIoRemapping(psDmaAlloc); SysDmaFreeMem(psDmaAlloc); OSFreeMem(psDmaAlloc); } #endif - -#if defined(SUPPORT_GPUVIRT_VALIDATION) - DestroyMPUWatchdogThread(); -#endif } PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) { + PVRSRV_DEVICE_CONFIG *psDevConfig; PVRSRV_ERROR eError; - eError = DeviceConfigCreate(pvOSDevice, &gsDevCfg); + eError = DeviceConfigCreate(pvOSDevice, &psDevConfig); if (eError == PVRSRV_OK) { - eError = PhysHeapCfgCreate(&gsDevCfg); - *ppsDevConfig = &gsDevCfg; + eError = PhysHeapCfgCreate(psDevConfig); + if (eError != PVRSRV_OK) + { + OSFreeMem(psDevConfig); + } + + *ppsDevConfig = psDevConfig; } return eError; @@ -532,7 +643,8 @@ PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) { PhysHeapCfgDestroy(psDevConfig); - psDevConfig->pvOSDevice = NULL; + + OSFreeMem(psDevConfig); } PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, diff --git a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.h b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.h index ebed76f5af8b..0670c7dde015 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysconfig.h @@ -54,6 +54,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* fixed IPA Base of the memory carveout reserved for the GPU Firmware Heaps */ #define FW_CARVEOUT_IPA_BASE IMG_UINT64_C(0x7E000000) +/* fixed IPA Base of the memory carveout reserved for the Firmware's Page Tables */ +#define FW_PT_CARVEOUT_IPA_BASE IMG_UINT64_C(0x8F000000) + /* mock SoC registers */ #define SOC_REGBANK_BASE IMG_UINT64_C(0xF0000000) #define SOC_REGBANK_SIZE IMG_UINT32_C(0x10000) @@ -63,70 +66,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define POW_DOMAIN_GPU IMG_UINT32_C(0x1) -#define MPU_EVENT_STATUS_REG IMG_UINT32_C(0xB000) -#define MPU_EVENT_OSID_REG IMG_UINT32_C(0xB008) -#define MPU_EVENT_ADDRESS_REG IMG_UINT32_C(0xB010) -#define MPU_EVENT_DIRECTION_REG IMG_UINT32_C(0xB018) -#define MPU_EVENT_CLEAR_REG IMG_UINT32_C(0xB020) - -#define MPU_GPU_BUS_REQUESTER IMG_UINT32_C(1) -#define MPU_WRITE_ACCESS IMG_UINT32_C(1) - -#define MPU_PROTECTED_RANGE0_START_REG IMG_UINT32_C(0xC000) -#define MPU_PROTECTED_RANGE1_START_REG IMG_UINT32_C(0xC008) -#define MPU_PROTECTED_RANGE2_START_REG IMG_UINT32_C(0xC010) -#define MPU_PROTECTED_RANGE3_START_REG IMG_UINT32_C(0xC018) -#define MPU_PROTECTED_RANGE4_START_REG IMG_UINT32_C(0xC020) -#define MPU_PROTECTED_RANGE5_START_REG IMG_UINT32_C(0xC028) -#define MPU_PROTECTED_RANGE6_START_REG IMG_UINT32_C(0xC030) -#define MPU_PROTECTED_RANGE7_START_REG IMG_UINT32_C(0xC038) -#define MPU_PROTECTED_RANGE8_START_REG IMG_UINT32_C(0xC040) -#define MPU_PROTECTED_RANGE9_START_REG IMG_UINT32_C(0xC048) -#define MPU_PROTECTED_RANGE10_START_REG IMG_UINT32_C(0xC050) -#define MPU_PROTECTED_RANGE11_START_REG IMG_UINT32_C(0xC058) -#define MPU_PROTECTED_RANGE12_START_REG IMG_UINT32_C(0xC060) -#define MPU_PROTECTED_RANGE13_START_REG IMG_UINT32_C(0xC068) -#define MPU_PROTECTED_RANGE14_START_REG IMG_UINT32_C(0xC070) -#define MPU_PROTECTED_RANGE15_START_REG IMG_UINT32_C(0xC078) - -#define MPU_PROTECTED_RANGE0_END_REG IMG_UINT32_C(0xC100) -#define MPU_PROTECTED_RANGE1_END_REG IMG_UINT32_C(0xC108) -#define MPU_PROTECTED_RANGE2_END_REG IMG_UINT32_C(0xC110) -#define MPU_PROTECTED_RANGE3_END_REG IMG_UINT32_C(0xC118) -#define MPU_PROTECTED_RANGE4_END_REG IMG_UINT32_C(0xC120) -#define MPU_PROTECTED_RANGE5_END_REG IMG_UINT32_C(0xC128) -#define MPU_PROTECTED_RANGE6_END_REG IMG_UINT32_C(0xC130) -#define MPU_PROTECTED_RANGE7_END_REG IMG_UINT32_C(0xC138) -#define MPU_PROTECTED_RANGE8_END_REG IMG_UINT32_C(0xC140) -#define MPU_PROTECTED_RANGE9_END_REG IMG_UINT32_C(0xC148) -#define MPU_PROTECTED_RANGE10_END_REG IMG_UINT32_C(0xC150) -#define MPU_PROTECTED_RANGE11_END_REG IMG_UINT32_C(0xC158) -#define MPU_PROTECTED_RANGE12_END_REG IMG_UINT32_C(0xC160) -#define MPU_PROTECTED_RANGE13_END_REG IMG_UINT32_C(0xC168) -#define MPU_PROTECTED_RANGE14_END_REG IMG_UINT32_C(0xC170) -#define MPU_PROTECTED_RANGE15_END_REG IMG_UINT32_C(0xC178) - - -#define MPU_PROTECTED_RANGE0_OSID_REG IMG_UINT32_C(0xC200) -#define MPU_PROTECTED_RANGE1_OSID_REG IMG_UINT32_C(0xC208) -#define MPU_PROTECTED_RANGE2_OSID_REG IMG_UINT32_C(0xC210) -#define MPU_PROTECTED_RANGE3_OSID_REG IMG_UINT32_C(0xC218) -#define MPU_PROTECTED_RANGE4_OSID_REG IMG_UINT32_C(0xC220) -#define MPU_PROTECTED_RANGE5_OSID_REG IMG_UINT32_C(0xC228) -#define MPU_PROTECTED_RANGE6_OSID_REG IMG_UINT32_C(0xC230) -#define MPU_PROTECTED_RANGE7_OSID_REG IMG_UINT32_C(0xC238) -#define MPU_PROTECTED_RANGE8_OSID_REG IMG_UINT32_C(0xC240) -#define MPU_PROTECTED_RANGE9_OSID_REG IMG_UINT32_C(0xC248) -#define MPU_PROTECTED_RANGE10_OSID_REG IMG_UINT32_C(0xC250) -#define MPU_PROTECTED_RANGE11_OSID_REG IMG_UINT32_C(0xC258) -#define MPU_PROTECTED_RANGE12_OSID_REG IMG_UINT32_C(0xC260) -#define MPU_PROTECTED_RANGE13_OSID_REG IMG_UINT32_C(0xC268) -#define MPU_PROTECTED_RANGE14_OSID_REG IMG_UINT32_C(0xC270) -#define MPU_PROTECTED_RANGE15_OSID_REG IMG_UINT32_C(0xC278) - -#define MPU_PROTECTION_ENABLE_REG IMG_UINT32_C(0xC300) - - /***************************************************************************** * system specific data structures *****************************************************************************/ diff 
--git a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysinfo.h b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysinfo.h index 0c1357017824..0ab175e0692e 100644 --- a/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysinfo.h +++ b/drivers/gpu/drm/img/img-volcanic/services/system/vz_example/sysinfo.h @@ -49,6 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000) #define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) #define WAIT_TRY_COUNT (10000) +#define EVENT_OBJECT_TIMEOUT_US (100000) #if defined(__linux__) #define SYS_RGX_DEV_NAME "powervr-vz-example" diff --git a/drivers/gpu/drm/img/kernel_compatibility.h b/drivers/gpu/drm/img/kernel_compatibility.h index e254b6c1cd78..048f4449ae68 100644 --- a/drivers/gpu/drm/img/kernel_compatibility.h +++ b/drivers/gpu/drm/img/kernel_compatibility.h @@ -46,6 +46,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include #include +/* Explicitly error out if DDK is built against out-of-support Linux kernel */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) +#error Linux kernels older than 4.9.0 are not supported +#endif + /* * Stop supporting an old kernel? Remove the top block. * New incompatible kernel? Append a new block at the bottom. @@ -58,239 +63,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * included after it. */ -/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this - * so we work around the limitation by vsnprintf() + seq_puts(). - */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) -#define seq_vprintf(seq_file, fmt, args) \ -do { \ - char aszBuffer[512]; /* maximum message buffer size */ \ - vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \ - seq_puts(seq_file, aszBuffer); \ -} while (0) -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) - -/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */ -#define VM_DONTDUMP VM_RESERVED - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */ - -/* - * Note: this fix had to be written backwards because get_unused_fd_flags - * was already defined but not exported on kernels < 3.7 - * - * When removing support for kernels < 3.7, this block should be removed - * and all `get_unused_fd()` should be manually replaced with - * `get_unused_fd_flags(0)` - */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) - -/* Linux 3.19 removed get_unused_fd() */ -/* get_unused_fd_flags was introduced in 3.7 */ -#define get_unused_fd() get_unused_fd_flags(0) - -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) - -/* - * Headers shouldn't normally be included by this file but this is a special - * case as it's not obvious from the name that devfreq_add_device needs this - * include. 
- */ -#include - -#define devfreq_add_device(dev, profile, name, data) \ - ({ \ - struct devfreq *__devfreq; \ - if (name && !strcmp(name, "simple_ondemand")) \ - __devfreq = devfreq_add_device(dev, profile, \ - &devfreq_simple_ondemand, data); \ - else \ - __devfreq = ERR_PTR(-EINVAL); \ - __devfreq; \ - }) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) - -#define DRIVER_RENDER 0 -#define DRM_RENDER_ALLOW 0 - -/* Linux 3.12 introduced a new shrinker API */ -#define SHRINK_STOP (~0UL) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - -#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev) -#define dev_pm_opp_get_freq(opp) opp_get_freq(opp) -#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp) -#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt) -#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq) - -#if defined(CONFIG_ARM) -/* Linux 3.13 renamed ioremap_cached to ioremap_cache */ -#define ioremap_cache(cookie, size) ioremap_cached(cookie, size) -#endif /* defined(CONFIG_ARM) */ - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) - -/* Linux 3.14 introduced a new set of sized min and max defines */ -#ifndef U32_MAX -#define U32_MAX ((u32)UINT_MAX) -#endif - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) - -/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to - * `struct page **pages` */ -#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) - -/* - * Linux 4.7 removed this function but its replacement was available since 3.19. 
- */ -#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e) - -/* seq_has_overflowed() was introduced in 3.19 but the structure elements - * have been available since 2.x - */ -#include -static inline bool seq_has_overflowed(struct seq_file *m) -{ - return m->count == m->size; -} - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) - -#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \ - ({ \ - struct dentry *de; \ - de = debugfs_create_file(name, mode, parent, data, fops); \ - if (de) \ - de->d_inode->i_size = file_size; \ - de; \ - }) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) -#define drm_fb_helper_unregister_fbi(fb_helper) \ - ({ \ - if ((fb_helper) && (fb_helper)->fbdev) \ - unregister_framebuffer((fb_helper)->fbdev); \ - }) -#endif - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) - -/* Linux 4.4 renamed GFP_WAIT to GFP_RECLAIM */ -#define __GFP_RECLAIM __GFP_WAIT - -#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev) -#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev) -#else -#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base) -#endif - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ - (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) - -/* Linux 4.5 added a new printf-style parameter for debug messages */ - -#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \ - drm_encoder_init(dev, encoder, funcs, encoder_type) - -#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ - ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); }) - -#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \ - drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs) - -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) - -#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ - ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); }) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) - -/* - * Linux 4.6 removed the first two parameters, the "struct task_struct" type - * pointer "current" is defined in asm/current.h, which makes it pointless - * to pass it on every function call. 
-*/ -#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ - get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) - -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) - -/* Linux 4.9 replaced the write/force parameters with "gup_flags" */ -#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ - get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */ - - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ - (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) - -/* - * Linux 4.6 removed the start and end arguments as it now always maps - * the entire DMA-BUF. - * Additionally, dma_buf_end_cpu_access() now returns an int error. - */ -#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION) -#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; }) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ - (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) - -/* Linux 4.7 removed the first arguments as it's never been used */ -#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle) - -/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */ -#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) - -/* Linux 4.9 changed the second argument to a drm_file pointer */ -#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp) -#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp) -#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp) - -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */ - #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) #define refcount_read(r) atomic_read(r) #define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT) @@ -327,6 +99,15 @@ static inline bool seq_has_overflowed(struct seq_file *m) #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + +#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ + ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ + + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) #define drm_dev_put(dev) drm_dev_unref(dev) @@ -451,6 +232,10 @@ __pvr_access_ok_compat(int type, const void __user * addr, unsigned long size) #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) +#define MODULE_IMPORT_NS(ns) +#endif + /* * Before v5.8, the "struct mm" has a semaphore named "mmap_sem" which is * renamed to "mmap_lock" in v5.8. 
Moreover, new APIs are provided to @@ -523,6 +308,76 @@ struct dma_buf_map { #define uaccess_disable_privileged() uaccess_disable() #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) +#define pde_data PDE_DATA +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) +#define kthread_complete_and_exit(comp, ret) complete_and_exit(comp, ret); +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)) +#define iosys_map dma_buf_map +#define iosys_map_set_vaddr dma_buf_map_set_vaddr +#define iosys_map_set_vaddr_iomem dma_buf_map_set_vaddr_iomem +#define iosys_map_clear dma_buf_map_clear +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + +#define register_shrinker(shrinker, name) \ + register_shrinker(shrinker) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) +#define DRM_PLANE_NO_SCALING DRM_PLANE_HELPER_NO_SCALING +#define drm_plane_helper_destroy drm_primary_helper_destroy +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) +#define genl_split_ops genl_ops +#define COMPAT_FB_INFO fbdev +#define drm_fb_helper_alloc_info drm_fb_helper_alloc_fbi +#define drm_fb_helper_unregister_info drm_fb_helper_unregister_fbi +#else +#define COMPAT_FB_INFO info +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) || \ + ((LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)) && !defined(ANDROID)) +static inline void pvr_vm_flags_set(struct vm_area_struct *vma, + vm_flags_t flags) +{ + vma->vm_flags |= flags; +} +static inline void pvr_vm_flags_init(struct vm_area_struct *vma, + vm_flags_t flags) +{ + vma->vm_flags = flags; +} +static inline void pvr_vm_flags_clear(struct vm_area_struct *vma, + vm_flags_t flags) +{ + vma->vm_flags &= ~flags; +} +#else +#define pvr_vm_flags_set vm_flags_set +#define pvr_vm_flags_init vm_flags_init +#define pvr_vm_flags_clear vm_flags_clear +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)) +#define pvr_class_create(name) class_create(THIS_MODULE, name) +#else +#define pvr_class_create(name) class_create(name) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) +#define thermal_tripless_zone_device_register(type, devdata, ops, tzp) \ + thermal_zone_device_register((type), 0, 0, (devdata), (ops), (tzp), 0, 0) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) */ + #if defined(__GNUC__) #define GCC_VERSION_AT_LEAST(major, minor) \ (__GNUC__ > (major) || \ @@ -531,8 +386,15 @@ struct dma_buf_map { #define GCC_VERSION_AT_LEAST(major, minor) 0 #endif +#if defined(__clang__) +#define CLANG_VERSION_AT_LEAST(major) \ + (__clang_major__ >= (major)) +#else +#define CLANG_VERSION_AT_LEAST(major) 0 +#endif + #if !defined(__fallthrough) - #if GCC_VERSION_AT_LEAST(7, 0) + #if GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10) #define __fallthrough __attribute__((__fallthrough__)) #else #define __fallthrough diff --git a/drivers/gpu/drm/img/kernel_nospec.h b/drivers/gpu/drm/img/kernel_nospec.h index e27a3ebc2ac6..ef6fee368327 100644 --- a/drivers/gpu/drm/img/kernel_nospec.h +++ b/drivers/gpu/drm/img/kernel_nospec.h @@ -49,9 +49,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
DEALINGS IN THE SOFTWARE. (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) || \ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && \ - LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) || \ - (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \ - LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118))) + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81))) #include #include #include diff --git a/drivers/gpu/drm/img/pvr_dma_resv.h b/drivers/gpu/drm/img/pvr_dma_resv.h index d71b2676213b..a51c9de84ada 100644 --- a/drivers/gpu/drm/img/pvr_dma_resv.h +++ b/drivers/gpu/drm/img/pvr_dma_resv.h @@ -47,12 +47,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) #include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) -#define dma_resv_get_excl dma_resv_excl_fence -#define dma_resv_get_list dma_resv_shared_list -#define dma_resv_test_signaled_rcu dma_resv_test_signaled -#define dma_resv_wait_timeout_rcu dma_resv_wait_timeout -#endif #else #include diff --git a/drivers/gpu/drm/img/pvr_vmap.h b/drivers/gpu/drm/img/pvr_vmap.h index 19fe8b8f190b..6424b20e764c 100644 --- a/drivers/gpu/drm/img/pvr_vmap.h +++ b/drivers/gpu/drm/img/pvr_vmap.h @@ -64,19 +64,19 @@ static inline void *pvr_vmap(struct page **pages, #endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */ } -static inline void pvr_vunmap(void *pages, +static inline void pvr_vunmap(const void *vaddr, __maybe_unused unsigned int count, __maybe_unused pgprot_t prot) { #if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) - vunmap(pages); + vunmap(vaddr); #elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) - vm_unmap_ram(pages, count); + vm_unmap_ram(vaddr, count); #else if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) - vm_unmap_ram(pages, count); + vm_unmap_ram(vaddr, count); else - vunmap(pages); + vunmap(vaddr); #endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */ } diff --git a/drivers/gpu/drm/img/pvrversion.h b/drivers/gpu/drm/img/pvrversion.h index adbef2a038b4..4d17f6e55594 100644 --- a/drivers/gpu/drm/img/pvrversion.h +++ b/drivers/gpu/drm/img/pvrversion.h @@ -44,22 +44,22 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef PVRVERSION_H #define PVRVERSION_H -#define PVRVERSION_MAJ 1U -#define PVRVERSION_MIN 18U +#define PVRVERSION_MAJ 24U +#define PVRVERSION_MIN 2U #define PVRVERSION_FAMILY "rogueddk" -#define PVRVERSION_BRANCHNAME "1.18" -#define PVRVERSION_BUILD 6307965 +#define PVRVERSION_BRANCHNAME "24.2" +#define PVRVERSION_BUILD 6643903 #define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" -#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.18@6307965" -#define PVRVERSION_STRING_SHORT "1.18@6307965" +#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 24.2@6643903" +#define PVRVERSION_STRING_SHORT "24.2@6643903" #define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved." 
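The kernel_compatibility.h hunks earlier in this patch all follow the same pattern: gate on LINUX_VERSION_CODE and, on older kernels, provide the newer API name (pde_data, iosys_map, pvr_class_create, the pvr_vm_flags_* helpers, and so on) so that call sites stay version-agnostic. A minimal sketch of how an mmap handler could consume such a wrapper, assuming a hypothetical demo_vm_flags_set() defined the same way as the pvr_vm_flags_set() compat helper:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/version.h>

/* Kernels before v6.3 let drivers write vma->vm_flags directly; newer
 * kernels make the field effectively read-only and require vm_flags_set(). */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0))
static inline void demo_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
        vma->vm_flags |= flags;
}
#else
#define demo_vm_flags_set vm_flags_set
#endif

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* The same call site builds against both old and new kernels. */
        demo_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
}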
-#define PVRVERSION_BUILD_HI 630 -#define PVRVERSION_BUILD_LO 7965 -#define PVRVERSION_STRING_NUMERIC "1.18.630.7965" +#define PVRVERSION_BUILD_HI 664 +#define PVRVERSION_BUILD_LO 3903 +#define PVRVERSION_STRING_NUMERIC "24.2.664.3903" #define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) #define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) diff --git a/drivers/hwmon/eswin-fan-control.c b/drivers/hwmon/eswin-fan-control.c index aff50deb2740..cc931a861159 100644 --- a/drivers/hwmon/eswin-fan-control.c +++ b/drivers/hwmon/eswin-fan-control.c @@ -3,7 +3,6 @@ * ESWIN Fan Control CORE driver * * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. - * SPDX-License-Identifier: GPL-2.0 * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,9 +16,10 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . * - * Author: Han Min + * Authors: Han Min */ + #include #include #include @@ -509,9 +509,21 @@ static int eswin_fan_control_probe(struct platform_device *pdev) /* Then fill it with the reference config */ pwm_get_args(ctl->pwm, &pwm_args); - state.period = pwm_args.period; - state.duty_cycle = state.period/2; - dev_err(&pdev->dev, "state.period: %d state.duty_cycle: %d\n", + if (0 == ctl->pwm_inverted) + { + state.period = pwm_args.period; + state.duty_cycle = state.period * 99 / 100; /* default set max speed */ + } + else + { + state.period = pwm_args.period; + state.duty_cycle = state.period / 100; /* default set max speed */ + if(0 == state.duty_cycle) + { + state.duty_cycle = 1; + } + } + dev_err(&pdev->dev, "state.period: %lld state.duty_cycle: %lld\n", state.period,state.duty_cycle); ret = pwm_apply_state(ctl->pwm, &state); if (ret) { diff --git a/drivers/hwmon/pac193x.c b/drivers/hwmon/pac193x.c index ae2735139729..c0d165a206c3 100644 --- a/drivers/hwmon/pac193x.c +++ b/drivers/hwmon/pac193x.c @@ -73,7 +73,7 @@ #define PAC193X_CMD_NEG_PWR 0x1D #define PAC193X_CMD_REFRESH_G 0x1E #define PAC193X_CMD_REFRESH_V 0x1F -#define PAC193X_CMD_SLOW 0x1F +#define PAC193X_CMD_SLOW 0x20 #define PAC193X_CMD_CTRL_ACT 0x21 #define PAC193X_CMD_DIS_ACT 0x22 #define PAC193X_CMD_NEG_PWR_ACT 0x23 diff --git a/drivers/iommu/eswin/eswin-win2030-sid.c b/drivers/iommu/eswin/eswin-win2030-sid.c index 8e89b75e71ea..27ce32947e69 100644 --- a/drivers/iommu/eswin/eswin-win2030-sid.c +++ b/drivers/iommu/eswin/eswin-win2030-sid.c @@ -923,11 +923,11 @@ static int __init tcu_proc_init(void) { char proc_name[64]; - sprintf(proc_name, "%s_info", "tcu"); - pr_debug("%s, proc_name:%s\n", __func__, proc_name); - if (NULL == proc_create_single_data(proc_name, 0, NULL, tcu_proc_show, NULL)) { - return -1; - } + // sprintf(proc_name, "%s_info", "tcu"); + // pr_debug("%s, proc_name:%s\n", __func__, proc_name); + // if (NULL == proc_create_single_data(proc_name, 0, NULL, tcu_proc_show, NULL)) { + // return -1; + // } return 0; } diff --git a/drivers/memory/eswin/es_iommu_rsv/Makefile b/drivers/memory/eswin/es_iommu_rsv/Makefile index a4fa098eb518..361a05e9f900 100644 --- a/drivers/memory/eswin/es_iommu_rsv/Makefile +++ b/drivers/memory/eswin/es_iommu_rsv/Makefile @@ -1,5 +1 @@ obj-$(CONFIG_ESWIN_IOMMU_RSV) += iommu_rsv.o - -ES_IOMMU_RSV_HEADER := drivers/memory/eswin/es_iommu_rsv/include/linux - -COPY_HEADERS := $(shell cp $(ES_IOMMU_RSV_HEADER)/*h include/linux) diff 
--git a/drivers/memory/eswin/es_iommu_rsv/iommu_rsv.c b/drivers/memory/eswin/es_iommu_rsv/iommu_rsv.c index 397601410e6e..9bcbec3fa4e7 100644 --- a/drivers/memory/eswin/es_iommu_rsv/iommu_rsv.c +++ b/drivers/memory/eswin/es_iommu_rsv/iommu_rsv.c @@ -370,8 +370,8 @@ int iommu_unmap_rsv_iova(struct device *dev, void *cpu_addr, dma_addr_t iova, un phys = iommu_iova_to_phys(domain, iova); if (phys == 0) { - dev_err(dev, "%s, iova=0x%llx, have not maps\n", __func__, iova); - return -EINVAL; + dev_dbg(dev, "%s, iova=0x%llx, have not maps\n", __func__, iova); + return -EFAULT; } if (!size) @@ -389,6 +389,61 @@ int iommu_unmap_rsv_iova(struct device *dev, void *cpu_addr, dma_addr_t iova, un } EXPORT_SYMBOL(iommu_unmap_rsv_iova); +ssize_t iommu_rsv_iova_map_sgt(struct device *dev, dma_addr_t iova, struct sg_table *sgt, unsigned long attrs, size_t buf_size) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + bool coherent = dev_is_dma_coherent(dev); + int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); + struct scatterlist *sg; + phys_addr_t phys; + int i; + size_t size = 0; + ssize_t ret = 0; + + phys = iommu_iova_to_phys(domain, iova); + if (phys != 0) { + if (phys != sg_phys(sgt->sgl)) { + dev_err(dev, "%s, iova=0x%llx, have exits phys=0x%llx\n", __func__, iova, phys); + return -EINVAL; + } + else { + dev_dbg(dev, "%s, iova=0x%llx mapped!\n", __func__, iova); + return ret; + } + } +#if 0 + if (!(ioprot & IOMMU_CACHE)) + { + int i; + + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) + dma_prep_coherent(sg_page(sg), sg->length); + } +#endif + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) + size += sg->length; + + if (size != buf_size) { + dev_err(dev, "%s %d fail, sg size 0x%lx != buf_size 0x%lx\n", __func__, __LINE__, size, buf_size); + return -EINVAL; + } + + ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot, GFP_KERNEL); + if (ret < 0 || ret < size) + { + dev_err(dev, "%s %d fail, ret %ld, size %ld\n", __func__, __LINE__, ret, size); + goto out; + } + + return ret; +out: + if (ret != -ENOMEM && ret != -EREMOTEIO) + return -EINVAL; + + return ret; +} +EXPORT_SYMBOL(iommu_rsv_iova_map_sgt); + static void eswin_iommu_put_func(void) { if (__iommu_dma_free) diff --git a/drivers/memory/eswin/es_mmz_vb/mmz_vb.c b/drivers/memory/eswin/es_mmz_vb/mmz_vb.c index 8fad791695d7..9876cc2fe319 100644 --- a/drivers/memory/eswin/es_mmz_vb/mmz_vb.c +++ b/drivers/memory/eswin/es_mmz_vb/mmz_vb.c @@ -1,11 +1,22 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0 /* * ESWIN MMZ VB driver. MMZ VB stands for Media Memory Zone Video Buffer. * - * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd. - * Authors: - * LinMin + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * Authors: Min Lin */ #include @@ -2076,7 +2087,7 @@ static int vb_blk_to_pool(struct esVB_BLOCK_TO_POOL_CMD_S *blkToPoolCmd) int ret = 0; struct dma_buf *dmabuf; struct mmz_vb_buffer *buffer; - struct esw_export_buffer_info *splittedBuffer; + struct eswin_split_buffer *splittedBuffer; struct dma_buf *blkDmabuf; bool isSplittedBlk; @@ -2119,7 +2130,7 @@ static int vb_get_blk_offset(struct esVB_GET_BLOCKOFFSET_CMD_S *getBlkOffsetCmd) int ret = 0; struct dma_buf *dmabuf; struct mmz_vb_buffer *buffer; - struct esw_export_buffer_info *splittedBuffer; + struct eswin_split_buffer *splittedBuffer; struct dma_buf *blkDmabuf; __u64 blkSize, offsetInPool; diff --git a/drivers/memory/eswin/es_rsvmem_heap/dmabuf-heap-import-helper.c b/drivers/memory/eswin/es_rsvmem_heap/dmabuf-heap-import-helper.c index fdbcfe7e6c70..d9ba68d918ef 100644 --- a/drivers/memory/eswin/es_rsvmem_heap/dmabuf-heap-import-helper.c +++ b/drivers/memory/eswin/es_rsvmem_heap/dmabuf-heap-import-helper.c @@ -1,21 +1,37 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0 /* * ESWIN DMABUF heap helper APIs * - * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd. - * Authors: - * LinMin + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * Authors: Min Lin */ #include #include #include +#include #include #include +#include +#include #include #include +static struct device *split_dmabuf_dev; + struct drm_prime_member { struct dma_buf *dma_buf; uint64_t handle; @@ -24,6 +40,20 @@ struct drm_prime_member { struct rb_node handle_rb; }; +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; +}; + +struct dma_heap_attachment_cma { + struct device *dev; + struct sg_table table; + struct list_head list; + bool mapped; +}; + static int dmabuf_heap_add_buf_handle(struct dmaheap_file_private *prime_fpriv, struct dma_buf *dma_buf, uint64_t handle) { @@ -433,220 +463,709 @@ common_dmabuf_heap_import_from_kernel(struct heap_root *root, char *name, size_t } EXPORT_SYMBOL(common_dmabuf_heap_import_from_kernel); -struct esw_exp_attachment { - struct heap_mem *hmem; - struct sg_table table; +struct heap_mem *common_dmabuf_heap_rsv_iova_map(struct heap_root *root, int fd, dma_addr_t iova, size_t size) +{ + struct dma_buf *dma_buf; + struct dma_buf_attachment *attach; + struct dma_heap_attachment *a; + + uint64_t handle; + struct heap_mem *heap_obj; + ssize_t ret = 0; + + /* get dmabuf handle */ + dma_buf = dma_buf_get(fd); + if (IS_ERR(dma_buf)) { + return ERR_CAST(dma_buf); + } + + mutex_lock(&root->lock); + dev_dbg(root->dev, "%s, fd=%d, iova=0x%llx, size=0x%lx\n", __func__, fd, iova, size); + + ret = dmabuf_heap_lookup_buf_handle(&root->fp, dma_buf, &handle); + if (ret == 0) { + heap_obj = (struct heap_mem *)handle; + dma_buf_put(dma_buf); + kref_get(&heap_obj->refcount); + mutex_unlock(&root->lock); + return heap_obj; + } + + heap_obj = kzalloc(sizeof(*heap_obj), GFP_KERNEL); + if (!heap_obj) { + mutex_unlock(&root->lock); + dma_buf_put(dma_buf); + return ERR_PTR(-ENOMEM); + } + + attach = dma_buf_attach(dma_buf, root->dev); + if (IS_ERR(attach)) { + ret = PTR_ERR(attach); + goto clean_up; + } + + a = (struct dma_heap_attachment *)attach->priv; + ret = iommu_rsv_iova_map_sgt(root->dev, iova, a->table, 0, size); + dma_buf_detach(dma_buf, attach); + if (ret < 0) + goto clean_up; + + heap_obj->dbuf_fd = fd; + heap_obj->dbuf = dma_buf; + heap_obj->root = root; + heap_obj->vaddr = NULL; + heap_obj->dir = DMA_BIDIRECTIONAL; + heap_obj->iova = iova; + heap_obj->size = size; + + /* get_dma_buf was called in dmabuf_heap_add_buf_handle()*/ + ret = dmabuf_heap_add_buf_handle(&root->fp, dma_buf, (uint64_t)heap_obj); + if (ret) { + goto fail_add_handle; + } + /* get_dma_buf was called in dmabuf_heap_add_buf_handle(), need to put back since + * we don't want to hold the dma_buf in this API + */ + dma_buf_put(dma_buf); + + kref_init(&heap_obj->refcount); + + list_add(&heap_obj->list, &root->header); + + mutex_unlock(&root->lock); + + dma_buf_put(dma_buf); + + return heap_obj; + +fail_add_handle: + iommu_unmap_rsv_iova(root->dev, 0, iova, size); +clean_up: + kfree(heap_obj); + mutex_unlock(&root->lock); + dma_buf_put(dma_buf); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(common_dmabuf_heap_rsv_iova_map); + +static void __common_dmabuf_heap_rsv_iova_unmap(struct kref *kref) +{ + struct heap_root *root; + struct heap_mem *heap_obj = container_of(kref, struct heap_mem, refcount); + + WARN_ON(!heap_obj); + if (!heap_obj) + return; + + root = heap_obj->root; + WARN_ON(!mutex_is_locked(&root->lock)); + list_del(&heap_obj->list); + + + iommu_unmap_rsv_iova(root->dev, 0, heap_obj->iova, heap_obj->size); + /* dma_buf_put will be called in _dmabuf_heap_remove_buf_handle(), + * so, call get_dma_buf 
first + */ + get_dma_buf(heap_obj->dbuf); + _dmabuf_heap_remove_buf_handle(&root->fp, heap_obj->dbuf); + kfree(heap_obj); +} + +void common_dmabuf_heap_rsv_iova_unmap(struct heap_mem *heap_obj) +{ + struct heap_root *root = heap_obj->root; + + mutex_lock(&root->lock); + kref_put(&heap_obj->refcount, __common_dmabuf_heap_rsv_iova_unmap); + mutex_unlock(&root->lock); +} +EXPORT_SYMBOL(common_dmabuf_heap_rsv_iova_unmap); + +void common_dmabuf_heap_rsv_iova_uninit(struct heap_root *root) +{ + struct heap_mem *h, *tmp; + + list_for_each_entry_safe(h, tmp, &root->header, list) { + common_dmabuf_heap_rsv_iova_unmap(h); + } +} +EXPORT_SYMBOL(common_dmabuf_heap_rsv_iova_uninit); + + +struct eswin_split_attachment { struct device *dev; - struct heap_root root; + struct sg_table *table; + struct list_head list; + bool mapped; }; +static struct sg_table *dup_sg_table(struct sg_table *table) +{ + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; + + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; +} + // #define PRINT_ORIGINAL_SPLITTERS 1 -static int esw_common_heap_attach(struct dma_buf *dmabuf, +static int eswin_split_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { - struct esw_export_buffer_info *buffer = dmabuf->priv; - struct esw_exp_attachment *a; - int out_mapped_nents[1]; - int ret = 0; - struct sg_table *sgt = NULL; - struct scatterlist *sg; - int i; - size_t size, len; + struct eswin_split_buffer *buffer = dmabuf->priv; + struct eswin_split_attachment *a; + struct sg_table *table; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; - a->dev = attachment->dev; - - common_dmabuf_heap_import_init(&a->root, a->dev); - a->hmem = common_dmabuf_heap_import_from_user(&a->root, buffer->dbuf_fd); - if (IS_ERR(a->hmem)) - return PTR_ERR(a->hmem); - - ret = sg_split(a->hmem->sgt->sgl, a->hmem->sgt->nents, buffer->slice.offset, 1, &buffer->slice.len, - &a->table.sgl, &out_mapped_nents[0], GFP_KERNEL); - if (ret) { - common_dmabuf_heap_release(a->hmem); + table = dup_sg_table(&buffer->sg_table); + if (IS_ERR(table)) { kfree(a); - return ret; - } - a->table.nents = out_mapped_nents[0]; - a->table.orig_nents = out_mapped_nents[0]; - sgt = &a->table; - #ifdef PRINT_ORIGINAL_SPLITTERS - { - pr_info("%s:orig:sgt->orig_nents=%d, out_mapped_nents[0]=%d\n", - __func__, sgt->orig_nents, out_mapped_nents[0]); - for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { - pr_info("orig[%d]:sg->length=0x%x, sg_dma_len=0x%x, sg_phys=0x%lx\n", - i, sg->length, sg_dma_len(sg), (unsigned long)sg_phys(sg)); - } - } - #endif - /* Re-format the splitted sg list in the actual slice len */ - { - size = buffer->slice.len; - for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { - if (sg->length >= size) { - sg->length = size; - sg_dma_len(sg) = size; - sg_mark_end(sg); - pr_debug("refmt[%d]:sg->length=0x%x, sg_dma_len=0x%x, sg_phys=0x%lx\n", - i, sg->length, sg_dma_len(sg), (unsigned long)sg_phys(sg)); - break; - } - len = min_t(size_t, size, sg->length); - size -= len; - pr_debug("refmt[%d]:sg->length=0x%x, sg_dma_len=0x%x, sg_phys=0x%lx\n", - i, sg->length, sg_dma_len(sg), (unsigned long)sg_phys(sg)); - } - sgt->orig_nents 
= sgt->nents = i + 1; + return -ENOMEM; } + a->table = table; + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + attachment->priv = a; - return ret; + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; } -static void esw_common_heap_detach(struct dma_buf *dmabuf, +static void eswin_split_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { - struct esw_exp_attachment *a = attachment->priv; + struct eswin_split_buffer *buffer = dmabuf->priv; + struct eswin_split_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); - kfree(a->table.sgl); - common_dmabuf_heap_release(a->hmem); - common_dmabuf_heap_import_uninit(&a->root); + sg_free_table(a->table); + kfree(a->table); kfree(a); } -static struct sg_table *esw_common_map_dma_buf(struct dma_buf_attachment *attachment, +static struct sg_table *eswin_split_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction) { - struct esw_exp_attachment *a = attachment->priv; - return &a->table; + struct eswin_split_attachment *a = attachment->priv; + struct sg_table *table =a->table; + int ret; + unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; + + /* Skipt cache sync, since it takes a lot of time when import to device. + * It's the user's responsibility for guaranteeing the cache coherency by + flusing cache explicitly before importing to device. + */ + ret = dma_map_sgtable(attachment->dev, table, direction, attrs); + + if (ret) + return ERR_PTR(-ENOMEM); + a->mapped = true; + return table; } -static void esw_common_unmap_dma_buf(struct dma_buf_attachment *attachment, +static void eswin_spilt_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, enum dma_data_direction direction) { -} + struct eswin_split_attachment *a = attachment->priv; + unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; -static int esw_common_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - struct esw_export_buffer_info *buffer = dmabuf->priv; - // printk("%s enter\n", __func__); - return dma_buf_mmap(buffer->dmabuf, vma, buffer->slice.offset >> PAGE_SHIFT); + a->mapped = false; + + /* Skipt cache sync, since it takes a lot of time when unmap from device. + * It's the user's responsibility for guaranteeing the cache coherency after + the device has done processing the data.(For example, CPU do NOT read untill + the device has done) + */ + dma_unmap_sgtable(attachment->dev, table, direction, attrs); } -static int esw_common_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, +static int eswin_split_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { - struct esw_export_buffer_info *buffer = dmabuf->priv; - return dma_buf_begin_cpu_access(buffer->dmabuf, direction); + struct eswin_split_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + struct scatterlist *sg; + int i; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + /* Since the cache sync was skipped when eswin_rsvmem_heap_map_dma_buf/eswin_rsvmem_heap_unmap_dma_buf, + So force cache sync here when user call ES_SYS_MemFlushCache, eventhough there + is no device attached to this dmabuf. 
+ */ + for_each_sg(table->sgl, sg, table->orig_nents, i) + arch_sync_dma_for_cpu(sg_phys(sg), sg->length, direction); + + mutex_unlock(&buffer->lock); + + return 0; } -static int esw_common_dma_buf_end_cpu_access(struct dma_buf *dmabuf, +static int eswin_split_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { - struct esw_export_buffer_info *buffer = dmabuf->priv; - return dma_buf_end_cpu_access(buffer->dmabuf, direction); + struct eswin_split_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + struct scatterlist *sg; + int i; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + /* Since the cache sync was skipped while eswin_rsvmem_heap_map_dma_buf/eswin_rsvmem_heap_unmap_dma_buf, + So force cache sync here when user call ES_SYS_MemFlushCache, eventhough there + is no device attached to this dmabuf. + */ + for_each_sg(table->sgl, sg, table->orig_nents, i) + arch_sync_dma_for_device(sg_phys(sg), sg->length, direction); + mutex_unlock(&buffer->lock); + + return 0; } -static int esw_common_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int eswin_split_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { - struct esw_export_buffer_info *buffer = dmabuf->priv; - struct dma_buf_map pmap; + struct eswin_split_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + unsigned long addr = vma->vm_start; + unsigned long pgoff = (table->sgl->offset >> PAGE_SHIFT); + // unsigned long pgoff = (buffer->slice.offset >> PAGE_SHIFT); + unsigned long size_remaining = vma->vm_end - vma->vm_start;//vma_pages(vma); + struct sg_page_iter piter; int ret; - ret = dma_buf_vmap(buffer->dmabuf, &pmap); + /* Mapping secure_memory with cached proprty to user space for CPU is NOT permitted */ + if (unlikely(!strncmp("secure_memory", dmabuf->exp_name, 13))) { + if (!(vma->vm_flags & VM_NORESERVE)) + return -EPERM; + } + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + /* vm_private_data will be used by eswin-ipc-scpu.c. + ipc will import this dmabuf to get iova. + */ + vma->vm_private_data = dmabuf; + + /* support mman flag MAP_SHARED_VALIDATE | VM_NORESERVE, used to map uncached memory to user space. + Users should guarantee this buffer has been flushed to cache already. 
+ */ + if (vma->vm_flags & VM_NORESERVE) { + vm_flags_clear(vma, VM_NORESERVE); + vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); + /* skip sync cache, users should guarantee the cache is clean after done using it in + cached mode(i.e, ES_SYS_Mmap(SYS_CACHE_MODE_CACHED)) + */ + } + pr_debug("%s, vma->vm_start:0x%lx, vma->vm_end:0x%lx\n", __func__, vma->vm_start, vma->vm_end); + pr_debug("%s, size_remaining:0x%lx, pgoff:0x%lx, vma->vm_pgoff:0x%lx, dmabuf->size:0x%lx, start_phys:0x%llx, pfn sg_page(sg):0x%lx,sg->length=0x%x\n", + __func__, size_remaining, pgoff, vma->vm_pgoff, dmabuf->size, sg_phys(table->sgl),page_to_pfn(sg_page(table->sgl)), table->sgl->length); + + for_each_sgtable_page(table, &piter, pgoff) { + struct page *page = sg_page_iter_page(&piter); + // pr_debug("addr:0x%lx, page_to_pfn:0x%lx\n", addr, page_to_pfn(page)); + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + addr += PAGE_SIZE; + if (addr >= vma->vm_end) + return 0; + } + + return 0; +} + +static void *eswin_split_heap_do_vmap(struct dma_buf *dmabuf) +{ + struct eswin_split_buffer *buffer = dmabuf->priv; + pgprot_t prot = PAGE_KERNEL; + struct sg_table *table = &buffer->sg_table; + int npages = PAGE_ALIGN(buffer->slice.len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + void *vaddr; + unsigned long pgoff = (table->sgl->offset >> PAGE_SHIFT); + + if (!pages) + return ERR_PTR(-ENOMEM); + + for_each_sgtable_page(table, &piter, pgoff) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } - map->is_iomem = false; - map->vaddr_iomem = pmap.vaddr_iomem + buffer->slice.offset; + /* The property of this dmabuf in kernel space is determined by heap alloc with fd_flag. 
*/ + if (buffer->fd_flags & O_DSYNC) { + prot = pgprot_dmacoherent(PAGE_KERNEL); + pr_debug("%s syport uncached kernel dmabuf!, prot=0x%x\n", __func__, (unsigned int)pgprot_val(prot)); + } + else { + pr_debug("%s memport cached kernel dmabuf!\n", __func__); + } + + vaddr = vmap(pages, npages, VM_MAP, prot); + vfree(pages); + + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static int eswin_split_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +{ + struct eswin_split_buffer *buffer = dmabuf->priv; + void *vaddr; + int ret = 0; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + dma_buf_map_set_vaddr(map, buffer->vaddr); + goto out; + } + + vaddr = eswin_split_heap_do_vmap(dmabuf); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto out; + } + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + dma_buf_map_set_vaddr(map, buffer->vaddr); +out: + mutex_unlock(&buffer->lock); return ret; } -static void esw_common_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static void eswin_split_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) { - struct esw_export_buffer_info *buffer = dmabuf->priv; - struct dma_buf_map pmap = *map; + struct eswin_split_buffer *buffer = dmabuf->priv; - pmap.vaddr_iomem -= buffer->slice.offset; - dma_buf_vunmap(buffer->dmabuf, &pmap); + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + dma_buf_map_clear(map); } -static void esw_common_dma_buf_release(struct dma_buf *dmabuf) +/* split parent dmabuf, and generate a new child sgt */ +static int eswin_get_split_dmabuf(struct eswin_split_buffer *split_buffer) { - struct esw_export_buffer_info *buffer = dmabuf->priv; + struct dma_buf *par_dma_buf = split_buffer->dmabuf; + u64 offset = split_buffer->slice.offset; + size_t size = split_buffer->slice.len; + struct sg_table *splitted_sgt = &split_buffer->sg_table; + struct sg_table *orig_splitted_sgt = &split_buffer->orig_sg_table; + struct dma_buf_attachment *attach; + struct dma_heap_attachment *a; + struct dma_heap_attachment_cma *cma_a; + struct sg_table *par_origin_table; + int out_nents[1]; + struct scatterlist *sg; + struct scatterlist *split_sg; + size_t size_remaining, len; + int i; + int ret = 0; - // printk("%s %d\n", __func__, __LINE__); + if (split_dmabuf_dev == NULL) { + pr_err("split_dmabuf_dev has not been created!!!\n"); + return -EINVAL; + } + + get_dma_buf(par_dma_buf); + attach = dma_buf_attach(par_dma_buf, split_dmabuf_dev); + if (IS_ERR(attach)) { + ret = PTR_ERR(attach); + dma_buf_put(par_dma_buf); + return ret; + } + + if (unlikely(!strcmp("linux,cma", par_dma_buf->exp_name))) + { + cma_a = (struct dma_heap_attachment_cma *)attach->priv; + par_origin_table = &cma_a->table; + } + else { + a = (struct dma_heap_attachment *)attach->priv; + par_origin_table = a->table; + } + + #ifdef PRINT_ORIGINAL_SPLITTERS + { + pr_debug("%s:parent[0x%px]:sgt->orig_nents=%d, nents=%d, sg_nents(in) %d\n", + __func__, par_dma_buf, + par_origin_table->orig_nents, par_origin_table->nents, + sg_nents(par_origin_table->sgl)); + for_each_sg(par_origin_table->sgl, sg, par_origin_table->orig_nents, i) { + pr_debug("parent[0x%px]:orig[%d]:sg->offset=0x%x ,sg->length=0x%x, sg_dma_len=0x%x, sg_phys=0x%lx\n", + par_dma_buf, i, sg->offset, sg->length, sg_dma_len(sg), (unsigned long)sg_phys(sg)); + } + } + #endif - dma_buf_put(buffer->dmabuf); - kfree(buffer); + /* split unmaped/original sgt of parent */ + ret = 
sg_split(par_origin_table->sgl, 0, offset, 1, &size, + &split_sg, &out_nents[0], GFP_KERNEL); + if (ret) { + pr_err("Failed to split from parents's sgt\n"); + } + + orig_splitted_sgt->orig_nents = out_nents[0]; + orig_splitted_sgt->nents = out_nents[0]; + orig_splitted_sgt->sgl = split_sg; + + splitted_sgt->orig_nents = out_nents[0]; + splitted_sgt->nents = out_nents[0]; + /* skip the first sgl if it's length is 0 */ + if (split_sg->length == 0) { + splitted_sgt->sgl = sg_next(split_sg); + splitted_sgt->orig_nents--; + splitted_sgt->nents--; + } + else { + splitted_sgt->sgl = split_sg; + } + /* Re-format the splitted sg list in the actual slice len */ + pr_debug("%s:%d, split[0x%px]:out_nents[0]=%d, Re-format the splitted sgl.\n", __func__, __LINE__, par_dma_buf, out_nents[0]); + size_remaining = size; + + for_each_sgtable_sg(splitted_sgt, sg, i) { + len = min_t(size_t, size_remaining, sg->length); + size_remaining -= len; + if (size_remaining == 0) { + sg->length = len; + sg_mark_end(sg); + pr_debug("split[0x%px]:orig[%d]:sg->offset=0x%x ,sg->length=0x%x, sg_dma_len=0x%x, sg_phys=0x%lx\n", + par_dma_buf, i, sg->offset, sg->length, sg_dma_len(sg), (unsigned long)sg_phys(sg)); + break; + } + pr_debug("split[0x%px]:orig[%d]:sg->offset=0x%x ,sg->length=0x%x, sg_dma_len=0x%x, sg_phys=0x%lx\n", + par_dma_buf, i, sg->offset, sg->length, sg_dma_len(sg), (unsigned long)sg_phys(sg)); + } + splitted_sgt->orig_nents = splitted_sgt->nents = i + 1; + + pr_debug("%s:split[0x%px]reformatted, splitted:sgt->orig_nents=%d, nents=%d, out_nents[0]=%d\n", + __func__, par_dma_buf, splitted_sgt->orig_nents, splitted_sgt->nents, out_nents[0]); + /* parent's dupped sgt table is useless*/ + dma_buf_detach(par_dma_buf, attach); + dma_buf_put(par_dma_buf); + + return ret; +} + +static void eswin_put_split_dmabuf(struct sg_table *splitted_sgt) +{ + kfree(splitted_sgt->sgl); } -static const struct dma_buf_ops esw_common_buf_ops = { - .attach = esw_common_heap_attach, - .detach = esw_common_heap_detach, - .map_dma_buf = esw_common_map_dma_buf, - .unmap_dma_buf = esw_common_unmap_dma_buf, - .begin_cpu_access = esw_common_dma_buf_begin_cpu_access, - .end_cpu_access = esw_common_dma_buf_end_cpu_access, - .mmap = esw_common_mmap, - .vmap = esw_common_heap_vmap, - .vunmap = esw_common_heap_vunmap, - .release = esw_common_dma_buf_release, +static void eswin_split_dma_buf_release(struct dma_buf *dmabuf) +{ + struct eswin_split_buffer *split_buffer = dmabuf->priv; + struct sg_table *table = &split_buffer->orig_sg_table;; + + pr_debug("%s %d\n", __func__, __LINE__); + /* free splitted sgt->sgl which was allocated during esw_common_dmabuf_split_export()*/ + eswin_put_split_dmabuf(table); + + /* put parent's dmabuf which was got during esw_common_dmabuf_split_export()*/ + dma_buf_put(split_buffer->dmabuf); + + /* free split buffer which was allocated during esw_common_dmabuf_split_export()*/ + kfree(split_buffer); +} + +static const struct dma_buf_ops eswin_common_buf_ops = { + .attach = eswin_split_heap_attach, + .detach = eswin_split_heap_detach, + .map_dma_buf = eswin_split_map_dma_buf, + .unmap_dma_buf = eswin_spilt_unmap_dma_buf, + .begin_cpu_access = eswin_split_dma_buf_begin_cpu_access, + .end_cpu_access = eswin_split_dma_buf_end_cpu_access, + .mmap = eswin_split_mmap, + .vmap = eswin_split_heap_vmap, + .vunmap = eswin_split_heap_vunmap, + .release = eswin_split_dma_buf_release, }; -int esw_common_dmabuf_split_export(int dbuf_fd, unsigned int offset, size_t len, int fd_flags, char *name) + +int 
esw_common_dmabuf_split_export(int par_dmabuf_fd, unsigned int offset, size_t len, int fd_flags, char *name) { - struct esw_export_buffer_info *buffer_info; + struct eswin_split_buffer *split_buffer; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct sg_table *table; int fd; struct dma_buf *dmabuf; + int ret = 0; - buffer_info = kzalloc(sizeof(*buffer_info), GFP_KERNEL); - if (!buffer_info) + split_buffer = kzalloc(sizeof(*split_buffer), GFP_KERNEL); + if (!split_buffer) return -ENOMEM; - buffer_info->dbuf_fd = dbuf_fd; - buffer_info->fd_flags = fd_flags; - buffer_info->slice.offset = offset; - buffer_info->slice.len = len; - snprintf(buffer_info->name, sizeof(buffer_info->name), "%s", name); + INIT_LIST_HEAD(&split_buffer->attachments); + mutex_init(&split_buffer->lock); - buffer_info->dmabuf = dma_buf_get(buffer_info->dbuf_fd); - if (IS_ERR(buffer_info->dmabuf)) - return PTR_ERR(buffer_info->dmabuf); + split_buffer->dbuf_fd = par_dmabuf_fd; + split_buffer->fd_flags = fd_flags; + split_buffer->slice.offset = offset; + split_buffer->slice.len = len; + snprintf(split_buffer->name, sizeof(split_buffer->name), "%s", name); + + split_buffer->dmabuf = dma_buf_get(split_buffer->dbuf_fd); + if (IS_ERR(split_buffer->dmabuf)) { + kfree(split_buffer); + return PTR_ERR(split_buffer->dmabuf); + } -// printk("input slice: oft=0x%d, len=%lu\n", buffer_info->slice.offset, buffer_info->slice.len); + pr_debug("%s:%d, par_dbuf_fd %d, input slice: oft=0x%llx, len=0x%lx\n", __func__, __LINE__, + split_buffer->dbuf_fd, split_buffer->slice.offset, split_buffer->slice.len); - buffer_info->slice.offset = PAGE_ALIGN(buffer_info->slice.offset); - buffer_info->slice.len = PAGE_ALIGN(buffer_info->slice.len); + split_buffer->slice.offset = PAGE_ALIGN(split_buffer->slice.offset); + split_buffer->slice.len = PAGE_ALIGN(split_buffer->slice.len); -// printk("align slice: oft=0x%d, len=%lu\n", buffer_info->slice.offset, buffer_info->slice.len); + table = &split_buffer->sg_table; + ret = eswin_get_split_dmabuf(split_buffer); + if (ret) { + /* put back parent's dmabuf*/ + dma_buf_put(split_buffer->dmabuf); + /* free split buffer */ + kfree(split_buffer); + return ret; + } /* create the dmabuf */ - exp_info.exp_name = buffer_info->name; - exp_info.ops = &esw_common_buf_ops; - exp_info.size = buffer_info->slice.len; - exp_info.flags = buffer_info->fd_flags; - exp_info.priv = buffer_info; + exp_info.exp_name = split_buffer->name; + exp_info.ops = &eswin_common_buf_ops; + exp_info.size = split_buffer->slice.len; + exp_info.flags = O_RDWR | O_CLOEXEC; + exp_info.priv = split_buffer; dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { + eswin_put_split_dmabuf(table); + /* put back parent's dmabuf*/ + dma_buf_put(split_buffer->dmabuf); + /* free split buffer */ + kfree(split_buffer); return PTR_ERR(dmabuf); } - fd = dma_buf_fd(dmabuf, buffer_info->fd_flags); + fd = dma_buf_fd(dmabuf, split_buffer->fd_flags); if (fd < 0) { dma_buf_put(dmabuf); - /* put the splitted dmabuf, the esw_common_dma_buf_release will be called, - the parent dmabuf will be put and the buffer_info will be free at that time */ + /* put the splitted dmabuf, the eswin_split_dma_buf_release will be called, + the parent dmabuf will be put and the split_buffer will be free at that time */ } return fd; } EXPORT_SYMBOL(esw_common_dmabuf_split_export); + +/* create a misc device for attaching the parent dmabuf to get the parent's original sgt*/ +#define DRIVER_NAME "split_dmabuf" + +static int split_dmabuf_open(struct inode *inode, struct file *file) +{ + + 
pr_debug("%s:%d, success!\n", __func__, __LINE__); + + return 0; +} + +static int split_dmabuf_release(struct inode *inode, struct file *file) +{ + pr_debug("%s:%d, success!\n", __func__, __LINE__); + + return 0; +} + +static struct file_operations split_dmabuf_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = split_dmabuf_open, + .release = split_dmabuf_release, +}; + +static struct miscdevice split_dmabuf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &split_dmabuf_fops, +}; + +static int __init split_dmabuf_init(void) +{ + int ret = 0; + struct device *dev; + + ret = misc_register(&split_dmabuf_miscdev); + if(ret) { + pr_err ("cannot register miscdev (err=%d)\n", ret); + return ret; + } + split_dmabuf_dev = split_dmabuf_miscdev.this_device; + dev = split_dmabuf_dev; + if (!dev->dma_mask) { + dev->dma_mask = &dev->coherent_dma_mask; + } + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + pr_err("Unable to set coherent mask\n"); + goto deregister_dev; + } + + return 0; + +deregister_dev: + misc_deregister(&split_dmabuf_miscdev); + split_dmabuf_dev = NULL; + return ret; +} + +static void __exit split_dmabuf_exit(void) +{ + misc_deregister(&split_dmabuf_miscdev); +} + +module_init(split_dmabuf_init); +module_exit(split_dmabuf_exit); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-win2030.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-win2030.c index 66e0cc12ce44..e3b6c28394b6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-win2030.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-win2030.c @@ -205,7 +205,7 @@ static void dwc_qos_fix_speed(void *priv, unsigned int speed, unsigned int mode) case SPEED_1000: rate = 125000000; - if (dwc_priv->dev_id == 0) { + if ((dwc_priv->dev_id & 0x1) == 0) { regmap_write(dwc_priv->hsp_regmap, 0x118, 0x800c8023); regmap_write(dwc_priv->hsp_regmap, 0x11c, 0x0c0c0c0c); regmap_write(dwc_priv->hsp_regmap, 0x114, 0x23232323); @@ -226,7 +226,7 @@ static void dwc_qos_fix_speed(void *priv, unsigned int speed, unsigned int mode) case SPEED_100: rate = 25000000; - if (dwc_priv->dev_id == 0) { + if ((dwc_priv->dev_id & 0x1) == 0) { regmap_write(dwc_priv->hsp_regmap, 0x118, 0x803f8050); regmap_write(dwc_priv->hsp_regmap, 0x11c, 0x3f3f3f3f); regmap_write(dwc_priv->hsp_regmap, 0x114, 0x50505050); @@ -247,7 +247,7 @@ static void dwc_qos_fix_speed(void *priv, unsigned int speed, unsigned int mode) case SPEED_10: rate = 2500000; - if (dwc_priv->dev_id == 0) { + if ((dwc_priv->dev_id & 0x1) == 0) { regmap_write(dwc_priv->hsp_regmap, 0x118, 0x0); regmap_write(dwc_priv->hsp_regmap, 0x11c, 0x0); regmap_write(dwc_priv->hsp_regmap, 0x114, 0x0); @@ -468,6 +468,7 @@ static int dwc_qos_probe(struct platform_device *pdev, plat_dat->bsp_priv = dwc_priv; plat_dat->phy_addr = PHY_ADDR; plat_dat->clks_config = dwc_clks_config; + plat_dat->bus_id = dwc_priv->dev_id; return 0; } diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 8ea6e4f7421a..1c0df67e7556 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -49,8 +49,8 @@ config PINCTRL_AMD Requires ACPI/FDT device enumeration code to set up a platform device. 
-config PINCTRL_EIC7700 - tristate "EIC7700 PINCTRL driver" +config PINCTRL_EIC7X + tristate "EIC7X PINCTRL driver" depends on ARCH_ESWIN_EIC770X_SOC_FAMILY select GENERIC_PINCONF select PINMUX @@ -58,7 +58,7 @@ config PINCTRL_EIC7700 select GENERIC_PINCTRL_GROUPS select GENERIC_PINMUX_FUNCTIONS help - say Y here to add pinctrl eic7700 driver + say Y here to add pinctrl eic7x driver config PINCTRL_APPLE_GPIO tristate "Apple SoC GPIO pin controller driver" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 3b1057c5411e..571811fb66ee 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o obj-$(CONFIG_OF) += devicetree.o obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o -obj-$(CONFIG_PINCTRL_EIC7700) += pinctrl-eic7700.o +obj-$(CONFIG_PINCTRL_EIC7X) += pinctrl-eic7x.o obj-$(CONFIG_PINCTRL_APPLE_GPIO) += pinctrl-apple-gpio.o obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o diff --git a/drivers/pinctrl/pinctrl-eic7700.c b/drivers/pinctrl/pinctrl-eic7x.c similarity index 84% rename from drivers/pinctrl/pinctrl-eic7700.c rename to drivers/pinctrl/pinctrl-eic7x.c index c464e6964f26..3183b0dc424a 100644 --- a/drivers/pinctrl/pinctrl-eic7700.c +++ b/drivers/pinctrl/pinctrl-eic7x.c @@ -37,10 +37,9 @@ #define ESWIN_PINCONF_IE BIT(0) #define ESWIN_PINCONF_PULLUP BIT(1) #define ESWIN_PINCONF_PULLDOWN BIT(2) -#define ESWIN_PINCONF_DRIVER_STRENGTH_MASK 0xf // 0111 1000 +#define ESWIN_PINCONF_DRIVER_STRENGTH_MASK 0xf #define ESWIN_PINCONF_DRIVER_SHIFT 3 #define ESWIN_PINCONF_SMT BIT(7) - struct eswin_function_desc { const char *name; const char * const *groups; @@ -239,16 +238,69 @@ static const struct pinctrl_pin_desc eswin_pins[] = { .pins = _name ## _pins, \ .npins = ARRAY_SIZE(_name ## _pins), \ } + +#ifdef CONFIG_ARCH_ESWIN_EIC7702_SOC +static const char * const jtag1_on_group[] = {"jtag1_on_group"}; +static const char * const jtag1_off_group[] = {"jtag1_off_group"}; //fun1 + +static const char * const jtag2_on_group[] = {"jtag2_on_group"}; +static const char * const jtag2_off_group[] = {"jtag2_off_group"}; //fun1 + +static const char * const gpio7_on_group[] = {"gpio7_on_group"}; +static const char * const gpio7_off_group[] = {"gpio7_off_group"}; //fun1 + +static const char * const gpio8_on_group[] = {"gpio8_on_group"}; +static const char * const gpio8_off_group[] = {"gpio8_off_group"}; //fun1 + +static const char * const gpio9_on_group[] = {"gpio9_on_group"}; +static const char * const gpio9_off_group[] = {"gpio9_off_group"}; //fun1 + +static const char * const gpio17_on_group[] = {"gpio17_on_group"}; +static const char * const gpio17_off_group[] = {"gpio17_off_group"}; //fun1 + +static const char * const gpio64_on_group[] = {"gpio64_on_group"}; +static const char * const gpio64_off_group[] = {"gpio64_off_group"}; //fun1 + +static const char * const gpio65_on_group[] = {"gpio65_on_group"}; +static const char * const gpio65_off_group[] = {"gpio65_off_group"}; //fun1 + +static const char * const gpio66_on_group[] = {"gpio66_on_group"}; +static const char * const gpio66_off_group[] = {"gpio66_off_group"}; //fun1 + +//func6 +static const char * const vc_g2d0_debug_out_on_group[] = {"vc_g2d0_debug_out_on_group"}; +static const char * const vc_g2d0_debug_out_off_group[] = {"vc_g2d0_debug_out_off_group"}; //func3 + +//func7 +static const char * const ftm_test_out_on_group[] = {"ftm_test_out_on_group"}; +static const char * const ftm_test_out_off_group[] = 
{"ftm_test_out_off_group"}; //func3 + +#else +static const char * const jtag1_group[] = {"jtag1_group"}; +static const char * const jtag2_group[] = {"jtag2_group"}; +static const char * const gpio7_group[] = {"gpio7_group"}; +static const char * const gpio8_group[] = {"gpio8_group"}; +static const char * const gpio9_group[] = {"gpio9_group"}; + +static const char * const gpio17_group[] = {"gpio17_group"}; +static const char * const gpio64_group[] = {"gpio64_group"}; +static const char * const gpio65_group[] = {"gpio65_group"}; +static const char * const gpio66_group[] = {"gpio66_group"}; + +//func6 +static const char * const vc_g2d0_debug_out_group[] = {"vc_g2d0_debug_out_group"}; + +//func7 +static const char * const ftm_test_out_group[] = {"ftm_test_out_group"}; +#endif + //func0 static const char * const sdio0_group[] = {"sdio0_group"}; static const char * const sdio1_group[] = {"sdio1_group"}; -static const char * const por_sel_group[] = {"por_sel_group"}; static const char * const jtag0_group[] = {"jtag0_group"}; -static const char * const jtag1_group[] = {"jtag1_group"}; static const char * const spi2_cs_group[] = {"spi2_cs_group"}; static const char * const pcie_group[] = {"pcie_group"}; static const char * const hdmi_group[] = {"hdmi_group"}; -static const char * const jtag2_group[] = {"jtag2_group"}; static const char * const rgmii0_group[] = {"rgmii0_group"}; static const char * const i2s0_group[] = {"i2s0_group"}; static const char * const i2s1_group[] = {"i2s1_group"}; @@ -275,12 +327,14 @@ static const char * const mipi_csi0_group[] = {"mipi_csi0_group"}; static const char * const mipi_csi1_group[] = {"mipi_csi1_group"}; static const char * const mipi_csi2_group[] = {"mipi_csi2_group"}; static const char * const mipi_csi3_group[] = {"mipi_csi3_group"}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC static const char * const mipi_csi4_group[] = {"mipi_csi4_group"}; static const char * const mipi_csi5_group[] = {"mipi_csi5_group"}; +#endif static const char * const spi3_group[] = {"spi3_group"}; static const char * const i2c8_group[] = {"i2c8_group"}; static const char * const s_mode_group[] = {"s_mode_group"}; -static const char * const pinmux_ddr_refclk_sel_group[] = {"pinmux_ddr_refclk_sel_group"}; +static const char * const ddr_refclk_sel_group[] = {"ddr_refclk_sel_group"}; static const char * const spi0_group[] = {"spi0_group"}; static const char * const i2c10_group[] = {"i2c10_group"}; static const char * const i2c11_group[] = {"i2c11_group"}; @@ -311,9 +365,6 @@ static const char * const gpio3_group[] = {"gpio3_group"}; static const char * const gpio4_group[] = {"gpio4_group"}; static const char * const gpio5_group[] = {"gpio5_group"}; static const char * const gpio6_group[] = {"gpio6_group"}; -static const char * const gpio7_group[] = {"gpio7_group"}; -static const char * const gpio8_group[] = {"gpio8_group"}; -static const char * const gpio9_group[] = {"gpio9_group"}; static const char * const gpio10_group[] = {"gpio10_group"}; static const char * const gpio11_group[] = {"gpio11_group"}; static const char * const gpio12_group[] = {"gpio12_group"}; @@ -321,7 +372,6 @@ static const char * const gpio13_group[] = {"gpio13_group"}; static const char * const gpio14_group[] = {"gpio14_group"}; static const char * const gpio15_group[] = {"gpio15_group"}; static const char * const gpio16_group[] = {"gpio16_group"}; -static const char * const gpio17_group[] = {"gpio17_group"}; static const char * const gpio18_group[] = {"gpio18_group"}; static const char * const gpio19_group[] = 
{"gpio19_group"}; static const char * const gpio20_group[] = {"gpio20_group"}; @@ -370,10 +420,9 @@ static const char * const gpio60_group[] = {"gpio60_group"}; static const char * const gpio61_group[] = {"gpio61_group"}; static const char * const gpio62_group[] = {"gpio62_group"}; static const char * const gpio63_group[] = {"gpio63_group"}; -static const char * const gpio64_group[] = {"gpio64_group"}; -static const char * const gpio65_group[] = {"gpio65_group"}; -static const char * const gpio66_group[] = {"gpio66_group"}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC static const char * const gpio67_group[] = {"gpio67_group"}; +#endif static const char * const gpio68_group[] = {"gpio68_group"}; static const char * const gpio69_group[] = {"gpio69_group"}; @@ -390,12 +439,14 @@ static const char * const gpio79_group[] = {"gpio79_group"}; static const char * const gpio80_group[] = {"gpio80_group"}; static const char * const gpio81_group[] = {"gpio81_group"}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC static const char * const gpio82_group[] = {"gpio82_group"}; static const char * const gpio83_group[] = {"gpio83_group"}; static const char * const gpio84_group[] = {"gpio84_group"}; static const char * const gpio85_group[] = {"gpio85_group"}; static const char * const gpio86_group[] = {"gpio86_group"}; static const char * const gpio87_group[] = {"gpio87_group"}; +#endif static const char * const gpio88_group[] = {"gpio88_group"}; static const char * const gpio89_group[] = {"gpio89_group"}; @@ -434,22 +485,82 @@ static const char * const csi_mon_out_valid_group[] = {"csi_mon_out_valid_group" static const char * const csi_parity_error_group[] = {"csi_parity_error_group"}; static const char * const csi_dtb_out_group[] = {"csi_dtb_out_group"}; static const char * const csi_phy_sel_group[] = {"csi_phy_sel_group"}; -static const char * const vc_g2d0_debug_out_group[] = {"vc_g2d0_debug_out_group"}; static const char * const vc_g2d1_debug_out_group[] = {"vc_g2d1_debug_out_group"}; static const char * const sata_mpll_clk_group[] = {"sata_mpll_clk_group"}; static const char * const sata_ref_repeat_clk_m_group[] = {"sata_ref_repeat_clk_m_group"}; static const char * const sata_ref_repeat_clk_p_group[] = {"sata_ref_repeat_clk_p_group"}; +//func7 +static const char * const clk_d2d_test_out_group[] = {"clk_d2d_test_out_group"}; +static const char * const clk_spll0_test_out_group[] = {"clk_spll0_test_out_group"}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC +static const char * const clk_spll1_test_out_group[] = {"clk_spll1_test_out_group"}; +static const char * const clk_spll2_test_out_group[] = {"clk_spll2_test_out_group"}; +static const char * const clk_vpll_test_out_group[] = {"clk_vpll_test_out_group"}; +static const char * const clk_apll_test_out_group[] = {"clk_apll_test_out_group"}; +static const char * const clk_cpll_test_out_group[] = {"clk_cpll_test_out_group"}; +static const char * const clk_pll_lpddr_test_out_group[] = {"clk_pll_lpddr_test_out_group"}; +#endif + + +#ifdef CONFIG_ARCH_ESWIN_EIC7702_SOC +static const unsigned int jtag1_on_pins[] = {20,21,22}; +static const unsigned int jtag1_off_pins[] = {20,21,22}; + +static const unsigned int jtag2_on_pins[] = {32,111,112,113}; +static const unsigned int jtag2_off_pins[] = {32,111,112,113}; + +static const unsigned int gpio7_on_pins[] = {20}; +static const unsigned int gpio7_off_pins[] = {20}; + +static const unsigned int gpio8_on_pins[] = {21}; +static const unsigned int gpio8_off_pins[] = {21}; + +static const unsigned int gpio9_on_pins[] = {22}; +static const 
unsigned int gpio9_off_pins[] = {22}; + +static const unsigned int gpio17_on_pins[] = {32}; +static const unsigned int gpio17_off_pins[] = {32}; + +static const unsigned int gpio64_on_pins[] = {111}; +static const unsigned int gpio64_off_pins[] = {111}; + +static const unsigned int gpio65_on_pins[] = {112}; +static const unsigned int gpio65_off_pins[] = {112}; + +static const unsigned int gpio66_on_pins[] = {113}; +static const unsigned int gpio66_off_pins[] = {113}; + +//func6 +static const unsigned int vc_g2d0_debug_out_on_pins[] = {110,115,116,117}; +static const unsigned int vc_g2d0_debug_out_off_pins[] = {111,112,113}; //func3 + +//func7 +static const unsigned int ftm_test_out_on_pins[] = {109,110,115,116,117,118,119,120,121,122,123,124}; +static const unsigned int ftm_test_out_off_pins[] = {111,112,113}; //func3 +#else +static const unsigned int jtag1_pins[] = {20,21,22,23}; +static const unsigned int jtag2_pins[] = {32,111,112,113,114}; +static const unsigned int gpio7_pins[] = {20}; +static const unsigned int gpio8_pins[] = {21}; +static const unsigned int gpio9_pins[] = {22}; +static const unsigned int gpio17_pins[] = {32}; +static const unsigned int gpio64_pins[] = {111}; +static const unsigned int gpio65_pins[] = {112}; +static const unsigned int gpio66_pins[] = {113}; +//func6 +static const unsigned int vc_g2d0_debug_out_pins[] = {110,112,113,114,115,116,117}; +//func7 +static const unsigned int ftm_test_out_pins[] = {109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124}; +#endif + //func0 static const unsigned int sdio0_pins[] = {1,2}; static const unsigned int sdio1_pins[] = {3,4}; -static const unsigned int por_sel_pins[] = {13}; static const unsigned int jtag0_pins[] = {14,15,16,17}; -static const unsigned int jtag1_pins[] = {20,21,22,23}; static const unsigned int spi2_cs_pins[] = {19,25}; static const unsigned int pcie_pins[] = {26,27,28}; static const unsigned int hdmi_pins[] = {29,30,31}; -static const unsigned int jtag2_pins[] = {32,111,112,113,114}; static const unsigned int rgmii0_pins[] = {33,34,35,36,37,38,39,45,46,47,48,49,50,58,59,60}; static const unsigned int i2s0_pins[] = {40,41,42,43,44}; static const unsigned int i2s1_pins[] = {68,69,70,71,44}; @@ -475,12 +586,14 @@ static const unsigned int mipi_csi0_pins[] = {117,118,119}; static const unsigned int mipi_csi1_pins[] = {120,121,122}; static const unsigned int mipi_csi2_pins[] = {123,124,125}; static const unsigned int mipi_csi3_pins[] = {126,127,128}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC static const unsigned int mipi_csi4_pins[] = {129,130,131}; static const unsigned int mipi_csi5_pins[] = {132,133,134}; +#endif static const unsigned int spi3_pins[] = {135,136,137,138}; static const unsigned int i2c8_pins[] = {139,140}; static const unsigned int s_mode_pins[] = {141}; -static const unsigned int pinmux_ddr_refclk_sel_pins[] = {142}; +static const unsigned int ddr_refclk_sel_pins[] = {142}; static const unsigned int spi0_pins[] = {143,144,145,146,147,148}; static const unsigned int i2c10_pins[] = {149,150}; static const unsigned int i2c11_pins[] = {151,152}; @@ -511,9 +624,6 @@ static const unsigned int gpio3_pins[] = {16}; static const unsigned int gpio4_pins[] = {17}; static const unsigned int gpio5_pins[] = {18}; static const unsigned int gpio6_pins[] = {19}; -static const unsigned int gpio7_pins[] = {20}; -static const unsigned int gpio8_pins[] = {21}; -static const unsigned int gpio9_pins[] = {22}; static const unsigned int gpio10_pins[] = {23}; static const unsigned int gpio11_pins[] = 
{24}; @@ -522,7 +632,6 @@ static const unsigned int gpio13_pins[] = {1}; static const unsigned int gpio14_pins[] = {2}; static const unsigned int gpio15_pins[] = {3}; static const unsigned int gpio16_pins[] = {4}; -static const unsigned int gpio17_pins[] = {32}; static const unsigned int gpio18_pins[] = {40}; static const unsigned int gpio19_pins[] = {41}; @@ -574,10 +683,9 @@ static const unsigned int gpio60_pins[] = {107}; static const unsigned int gpio61_pins[] = {108}; static const unsigned int gpio62_pins[] = {109}; static const unsigned int gpio63_pins[] = {110}; -static const unsigned int gpio64_pins[] = {111}; -static const unsigned int gpio65_pins[] = {112}; -static const unsigned int gpio66_pins[] = {113}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC static const unsigned int gpio67_pins[] = {114}; +#endif static const unsigned int gpio68_pins[] = {115}; static const unsigned int gpio69_pins[] = {116}; @@ -594,12 +702,14 @@ static const unsigned int gpio79_pins[] = {126}; static const unsigned int gpio80_pins[] = {127}; static const unsigned int gpio81_pins[] = {128}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC static const unsigned int gpio82_pins[] = {129}; static const unsigned int gpio83_pins[] = {130}; static const unsigned int gpio84_pins[] = {131}; static const unsigned int gpio85_pins[] = {132}; static const unsigned int gpio86_pins[] = {133}; static const unsigned int gpio87_pins[] = {134}; +#endif static const unsigned int gpio88_pins[] = {135}; static const unsigned int gpio89_pins[] = {136}; @@ -636,26 +746,92 @@ static const unsigned int csi_mon_out_pins[] = {32,33,34,35,36,37,38,39,40,41,42 static const unsigned int csi_ocla_clk_pins[] = {96}; static const unsigned int csi_mon_out_valid_pins[] = {97}; static const unsigned int csi_parity_error_pins[] = {98}; +#ifdef CONFIG_ARCH_ESWIN_EIC7702_SOC +static const unsigned int csi_dtb_out_pins[] = {99,100,101,102}; +static const unsigned int csi_phy_sel_pins[] = {109}; +#else static const unsigned int csi_dtb_out_pins[] = {99,100,101,102,129,130,131,132}; static const unsigned int csi_phy_sel_pins[] = {133,134,109}; -static const unsigned int vc_g2d0_debug_out_pins[] = {110,111,112,113,114,115,116,117}; +#endif static const unsigned int vc_g2d1_debug_out_pins[] = {118,119,120,121,122,123,124,125}; static const unsigned int sata_mpll_clk_pins[] = {126}; static const unsigned int sata_ref_repeat_clk_m_pins[] = {127}; static const unsigned int sata_ref_repeat_clk_p_pins[] = {128}; +//func7 +static const unsigned int clk_d2d_test_out_pins[] = {127}; +static const unsigned int clk_spll0_test_out_pins[] = {128}; +#ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC +static const unsigned int clk_spll1_test_out_pins[] = {129}; +static const unsigned int clk_spll2_test_out_pins[] = {130}; +static const unsigned int clk_vpll_test_out_pins[] = {131}; +static const unsigned int clk_apll_test_out_pins[] = {132}; +static const unsigned int clk_cpll_test_out_pins[] = {133}; +static const unsigned int clk_pll_lpddr_test_out_pins[] = {134}; +#endif + static const struct eswin_group_desc eswin_pinctrl_groups[] = { + #ifdef CONFIG_ARCH_ESWIN_EIC7702_SOC + //func0 + ESWIN_PINCTRL_GRP(jtag1_on), + ESWIN_PINCTRL_GRP(jtag1_off), + + ESWIN_PINCTRL_GRP(jtag2_on), + ESWIN_PINCTRL_GRP(jtag2_off), + + //func2 + ESWIN_PINCTRL_GRP(gpio7_on), + ESWIN_PINCTRL_GRP(gpio7_off), + + ESWIN_PINCTRL_GRP(gpio8_on), + ESWIN_PINCTRL_GRP(gpio8_off), + + ESWIN_PINCTRL_GRP(gpio9_on), + ESWIN_PINCTRL_GRP(gpio9_off), + + ESWIN_PINCTRL_GRP(gpio17_on), + ESWIN_PINCTRL_GRP(gpio17_off), + + 
ESWIN_PINCTRL_GRP(gpio64_on), + ESWIN_PINCTRL_GRP(gpio64_off), + + ESWIN_PINCTRL_GRP(gpio65_on), + ESWIN_PINCTRL_GRP(gpio65_off), + + ESWIN_PINCTRL_GRP(gpio66_on), + ESWIN_PINCTRL_GRP(gpio66_off), + + //func6 + ESWIN_PINCTRL_GRP(vc_g2d0_debug_out_on), + ESWIN_PINCTRL_GRP(vc_g2d0_debug_out_off), + + //func7 + ESWIN_PINCTRL_GRP(ftm_test_out_on), + ESWIN_PINCTRL_GRP(ftm_test_out_off), + #else + ESWIN_PINCTRL_GRP(jtag1), + ESWIN_PINCTRL_GRP(jtag2), + ESWIN_PINCTRL_GRP(gpio7), + ESWIN_PINCTRL_GRP(gpio8), + ESWIN_PINCTRL_GRP(gpio9), + ESWIN_PINCTRL_GRP(gpio17), + ESWIN_PINCTRL_GRP(gpio64), + ESWIN_PINCTRL_GRP(gpio65), + ESWIN_PINCTRL_GRP(gpio66), + //func6 + ESWIN_PINCTRL_GRP(vc_g2d0_debug_out), + //func7 + ESWIN_PINCTRL_GRP(ftm_test_out), + #endif + //func0 ESWIN_PINCTRL_GRP(sdio0), ESWIN_PINCTRL_GRP(sdio1), - ESWIN_PINCTRL_GRP(por_sel), ESWIN_PINCTRL_GRP(jtag0), - ESWIN_PINCTRL_GRP(jtag1), ESWIN_PINCTRL_GRP(spi2_cs), ESWIN_PINCTRL_GRP(pcie), ESWIN_PINCTRL_GRP(hdmi), - ESWIN_PINCTRL_GRP(jtag2), ESWIN_PINCTRL_GRP(rgmii0), ESWIN_PINCTRL_GRP(i2s0), ESWIN_PINCTRL_GRP(i2s1), @@ -681,12 +857,14 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = ESWIN_PINCTRL_GRP(mipi_csi1), ESWIN_PINCTRL_GRP(mipi_csi2), ESWIN_PINCTRL_GRP(mipi_csi3), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC ESWIN_PINCTRL_GRP(mipi_csi4), ESWIN_PINCTRL_GRP(mipi_csi5), + #endif ESWIN_PINCTRL_GRP(spi3), ESWIN_PINCTRL_GRP(i2c8), ESWIN_PINCTRL_GRP(s_mode), - ESWIN_PINCTRL_GRP(pinmux_ddr_refclk_sel), + ESWIN_PINCTRL_GRP(ddr_refclk_sel), ESWIN_PINCTRL_GRP(spi0), ESWIN_PINCTRL_GRP(i2c10), ESWIN_PINCTRL_GRP(i2c11), @@ -717,9 +895,6 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = ESWIN_PINCTRL_GRP(gpio4), ESWIN_PINCTRL_GRP(gpio5), ESWIN_PINCTRL_GRP(gpio6), - ESWIN_PINCTRL_GRP(gpio7), - ESWIN_PINCTRL_GRP(gpio8), - ESWIN_PINCTRL_GRP(gpio9), ESWIN_PINCTRL_GRP(gpio10), ESWIN_PINCTRL_GRP(gpio11), ESWIN_PINCTRL_GRP(gpio12), @@ -727,7 +902,6 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = ESWIN_PINCTRL_GRP(gpio14), ESWIN_PINCTRL_GRP(gpio15), ESWIN_PINCTRL_GRP(gpio16), - ESWIN_PINCTRL_GRP(gpio17), ESWIN_PINCTRL_GRP(gpio18), ESWIN_PINCTRL_GRP(gpio19), ESWIN_PINCTRL_GRP(gpio20), @@ -776,10 +950,9 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = ESWIN_PINCTRL_GRP(gpio61), ESWIN_PINCTRL_GRP(gpio62), ESWIN_PINCTRL_GRP(gpio63), - ESWIN_PINCTRL_GRP(gpio64), - ESWIN_PINCTRL_GRP(gpio65), - ESWIN_PINCTRL_GRP(gpio66), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC ESWIN_PINCTRL_GRP(gpio67), + #endif ESWIN_PINCTRL_GRP(gpio68), ESWIN_PINCTRL_GRP(gpio69), @@ -796,12 +969,14 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = ESWIN_PINCTRL_GRP(gpio80), ESWIN_PINCTRL_GRP(gpio81), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC ESWIN_PINCTRL_GRP(gpio82), ESWIN_PINCTRL_GRP(gpio83), ESWIN_PINCTRL_GRP(gpio84), ESWIN_PINCTRL_GRP(gpio85), ESWIN_PINCTRL_GRP(gpio86), ESWIN_PINCTRL_GRP(gpio87), + #endif ESWIN_PINCTRL_GRP(gpio88), ESWIN_PINCTRL_GRP(gpio89), @@ -840,11 +1015,22 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = ESWIN_PINCTRL_GRP(csi_parity_error), ESWIN_PINCTRL_GRP(csi_dtb_out), ESWIN_PINCTRL_GRP(csi_phy_sel), - ESWIN_PINCTRL_GRP(vc_g2d0_debug_out), ESWIN_PINCTRL_GRP(vc_g2d1_debug_out), ESWIN_PINCTRL_GRP(sata_mpll_clk), ESWIN_PINCTRL_GRP(sata_ref_repeat_clk_m), ESWIN_PINCTRL_GRP(sata_ref_repeat_clk_p), + + //func7 + ESWIN_PINCTRL_GRP(clk_d2d_test_out), + ESWIN_PINCTRL_GRP(clk_spll0_test_out), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC + ESWIN_PINCTRL_GRP(clk_spll1_test_out), + 
ESWIN_PINCTRL_GRP(clk_spll2_test_out), + ESWIN_PINCTRL_GRP(clk_vpll_test_out), + ESWIN_PINCTRL_GRP(clk_apll_test_out), + ESWIN_PINCTRL_GRP(clk_cpll_test_out), + ESWIN_PINCTRL_GRP(clk_pll_lpddr_test_out), + #endif }; #define ESWIN_PINMUX_FUNCTION(_func_name, _mux_val, _mask)\ @@ -861,16 +1047,65 @@ static const struct eswin_group_desc eswin_pinctrl_groups[] = static const struct eswin_function_desc eswin_pinmux_functions[] = { + #ifdef CONFIG_ARCH_ESWIN_EIC7702_SOC + //func0 + ESWIN_PINMUX_FUNCTION(jtag1_on, 0, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(jtag1_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(jtag2_on, 0, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(jtag2_off, 1, ESWIN_PINMUX_MASK), + + //func2 + ESWIN_PINMUX_FUNCTION(gpio7_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio7_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(gpio8_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio8_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(gpio9_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio9_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(gpio17_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio17_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(gpio64_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio64_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(gpio65_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio65_off, 1, ESWIN_PINMUX_MASK), + + ESWIN_PINMUX_FUNCTION(gpio66_on, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio66_off, 1, ESWIN_PINMUX_MASK), + + //func6 + ESWIN_PINMUX_FUNCTION(vc_g2d0_debug_out_on, 6, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(vc_g2d0_debug_out_off, 3, ESWIN_PINMUX_MASK), + + //func7 + ESWIN_PINMUX_FUNCTION(ftm_test_out_on, 7, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(ftm_test_out_off, 3, ESWIN_PINMUX_MASK), + + #else + ESWIN_PINMUX_FUNCTION(jtag1, 0, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(jtag2, 0, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio7, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio8, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio9, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio17, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio64, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio65, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(gpio66, 2, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(vc_g2d0_debug_out, 6, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(ftm_test_out, 7, ESWIN_PINMUX_MASK), + #endif + //func0 ESWIN_PINMUX_FUNCTION(sdio0, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(sdio1, 0, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(por_sel, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(jtag0, 0, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(jtag1, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(spi2_cs, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(pcie, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(hdmi, 0, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(jtag2, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(rgmii0, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(i2s0, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(i2s1, 0, ESWIN_PINMUX_MASK), @@ -896,18 +1131,20 @@ static const struct eswin_function_desc eswin_pinmux_functions[] = { ESWIN_PINMUX_FUNCTION(mipi_csi1, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(mipi_csi2, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(mipi_csi3, 0, ESWIN_PINMUX_MASK), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC ESWIN_PINMUX_FUNCTION(mipi_csi4, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(mipi_csi5, 0, ESWIN_PINMUX_MASK), + #endif 
ESWIN_PINMUX_FUNCTION(spi3, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(i2c8, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(s_mode, 0, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(pinmux_ddr_refclk_sel, 0, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(ddr_refclk_sel, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(spi0, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(i2c10, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(i2c11, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(boot_sel, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(lpddr_ref_clk, 0, ESWIN_PINMUX_MASK), - + //func1 ESWIN_PINMUX_FUNCTION(spi2_clk, 1, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(spi2_d0, 1, ESWIN_PINMUX_MASK), @@ -931,9 +1168,6 @@ static const struct eswin_function_desc eswin_pinmux_functions[] = { ESWIN_PINMUX_FUNCTION(gpio4, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio5, 0, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio6, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio7, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio8, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio9, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio10, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio11, 0, ESWIN_PINMUX_MASK), @@ -942,7 +1176,6 @@ static const struct eswin_function_desc eswin_pinmux_functions[] = { ESWIN_PINMUX_FUNCTION(gpio14, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio15, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio16, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio17, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio18, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio19, 2, ESWIN_PINMUX_MASK), @@ -994,10 +1227,9 @@ static const struct eswin_function_desc eswin_pinmux_functions[] = { ESWIN_PINMUX_FUNCTION(gpio61, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio62, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio63, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio64, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio65, 2, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(gpio66, 2, ESWIN_PINMUX_MASK), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC ESWIN_PINMUX_FUNCTION(gpio67, 2, ESWIN_PINMUX_MASK), + #endif ESWIN_PINMUX_FUNCTION(gpio68, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio69, 2, ESWIN_PINMUX_MASK), @@ -1014,12 +1246,14 @@ static const struct eswin_function_desc eswin_pinmux_functions[] = { ESWIN_PINMUX_FUNCTION(gpio80, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio81, 2, ESWIN_PINMUX_MASK), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC ESWIN_PINMUX_FUNCTION(gpio82, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio83, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio84, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio85, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio86, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio87, 2, ESWIN_PINMUX_MASK), + #endif ESWIN_PINMUX_FUNCTION(gpio88, 2, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(gpio89, 2, ESWIN_PINMUX_MASK), @@ -1058,11 +1292,22 @@ static const struct eswin_function_desc eswin_pinmux_functions[] = { ESWIN_PINMUX_FUNCTION(csi_parity_error, 6, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(csi_dtb_out, 6, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(csi_phy_sel, 6, ESWIN_PINMUX_MASK), - ESWIN_PINMUX_FUNCTION(vc_g2d0_debug_out, 6, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(vc_g2d1_debug_out, 6, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(sata_mpll_clk, 6, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(sata_ref_repeat_clk_m, 6, ESWIN_PINMUX_MASK), ESWIN_PINMUX_FUNCTION(sata_ref_repeat_clk_p, 6, ESWIN_PINMUX_MASK), + + //func7 + ESWIN_PINMUX_FUNCTION(clk_d2d_test_out, 7, 
ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(clk_spll0_test_out, 7, ESWIN_PINMUX_MASK), + #ifndef CONFIG_ARCH_ESWIN_EIC7702_SOC + ESWIN_PINMUX_FUNCTION(clk_spll1_test_out, 7, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(clk_spll2_test_out, 7, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(clk_vpll_test_out, 7, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(clk_apll_test_out, 7, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(clk_cpll_test_out, 7, ESWIN_PINMUX_MASK), + ESWIN_PINMUX_FUNCTION(clk_pll_lpddr_test_out, 7, ESWIN_PINMUX_MASK), + #endif }; /* pinctrl */ @@ -1245,8 +1490,6 @@ int eswin_pinconf_cfg_set(struct pinctrl_dev *pctldev, param); continue; } - - } writel(reg,pctrl->base + 4*pin); @@ -1332,7 +1575,7 @@ static int eswin_pinctrl_remove(struct platform_device *platform_dev) } static const struct of_device_id eswin_pinctrl_of_match[] = { - { .compatible = "eswin,eic7700-pinctrl" }, + { .compatible = "eswin,eic7x-pinctrl" }, { } }; static struct platform_driver eswin_pinctrl_driver = { diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 512ca2e53f1b..1c469ddbe464 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -800,7 +800,12 @@ config REGULATOR_MPQ8785 help This driver provides support for the voltage regulators on the eswin evb. - +config REGULATOR_ES5340 + tristate "einno,es5340" + depends on I2C && OF + help + This driver provides support for the voltage regulators on the + eswin evb. config REGULATOR_MT6311 tristate "MediaTek MT6311 PMIC" depends on I2C diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index a0b29a60d7f8..c182cbff6548 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -196,4 +196,5 @@ obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o obj-$(CONFIG_REGULATOR_MPQ8785) += mpq8785.o +obj-$(CONFIG_REGULATOR_ES5340) += es5340.o ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG diff --git a/drivers/regulator/es5340.c b/drivers/regulator/es5340.c new file mode 100644 index 000000000000..30696296b210 --- /dev/null +++ b/drivers/regulator/es5340.c @@ -0,0 +1,871 @@ + +// SPDX-License-Identifier: GPL-2.0 +/* + * eswin Specific Glue layer + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * Authors: Yang Wei + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ES5340_VOLT_DENOMINATOR 604 +#define ES5340_VOLT_NUMERATOR 60 +#define ES5340_INIT_VOLT 1050 + +#define ES5340_CMD_OPERATION 0x1 +#define ES5340_CMD_VOUT_COMMAND 0x21 +#define ES5340_CMD_VOUT_OV_WARN_LIMIT 0x42 +#define ES5340_CMD_IOUT_OC_WARN_LIMIT 0x4A +#define ES5340_CMD_OT_WARN_LIMIT 0x51 +#define ES5340_CMD_VIN_OV_WARN_LIMIT 0x57 +#define ES5340_CMD_STATUS_BYTE 0x78 +#define ES5340_CMD_STATUS_WORD 0x79 +#define ES5340_CMD_READ_VIN 0x88 +#define ES5340_CMD_READ_VOUT 0x8B +#define ES5340_CMD_READ_IOUT 0x8C +#define ES5340_CMD_READ_TEMPERATURE 0x8D + +#define ES5340_LABEL_CNT 4 + +struct ES5340_DRIVER_DATA +{ + struct regulator_dev *rdev; + struct regulator_desc *dev_desc; + struct i2c_client *client; + struct mutex config_lock; + char es5340_label[ES5340_LABEL_CNT][20]; +}; + +#define ES5340_MASK_OPERATION_ENABLE 0X80 + +#define ES5340_MASK_OV_VOLT 0x3FFF +#define ES5340_MASK_VOUT_VALUE 0xFF +#define ES5340_MASK_IOUT 0x3FF +#define ES5340_MASK_TOUT 0xFF +#define ES5340_VOLTE_IN_SENSE_LSB 8 +#define ES5340_CURRENT_LSB 31 +#define ES5340_TEMPERATURE_LSB 1000 /*1mC*/ + +static struct of_regulator_match es5340_matches[] = { + { + .name = "npu_svcc", + }, +}; + +static inline s32 es5340_str2ul(const char *buf, u32 *value) +{ + unsigned long cache = 0; + int ret = 0; + + if (NULL == strstr(buf, "0x")) + { + ret = kstrtoul(buf, 10, &cache); + } + else + { + ret = kstrtoul(buf, 16, &cache); + } + *value = cache; + + return ret; +} + +static u8 es5340_read_byte(struct ES5340_DRIVER_DATA *data, u8 command) +{ + int ret = 0; + mutex_lock(&data->config_lock); + ret = i2c_smbus_read_byte_data(data->client, command); + mutex_unlock(&data->config_lock); + if (ret < 0) + { + dev_err(&data->client->dev, "get command:0x%x value error:%d\n", command, + ret); + return 0xff; + } + return (u8)ret; +} + +static s32 es5340_write_byte(struct ES5340_DRIVER_DATA *data, u8 command, + u8 val) +{ + int ret = 0; + mutex_lock(&data->config_lock); + ret = i2c_smbus_write_byte_data(data->client, command, val); + mutex_unlock(&data->config_lock); + if (ret < 0) + { + dev_err(&data->client->dev, "set command:0x%x value:0x%x error:%d\n", + command, val, ret); + } + return ret; +} + +static s32 es5340_update_byte(struct ES5340_DRIVER_DATA *data, u8 command, + u8 mask, u8 val) +{ + u8 old_value = 0; + u8 new_value = 0; + if (0 != (~mask & val)) + { + dev_err(&data->client->dev, "command:0x%x,input:0x%x outrange mask:0x%x\n", + command, val, mask); + return -EINVAL; + } + old_value = es5340_read_byte(data, command); + new_value = ~mask & old_value; + new_value = new_value | val; + return es5340_write_byte(data, command, new_value); +} + +static u16 es5340_read_word(struct ES5340_DRIVER_DATA *data, u8 command) +{ + int ret = 0; + mutex_lock(&data->config_lock); + ret = i2c_smbus_read_word_data(data->client, command); + mutex_unlock(&data->config_lock); + if (ret < 0) + { + dev_err(&data->client->dev, "get command:0x%x value error:%d\n", command, + ret); + return 0xffff; + } + return (u16)ret; +} + +static u16 es5340_read_mask_word(struct ES5340_DRIVER_DATA *data, u8 command, + u16 mask) +{ + u16 ret = es5340_read_word(data, command); + return (ret & mask); +} + +static s32 es5340_write_word(struct ES5340_DRIVER_DATA *data, u8 command, + u16 val) +{ + int ret = 0; + mutex_lock(&data->config_lock); + ret = 
i2c_smbus_write_word_data(data->client, command, val); + mutex_unlock(&data->config_lock); + if (ret < 0) + { + dev_err(&data->client->dev, "set command:0x%x value:0x%x error:%d\n", + command, val, ret); + } + return ret; +} + +static s32 es5340_update_word(struct ES5340_DRIVER_DATA *data, u8 command, + u16 mask, u16 val) +{ + u16 old_value = 0; + u16 new_value = 0; + if (0 != (~mask & val)) + { + dev_err(&data->client->dev, "command:0x%x,input:0x%x outrange mask:0x%x\n", + command, val, mask); + return -EINVAL; + } + old_value = es5340_read_word(data, command); + new_value = ~mask & old_value; + new_value = new_value | val; + return es5340_write_word(data, command, new_value); +} + +static int es5340_get_enable(struct ES5340_DRIVER_DATA *data) +{ + u8 cache = 0; + + cache = es5340_read_byte(data, ES5340_CMD_OPERATION); + + return ((cache >> 7) & 0x1); +} + +static const struct hwmon_channel_info *es5340_info[] = { + HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MAX_ALARM | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_MAX | HWMON_I_MAX_ALARM | HWMON_I_LABEL), + HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_MAX_ALARM | HWMON_C_LABEL), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_LABEL), + NULL}; + +static umode_t es5340_is_visible(const void *_data, + enum hwmon_sensor_types type, u32 attr, + int channel) +{ + switch (type) + { + case hwmon_in: + switch (attr) + { + case hwmon_in_input: + case hwmon_in_label: + case hwmon_in_max_alarm: + return 0444; + case hwmon_in_enable: + case hwmon_in_max: + return 0644; + } + + break; + case hwmon_curr: + switch (attr) + { + case hwmon_curr_input: + case hwmon_curr_max_alarm: + case hwmon_curr_label: + return 0444; + case hwmon_curr_max: + return 0644; + } + break; + case hwmon_temp: + switch (attr) + { + case hwmon_temp_input: + case hwmon_temp_label: + case hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_max: + return 0644; + } + break; + + default: + break; + } + return 0; +} + +static int es5340_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + u32 get_value = 0; + + switch (type) + { + case hwmon_in: + switch (attr) + { + case hwmon_in_input: + if (channel == 0) + { + get_value = es5340_read_word(data, ES5340_CMD_READ_VIN); + *val = get_value * ES5340_VOLTE_IN_SENSE_LSB; + } + else if (channel == 1) + { + *val = es5340_read_word(data, ES5340_CMD_READ_VOUT); + } + else + { + dev_err(dev, "not support channel%d\n", channel); + } + break; + case hwmon_in_max_alarm: + get_value = es5340_read_word(data, ES5340_CMD_STATUS_WORD); + if (channel == 0) + { + *val = ((get_value >> 13) & 0x1); + } + else if (channel == 1) + { + *val = ((get_value >> 15) & 0x1); + } + else + { + dev_err(dev, "not support channel%d\n", channel); + } + break; + case hwmon_in_enable: + *val = es5340_get_enable(data); + break; + case hwmon_in_max: + if (channel == 0) + { + get_value = es5340_read_mask_word(data, ES5340_CMD_VIN_OV_WARN_LIMIT, ES5340_MASK_OV_VOLT); + *val = get_value * ES5340_VOLTE_IN_SENSE_LSB; + } + else if (channel == 1) + { + *val = es5340_read_mask_word(data, ES5340_CMD_VOUT_OV_WARN_LIMIT, ES5340_MASK_OV_VOLT); + } + else + { + dev_err(dev, "not support channel%d\n", channel); + } + break; + } + break; + case hwmon_curr: + switch (attr) + { + case hwmon_curr_input: + get_value = es5340_read_mask_word(data, 
ES5340_CMD_READ_IOUT, ES5340_MASK_IOUT); + *val = get_value * ES5340_CURRENT_LSB; + break; + case hwmon_curr_max_alarm: + get_value = es5340_read_word(data, ES5340_CMD_STATUS_WORD); + if (channel == 0) + { + *val = ((get_value >> 14) & 0x1); + } + break; + + case hwmon_curr_max: + get_value = es5340_read_mask_word(data, ES5340_CMD_IOUT_OC_WARN_LIMIT, ES5340_MASK_IOUT); + *val = get_value * ES5340_CURRENT_LSB; + break; + } + break; + case hwmon_temp: + switch (attr) + { + case hwmon_temp_input: + *val = es5340_read_mask_word(data, ES5340_CMD_READ_TEMPERATURE, 0x1ff); + *val *= ES5340_TEMPERATURE_LSB; + break; + case hwmon_temp_crit_alarm: + get_value = es5340_read_byte(data, ES5340_CMD_STATUS_BYTE); //????? + if (channel == 0) + { + *val = ((get_value >> 2) & 0x1); + } + break; + case hwmon_temp_max: + *val = es5340_read_mask_word(data, ES5340_CMD_OT_WARN_LIMIT, 0xff); + *val *= ES5340_TEMPERATURE_LSB; + break; + } + break; + + default: + break; + } + return 0; +} +static int es5340_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + switch (type) + { + case hwmon_in: + switch (attr) + { + case hwmon_in_label: + if (channel == 0) + { + *str = data->es5340_label[0]; + } + else if (channel == 1) + { + *str = data->es5340_label[1]; + } + else + { + dev_err(dev, "not support channel%d\n", channel); + } + break; + } + break; + case hwmon_curr: + switch (attr) + { + case hwmon_curr_label: + *str = data->es5340_label[2]; + break; + } + break; + case hwmon_temp: + switch (attr) + { + + case hwmon_temp_label: + *str = data->es5340_label[3]; + break; + } + break; + + default: + break; + } + return 0; +} + +static int es5340_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + + int ret = 0; + switch (type) + { + case hwmon_in: + switch (attr) + { + case hwmon_in_enable: + es5340_update_byte(data, ES5340_CMD_OPERATION, + ES5340_MASK_OPERATION_ENABLE, (u8)(val << 7)); + break; + case hwmon_in_max: + if (channel == 0) + { + ret = es5340_update_word(data, ES5340_CMD_VIN_OV_WARN_LIMIT, ES5340_MASK_OV_VOLT, (u16)(val / ES5340_VOLTE_IN_SENSE_LSB)); + } + else if (channel == 1) + { + ret = es5340_update_word(data, ES5340_CMD_VOUT_OV_WARN_LIMIT, ES5340_MASK_OV_VOLT, (u16)(val)); + } + else + { + dev_err(dev, "not support channel%d\n", channel); + } + } + break; + case hwmon_curr: + switch (attr) + { + + case hwmon_curr_max: + ret = es5340_update_word(data, ES5340_CMD_IOUT_OC_WARN_LIMIT, + ES5340_MASK_IOUT, (u16)(val / ES5340_CURRENT_LSB)); + break; + } + break; + case hwmon_temp: + switch (attr) + { + case hwmon_temp_max: + ret = es5340_update_word(data, ES5340_CMD_OT_WARN_LIMIT, + ES5340_MASK_TOUT, (u16)(val / ES5340_TEMPERATURE_LSB)); + break; + } + break; + + default: + break; + } + return ret; +} +static const struct hwmon_ops pac193x_hwmon_ops = { + .is_visible = es5340_is_visible, + .read = es5340_read, + .write = es5340_write, + .read_string = es5340_read_string, +}; + +static struct hwmon_chip_info es5340_chip_info = { + .ops = &pac193x_hwmon_ops, + .info = es5340_info, + +}; + +static u8 es5340_volt2reg(struct ES5340_DRIVER_DATA *data, u32 volt_mv) +{ + s32 surplus_volt = (s32)ES5340_INIT_VOLT - (s32)volt_mv; + s32 cache = 0; + if (surplus_volt >= 0) + { + cache = 
(surplus_volt * ES5340_VOLT_NUMERATOR + ES5340_VOLT_DENOMINATOR - 1) / ES5340_VOLT_DENOMINATOR; + } + else + { + cache = (surplus_volt * ES5340_VOLT_NUMERATOR - ES5340_VOLT_DENOMINATOR + 1) / ES5340_VOLT_DENOMINATOR; + } + + return (u8)cache; +} + +static u32 es5340_reg2volt(struct ES5340_DRIVER_DATA *data, u8 reg_value) +{ + s32 surplus_volt = (s8)reg_value * ES5340_VOLT_DENOMINATOR / ES5340_VOLT_NUMERATOR; + s32 cache = (s32)ES5340_INIT_VOLT - surplus_volt; + + return (u32)cache; +} + +static u32 es5340_get_vout(struct ES5340_DRIVER_DATA *data) +{ + u32 get_value = es5340_read_mask_word(data, ES5340_CMD_VOUT_COMMAND, + ES5340_MASK_VOUT_VALUE); + return es5340_reg2volt(data, (u8)get_value); +} + +static s32 es5340_set_vout(struct ES5340_DRIVER_DATA *data, u32 volt_mv) +{ + u16 new_value = (u8)es5340_volt2reg(data, volt_mv); + const struct regulation_constraints *constraints = &es5340_matches[0].init_data->constraints; + + if ((volt_mv > (constraints->max_uV / 1000)) || (volt_mv < (constraints->min_uV / 1000))) + { + dev_err(&data->rdev->dev, "max:%dmV,min:%dmV,now:%dmV\n", + (constraints->max_uV / 1000), constraints->min_uV / 1000, volt_mv); + return -EINVAL; + } + + return es5340_update_word(data, ES5340_CMD_VOUT_COMMAND, + ES5340_MASK_VOUT_VALUE, (new_value)); +} + +static ssize_t es5340_vout_show(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(d); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + + return sysfs_emit(buf, "%u", es5340_get_vout(data)); +} +static ssize_t es5340_vout_store(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(d); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + u32 volt_value = 0; + int ret = 0; + ret = es5340_str2ul(buf, &volt_value); + + if (ret) + { + return ret; + } + ret = es5340_set_vout(data, volt_value); + if (0 != ret) + { + return ret; + } + return count; +} +DEVICE_ATTR(es5340_vout, 0600, es5340_vout_show, es5340_vout_store); + +static struct attribute *es5340_attrs[] = { + &dev_attr_es5340_vout.attr, + NULL}; + +ATTRIBUTE_GROUPS(es5340); + +static struct linear_range es5340_ext_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 320, 3125), +}; + +/** + * es5340_set_voltage_sel - set_voltage_sel for users + * + * @rdev: regulator to operate on + * @sel: Selector to set + */ +static s32 es5340_set_voltage_sel(struct regulator_dev *rdev, + unsigned selector) +{ + struct device *dev = &rdev->dev; + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + u32 new_value = 0; + + if (selector > es5340_ext_ranges->max_sel) + { + dev_err(dev, "selector:%u out of rang 0~%u\n", selector, + es5340_ext_ranges->max_sel); + return -EINVAL; + } + + new_value = es5340_ext_ranges->min + es5340_ext_ranges->step * selector; + + dev_dbg(dev, "%s_volt:%duV,selector:%u,step:%u,min:%u\n", __FUNCTION__, + new_value, selector, es5340_ext_ranges->step, + es5340_ext_ranges->min); + + es5340_set_vout(data, new_value / 1000); + + return 0; +} + +/** + * es5340_get_voltage_sel - get_voltage_sel for users + * + * @rdev: regulator to operate on + */ +static s32 es5340_get_voltage_sel(struct regulator_dev *rdev) +{ + s32 index = 0; + struct device *dev = &rdev->dev; + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + u32 volt_value = 0; + u32 diff_volt = 0; + + 
volt_value = es5340_get_vout(data); + volt_value *= 1000; + + if (volt_value >= es5340_ext_ranges->min) + { + diff_volt = volt_value - es5340_ext_ranges->min; + } + else + { + diff_volt = 0; + } + dev_dbg(dev, "%s_diff_volt:%duV,volt:%u,min:%u\n", __FUNCTION__, diff_volt, + volt_value, es5340_ext_ranges->min); + index = DIV_ROUND_CLOSEST(diff_volt, es5340_ext_ranges->step); + if (index > es5340_ext_ranges->max_sel) + { + dev_err(dev, "volt:%duV out legal range\n", volt_value); + } + + dev_dbg(dev, "%s_diff_volt:%duV,step:%d,index:%d\n", __FUNCTION__, diff_volt, + es5340_ext_ranges->step, index); + return index; +} + +int es5340_regulator_enable(struct regulator_dev *rdev) +{ + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + dev_dbg(&rdev->dev, "%s.%d\n", __FUNCTION__, __LINE__); + return es5340_update_byte(data, ES5340_CMD_OPERATION, + ES5340_MASK_OPERATION_ENABLE, + ES5340_MASK_OPERATION_ENABLE); +} + +int es5340_regulator_disable(struct regulator_dev *rdev) +{ + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + dev_dbg(&rdev->dev, "%s.%d\n", __FUNCTION__, __LINE__); + return es5340_update_byte(data, ES5340_CMD_OPERATION, + ES5340_MASK_OPERATION_ENABLE, 0); +} +int es5340_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct i2c_client *client = to_i2c_client(rdev->dev.parent); + struct ES5340_DRIVER_DATA *data = i2c_get_clientdata(client); + dev_dbg(&rdev->dev, "%s.%d\n", __FUNCTION__, __LINE__); + return es5340_get_enable(data); +} + +static struct regulator_ops es5340_core_ops = { + + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, + + /* get/set regulator voltage */ + /* Only one of each(set_voltage&&set_voltage_sel) should be implemented */ + /* .set_voltage = es5340_set_voltage, */ + .set_voltage_sel = es5340_set_voltage_sel, + + /* Only one of each(get_voltage&&get_voltage_sel) should be implemented */ + /* .get_voltage=es5340_get_voltage, */ + .get_voltage_sel = es5340_get_voltage_sel, + + /* enable/disable regulator */ + .enable = es5340_regulator_enable, + .disable = es5340_regulator_disable, + .is_enabled = es5340_regulator_is_enabled, + +}; +static struct regulator_desc es5340_regulator_desc = { + .name = "NPUVDD", + .type = REGULATOR_VOLTAGE, + .n_voltages = 321, + .ops = &es5340_core_ops, + .linear_ranges = es5340_ext_ranges, + .n_linear_ranges = ARRAY_SIZE(es5340_ext_ranges), + .owner = THIS_MODULE, +}; + +static s32 es5340_init_data(struct ES5340_DRIVER_DATA *data, + const struct regulation_constraints *constraints, u32 default_voltage) +{ + u8 value = 0; + s32 ret = 0; + u16 new_value = 0; + struct device *dev = &data->client->dev; + + dev_info(dev, + "min_uV:%d,max_uV:%d,uV_offset:%d,min_uA:%d,max_uA:%d," + "over_voltage_limits:%d,%d,%d\n", + constraints->min_uV, constraints->max_uV, constraints->uV_offset, + constraints->min_uA, constraints->max_uA, + constraints->over_voltage_limits.err, + constraints->over_voltage_limits.prot, + constraints->over_voltage_limits.warn); + es5340_ext_ranges->min = constraints->min_uV; + es5340_ext_ranges->min_sel = 0; + es5340_ext_ranges->step = (ES5340_VOLT_DENOMINATOR / ES5340_VOLT_NUMERATOR)*1000; + es5340_ext_ranges->max_sel = (constraints->max_uV - constraints->min_uV) / es5340_ext_ranges->step + 1; + es5340_regulator_desc.n_voltages = es5340_ext_ranges->max_sel; + 
dev_dbg(dev,"min:%duV,max:%duV,step:%duV,max_sel:%d\n", es5340_ext_ranges->min, constraints->max_uV, es5340_ext_ranges->step, es5340_ext_ranges->max_sel); + + es5340_set_vout(data, default_voltage / 1000); + return ret; +} + +static s32 es5340_probe(struct i2c_client *client) +{ + struct ES5340_DRIVER_DATA *data = NULL; + s32 ret = 0; + s32 regulator_cnt = 0; + u32 default_voltage = 0; + struct device *hwmon_dev; + struct regulator_config config = {}; + struct device *dev = &client->dev; + struct device_node *np, *parent; + const char *output_names[4]; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + { + dev_err(dev, "not support smbus\n"); + return -EIO; + } + data = devm_kzalloc(dev, sizeof(struct ES5340_DRIVER_DATA), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->config_lock); + data->client = client; + i2c_set_clientdata(client, data); + /* Get the device (PMIC) node */ + np = of_node_get(dev->of_node); + if (!np) + return -EINVAL; + + /* Get 'regulators' subnode */ + parent = of_get_child_by_name(np, "regulators"); + if (!parent) + { + dev_err(dev, "regulators node not found\n"); + return -EINVAL; + } + ret = of_property_read_u32(np, "eswin,regulator_default-microvolt", &default_voltage); + if (ret) + { + default_voltage = 900000; + } + of_property_read_string_array(np, "eswin,regulator_label", output_names, 4); + if (NULL != output_names[0]) + { + strcpy(data->es5340_label[0], output_names[0]); + } + if (NULL != output_names[1]) + { + strcpy(data->es5340_label[1], output_names[1]); + } + if (NULL != output_names[2]) + { + strcpy(data->es5340_label[2], output_names[2]); + } + if (NULL != output_names[3]) + { + strcpy(data->es5340_label[3], output_names[3]); + } + + dev_dbg(dev, "default_voltage:%u,%s,%s,%s,%s\n", default_voltage, data->es5340_label[0], + data->es5340_label[1], data->es5340_label[2], data->es5340_label[3]); + /* fill isl6271a_matches array */ + regulator_cnt = of_regulator_match(dev, parent, es5340_matches, ARRAY_SIZE(es5340_matches)); + of_node_put(parent); + if (regulator_cnt != 1) + { + dev_err(dev, "Error parsing regulator init data: %d\n", regulator_cnt); + return regulator_cnt; + } + + /* Fetched from device tree */ + config.init_data = es5340_matches[0].init_data; + config.dev = dev; + config.of_node = es5340_matches[0].of_node; + /* config.ena_gpio = -EINVAL; */ + ret = es5340_init_data(data, &config.init_data->constraints, default_voltage); + if (0 != ret) + { + dev_err(dev, "init es5340 error\n"); + return -EIO; + } + data->rdev = devm_regulator_register(dev, &es5340_regulator_desc, &config); + if (IS_ERR(data->rdev)) + { + dev_err(dev, "failed to register %s\n", es5340_regulator_desc.name); + } + hwmon_dev = devm_hwmon_device_register_with_info( + dev, client->name, data, &es5340_chip_info, es5340_groups); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + dev_dbg(dev, "es5340_probe\n"); + + return 0; +} + +static void es5340_remove(struct i2c_client *client) +{ + dev_dbg(&client->dev, "es5340_remove\n"); +} + +static s32 es5340_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + dev_dbg(&client->dev, "es5340_detect\n"); + return 0; +} + +static const struct i2c_device_id es5340_id[] = {{"es5340", 0}, {}}; +MODULE_DEVICE_TABLE(i2c, es5340_id); + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, 0x60, + I2C_CLIENT_END}; + +static struct i2c_driver es5340_driver = { + .class = I2C_CLASS_HWMON, + .driver = + { + .name = "es5340", + }, + .probe = 
es5340_probe, + .remove = es5340_remove, + .id_table = es5340_id, + .detect = es5340_detect, + .address_list = normal_i2c, +}; + +module_i2c_driver(es5340_driver); + +MODULE_AUTHOR("Yang Wei "); +MODULE_DESCRIPTION("es5340 driver"); +MODULE_LICENSE("GPL"); \ No newline at end of file diff --git a/drivers/soc/eswin/Kconfig b/drivers/soc/eswin/Kconfig index 256bfa5d85eb..ed45c8ff1d8b 100644 --- a/drivers/soc/eswin/Kconfig +++ b/drivers/soc/eswin/Kconfig @@ -19,4 +19,10 @@ config ESWIN_DSP_SUBSYS If unsure, say N. +config EIC7X_D2D + bool "Eswin EIC7X D2D interrupt status" + depends on ARCH_ESWIN_EIC7702_SOC + help + This is interrputs status of D2D which includes error interrupts. + endif diff --git a/drivers/soc/eswin/Makefile b/drivers/soc/eswin/Makefile index 391ab603d23a..07df48f987ec 100644 --- a/drivers/soc/eswin/Makefile +++ b/drivers/soc/eswin/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_ESWIN_KHANDLE) += eswin-khandle.o obj-$(CONFIG_ESWIN_DSP_SUBSYS) += dsp_subsys.o obj-y += ai_driver/ -obj-$(CONFIG_SUSPEND) += pm_eic770x.o \ No newline at end of file +obj-$(CONFIG_SUSPEND) += pm_eic770x.o +obj-$(CONFIG_EIC7X_D2D) += d2d.o \ No newline at end of file diff --git a/drivers/soc/eswin/ai_driver/common/devices/mailbox_regs.h b/drivers/soc/eswin/ai_driver/common/devices/mailbox_regs.h index 98558ecf0af5..a92f45669a4e 100644 --- a/drivers/soc/eswin/ai_driver/common/devices/mailbox_regs.h +++ b/drivers/soc/eswin/ai_driver/common/devices/mailbox_regs.h @@ -44,8 +44,11 @@ * @brief ESWIN_MAILBOX_REG_BASE 0x50a00000 * */ +#if defined(DIE1) && DIE1 +#define MBOX_REG_BASE 0x70a00000 +#else #define MBOX_REG_BASE 0x50a00000 - +#endif /** * @brief MBOX_REG_OFFSET 0x10000 * @@ -56,37 +59,37 @@ * @brief ESWIN_MAILBOX_U84_TO_NPU_0_REG_BASE (mailbox 4) * */ -#define ESWIN_MAILBOX_U84_TO_NPU_0_REG_BASE 0x50a40000 +#define ESWIN_MAILBOX_U84_TO_NPU_0_REG_BASE (MBOX_REG_BASE + 0x40000UL) /** * @brief ESWIN_MAILBOX_NPU_0_TO_U84_REG_BASE (mailbox 5) * */ -#define ESWIN_MAILBOX_NPU_0_TO_U84_REG_BASE 0x50a50000 +#define ESWIN_MAILBOX_NPU_0_TO_U84_REG_BASE (MBOX_REG_BASE + 0x50000UL) /** * @brief ESWIN_MAILBOX_U84_TO_DSP_0_REG_BASE (mailbox 8) * */ -#define ESWIN_MAILBOX_U84_TO_DSP_0_REG_BASE 0x50a80000UL +#define ESWIN_MAILBOX_U84_TO_DSP_0_REG_BASE (MBOX_REG_BASE + 0x80000UL) /** * @brief ESWIN_MAILBOX_U84_TO_DSP_1_REG_BASE (mailbox 10) * */ -#define ESWIN_MAILBOX_U84_TO_DSP_1_REG_BASE 0x50aa0000UL +#define ESWIN_MAILBOX_U84_TO_DSP_1_REG_BASE (MBOX_REG_BASE + 0xa0000UL) /** * @brief ESWIN_MAILBOX_U84_TO_DSP_2_REG_BASE (mailbox 12) * */ -#define ESWIN_MAILBOX_U84_TO_DSP_2_REG_BASE 0x50ac0000UL +#define ESWIN_MAILBOX_U84_TO_DSP_2_REG_BASE (MBOX_REG_BASE + 0xc0000UL) /** * @brief ESWIN_MAILBOX_U84_TO_DSP_3_REG_BASE (mailbox 14) * */ -#define ESWIN_MAILBOX_U84_TO_DSP_3_REG_BASE 0x50ae0000UL +#define ESWIN_MAILBOX_U84_TO_DSP_3_REG_BASE (MBOX_REG_BASE + 0xe0000UL) /** * @brief MBOX_NPU_WR_DATA0_OFFSET @@ -202,10 +205,12 @@ #define ESWIN_MAILBOX_NPU_LOCK_BIT BIT3 +#ifndef __KERNEL__ static u32 MAILBOX_E31_TO_DSP_REG[] = {ESWIN_MAILBOX_U84_TO_DSP_0_REG_BASE, ESWIN_MAILBOX_U84_TO_DSP_1_REG_BASE, ESWIN_MAILBOX_U84_TO_DSP_2_REG_BASE, ESWIN_MAILBOX_U84_TO_DSP_3_REG_BASE}; static u32 MAILBOX_E31_TO_DSP_INT[] = {MBOX_INT_TO_DSP0, MBOX_INT_TO_DSP1, MBOX_INT_TO_DSP2, MBOX_INT_TO_DSP3}; +#endif #define MBOX_INT_BIT_BASE 5 diff --git a/drivers/soc/eswin/ai_driver/common/devices/npu_base_regs.h b/drivers/soc/eswin/ai_driver/common/devices/npu_base_regs.h index cea2c9335305..bfbbe244a712 100644 --- 
a/drivers/soc/eswin/ai_driver/common/devices/npu_base_regs.h +++ b/drivers/soc/eswin/ai_driver/common/devices/npu_base_regs.h @@ -85,13 +85,25 @@ typedef enum type_NPU_MODULE_SEL_ID_E { } NPU_MODULE_SEL_ID_E; /* NPU system port base address */ +#if defined(DIE1) && DIE1 +#define NPU_SYS_BASE_ADDR 0x71C00000 +#else #define NPU_SYS_BASE_ADDR 0x51C00000 +#endif /* NPU configuration base address */ #define NPU_CFG_BASE_ADDR 0x51828000 /* NPU PP port base address */ #define NPU_PP_BASE_ADDR 0x20000000 +/*NPU Die Register Offset*/ +#define NPU_DIE_REG_OFFSET 0x20000000 + +#if defined(DIE1) && DIE1 +#define NPU_SRAM_BASE_ADDR 0x79000000 +#define SYS_CON_BASE_ADDR 0x71810000 +#define PMC_BASE_ADDR 0x71800000 +#else /* NPU sram base address */ #define NPU_SRAM_BASE_ADDR 0x59000000 @@ -100,6 +112,7 @@ typedef enum type_NPU_MODULE_SEL_ID_E { /* Power domain management control base address */ #define PMC_BASE_ADDR 0x51800000 +#endif #define PMC_REG_MAX 0x9000 @@ -307,9 +320,10 @@ typedef enum type_NPU_MODULE_SEL_ID_E { * @brief Assert the register access address is legal. * */ -#define ASSERT_REG_ADDR(addr) \ - ASSERT(((addr)-NPU_HW_MODULE_BASE_ADDR) < NPU_HW_MODULE_SIZE || ((addr)-SYS_CON_BASE_ADDR) < CON_REG_MAX || \ - ((addr)-PMC_BASE_ADDR) < PMC_REG_MAX || ((addr)-ESWIN_MAILBOX_NPU_0_TO_U84_REG_BASE) < MBOX_NPU_MAX_SIZE) +#define ASSERT_REG_ADDR(addr) \ + ASSERT(((addr) - NPU_HW_MODULE_BASE_ADDR) < NPU_HW_MODULE_SIZE || ((addr) - SYS_CON_BASE_ADDR) < CON_REG_MAX || \ + ((addr) - PMC_BASE_ADDR) < PMC_REG_MAX || \ + ((addr) - ESWIN_MAILBOX_NPU_0_TO_U84_REG_BASE) < MBOX_NPU_MAX_SIZE) /** * @brief The ssid of write and read operation. diff --git a/drivers/soc/eswin/ai_driver/common/hetero_arch.h b/drivers/soc/eswin/ai_driver/common/hetero_arch.h index e577200853af..deb46ac3d38f 100644 --- a/drivers/soc/eswin/ai_driver/common/hetero_arch.h +++ b/drivers/soc/eswin/ai_driver/common/hetero_arch.h @@ -125,7 +125,11 @@ static inline u32 _ffs(u32 r0) return r1; } +#if defined(DIE1) && DIE1 +#define TIMER0_BASE 0x71840000 +#else #define TIMER0_BASE 0x51840000 +#endif #define PTS_CHAN 7 #define PTS_END_CYCLE (*(u32 *)(TIMER0_BASE + 0x0 + PTS_CHAN * 0x14)) #define PTS_START_CYCLE (*(u32 *)(TIMER0_BASE + 0x4 + PTS_CHAN * 0x14)) diff --git a/drivers/soc/eswin/ai_driver/common/hetero_ipc.h b/drivers/soc/eswin/ai_driver/common/hetero_ipc.h index c255d5cd371b..667e13996be3 100644 --- a/drivers/soc/eswin/ai_driver/common/hetero_ipc.h +++ b/drivers/soc/eswin/ai_driver/common/hetero_ipc.h @@ -444,6 +444,7 @@ static void messagebox_send(u16 channel_id, msg_payload_t payload) reg_write((size_t)(mbox_base + MBOX_NPU_WR_DATA1_OFFSET), MBOX_WRITE_FIFO_BIT); } +#ifndef __KERNEL__ /** * @brief Send a mailbox message payload to DSP. This function should be used * by E31. @@ -488,6 +489,7 @@ static void messagebox_send_dsp(u32 op_type, u64 payload) // clear lock bit reg_write(mbox_base + MBOX_NPU_WR_LOCK, 0); } +#endif /** * @brief Receives data from a mailbox message. 
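
[Reviewer note, not part of the patch: the mailbox_regs.h / npu_base_regs.h / hetero_arch.h hunks above stop hard-coding die-0 physical addresses and instead derive every base from MBOX_REG_BASE / NPU_SYS_BASE_ADDR, with die 1 mirroring die 0 at a 0x20000000 interval (0x70a00000 vs 0x50a00000). A minimal standalone C sketch of that addressing arithmetic follows; the dsp_mbox_off table and the main() harness are illustrative only and do not appear in the patch.]

#include <stdio.h>

/* Die-0 and die-1 mailbox register bases, as defined in mailbox_regs.h above. */
#define DIE0_MBOX_REG_BASE 0x50a00000UL
#define DIE1_MBOX_REG_BASE 0x70a00000UL

/* Per-mailbox offsets used by the patch: mailboxes 8/10/12/14 -> U84_TO_DSP_0..3. */
static const unsigned long dsp_mbox_off[] = { 0x80000UL, 0xa0000UL, 0xc0000UL, 0xe0000UL };

int main(void)
{
	for (int die = 0; die < 2; die++) {
		unsigned long base = die ? DIE1_MBOX_REG_BASE : DIE0_MBOX_REG_BASE;

		/* Print the composed base address of each U84-to-DSP mailbox. */
		for (int i = 0; i < 4; i++)
			printf("die%d ESWIN_MAILBOX_U84_TO_DSP_%d_REG_BASE = 0x%lx\n",
			       die, i, base + dsp_mbox_off[i]);
	}
	return 0;
}

[For die 0 this reproduces the previously hard-coded constants 0x50a80000, 0x50aa0000, 0x50ac0000 and 0x50ae0000, which is why the old literals could be replaced by (MBOX_REG_BASE + offset) without changing die-0 behaviour.]
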
diff --git a/drivers/soc/eswin/ai_driver/dsp/Makefile b/drivers/soc/eswin/ai_driver/dsp/Makefile index ebacf5f1723e..e433e1af883b 100755 --- a/drivers/soc/eswin/ai_driver/dsp/Makefile +++ b/drivers/soc/eswin/ai_driver/dsp/Makefile @@ -30,7 +30,6 @@ DEBUG_LEVEL ?= 0 ccflags-y := -I$(src)/ ccflags-y += -I$(srctree)/drivers/soc/eswin ccflags-y += -I$(srctree)/drivers/memory/eswin/es_dev_buf/include/linux -ccflags-y += -I$(srctree)/drivers/memory/eswin/es_iommu_rsv/include/linux ccflags-y += -I$(srctree)/$(src)/../common ccflags-y += -I$(srctree)/$(src)/../common/devices diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c b/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c index 1e7871eb56b0..2250f2cfd476 100644 --- a/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c +++ b/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c @@ -67,8 +67,9 @@ static struct dsp_user *dsp_op_add_user(struct dsp_file *dsp_file, { struct dsp_user *user; int ret; + struct es_dsp *dsp = op->dsp; - user = kmalloc(sizeof(struct dsp_user), GFP_KERNEL); + user = kzalloc(sizeof(struct dsp_user), GFP_KERNEL); if (!user) { dsp_err("alloc dsp user mem failed.\n"); return NULL; @@ -479,7 +480,7 @@ static struct dsp_user_req_async *dsp_set_task_info(struct dsp_file *dsp_file, buffer_count = task->task.bufferCntCfg + task->task.bufferCntInput + task->task.bufferCntOutput; - user_req = kmalloc(sizeof(struct dsp_user_req_async) + + user_req = kzalloc(sizeof(struct dsp_user_req_async) + sizeof(struct dsp_dma_buf *) * buffer_count, GFP_KERNEL); if (!user_req) { @@ -1172,7 +1173,7 @@ static void dsp_file_release(struct khandle *handle) } dsp = dsp_file->dsp; - devm_kfree(dsp->dev, dsp_file); + kfree(dsp_file); es_dsp_pm_put_sync(dsp); dsp_debug("release dsp_file done.\n"); } @@ -1195,7 +1196,7 @@ static int dsp_open(struct inode *inode, struct file *flip) __LINE__, ret); return ret; } - dsp_file = devm_kzalloc(dsp->dev, sizeof(*dsp_file), GFP_KERNEL); + dsp_file = kzalloc(sizeof(*dsp_file), GFP_KERNEL); if (!dsp_file) { es_dsp_pm_put_sync(dsp); return -ENOMEM; @@ -1205,7 +1206,7 @@ static int dsp_open(struct inode *inode, struct file *flip) DSP_FILE_HANDLE_MAGIC, NULL); if (ret != 0) { dsp_err("%s, init kernel handle error.\n", __func__); - devm_kfree(dsp->dev, dsp_file); + kfree(dsp_file); es_dsp_pm_put_sync(dsp->dev); return ret; } diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c b/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c index 83c309dcae5a..45c8bda644cc 100644 --- a/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c +++ b/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c @@ -38,7 +38,7 @@ #include #include #include -#include "es_iommu_rsv.h" +#include #include "eswin-dsp-subsys.h" #include "dsp_platform.h" @@ -711,7 +711,7 @@ static inline int init_hw_uart(struct es_dsp_hw *hw) ret = iommu_map_rsv_iova_with_phys( dsp->dev, (dma_addr_t)DSP_DEVICE_UART_MUTEX_IOVA, - DSP_DEVICE_UART_MUTEX_IOVA_SIZE, UART_MUTEX_BASE_ADDR, + DSP_DEVICE_UART_MUTEX_IOVA_SIZE, UART_MUTEX_BASE_ADDR + dsp->numa_id * DIE_BASE_INTERVAL, IOMMU_MMIO); if (ret != 0) { dev_err(dsp->dev, "uart mutex iommu map error\n"); @@ -1158,7 +1158,7 @@ int es_dsp_map_resource(struct es_dsp *dsp) } hw->uart_mutex_base = - ioremap(UART_MUTEX_BASE_ADDR, DSP_DEVICE_UART_MUTEX_IOVA_SIZE); + devm_ioremap(dsp->dev, UART_MUTEX_BASE_ADDR + dsp->numa_id * DIE_BASE_INTERVAL, DSP_DEVICE_UART_MUTEX_IOVA_SIZE); if (!hw->uart_mutex_base) { dev_err(&hw->pdev->dev, "ioremap error\n"); ret = -ENOMEM; @@ -1212,7 +1212,7 @@ int es_dsp_unmap_resource(struct es_dsp *dsp) } if (hw->uart_mutex_base != 
NULL) { - iounmap(hw->uart_mutex_base); + devm_iounmap(dsp->dev, hw->uart_mutex_base); hw->uart_mutex_base = NULL; } return 0; @@ -1260,9 +1260,9 @@ int es_dsp_hw_init(struct es_dsp *dsp) hw->pts_iova = DSP_PTS_IOVA; hw->pts_iova_size = DSP_PTS_IOVA_SIZE; - hw->pts_phys_base = 0x51840000; + hw->pts_phys_base = 0x51840000 + dsp->numa_id * DIE_BASE_INTERVAL; ret = iommu_map_rsv_iova_with_phys(dsp->dev, (dma_addr_t)DSP_PTS_IOVA, - DSP_PTS_IOVA_SIZE, 0x51840000, + DSP_PTS_IOVA_SIZE, hw->pts_phys_base, IOMMU_MMIO); if (ret != 0) { dev_err(dsp->dev, "iommu map dsp pts phy error.\n"); diff --git a/drivers/soc/eswin/ai_driver/npu/Makefile b/drivers/soc/eswin/ai_driver/npu/Makefile index 3602d3c9a0f6..29cbe78dd026 100644 --- a/drivers/soc/eswin/ai_driver/npu/Makefile +++ b/drivers/soc/eswin/ai_driver/npu/Makefile @@ -10,7 +10,6 @@ ccflags-y += -I$(srctree)/$(src)/include ccflags-y += -I$(srctree)/$(src)/../dsp ccflags-y += -I$(srctree)/$(src)/../common ccflags-y += -I$(srctree)/$(src)/../common/devices -ccflags-y += -I$(srctree)/drivers/memory/eswin/es_iommu_rsv/include/linux ccflags-y += -I$(srctree)/drivers/memory/eswin/es_dev_buf/include/linux ccflags-y += -DNPU_DEV_SIM=$(NPU_DEV_SIM) diff --git a/drivers/soc/eswin/ai_driver/npu/debug.c b/drivers/soc/eswin/ai_driver/npu/debug.c index 489bbbdec743..c55a8188843b 100644 --- a/drivers/soc/eswin/ai_driver/npu/debug.c +++ b/drivers/soc/eswin/ai_driver/npu/debug.c @@ -440,10 +440,11 @@ void dla_dump_src_data(struct win_executor *executor, struct host_frame_desc *f, uint64_t offset = 0; uint32_t op_num; uint32_t index; + int16_t buffer_cnt_input; int32_t ret; int op_type = 0; int src_type = 0; - int i = 0, j = 0, k = 0; + int i = 0, j = 0, k = 0, m = 0; char *src_dump_buf = NULL; uint32_t dump_buf_len = 0; struct dla_data_cube *src_data; @@ -462,7 +463,6 @@ void dla_dump_src_data(struct win_executor *executor, struct host_frame_desc *f, } op_num = executor->network->num_operations; - if (npu_dump_op_num_start >= op_num) { dla_error("error:npu_dump_op_num_start(%d) >= op_num(%d)!\n", npu_dump_op_num_start, op_num); @@ -475,6 +475,8 @@ void dla_dump_src_data(struct win_executor *executor, struct host_frame_desc *f, op_type = executor->task->common_desc[i].op_type; if (index != op_index) continue; + buffer_cnt_input = 1; + switch (op_type) { case DLA_OP_EDMA: src_data = &executor->task->surface_desc[index] @@ -543,20 +545,7 @@ void dla_dump_src_data(struct win_executor *executor, struct host_frame_desc *f, case DLA_KMD_OP_DSP_1: case DLA_KMD_OP_DSP_2: case DLA_KMD_OP_DSP_3: - src_data = &executor->task->surface_desc[index] - .dsp_surface.src_data[0]; - src_addr_index = executor->task->surface_desc[index] - .dsp_surface.src_data[0] - .address; - src_data_size = executor->task->surface_desc[index] - .dsp_surface.src_data[0] - .size; - offset = executor->task->surface_desc[index] - .dsp_surface.src_data[0] - .offset; - src_type = executor->task->surface_desc[index] - .dsp_surface.src_data[0] - .type; + buffer_cnt_input = executor->task->op_desc[index].dsp_op.buffer_cnt_input; break; default: continue; @@ -583,116 +572,130 @@ void dla_dump_src_data(struct win_executor *executor, struct host_frame_desc *f, &executor->task->common_desc[i], "y"); } - dla_debug( - "src data dump %d:index:%d op_type:%d src_type:%d src_addr_index: %d size:0x%llx offset:0x%llx\n", - __LINE__, index, op_type, src_type, src_addr_index, - src_data_size, offset); - - if ((-1 == src_addr_index) || (src_type == DLA_MEM_HW)) { - strcpy(md5_dump[index].src_md5, "on-fly"); - continue; - } - 
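The reworked dump path that follows drops the hard-coded src_data[0] access for DSP ops and walks buffer_cnt_input buffers instead (the destination variant further down does the same with buffer_cnt_output), tagging each dump file with the buffer index. A minimal sketch of the input side; dsp_dump_one_input() is a hypothetical stand-in for the vmap/hexdump/MD5/dump2file body that the real loop keeps inline:

/* Sketch only: iterate every DSP input buffer instead of src_data[0].
 * dsp_dump_one_input() is hypothetical; the real loop below carries the
 * vmap + hexdump + MD5 + dump2file work inline.
 */
static void dsp_dump_inputs(struct win_executor *executor, uint32_t index,
			    int16_t buffer_cnt_input)
{
	int m;

	for (m = 0; m < buffer_cnt_input; m++) {
		struct dla_data_cube *src =
			&executor->task->surface_desc[index].dsp_surface.src_data[m];

		/* "on-fly" data has no backing buffer to dump */
		if (src->address == -1 || src->type == DLA_MEM_HW)
			continue;

		/* dump file name now carries m: ..._<index>_<m>_in.bin */
		dsp_dump_one_input(executor, index, m, src);
	}
}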
if ((index == 0) && (op_type == 0) && (src_type == 0) && - (src_addr_index == 0)) { - continue; - } + /* dump */ + for (m = 0; m < buffer_cnt_input; m++) { + /* multi input for DSP */ + if ((op_type >= DLA_KMD_OP_DSP_0) && (op_type <= DLA_KMD_OP_DSP_3)) { + src_dump_buf = NULL; + src_data = &executor->task->surface_desc[index].dsp_surface.src_data[m]; + src_addr_index = executor->task->surface_desc[index].dsp_surface.src_data[m].address; + src_data_size = executor->task->surface_desc[index].dsp_surface.src_data[m].size; + offset = executor->task->surface_desc[index].dsp_surface.src_data[m].offset; + src_type = executor->task->surface_desc[index].dsp_surface.src_data[m].type; + } + dla_debug( + "src data dump %d:index:%d op_type:%d src_type:%d src_addr_index: %d size:0x%llx offset:0x%llx\n", + __LINE__, index, op_type, src_type, src_addr_index, + src_data_size, offset); + + /* MD5 */ + if ((-1 == src_addr_index) || (src_type == DLA_MEM_HW)) { + strcpy(md5_dump[index].src_md5, "on-fly"); + continue; + } - dump_buf_len = src_data_size; + if ((index == 0) && (op_type == 0) && (src_type == 0) && + (src_addr_index == 0)) { + continue; + } - seg_adrr[1] = src_data_size / 2; - seg_adrr[2] = src_data_size - seg_data_len; + dump_buf_len = src_data_size; + seg_adrr[1] = src_data_size / 2; + seg_adrr[2] = src_data_size - seg_data_len; + + read_input_address(executor, src_data, &input_address, + &input_is_io_tensor); + dla_debug("input_address:0x%llx, input_is_io_tensor:%d\n", + input_address, input_is_io_tensor); + + /* src_dump_buf */ + if (src_type == DLA_MEM_MC) { + if (input_is_io_tensor == invalid_tensor_idx) { + ret = dla_data_get_vaddr( + &model->mem_handles, src_addr_index, + (void **)&src_dump_buf); + if (ret < 0) { + dla_error( + "err:get index(%d) vaddr failed!\n", + src_addr_index); + return; + } - read_input_address(executor, src_data, &input_address, - &input_is_io_tensor); - dla_debug("input_address:0x%llx, input_is_io_tensor:%d\n", - input_address, input_is_io_tensor); + src_dump_buf += offset; + } else { + input_address = f->io_tensors_addr_list + [input_is_io_tensor] + + offset; + if (f->input_bobj[input_is_io_tensor] != NULL) + src_dump_buf = dla_dmabuf_vmap( + f->input_bobj + [input_is_io_tensor]); + } - if (src_type == DLA_MEM_MC) { - if (input_is_io_tensor == invalid_tensor_idx) { - ret = dla_data_get_vaddr( - &model->mem_handles, src_addr_index, - (void **)&src_dump_buf); - if (ret < 0) { + if (src_dump_buf == NULL) { dla_error( - "err:get index(%d) vaddr failed!\n", - src_addr_index); - return; + "%d, error:src_dump_buf == NULL!index=%d\n", + __LINE__, index); + continue; + } + } else if (src_type == DLA_MEM_CV) { + int addr_offset = + input_address - + ((struct nvdla_device + *)(executor->driver_context)) + ->spram_base_addr; + + struct dla_buffer_object *spram_bobj = + ((struct nvdla_device + *)(executor->driver_context)) + ->spram_bobj; + + if (spram_bobj != NULL) { + src_dump_buf = dla_dmabuf_vmap(spram_bobj); + src_dump_buf += addr_offset; } - src_dump_buf += offset; + if (src_dump_buf == NULL) { + dla_error( + "%d, error:src_dump_buf == NULL!index=%d\n", + __LINE__, index); + continue; + } } else { - input_address = f->io_tensors_addr_list - [input_is_io_tensor] + - offset; - if (f->input_bobj[input_is_io_tensor] != NULL) - src_dump_buf = dla_dmabuf_vmap( - f->input_bobj - [input_is_io_tensor]); - } - - if (src_dump_buf == NULL) { - dla_error( - "%d, error:src_dump_buf == NULL!index=%d\n", - __LINE__, index); - continue; - } - - } else if (src_type == DLA_MEM_CV) { - int 
addr_offset = - input_address - - ((struct nvdla_device - *)(executor->driver_context)) - ->spram_base_addr; - - struct dla_buffer_object *spram_bobj = - ((struct nvdla_device - *)(executor->driver_context)) - ->spram_bobj; - - if (spram_bobj != NULL) { - src_dump_buf = dla_dmabuf_vmap(spram_bobj); - src_dump_buf += addr_offset; - } - - if (src_dump_buf == NULL) { - dla_error( - "%d, error:src_dump_buf == NULL!index=%d\n", - __LINE__, index); continue; } - } else { - continue; - } - - memset(p_buf, 0, P_BUF_LEN); - for (k = 0; k < 3; k++) { - for (j = 0; j < seg_data_len; j++) { - if (j % DATA_CNT_ONE_LINE == 0) { - if (j != 0) { - dla_debug("%s\n", p_buf); - memset(p_buf, 0, P_BUF_LEN); + /* src_dump_buf */ + memset(p_buf, 0, P_BUF_LEN); + for (k = 0; k < 3; k++) { + for (j = 0; j < seg_data_len; j++) { + if (j % DATA_CNT_ONE_LINE == 0) { + if (j != 0) { + dla_debug("%s\n", p_buf); + memset(p_buf, 0, P_BUF_LEN); + } + snprintf(p_buf + strlen(p_buf), + P_BUF_LEN - strlen(p_buf), + "%05x: ", seg_adrr[k] + j); } snprintf(p_buf + strlen(p_buf), - P_BUF_LEN - strlen(p_buf), - "%05x: ", seg_adrr[k] + j); + P_BUF_LEN - strlen(p_buf), "%02x", + src_dump_buf[seg_data_len * k + j]); } - snprintf(p_buf + strlen(p_buf), - P_BUF_LEN - strlen(p_buf), "%02x", - src_dump_buf[seg_data_len * k + j]); + dla_debug("%s\n", p_buf); + memset(p_buf, 0, P_BUF_LEN); } - dla_debug("%s\n", p_buf); - memset(p_buf, 0, P_BUF_LEN); - } - - sprintf(f_name, "%s/%d_%d_%d_%s_%d_0_in.bin", dump_info->path, - dump_info->process_id, dump_info->model_id, - f->frame_idx, pcer2str(op_type), index); - dump2file(f_name, src_dump_buf, src_data_size); + /* dump2file */ + sprintf(f_name, "%s/%d_%d_%d_%s_%d_%d_in.bin", dump_info->path, + dump_info->process_id, dump_info->model_id, + f->frame_idx, pcer2str(op_type), index, m); + dump2file(f_name, src_dump_buf, src_data_size); - calc_md5(md5, src_dump_buf, src_data_size); - memcpy(md5_dump[index].src_md5, md5, MD5_LENGTH); + /* MD5 */ + calc_md5(md5, src_dump_buf, src_data_size); + memcpy(md5_dump[index].src_md5, md5, MD5_LENGTH); + } } return; @@ -706,10 +709,11 @@ void dla_dump_dst_data(struct win_executor *executor, struct host_frame_desc *f, uint64_t offset = 0; uint32_t op_num; uint32_t index; + int16_t buffer_cnt_output; int32_t ret; int op_type = 0; int dst_type = 0; - int i = 0, j = 0, k = 0; + int i = 0, j = 0, k = 0, m = 0; char *dst_dump_buf = NULL; uint32_t dump_buf_len = 0; struct dla_data_cube *dst_data; @@ -729,7 +733,6 @@ void dla_dump_dst_data(struct win_executor *executor, struct host_frame_desc *f, } op_num = executor->network->num_operations; - if (npu_dump_op_num_start >= op_num) { dla_error("error:npu_dump_op_num_start(%d) >= op_num(%d)!\n", npu_dump_op_num_start, op_num); @@ -742,6 +745,8 @@ void dla_dump_dst_data(struct win_executor *executor, struct host_frame_desc *f, op_type = executor->task->common_desc[i].op_type; if (index != op_index) continue; + buffer_cnt_output = 1; + switch (op_type) { case DLA_OP_EDMA: dst_data = &executor->task->surface_desc[index] @@ -810,145 +815,146 @@ void dla_dump_dst_data(struct win_executor *executor, struct host_frame_desc *f, case DLA_KMD_OP_DSP_1: case DLA_KMD_OP_DSP_2: case DLA_KMD_OP_DSP_3: - dst_data = &executor->task->surface_desc[index] - .dsp_surface.dst_data[0]; - dst_addr_index = executor->task->surface_desc[index] - .dsp_surface.dst_data[0] - .address; - dst_data_size = executor->task->surface_desc[index] - .dsp_surface.dst_data[0] - .size; - offset = executor->task->surface_desc[index] - .dsp_surface.dst_data[0] - 
.offset; - dst_type = executor->task->surface_desc[index] - .dsp_surface.dst_data[0] - .type; + buffer_cnt_output = executor->task->op_desc[index].dsp_op.buffer_cnt_output; break; default: continue; } - dla_debug( - "dst data dump %d:index:%d op_type:%d dst_type:%d dst_addr_index: %d size:0x%llx offset:0x%llx\n", - __LINE__, index, op_type, dst_type, dst_addr_index, - dst_data_size, offset); - sprintf(md5_f_name, "%s/%d_%d_%d_md5.txt", dump_info->path, - dump_info->process_id, dump_info->model_id, - f->frame_idx); - if (-1 == dst_addr_index) { - if (dst_type == DLA_MEM_HW) { - strcpy(md5_dump[index].dst_md5, "on-fly"); - md5_dump[index].calced_flag = 1; - dumpMD5(md5_f_name, executor, index); + /* dump */ + for (m = 0; m < buffer_cnt_output; m++) { + /* multi output for DSP */ + if ((op_type >= DLA_KMD_OP_DSP_0) && (op_type <= DLA_KMD_OP_DSP_3)) { + dst_dump_buf = NULL; + dst_data = &executor->task->surface_desc[index].dsp_surface.dst_data[m]; + dst_addr_index = executor->task->surface_desc[index].dsp_surface.dst_data[m].address; + dst_data_size = executor->task->surface_desc[index].dsp_surface.dst_data[m].size; + offset = executor->task->surface_desc[index].dsp_surface.dst_data[m].offset; + dst_type = executor->task->surface_desc[index].dsp_surface.dst_data[m].type; + } + dla_debug( + "dst data dump %d:index:%d op_type:%d dst_type:%d dst_addr_index: %d size:0x%llx offset:0x%llx\n", + __LINE__, index, op_type, dst_type, dst_addr_index, + dst_data_size, offset); + + /* MD5 */ + sprintf(md5_f_name, "%s/%d_%d_%d_md5.txt", dump_info->path, + dump_info->process_id, dump_info->model_id, + f->frame_idx); + if (-1 == dst_addr_index) { + if (dst_type == DLA_MEM_HW) { + strcpy(md5_dump[index].dst_md5, "on-fly"); + md5_dump[index].calced_flag = 1; + dumpMD5(md5_f_name, executor, index); + } + continue; } - continue; - } - - if ((index == 0) && (op_type == 0) && (dst_type == 0) && - (dst_addr_index == 0)) { - continue; - } - dump_buf_len = dst_data_size; + if ((index == 0) && (op_type == 0) && (dst_type == 0) && + (dst_addr_index == 0)) { + continue; + } - seg_adrr[1] = dst_data_size / 2; - seg_adrr[2] = dst_data_size - seg_data_len; + dump_buf_len = dst_data_size; + seg_adrr[1] = dst_data_size / 2; + seg_adrr[2] = dst_data_size - seg_data_len; + + read_input_address(executor, dst_data, &output_address, + &output_is_io_tensor); + dla_debug("output_address:0x%llx, output_is_io_tensor:%d\n", + output_address, output_is_io_tensor); + + /* dst_dump_buf */ + if (dst_type == DLA_MEM_MC) { + if (output_is_io_tensor == invalid_tensor_idx) { + ret = dla_data_get_vaddr( + &model->mem_handles, dst_addr_index, + (void **)&dst_dump_buf); + if (ret < 0) { + dla_error( + "err:get index(%d) vaddr failed!\n", + dst_addr_index); + return; + } - read_input_address(executor, dst_data, &output_address, - &output_is_io_tensor); - dla_debug("output_address:0x%llx, output_is_io_tensor:%d\n", - output_address, output_is_io_tensor); + dst_dump_buf += offset; + } else { + output_address = f->io_tensors_addr_list + [output_is_io_tensor] + + offset; + if (f->output_bobj[output_is_io_tensor] != NULL) + dst_dump_buf = dla_dmabuf_vmap( + f->output_bobj + [output_is_io_tensor]); + dst_dump_buf += offset; + } - if (dst_type == DLA_MEM_MC) { - if (output_is_io_tensor == invalid_tensor_idx) { - ret = dla_data_get_vaddr( - &model->mem_handles, dst_addr_index, - (void **)&dst_dump_buf); - if (ret < 0) { + if (dst_dump_buf == NULL) { dla_error( - "err:get index(%d) vaddr failed!\n", - dst_addr_index); - return; + "%d, error:dst_dump_buf == 
NULL!index=%d\n", + __LINE__, index); + continue; + } + } else if (dst_type == DLA_MEM_CV) { + int addr_offset = + output_address - + ((struct nvdla_device + *)(executor->driver_context)) + ->spram_base_addr; + + struct dla_buffer_object *spram_bobj = + ((struct nvdla_device + *)(executor->driver_context)) + ->spram_bobj; + + if (spram_bobj != NULL) { + dst_dump_buf = dla_dmabuf_vmap(spram_bobj); + dst_dump_buf += addr_offset; } - dst_dump_buf += offset; + if (dst_dump_buf == NULL) { + dla_error( + "%d, error:dst_dump_buf == NULL!index=%d\n", + __LINE__, index); + continue; + } } else { - output_address = f->io_tensors_addr_list - [output_is_io_tensor] + - offset; - if (f->output_bobj[output_is_io_tensor] != NULL) - dst_dump_buf = dla_dmabuf_vmap( - f->output_bobj - [output_is_io_tensor]); - dst_dump_buf += offset; - } - - if (dst_dump_buf == NULL) { - dla_error( - "%d, error:dst_dump_buf == NULL!index=%d\n", - __LINE__, index); - continue; - } - - } else if (dst_type == DLA_MEM_CV) { - int addr_offset = - output_address - - ((struct nvdla_device - *)(executor->driver_context)) - ->spram_base_addr; - - struct dla_buffer_object *spram_bobj = - ((struct nvdla_device - *)(executor->driver_context)) - ->spram_bobj; - - if (spram_bobj != NULL) { - dst_dump_buf = dla_dmabuf_vmap(spram_bobj); - dst_dump_buf += addr_offset; - } - - if (dst_dump_buf == NULL) { - dla_error( - "%d, error:dst_dump_buf == NULL!index=%d\n", - __LINE__, index); continue; } - } else { - continue; - } - - memset(p_buf, 0, P_BUF_LEN); - for (k = 0; k < 3; k++) { - for (j = 0; j < seg_data_len; j++) { - if (j % DATA_CNT_ONE_LINE == 0) { - if (j != 0) { - dla_debug("%s\n", p_buf); - memset(p_buf, 0, P_BUF_LEN); + /* dst_dump_buf */ + memset(p_buf, 0, P_BUF_LEN); + for (k = 0; k < 3; k++) { + for (j = 0; j < seg_data_len; j++) { + if (j % DATA_CNT_ONE_LINE == 0) { + if (j != 0) { + dla_debug("%s\n", p_buf); + memset(p_buf, 0, P_BUF_LEN); + } + snprintf(p_buf + strlen(p_buf), + P_BUF_LEN - strlen(p_buf), + "%05x: ", seg_adrr[k] + j); } snprintf(p_buf + strlen(p_buf), - P_BUF_LEN - strlen(p_buf), - "%05x: ", seg_adrr[k] + j); + P_BUF_LEN - strlen(p_buf), "%02x", + dst_dump_buf[seg_data_len * k + j]); } - snprintf(p_buf + strlen(p_buf), - P_BUF_LEN - strlen(p_buf), "%02x", - dst_dump_buf[seg_data_len * k + j]); + dla_debug("%s\n", p_buf); + memset(p_buf, 0, P_BUF_LEN); } - dla_debug("%s\n", p_buf); - memset(p_buf, 0, P_BUF_LEN); - } - - sprintf(f_name, "%s/%d_%d_%d_%s_%d_0_out.bin", dump_info->path, - dump_info->process_id, dump_info->model_id, - f->frame_idx, pcer2str(op_type), index); - dump2file(f_name, dst_dump_buf, dst_data_size); - - calc_md5(md5, dst_dump_buf, dst_data_size); - memcpy(md5_dump[index].dst_md5, md5, MD5_LENGTH); - md5_dump[index].calced_flag = 1; - dumpMD5(md5_f_name, executor, index); + /* dump2file */ + sprintf(f_name, "%s/%d_%d_%d_%s_%d_%d_out.bin", dump_info->path, + dump_info->process_id, dump_info->model_id, + f->frame_idx, pcer2str(op_type), index, m); + dump2file(f_name, dst_dump_buf, dst_data_size); + + /* MD5 */ + calc_md5(md5, dst_dump_buf, dst_data_size); + memcpy(md5_dump[index].dst_md5, md5, MD5_LENGTH); + md5_dump[index].calced_flag = 1; + dumpMD5(md5_f_name, executor, index); + } } return; diff --git a/drivers/soc/eswin/ai_driver/npu/dla_driver.h b/drivers/soc/eswin/ai_driver/npu/dla_driver.h index f497ada38f56..2101ccd1766e 100644 --- a/drivers/soc/eswin/ai_driver/npu/dla_driver.h +++ b/drivers/soc/eswin/ai_driver/npu/dla_driver.h @@ -46,6 +46,7 @@ struct nvdla_device { void __iomem 
*uart_mutex_base; char *e31_fw_virt_base; dma_addr_t e31_nim_iova; + const char *e31_fw_name; uint32_t e31_fw_size; struct mutex task_mutex; void *engine_context; diff --git a/drivers/soc/eswin/ai_driver/npu/edma.c b/drivers/soc/eswin/ai_driver/npu/edma.c index f720be48c277..b24b56debb37 100644 --- a/drivers/soc/eswin/ai_driver/npu/edma.c +++ b/drivers/soc/eswin/ai_driver/npu/edma.c @@ -942,7 +942,7 @@ void edma_free(struct nvdla_device *nvdla_dev) if (edma == NULL) { return; } - kfree(edma); + devm_kfree(&nvdla_dev->pdev->dev, edma); nvdla_dev->edma = NULL; return; } @@ -955,7 +955,7 @@ int edma_init(struct nvdla_device *nvdla_dev) dla_info("%s %d edma already exist\n", __func__, __LINE__); return -1; } - edma = kzalloc(sizeof(struct edma_context), GFP_KERNEL); + edma = devm_kzalloc(&nvdla_dev->pdev->dev, sizeof(struct edma_context), GFP_KERNEL); if (edma == NULL) { dla_info("%s %d edma can't alloc memory\n", __func__, __LINE__); return -1; diff --git a/drivers/soc/eswin/ai_driver/npu/engine.c b/drivers/soc/eswin/ai_driver/npu/engine.c index 479628628177..f9950ef478c6 100644 --- a/drivers/soc/eswin/ai_driver/npu/engine.c +++ b/drivers/soc/eswin/ai_driver/npu/engine.c @@ -25,7 +25,7 @@ #include #include #include -#include "es_iommu_rsv.h" +#include #include #include #include "dla_engine.h" @@ -484,6 +484,7 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) { struct win_engine *engine; int ret; + int numa_id = nvdla_dev->numa_id; #if (NPU_DEV_SIM != NPU_MCU_HOST) u32 i; @@ -493,7 +494,7 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) dla_error("%s %d engine inited\n", __func__, __LINE__); return -1; } - engine = kzalloc(sizeof(struct win_engine), GFP_KERNEL); + engine = devm_kzalloc(&nvdla_dev->pdev->dev, sizeof(struct win_engine), GFP_KERNEL); if (engine == NULL) { dla_error("%s %d nomem\n", __func__, __LINE__); return -ENOMEM; @@ -540,11 +541,11 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) timer_setup(&engine->timer[1], npu_frame_timeout_tok, 0); engine->is_event_source_done[0] = - (u8 *)kzalloc((COMPLETE_EVENT_ID / 8) * NUM_TIKTOK, GFP_KERNEL); + (u8 *)devm_kzalloc(&nvdla_dev->pdev->dev, (COMPLETE_EVENT_ID / 8) * NUM_TIKTOK, GFP_KERNEL); engine->is_event_source_done[1] = engine->is_event_source_done[0] + (COMPLETE_EVENT_ID / 8); if (engine->is_event_source_done[0] == NULL) { - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine); return -ENOMEM; } @@ -552,16 +553,16 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) if (ret) { dla_error("%s, %d, get dsp device err, ret=%d.\n", __func__, __LINE__, ret); - kfree(engine->is_event_source_done[0]); - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine->is_event_source_done[0]); + devm_kfree(&nvdla_dev->pdev->dev, engine); return -ENOMEM; } #if NPU_PERF_STATS > 1 engine->perf_data_buf = - kzalloc(sizeof(npu_e31_perf_t) * MAX_OP_NUM, GFP_KERNEL); + devm_kzalloc(&nvdla_dev->pdev->dev, sizeof(npu_e31_perf_t) * MAX_OP_NUM, GFP_KERNEL); if (engine->perf_data_buf == NULL) { - kfree(engine->is_event_source_done[0]); - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine->is_event_source_done[0]); + devm_kfree(&nvdla_dev->pdev->dev, engine); return -ENOMEM; } #endif @@ -569,23 +570,23 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) #if (NPU_DEV_SIM == NPU_REAL_ENV) if (ret) { destroy_workqueue(engine->dump_op_work_queue); - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine); return -ENOMEM; } for (i = 
0; i < NUM_MAJOR_CORES; i++) { engine->major_shm[i] = - ioremap(E31_MAJOR_DTIM_BASE(i), E31_MAJOR_DTIM_SIZE); + devm_ioremap(&nvdla_dev->pdev->dev, E31_MAJOR_DTIM_BASE(i) + numa_id * NPU_DIE_REG_OFFSET, E31_MAJOR_DTIM_SIZE); if (engine->major_shm[i] == NULL) { ret = -EIO; goto err_ioremap; } - engine->major_mem[i] = kzalloc(E31_MAJOR_DTIM_SIZE, GFP_KERNEL); + engine->major_mem[i] = devm_kzalloc(&nvdla_dev->pdev->dev, E31_MAJOR_DTIM_SIZE, GFP_KERNEL); if (engine->major_mem[i] == NULL) { ret = -ENOMEM; goto err_ioremap; } } - engine->aux_mem = kzalloc(E31_PROGRAM_DTIM_SIZE * 2, GFP_KERNEL); + engine->aux_mem = devm_kzalloc(&nvdla_dev->pdev->dev, E31_PROGRAM_DTIM_SIZE * 2, GFP_KERNEL); if (!engine->aux_mem) { ret = -ENOMEM; goto err_aux_mem; @@ -600,7 +601,7 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) #if (NPU_DEV_SIM == NPU_MCU_ALONE) destroy_workqueue(engine->work_queue); #endif - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine); return -ENOMEM; } #endif @@ -611,16 +612,16 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) err_ioremap: for (i = 0; i < NUM_MAJOR_CORES; i++) { if (engine->major_shm[i] != NULL) { - iounmap(engine->major_shm[i]); + devm_iounmap(&nvdla_dev->pdev->dev, engine->major_shm[i]); engine->major_shm[i] = NULL; } if (engine->major_mem[i] != NULL) { - kfree(engine->major_mem[i]); + devm_kfree(&nvdla_dev->pdev->dev, engine->major_mem[i]); engine->major_mem[i] = NULL; } } if (engine->aux_mem) { - kfree(engine->aux_mem); + devm_kfree(&nvdla_dev->pdev->dev, engine->aux_mem); engine->master_mem = NULL; engine->aux_mem = NULL; } @@ -629,7 +630,7 @@ int win_engine_init(struct nvdla_device *nvdla_dev, void **arg_engine) engine->host_node = NULL; } destroy_workqueue(engine->dump_op_work_queue); - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine); return ret; #endif } @@ -651,11 +652,11 @@ void win_engine_destroy(struct nvdla_device *nvdla_dev) destroy_workqueue(engine->dump_op_work_queue); for (i = 0; i < NUM_MAJOR_CORES; i++) { if (engine->major_shm[i] != NULL) { - iounmap(engine->major_shm[i]); + devm_iounmap(&nvdla_dev->pdev->dev, engine->major_shm[i]); engine->major_shm[i] = NULL; } if (engine->major_mem[i] != NULL) { - kfree(engine->major_mem[i]); + devm_kfree(&nvdla_dev->pdev->dev, engine->major_mem[i]); engine->major_mem[i] = NULL; } } @@ -664,14 +665,14 @@ void win_engine_destroy(struct nvdla_device *nvdla_dev) npu_uninit_ipc(nvdla_dev); if (engine->is_event_source_done[0] != NULL) { - kfree(engine->is_event_source_done[0]); + devm_kfree(&nvdla_dev->pdev->dev, engine->is_event_source_done[0]); engine->is_event_source_done[0] = NULL; } if (engine->perf_data_buf != NULL) { - kfree(engine->perf_data_buf); + devm_kfree(&nvdla_dev->pdev->dev, engine->perf_data_buf); engine->perf_data_buf = NULL; } - kfree(engine); + devm_kfree(&nvdla_dev->pdev->dev, engine); engine = NULL; } return; diff --git a/drivers/soc/eswin/ai_driver/npu/npu_e31.c b/drivers/soc/eswin/ai_driver/npu/npu_e31.c index c32249e6dd48..a0c01295970b 100644 --- a/drivers/soc/eswin/ai_driver/npu/npu_e31.c +++ b/drivers/soc/eswin/ai_driver/npu/npu_e31.c @@ -36,7 +36,7 @@ #include #include #include -#include "es_iommu_rsv.h" +#include #include #include "dla_log.h" #include "hetero_host.h" @@ -110,7 +110,7 @@ static void npu_check_mcu_active(struct nvdla_device *nvdla_dev) #define NPU_E31_FW_RSV_IOVA 0x80000000 -int npu_e31_load_fw(struct platform_device *pdev, void __iomem *e31_mmio_base) +int npu_e31_load_fw(struct nvdla_device *nvdla_dev) { int 
retval = 0; int err = 0; @@ -122,7 +122,9 @@ int npu_e31_load_fw(struct platform_device *pdev, void __iomem *e31_mmio_base) u32 offset; u32 boot_dma_addr; u32 streamid_cfg; - struct nvdla_device *nvdla_dev = dev_get_drvdata(&pdev->dev); + + void __iomem *e31_mmio_base = nvdla_dev->e31_mmio_base; + struct platform_device *pdev = nvdla_dev->pdev; /* config streamid of npu-e31 */ streamid_cfg = readl(e31_mmio_base + NPU_CTRL_OFFSET + @@ -132,7 +134,7 @@ int npu_e31_load_fw(struct platform_device *pdev, void __iomem *e31_mmio_base) e31_mmio_base + NPU_CTRL_OFFSET + E31_STREAMID_CFG_OFFSET); npu_e31_sid_cfg(e31_mmio_base, WIN2030_SID_NPU_DMA); - err = request_firmware(&e31_fw, "eic7700_e31_fw", &pdev->dev); + err = request_firmware(&e31_fw, nvdla_dev->e31_fw_name, &pdev->dev); if (err < 0) { dla_error("Eswin e31 request fw error.\n"); return -EINVAL; diff --git a/drivers/soc/eswin/ai_driver/npu/npu_main.c b/drivers/soc/eswin/ai_driver/npu/npu_main.c index f75e70fc4549..a742cbc58452 100644 --- a/drivers/soc/eswin/ai_driver/npu/npu_main.c +++ b/drivers/soc/eswin/ai_driver/npu/npu_main.c @@ -51,7 +51,7 @@ #include #include #include -#include "es_iommu_rsv.h" +#include #include "dla_log.h" #include "dla_engine.h" #include "dla_engine_internal.h" @@ -258,7 +258,7 @@ int32_t dla_get_sram_address(void *driver_context, void *task_data, static const struct of_device_id edla_of_match[] = { { - .compatible = "eswin,npu0", + .compatible = "eswin,npu", }, {}, }; @@ -372,28 +372,28 @@ static int32_t edla_probe(struct platform_device *pdev) err = PTR_ERR(nvdla_dev->base); goto err_mem0; } - if (request_mem_region(E31_EMISSION_DTIM_BASE, E31_EMISSION_DTIM_SIZE, + if (request_mem_region(E31_EMISSION_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_EMISSION_DTIM_SIZE, "EMISSION_BASE") == NULL) { dev_err(&pdev->dev, "request_mem_region error\n"); err = -EBUSY; goto err_mem0; } nvdla_dev->emission_base = devm_ioremap( - &pdev->dev, E31_EMISSION_DTIM_BASE, E31_EMISSION_DTIM_SIZE); + &pdev->dev, E31_EMISSION_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_EMISSION_DTIM_SIZE); if (!nvdla_dev->emission_base) { dev_err(&pdev->dev, "ioremap error\n"); err = -ENOMEM; goto err_iomap_emission; } - if (request_mem_region(E31_PROGRAM_DTIM_BASE, E31_PROGRAM_DTIM_SIZE, + if (request_mem_region(E31_PROGRAM_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_PROGRAM_DTIM_SIZE, "PROGRAM_BASE") == NULL) { dev_err(&pdev->dev, "request_mem_region error\n"); err = -EBUSY; goto err_iomap_emission; } nvdla_dev->program_base = devm_ioremap( - &pdev->dev, E31_PROGRAM_DTIM_BASE, E31_PROGRAM_DTIM_SIZE); + &pdev->dev, E31_PROGRAM_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_PROGRAM_DTIM_SIZE); if (!nvdla_dev->program_base) { dev_err(&pdev->dev, "ioremap error\n"); err = -ENOMEM; @@ -401,7 +401,7 @@ static int32_t edla_probe(struct platform_device *pdev) } nvdla_dev->uart_mutex_base = devm_ioremap( - &pdev->dev, UART_MUTEX_BASE_ADDR, UART_MUTEX_ADDR_SIZE); + &pdev->dev, UART_MUTEX_BASE_ADDR + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, UART_MUTEX_ADDR_SIZE); if (!nvdla_dev->uart_mutex_base) { dev_err(&pdev->dev, "ioremap error\n"); err = -ENOMEM; @@ -432,28 +432,14 @@ static int32_t edla_probe(struct platform_device *pdev) } npu_tbu_power(dev, true); - switch (nvdla_dev->numa_id) { - case 0: - nvdla_dev->e31_mmio_base = devm_ioremap(dev, NPU_CFG_BASE_ADDR, + nvdla_dev->e31_mmio_base = devm_ioremap(dev, NPU_CFG_BASE_ADDR + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, NPU_CFG_ADDR_RANGE); - break; - case 1: - 
nvdla_dev->e31_mmio_base = - devm_ioremap(dev, NPU_CFG_BASE_ADDR + 0x20000000, - NPU_CFG_ADDR_RANGE); - break; - default: - dla_error( - "parameter numaid=%d is not correct, please use 0 or 1.\n", - nvdla_dev->numa_id); - goto err_iomap_e31; - } if (!nvdla_dev->e31_mmio_base) { dla_error("Eswin e31 ioremap fail.\n"); goto err_iomap_e31; } - err = npu_e31_load_fw(pdev, nvdla_dev->e31_mmio_base); + err = npu_e31_load_fw(nvdla_dev); if (err) { dev_err(&pdev->dev, "load e31 fw error.\n"); goto err_load_firm; @@ -496,7 +482,7 @@ static int32_t edla_probe(struct platform_device *pdev) nvdla_dev->pause_op_list = vmalloc(MAX_OP_NUM * sizeof(u16)); static_nvdla_dev[nvdla_dev->numa_id] = nvdla_dev; - err = create_npu_dev(0, nvdla_dev); + err = create_npu_dev(nvdla_dev->numa_id, nvdla_dev); if (err) { dev_err(&pdev->dev, "failed to register npu device\n"); goto err_create_dev; @@ -523,9 +509,9 @@ static int32_t edla_probe(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(dev); npu_disable_clock(nvdla_dev); err_iomap_program: - release_mem_region(E31_PROGRAM_DTIM_BASE, E31_PROGRAM_DTIM_SIZE); + release_mem_region(E31_PROGRAM_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_PROGRAM_DTIM_SIZE); err_iomap_emission: - release_mem_region(E31_EMISSION_DTIM_BASE, E31_EMISSION_DTIM_SIZE); + release_mem_region(E31_EMISSION_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_EMISSION_DTIM_SIZE); err_mem0: npu_put_dt_resources(nvdla_dev); npu_probe_result = err; @@ -547,7 +533,7 @@ static int32_t __exit edla_remove(struct platform_device *pdev) return -EIO; } - destory_npu_dev(0); + destory_npu_dev(nvdla_dev->numa_id); npu_uninit_mbox(nvdla_dev); npu_dev_reset(nvdla_dev); pm_runtime_disable(&nvdla_dev->pdev->dev); @@ -562,8 +548,8 @@ static int32_t __exit edla_remove(struct platform_device *pdev) } win_engine_destroy(nvdla_dev); edma_free(nvdla_dev); - release_mem_region(E31_EMISSION_DTIM_BASE, E31_EMISSION_DTIM_SIZE); - release_mem_region(E31_PROGRAM_DTIM_BASE, E31_PROGRAM_DTIM_SIZE); + release_mem_region(E31_EMISSION_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_EMISSION_DTIM_SIZE); + release_mem_region(E31_PROGRAM_DTIM_BASE + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, E31_PROGRAM_DTIM_SIZE); npu_spram_release(nvdla_dev); npu_tbu_power(&pdev->dev, false); @@ -671,7 +657,7 @@ int __maybe_unused npu_resume(struct device *dev) npu_tbu_power(dev, true); /* config streamID of NPU_DMA */ - ret = npu_e31_load_fw(ndev->pdev, ndev->e31_mmio_base); + ret = npu_e31_load_fw(ndev); if (ret) { dev_err(dev, "load e31 fw error.\n"); goto err_load_firm; diff --git a/drivers/soc/eswin/ai_driver/npu/npu_spram.c b/drivers/soc/eswin/ai_driver/npu/npu_spram.c index 27da16f2d241..2e8b78a88e73 100644 --- a/drivers/soc/eswin/ai_driver/npu/npu_spram.c +++ b/drivers/soc/eswin/ai_driver/npu/npu_spram.c @@ -27,6 +27,7 @@ #include #include #include "npu_spram.h" +#include "npu_base_regs.h" // At least one way must be reserved for cache #define MAX_CACHE_SIZE (2 * 0x100000) //2MB @@ -157,7 +158,7 @@ static int npu_llc_interleave_enable(struct nvdla_device *nvdla_dev) void *base_addr; struct device *dev = &nvdla_dev->pdev->dev; - base_addr = ioremap(0x51810000, 0x500); + base_addr = ioremap(0x51810000 + nvdla_dev->numa_id * NPU_DIE_REG_OFFSET, 0x500); if (!base_addr) { dev_err(dev, "ioremap error\n"); return -1; @@ -207,7 +208,7 @@ int npu_spram_init(struct nvdla_device *nvdla_dev) } spram_start = - ioremap(nvdla_dev->spram_base_addr, resource_size(&res_spram)); + devm_ioremap(&nvdla_dev->pdev->dev, 
nvdla_dev->spram_base_addr, resource_size(&res_spram)); if (IS_ERR(spram_start)) { dev_err(dev, "npu spram ioremap error\n"); return -ENODEV; diff --git a/drivers/soc/eswin/ai_driver/npu/nvdla_hw.c b/drivers/soc/eswin/ai_driver/npu/nvdla_hw.c index c81d0a3b3b93..096bcde3bcad 100644 --- a/drivers/soc/eswin/ai_driver/npu/nvdla_hw.c +++ b/drivers/soc/eswin/ai_driver/npu/nvdla_hw.c @@ -42,7 +42,7 @@ #include #include #include -#include "es_iommu_rsv.h" +#include #include "dla_interface.h" // TODO(yuzaiqiang) The header files dla_interface.h and llc_spram.h both define the same // macro CACHE_LINE_SIZE, resulting in a riscv compilation error. @@ -148,16 +148,16 @@ void npu_free_dma_addr(struct win_executor *executor, int i) executor->dma_addr[i]); } -static void npu_e31_hw_lock_reset(void) +static void npu_e31_hw_lock_reset(struct nvdla_device *ndev) { #define MUTEX_BASE_ADDR 0x51820000 #define MUTEX_UNIT_SIZE 4 uint32_t ret_token_id = 0; - unsigned long hw_lock_addr = MUTEX_BASE_ADDR + 1 * MUTEX_UNIT_SIZE; + unsigned long hw_lock_addr = ndev->numa_id * NPU_DIE_REG_OFFSET + MUTEX_BASE_ADDR + 1 * MUTEX_UNIT_SIZE; void *hw_lock_virt_addr = NULL; - hw_lock_virt_addr = ioremap(hw_lock_addr, 8); + hw_lock_virt_addr = devm_ioremap(&ndev->pdev->dev, hw_lock_addr, 8); ret_token_id = readl(hw_lock_virt_addr); @@ -165,19 +165,19 @@ static void npu_e31_hw_lock_reset(void) writel(0, hw_lock_virt_addr); } - iounmap(hw_lock_virt_addr); + devm_iounmap(&ndev->pdev->dev, hw_lock_virt_addr); return; } /*/sys/devices/platform/soc/51c00000.nvdla-controller/reg*/ -int npu_clk_reset_print(struct platform_device *pdev) +int npu_clk_reset_print(struct platform_device *pdev, int numa_id) { void *reset_base_addr; uint32_t reg_val1, reg_val2, reg_val3, reg_val5; - reset_base_addr = ioremap(NPU_CFG_BASE_ADDR, 0x500); + reset_base_addr = devm_ioremap(&pdev->dev, NPU_CFG_BASE_ADDR + numa_id * NPU_DIE_REG_OFFSET, 0x500); if (IS_ERR(reset_base_addr)) { dev_err(&pdev->dev, "reset base addr ioremap error\n"); return -ENODEV; @@ -198,7 +198,7 @@ int npu_clk_reset_print(struct platform_device *pdev) "[0x178]=0x%08x [0x17c]=0x%08x [0x180]=0x%08x [0x418]=0x%08x\n", reg_val1, reg_val2, reg_val3, reg_val5); - iounmap(reset_base_addr); + devm_iounmap(&pdev->dev, reset_base_addr); return 0; } @@ -212,7 +212,7 @@ static int npu_e31_dev_reset(struct nvdla_device *nvdla_dev) WARN_ON(0 != ret); /*reset e31 uart hw mutext*/ - npu_e31_hw_lock_reset(); + npu_e31_hw_lock_reset(nvdla_dev); return 0; } @@ -225,7 +225,7 @@ int npu_dev_reset(struct nvdla_device *nvdla_dev) msleep(10); /*reset npu core*/ - ret = npu_core_rst(0, false); + ret = npu_core_rst(nvdla_dev->numa_id, false); if (ret) { dev_err(&nvdla_dev->pdev->dev, "npu_core_rst fail,error: %d.\n", ret); @@ -233,7 +233,7 @@ int npu_dev_reset(struct nvdla_device *nvdla_dev) } /*reset npu cfg*/ - ret = npu_cfg_rst(0, false); + ret = npu_cfg_rst(nvdla_dev->numa_id, false); if (ret) { dev_err(&nvdla_dev->pdev->dev, "npu_core_rst fail,error: %d.\n", ret); @@ -241,14 +241,14 @@ int npu_dev_reset(struct nvdla_device *nvdla_dev) } msleep(10); - ret = npu_cfg_rst(0, true); + ret = npu_cfg_rst(nvdla_dev->numa_id, true); if (ret) { dev_err(&nvdla_dev->pdev->dev, "npu_cfg_rst fail,error: %d\n", ret); return ret; } - ret = npu_core_rst(0, true); + ret = npu_core_rst(nvdla_dev->numa_id, true); if (ret) { dev_err(&nvdla_dev->pdev->dev, "npu_core_rst fail,error: %d\n", ret); @@ -265,7 +265,7 @@ int npu_init_reset(struct nvdla_device *nvdla_dev) struct platform_device *pdev = nvdla_dev->pdev; 
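Across these NPU and DSP hunks the die-1 register blocks are reached by folding the die offset into the die-0 physical base (numa_id * NPU_DIE_REG_OFFSET here, numa_id * DIE_BASE_INTERVAL on the DSP side) instead of switch-casing on the die. A minimal sketch of the pattern, assuming the same offset macro and a device that already carries its numa_id:

#include <linux/device.h>
#include <linux/io.h>

/* Sketch: map a per-die register window. The die-1 copy of each block
 * sits at a fixed stride above the die-0 address, so one helper covers
 * both dies once the stride is folded in; devm_ioremap() unmaps it
 * automatically on driver detach.
 */
static void __iomem *npu_map_die_region(struct device *dev, int numa_id,
					resource_size_t die0_base, size_t size)
{
	resource_size_t base = die0_base +
			       (resource_size_t)numa_id * NPU_DIE_REG_OFFSET;

	return devm_ioremap(dev, base, size);
}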
npu_dev_reset(nvdla_dev); - npu_clk_reset_print(pdev); + npu_clk_reset_print(pdev, nvdla_dev->numa_id); return 0; } @@ -383,7 +383,7 @@ static ssize_t store_reg_val(struct device *d, struct device_attribute *attr, return count; } -int npu_clk_reset_print(struct platform_device *pdev); +int npu_clk_reset_print(struct platform_device *pdev, int numa_id); int dla_noc_sideband_query(void) { @@ -418,12 +418,11 @@ int dla_noc_sideband_query(void) static int npu_restart_init(struct nvdla_device *nvdla_dev) { - struct platform_device *pdev = nvdla_dev->pdev; int ret; npu_dma_sid_cfg(nvdla_dev->base, WIN2030_SID_NPU_DMA); npu_hw_init(nvdla_dev); - ret = npu_e31_load_fw(pdev, nvdla_dev->e31_mmio_base); + ret = npu_e31_load_fw(nvdla_dev); return ret; } @@ -555,7 +554,7 @@ static ssize_t store_reset_hand(struct device *d, struct device_attribute *attr, } exit: - npu_clk_reset_print(nvdla_dev->pdev); + npu_clk_reset_print(nvdla_dev->pdev, nvdla_dev->numa_id); mutex_unlock(&engine->reset_mutex); return count; } @@ -634,6 +633,7 @@ int npu_spram_get(struct nvdla_device *nvdla_dev) struct dla_buffer_object *spram_bobj = NULL; uint32_t drv_spram_size; int err = 0; + int numa_id = nvdla_dev->numa_id; err = llc_user_register(&nvdla_dev->pdev->dev); if (err) { @@ -665,7 +665,7 @@ int npu_spram_get(struct nvdla_device *nvdla_dev) dla_info("spram_size=0x%x\n", nvdla_dev->spram_size); spram_bobj = dla_alloc_dmabuf(nvdla_dev->spram_size, - ES_MEM_ALLOC_SPRAM_DIE0); + numa_id ? ES_MEM_ALLOC_SPRAM_DIE1 : ES_MEM_ALLOC_SPRAM_DIE0); if (spram_bobj < 0) { dla_error( "spram_dma_fd dev_mem_alloc failed!,spram_size=0x%x\n", @@ -793,7 +793,7 @@ int npu_put_dt_resources(struct nvdla_device *ndev) { struct platform_device *mbox_pdev = ndev->mbox_pdev; - free_irq(ndev->mbox_irq, ndev); + devm_free_irq(&mbox_pdev->dev, ndev->mbox_irq, ndev); clk_put(ndev->mbox_pclk); clk_put(ndev->mbox_pclk_device); @@ -826,13 +826,20 @@ int npu_dt_node_resources(struct nvdla_device *nvdla_dev) dev_err(&pdev->dev, "failed to get core_clk: %d\n", ret); return ret; } - nvdla_dev->rstc_e31_core = devm_reset_control_get_optional_exclusive( + //nvdla_dev->rstc_e31_core = devm_reset_control_get_optional_exclusive( + nvdla_dev->rstc_e31_core = devm_reset_control_get_optional( &pdev->dev, "e31_core"); if (IS_ERR_OR_NULL(nvdla_dev->rstc_e31_core)) { dev_err(&nvdla_dev->pdev->dev, "Failed to e31_core reset handle\n"); return -EFAULT; } + + if (device_property_read_string(&pdev->dev, "firmware-name", &nvdla_dev->e31_fw_name)) { + dev_err(&nvdla_dev->pdev->dev, "Failed to get e31 firmware name\n"); + return -EFAULT; + } + mbox_node = of_parse_phandle(pdev->dev.of_node, "npu_mbox", 0); if (mbox_node == NULL) { dev_err(&pdev->dev, "npu node have not mailbox node, err.\n"); @@ -895,14 +902,15 @@ int npu_dt_node_resources(struct nvdla_device *nvdla_dev) "failed to get device mailbox clock: %d\n", ret); return ret; } - nvdla_dev->mbox_rst = devm_reset_control_get_optional_exclusive( + + nvdla_dev->mbox_rst = devm_reset_control_get_optional( &mbox_pdev->dev, "rst"); if (IS_ERR(nvdla_dev->mbox_rst)) { ret = -ENODEV; dev_err(&mbox_pdev->dev, "failed to get rst controller.\n"); return ret; } - nvdla_dev->mbox_rst_device = devm_reset_control_get_optional_exclusive( + nvdla_dev->mbox_rst_device = devm_reset_control_get_optional( &mbox_pdev->dev, "rst_device"); if (IS_ERR(nvdla_dev->mbox_rst_device)) { ret = PTR_ERR(nvdla_dev->mbox_rst_device); diff --git a/drivers/soc/eswin/ai_driver/npu/nvdla_lowlevel.h b/drivers/soc/eswin/ai_driver/npu/nvdla_lowlevel.h index 
ed2b392aeda2..6d26b9470152 100644 --- a/drivers/soc/eswin/ai_driver/npu/nvdla_lowlevel.h +++ b/drivers/soc/eswin/ai_driver/npu/nvdla_lowlevel.h @@ -45,7 +45,8 @@ int npu_dev_reset(struct nvdla_device *nvdla_dev); int npu_init_reset(struct nvdla_device *nvdla_dev); void npu_dma_sid_cfg(void __iomem *npu_subsys_base, u32 sid); -int npu_e31_load_fw(struct platform_device *, void __iomem *e31_mmio_base); +int npu_e31_load_fw(struct nvdla_device *ndev); + int npu_pm_get(struct nvdla_device *ndev); int npu_pm_put(struct nvdla_device *ndev); diff --git a/drivers/soc/eswin/ai_driver/npu/user_context.c b/drivers/soc/eswin/ai_driver/npu/user_context.c index a1b4d007cef4..47dadc142fed 100644 --- a/drivers/soc/eswin/ai_driver/npu/user_context.c +++ b/drivers/soc/eswin/ai_driver/npu/user_context.c @@ -1151,7 +1151,7 @@ int create_npu_dev(int node_id, struct nvdla_device *nvdla_dev) int ret = 0; char *name; - npu_cdev[node_id].name = kzalloc(16, GFP_KERNEL); + npu_cdev[node_id].name = devm_kzalloc(&nvdla_dev->pdev->dev, 16, GFP_KERNEL); if (!npu_cdev[node_id].name) { dla_error("alloc memory for %d node err.\n", node_id); return -ENOMEM; @@ -1173,7 +1173,7 @@ int create_npu_dev(int node_id, struct nvdla_device *nvdla_dev) if (ret < 0) { dla_error("alloc_chrdev_region failed for npu%d\n", node_id); - kfree(name); + devm_kfree(&nvdla_dev->pdev->dev, name); npu_cdev[node_id].name = NULL; return ret; } @@ -1208,7 +1208,7 @@ int create_npu_dev(int node_id, struct nvdla_device *nvdla_dev) class_destroy(npu_cdev[node_id].class); class_err: unregister_chrdev_region(npu_cdev[node_id].devid, 1); - kfree(name); + devm_kfree(&nvdla_dev->pdev->dev, name); npu_cdev[node_id].name = NULL; return -1; @@ -1221,7 +1221,7 @@ void destory_npu_dev(int node_id) device_destroy(npu_cdev[node_id].class, npu_cdev[node_id].devid); class_destroy(npu_cdev[node_id].class); if (npu_cdev[node_id].name) { - kfree(npu_cdev[node_id].name); + devm_kfree(&npu_cdev[node_id].nvdla_dev->pdev->dev, npu_cdev[node_id].name); npu_cdev[node_id].name = NULL; } dla_debug("destory_npu_dev!\n"); diff --git a/drivers/soc/eswin/d2d.c b/drivers/soc/eswin/d2d.c new file mode 100644 index 000000000000..2b60e5eb4a6b --- /dev/null +++ b/drivers/soc/eswin/d2d.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * D2D error monitor of core driver + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * SPDX-License-Identifier: GPL-2.0 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
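The d2d.c monitor introduced below registers two shared error interrupts and keeps a delayed work item alive that periodically kicks the PHY adaptation bits in a spare firmware register. A minimal sketch of that self-rearming work pattern, simplified in one respect: the real adaptation_delay_work_fn() also polls until the hardware clears the request bits before re-arming:

#include <linux/io.h>
#include <linux/workqueue.h>

/* Sketch: periodic adaptation kick, re-armed from the handler itself. */
static void d2d_poll_work(struct work_struct *work)
{
	struct d2d_device *d2d = container_of(to_delayed_work(work),
					      struct d2d_device, delay_work);
	void __iomem *reg = d2d->control + PHY_OFFSET + REG_D2D_SPARE_FIRMWARE;

	writel(readl(reg) | 0x1004000, reg);

	/* come back in 2 s; probe schedules the first run the same way */
	schedule_delayed_work(&d2d->delay_work,
			      msecs_to_jiffies(2 * MSEC_PER_SEC));
}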
+ * + * Authors: Yu Ning + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/** + * D2D error unit register map + * 0x28 : d2d common interrupt + * 0x30 : d2d common interrupt2 + */ + +#define PHY_OFFSET 0x40000 +#define REG_D2D_INTR_SUMMARY 0x810 +#define REG_D2D_INTR2_SUMMARY 0x814 +#define REG_D2D_INTR_MASK 0x818 +#define REG_D2D_INTR2_MASK 0x81c +#define REG_D2D_COMMON_INTR 0x828 +#define REG_D2D_COMMON_INTR2 0x830 +#define REG_D2D_SPARE_FIRMWARE 0x4034 + +#define D2D_CNTRL_TOP_INTR (0x1<<8) +#define SERDES_INTR (0x1<<9) + +#define D2D_HW_INTR 287 +#define D2D_HW_INTR2 288 + +struct d2d_device { + struct device *dev; + void __iomem *control; + int plic_irq; + int numa_id; + struct delayed_work delay_work; +}; + +static char *d2d_err_desc[] = { + "RX_ERR_ALL_INT", + "RX_LOCAL_LINKUP_INT", + "RX_REMOTE_LINKUP_INT", + "CMN_BRIDGE_RX_AFIFO_OVF_ERR_INT", + "CMN_BRIDGE_RX_AFIFO_UNF_ERR_INT", + "CMN_BRIDGE_TX_AFIFO_OVF_ERR_INT", + "CMN_BRIDGE_TX_AFIFO_UNF_ERR_INT", +}; + +char *d2d_irq_src[] = { + "d2d interrupt", + "d2d 2nd interrupt", +}; + +#define D2D_IRQ_NUMBER (ARRAY_SIZE(d2d_irq_src)) + +static irqreturn_t d2d_irqhandle(int irq, void *dev_id) +{ + struct d2d_device *d2d_dev = dev_id; + void __iomem *base = d2d_dev->control; + unsigned int intr_status, index; + struct irq_data *data = NULL; + // char *irq_src = NULL; + // int ret, nid; + + data = irq_get_irq_data(irq); + if (NULL == data) { + pr_err("D2D: invalid irq data\n"); + } + + if (data->hwirq == D2D_HW_INTR) { + printk(KERN_ERR "%s , hw irq %ld!\n", d2d_irq_src[0], data->hwirq); + intr_status = readl(base+REG_D2D_INTR_SUMMARY); + printk(KERN_ERR "D2D-%d interrupt summary : 0x%x, d2d top interrupt: 0x%x\n", + d2d_dev->numa_id,intr_status,readl(base+REG_D2D_COMMON_INTR)); + if (intr_status & D2D_CNTRL_TOP_INTR) { + for(index=0,intr_status=readl(base+REG_D2D_COMMON_INTR);index<7;index++) { + if (intr_status & (0x1<hwirq == D2D_HW_INTR2) { + printk(KERN_ERR "%s , hw irq %ld!\n", d2d_irq_src[1], data->hwirq); + intr_status = readl(base+REG_D2D_INTR2_SUMMARY); + printk(KERN_ERR "D2D-%d interrupt2 summary : 0x%x, d2d top interrupt: 0x%x\n", + d2d_dev->numa_id,intr_status,readl(base+REG_D2D_COMMON_INTR2)); + if (intr_status & D2D_CNTRL_TOP_INTR) { + for(index=0,intr_status=readl(base+REG_D2D_COMMON_INTR2);index<7;index++) { + if (intr_status & (0x1<control; + void *reg_addr = (void *)(base + PHY_OFFSET + REG_D2D_SPARE_FIRMWARE); + unsigned int rdata = readl(reg_addr); + rdata = rdata | 0x1004000; + writel(rdata, reg_addr); + while(rdata & 0x1004000) { + schedule_timeout_interruptible(msecs_to_jiffies(10)); + rdata = readl(reg_addr); + } + schedule_delayed_work(&d2d_dev->delay_work, msecs_to_jiffies(2 * MSEC_PER_SEC)); +} + +static const struct of_device_id eic7x_d2d_error_of_match[] = { + {.compatible = "eswin,eic7x-d2d", }, + { /* sentinel value */ } +}; + +static int d2d_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct d2d_device *d2d_dev; + int ret, i, req_irq; + struct resource *res; + + d2d_dev = devm_kcalloc(dev, 1, + sizeof(struct d2d_device), GFP_KERNEL); + if (!d2d_dev) + return -ENOMEM; + + d2d_dev->dev = dev; + dev_set_drvdata(dev, d2d_dev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Error while get mem resource\n"); + return -ENODEV; + } + d2d_dev->control = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR_OR_NULL(d2d_dev->control)) { + 
dev_err(dev, "Fail to get resource %s from 0x%llx!\n", + node->name, res->start); + ret = -EINVAL; + goto free_d2d_err_dev; + } + + ret = device_property_read_u32(d2d_dev->dev, "numa-node-id", &(d2d_dev->numa_id)); + if (ret) { + dev_err(dev, "Can not get numa node!\n"); + goto free_d2d_err_dev; + } + + /* mask interrupts except D2D controller status and SerDes */ + writel(0xff,d2d_dev->control+REG_D2D_INTR_MASK); + writel(0xff,d2d_dev->control+REG_D2D_INTR2_MASK); + + if (readl(d2d_dev->control+REG_D2D_INTR_SUMMARY)||readl(d2d_dev->control+REG_D2D_INTR2_SUMMARY)) { + dev_info(dev, "D2D has intterupt intr:0x%x, intr2:0x%x\n", + readl(d2d_dev->control+REG_D2D_INTR_SUMMARY),readl(d2d_dev->control+REG_D2D_INTR2_SUMMARY)); + } + + /* clean any interrupt before */ + writel(0,d2d_dev->control+REG_D2D_COMMON_INTR); + writel(0,d2d_dev->control+REG_D2D_COMMON_INTR2); + + for (i = 0; i < D2D_IRQ_NUMBER; i++) { + req_irq = platform_get_irq(pdev, i); + if (req_irq < 0) + return req_irq; + + ret = devm_request_irq(&pdev->dev, req_irq, &d2d_irqhandle, + IRQF_SHARED |IRQF_ONESHOT , + d2d_irq_src[i], d2d_dev); + if (ret) { + dev_err(&pdev->dev, "cannot register irq %d, ret %d\n", req_irq, ret); + return ret; + } + dev_dbg(&pdev->dev,"registered irq %s, base %d, num %ld\n", d2d_irq_src[i], + platform_get_irq(pdev, 0), D2D_IRQ_NUMBER); + } + INIT_DELAYED_WORK(&d2d_dev->delay_work, adaptation_delay_work_fn); + schedule_delayed_work(&d2d_dev->delay_work, msecs_to_jiffies(2 * MSEC_PER_SEC)); + + dev_info(dev, "D2D-%d init OK\n",d2d_dev->numa_id); + return 0; + +free_d2d_err_dev: + return ret; + +} + +static struct platform_driver d2d_driver = { + .probe = d2d_probe, + // .remove = d2d_remove, + .driver = { + .name = "d2d_monitor", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(eic7x_d2d_error_of_match),}, +}; + +static int __init init_d2d_unit(void) +{ + return platform_driver_register(&d2d_driver); +} + +subsys_initcall(init_d2d_unit); diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c index a88123c81ace..f73cda043cbc 100644 --- a/drivers/soc/sifive/sifive_ccache.c +++ b/drivers/soc/sifive/sifive_ccache.c @@ -1,3 +1,4 @@ + // SPDX-License-Identifier: GPL-2.0 /* * SiFive composable cache controller Driver @@ -5,9 +6,7 @@ * Copyright (C) 2018-2022 SiFive, Inc. 
* */ - #define pr_fmt(fmt) "CCACHE: " fmt - #include #include #include @@ -16,175 +15,172 @@ #include #include #include - #include - #define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100 #define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104 #define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108 - #define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120 #define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124 #define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128 - #define SIFIVE_CCACHE_DATECCFIX_LOW 0x140 #define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144 #define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148 - #define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160 #define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164 #define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168 - #define SIFIVE_CCACHE_CONFIG 0x00 #define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0) #define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8) #define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16) #define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24) - #define SIFIVE_CCACHE_WAYENABLE 0x08 #define SIFIVE_CCACHE_ECCINJECTERR 0x40 - #define SIFIVE_CCACHE_MAX_ECCINTR 4 - #define SIFIVE_CCACHE_FLUSH64 0x200 #define SIFIVE_CCACHE_FLUSH64_LINE_LEN 64 - -static void __iomem *ccache_base; -static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR]; +enum { + CACHE_NODE_0 = 0, + CACHE_NODE_1, + SHARE_CACHE_NODE_NUM, +}; +static void __iomem *ccache_base[SHARE_CACHE_NODE_NUM]; +static int g_irq[SHARE_CACHE_NODE_NUM][SIFIVE_CCACHE_MAX_ECCINTR]; static struct riscv_cacheinfo_ops ccache_cache_ops; static int level; - enum { DIR_CORR = 0, DATA_CORR, DATA_UNCORR, DIR_UNCORR, }; - #ifdef CONFIG_DEBUG_FS static struct dentry *sifive_test; - static ssize_t ccache_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { unsigned int val; - if (kstrtouint_from_user(data, count, 0, &val)) return -EINVAL; if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF)) - writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR); + writel(val, ccache_base[0] + SIFIVE_CCACHE_ECCINJECTERR); else return -EINVAL; return count; } - static const struct file_operations ccache_fops = { .owner = THIS_MODULE, .open = simple_open, .write = ccache_write }; - static void setup_sifive_debug(void) { sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL); - debugfs_create_file("sifive_debug_inject_error", 0200, sifive_test, NULL, &ccache_fops); } #endif - -static void ccache_config_read(void) +static void ccache_config_read(int node_id) { u32 cfg; - - cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG); + cfg = readl(ccache_base[node_id] + SIFIVE_CCACHE_CONFIG); pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n", FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg), FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg), BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)), BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg))); - - cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE); - pr_info("Index of the largest way enabled: %u\n", cfg); + cfg = readl(ccache_base[node_id] + SIFIVE_CCACHE_WAYENABLE); + pr_info("Node %d, index of the largest way enabled: %u\n", node_id, cfg); } - #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) -static void ccache_way_enable(void) +static void ccache_way_enable(int node_id) { u32 cfg, val; - - cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG); + cfg = readl(ccache_base[node_id] + SIFIVE_CCACHE_CONFIG); val = FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg); - writel(val -1 , ccache_base + SIFIVE_CCACHE_WAYENABLE); + writel(val -1 , ccache_base[node_id] + SIFIVE_CCACHE_WAYENABLE); } - static void 
ccache_flush64_range(phys_addr_t paddr, size_t size) { unsigned long line; - size = size + (paddr % SIFIVE_CCACHE_FLUSH64_LINE_LEN); paddr = ALIGN_DOWN(paddr, SIFIVE_CCACHE_FLUSH64_LINE_LEN); - mb(); /* sync */ - - for (line = paddr; line < paddr + size; - line += SIFIVE_CCACHE_FLUSH64_LINE_LEN) { - writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64); - mb(); + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC7702_SOC) + if (paddr >= CONFIG_RISCV_DIE0_CACHED_OFFSET && (paddr + size) <= (CONFIG_RISCV_DIE0_CACHED_OFFSET + CONFIG_RISCV_DIE0_MEM_MAX_SIZE)) { + #endif + for (line = paddr; line < paddr + size; + line += SIFIVE_CCACHE_FLUSH64_LINE_LEN) { + writeq(line, ccache_base[CACHE_NODE_0] + SIFIVE_CCACHE_FLUSH64); + mb(); + } + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC7702_SOC) + }else if (paddr >= CONFIG_RISCV_DIE1_CACHED_OFFSET && (paddr + size) <= (CONFIG_RISCV_DIE1_CACHED_OFFSET + CONFIG_RISCV_DIE1_MEM_MAX_SIZE)) { + for (line = paddr; line < paddr + size; + line += SIFIVE_CCACHE_FLUSH64_LINE_LEN) { + writeq(line, ccache_base[CACHE_NODE_1] + SIFIVE_CCACHE_FLUSH64); + mb(); + } + } + else if (paddr >= CONFIG_RISCV_INTERLEAVE_CACHED_OFFSET && (paddr + size) <= (CONFIG_RISCV_INTERLEAVE_CACHED_OFFSET + CONFIG_RISCV_INTERLEAVE_MEM_MAX_SIZE)){ + for (line = paddr; line < paddr + size; + line += SIFIVE_CCACHE_FLUSH64_LINE_LEN) { + if((!(!(line & 0x40000)))^(!(!(line & 0x100)))) { + writeq(line, ccache_base[CACHE_NODE_1] + SIFIVE_CCACHE_FLUSH64); + } + else { + writeq(line, ccache_base[CACHE_NODE_0] + SIFIVE_CCACHE_FLUSH64); + } + mb(); + } + } + else { + WARN(1, "Sifive ccache: flush64 out of range: %llx(%lx), skip flush\n", + paddr, size); + return; } + #endif } - static const struct riscv_nonstd_cache_ops ccache_cmo_ops __initdata = { .wback = &ccache_flush64_range, .inv = &ccache_flush64_range, .wback_inv = &ccache_flush64_range, }; #endif - static const struct of_device_id sifive_ccache_ids[] = { { .compatible = "sifive,fu540-c000-ccache" }, { .compatible = "sifive,fu740-c000-ccache" }, { .compatible = "sifive,ccache0" }, { /* end of table */ } }; - static ATOMIC_NOTIFIER_HEAD(ccache_err_chain); - int register_sifive_ccache_error_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&ccache_err_chain, nb); } EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier); - int unregister_sifive_ccache_error_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&ccache_err_chain, nb); } EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier); - static int ccache_largest_wayenabled(void) { - return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF; + return readl(ccache_base[0] + SIFIVE_CCACHE_WAYENABLE) & 0xFF; } - static ssize_t number_of_ways_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ccache_largest_wayenabled()); } - static DEVICE_ATTR_RO(number_of_ways_enabled); - static struct attribute *priv_attrs[] = { &dev_attr_number_of_ways_enabled.attr, NULL, }; - static const struct attribute_group priv_attr_group = { .attrs = priv_attrs, }; - static const struct attribute_group *ccache_get_priv_group(struct cacheinfo *this_leaf) { @@ -194,122 +190,115 @@ static const struct attribute_group *ccache_get_priv_group(struct cacheinfo else return NULL; } - static irqreturn_t ccache_int_handler(int irq, void *device) { unsigned int add_h, add_l; - - if (irq == g_irq[DIR_CORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW); + int node_id = 
*(int *)device; + if (irq == g_irq[node_id][DIR_CORR]) { + add_h = readl(ccache_base[node_id] + SIFIVE_CCACHE_DIRECCFIX_HIGH); + add_l = readl(ccache_base[node_id] + SIFIVE_CCACHE_DIRECCFIX_LOW); pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l); /* Reading this register clears the DirError interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT); + readl(ccache_base[node_id] + SIFIVE_CCACHE_DIRECCFIX_COUNT); atomic_notifier_call_chain(&ccache_err_chain, SIFIVE_CCACHE_ERR_TYPE_CE, "DirECCFix"); } - if (irq == g_irq[DIR_UNCORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW); + if (irq == g_irq[node_id][DIR_UNCORR]) { + add_h = readl(ccache_base[node_id] + SIFIVE_CCACHE_DIRECCFAIL_HIGH); + add_l = readl(ccache_base[node_id] + SIFIVE_CCACHE_DIRECCFAIL_LOW); /* Reading this register clears the DirFail interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT); + readl(ccache_base[node_id] + SIFIVE_CCACHE_DIRECCFAIL_COUNT); atomic_notifier_call_chain(&ccache_err_chain, SIFIVE_CCACHE_ERR_TYPE_UE, "DirECCFail"); panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l); } - if (irq == g_irq[DATA_CORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW); + if (irq == g_irq[node_id][DATA_CORR]) { + add_h = readl(ccache_base[node_id] + SIFIVE_CCACHE_DATECCFIX_HIGH); + add_l = readl(ccache_base[node_id] + SIFIVE_CCACHE_DATECCFIX_LOW); pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l); /* Reading this register clears the DataError interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT); + readl(ccache_base[node_id] + SIFIVE_CCACHE_DATECCFIX_COUNT); atomic_notifier_call_chain(&ccache_err_chain, SIFIVE_CCACHE_ERR_TYPE_CE, "DatECCFix"); } - if (irq == g_irq[DATA_UNCORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW); + if (irq == g_irq[node_id][DATA_UNCORR]) { + add_h = readl(ccache_base[node_id] + SIFIVE_CCACHE_DATECCFAIL_HIGH); + add_l = readl(ccache_base[node_id] + SIFIVE_CCACHE_DATECCFAIL_LOW); pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l); /* Reading this register clears the DataFail interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT); + readl(ccache_base[node_id] + SIFIVE_CCACHE_DATECCFAIL_COUNT); atomic_notifier_call_chain(&ccache_err_chain, SIFIVE_CCACHE_ERR_TYPE_UE, "DatECCFail"); } - return IRQ_HANDLED; } - +static int node_index = 0; static int __init sifive_ccache_init(void) { struct device_node *np; struct resource res; int i, rc, intr_num; - - np = of_find_matching_node(NULL, sifive_ccache_ids); - if (!np) - return -ENODEV; - - if (of_address_to_resource(np, 0, &res)) { - rc = -ENODEV; - goto err_node_put; - } - - ccache_base = ioremap(res.start, resource_size(&res)); - if (!ccache_base) { - rc = -ENOMEM; - goto err_node_put; - } - - if (of_property_read_u32(np, "cache-level", &level)) { - rc = -ENOENT; - goto err_unmap; - } - - intr_num = of_property_count_u32_elems(np, "interrupts"); - if (!intr_num) { - pr_err("No interrupts property\n"); - rc = -ENODEV; - goto err_unmap; - } - - for (i = 0; i < intr_num; i++) { - g_irq[i] = irq_of_parse_and_map(np, i); - rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc", - NULL); - if (rc) { - pr_err("Could not request IRQ %d\n", g_irq[i]); - goto err_free_irq; + for_each_matching_node(np, sifive_ccache_ids) { + if (!np) + return -ENODEV; + if 
(of_address_to_resource(np, 0, &res)) { + rc = -ENODEV; + goto err_node_put; } - } - of_node_put(np); - - #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) - ccache_way_enable(); - riscv_noncoherent_register_cache_ops(&ccache_cmo_ops); + if (of_property_read_u32(np, "numa-node-id", &node_index)) + return -ENODEV; + if (node_index >= SHARE_CACHE_NODE_NUM) { + rc = -ENODEV; + goto err_node_put; + } + ccache_base[node_index] = ioremap(res.start, resource_size(&res)); + if (!ccache_base[node_index]) { + rc = -ENOMEM; + goto err_node_put; + } + if (of_property_read_u32(np, "cache-level", &level)) { + rc = -ENOENT; + goto err_unmap; + } + intr_num = of_property_count_u32_elems(np, "interrupts"); + if (!intr_num) { + pr_err("No interrupts property\n"); + rc = -ENODEV; + goto err_unmap; + } + for (i = 0; i < intr_num; i++) { + g_irq[node_index][i] = irq_of_parse_and_map(np, i); + rc = request_irq(g_irq[node_index][i], ccache_int_handler, 0, "ccache_ecc", + &node_index); + if (rc) { + pr_err("Could not request IRQ %d\n", g_irq[node_index][i]); + goto err_free_irq; + } + } + of_node_put(np); + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + ccache_way_enable(node_index); + riscv_noncoherent_register_cache_ops(&ccache_cmo_ops); + #endif + ccache_config_read(node_index); + ccache_cache_ops.get_priv_group = ccache_get_priv_group; + riscv_set_cacheinfo_ops(&ccache_cache_ops); + #ifdef CONFIG_DEBUG_FS + setup_sifive_debug(); #endif - - ccache_config_read(); - - ccache_cache_ops.get_priv_group = ccache_get_priv_group; - riscv_set_cacheinfo_ops(&ccache_cache_ops); - -#ifdef CONFIG_DEBUG_FS - setup_sifive_debug(); -#endif + } return 0; - err_free_irq: while (--i >= 0) - free_irq(g_irq[i], NULL); + free_irq(g_irq[node_index][i], NULL); err_unmap: - iounmap(ccache_base); + iounmap(ccache_base[node_index]); err_node_put: of_node_put(np); return rc; } - device_initcall(sifive_ccache_init); diff --git a/drivers/staging/media/eswin/dewarp/vvcam_dwe_driver.c b/drivers/staging/media/eswin/dewarp/vvcam_dwe_driver.c index 78a843e5f85d..f1ab8835ceac 100644 --- a/drivers/staging/media/eswin/dewarp/vvcam_dwe_driver.c +++ b/drivers/staging/media/eswin/dewarp/vvcam_dwe_driver.c @@ -80,8 +80,12 @@ #include "vivdw200_irq_queue.h" #include "dw200_dump.h" -#define VIVCAM_DWE_NAME "es_dewarp" -#define VIVCAM_DWE_MAXCNT 1 +#define ES_DEWARP_NAME "es_dewarp" +#define DEWARP_CLASS_NAME "es_dewarp_class" +#define NUM_DEVICES 2 + +static dev_t devt; +static struct class *es_dewarp_class; #define VSE_REG_INDEX (0) #define DWE_REG_INDEX (1) @@ -94,23 +98,47 @@ #define VVCAM_DW_CLK_HIGHEST 594000000 #define VVCAM_AXI_CLK_HIGHEST 800000000 -#define VI_TOP_CLK 0x51030040 -#define VI_TOP_CLK_ENABLE_VAL 0xFFFF - #define IS_BIT_SET(reg, bit) ((reg) & (1 << (bit))) #define SET_BIT(reg, bit) ((reg) |= (1 << (bit))) +#define VSE_OFFLINE_MODE 0 +#define ISP_ONLINE_VSE_MODE 1 +#define DWE_ONLINE_VSE_MODE 2 +#define ES_WAIT_TIMEOUT_MS msecs_to_jiffies(1000) +#define ES_BUSY 0 +#define ES_IDLE 1 +#define RET_HW_COMMOND 0x11 + +static bool fe_enable = false; +module_param(fe_enable, bool, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(fe_enable, "FE(command buffer) enable for DW200"); + struct es_dewarp_driver_dev { struct cdev cdev; - dev_t devt; - struct class *class; + struct device *device; + int id; + struct mutex vvmutex; - void *private; + struct dw200_subdev hw_dev; unsigned int irq_num; unsigned int irq_num_vse; bool irq_trigger; wait_queue_head_t irq_wait; + atomic_t vse_online_mode_atomic; + wait_queue_head_t 
dwe_irq_wait_q; + wait_queue_head_t vse_irq_wait_q; + spinlock_t dwe_irq_lock; + spinlock_t vse_irq_lock; + atomic_t dwe_irq_trigger_mis; + atomic_t vse_irq_trigger_mis; + struct semaphore dwe_sem; + struct semaphore vse_sem; + wait_queue_head_t dwe_reserve_wait_q; + wait_queue_head_t vse_reserve_wait_q; + int dwe_status; + int vse_status; + wait_queue_head_t trigger_wq; atomic_t trigger_atom; int suspended; @@ -127,45 +155,12 @@ struct es_dw200_private { struct heap_root heap_root; }; -#ifdef ES_DW200_SDK -#define VSE_OFFLINE_MODE 0 -#define ISP_ONLINE_VSE_MODE 1 -#define DWE_ONLINE_VSE_MODE 2 -#define ES_WAIT_TIMEOUT_MS msecs_to_jiffies(1000) -#define ES_BUSY 0 -#define ES_IDLE 1 -#define RET_HW_COMMOND 0x11 -atomic_t vse_online_mode_atomic = ATOMIC_INIT(VSE_OFFLINE_MODE); -static DECLARE_WAIT_QUEUE_HEAD(dwe_irq_wait_q); -static DECLARE_WAIT_QUEUE_HEAD(vse_irq_wait_q); -static DEFINE_SPINLOCK(dwe_irq_lock); -static DEFINE_SPINLOCK(vse_irq_lock); -atomic_t dwe_irq_trigger_mis = ATOMIC_INIT(0); -atomic_t vse_irq_trigger_mis = ATOMIC_INIT(0); -static struct semaphore dwe_sem; -static struct semaphore vse_sem; -static DECLARE_WAIT_QUEUE_HEAD(dwe_reserve_wait_q); -static DECLARE_WAIT_QUEUE_HEAD(vse_reserve_wait_q); -static int dwe_status = ES_IDLE; -static int vse_status = ES_IDLE; -#endif // ES_DW200_SDK - -static unsigned int es_dewarp_major = 0; -static unsigned int es_dewarp_minor = 0; -struct class *es_dewarp_class; -static unsigned int devise_register_index = 0; - -static bool fe_enable = false; -module_param(fe_enable, bool, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); -MODULE_PARM_DESC(fe_enable, "FE(command buffer) enable for DW200"); - static unsigned int dewarp_poll(struct file *filp, poll_table *wait) { unsigned int mask = 0; struct es_dw200_private *pes_dw200_priv = filp->private_data; struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; - struct dw200_subdev *pdw200 = - (struct dw200_subdev *)pdriver_dev->private; + struct dw200_subdev *pdw200 = &pdriver_dev->hw_dev; vivdw200_mis_list_t *Vse_pCList = &pdw200->vse_circle_list; vivdw200_mis_list_t *Dwe_pCList = &pdw200->dwe_circle_list; @@ -194,28 +189,28 @@ static int obtain_dewarp_mis(struct device *dev) unsigned int dwe_mis, vse_mis; unsigned long flags; - pdwe_dev = (struct dw200_subdev *)pdriver_dev->private; + pdwe_dev = &pdriver_dev->hw_dev; //dw200 is working wait done vse_read_irq((struct dw200_subdev *)pdwe_dev, &vse_mis); dwe_read_irq((struct dw200_subdev *)pdwe_dev, &dwe_mis); if (vse_mis) { - spin_lock_irqsave(&vse_irq_lock, flags); + spin_lock_irqsave(&pdriver_dev->vse_irq_lock, flags); vse_clear_irq((struct dw200_subdev *)pdwe_dev, vse_mis); - atomic_set(&vse_irq_trigger_mis, vse_mis); + atomic_set(&pdriver_dev->vse_irq_trigger_mis, vse_mis); atomic_dec(&pdriver_dev->trigger_atom); - wake_up_interruptible_all(&vse_irq_wait_q); - spin_unlock_irqrestore(&vse_irq_lock, flags); + wake_up_interruptible_all(&pdriver_dev->vse_irq_wait_q); + spin_unlock_irqrestore(&pdriver_dev->vse_irq_lock, flags); ret |= vse_mis; } dwe_mis = dwe_mis & 0x1; if (dwe_mis) { - spin_lock_irqsave(&dwe_irq_lock, flags); + spin_lock_irqsave(&pdriver_dev->dwe_irq_lock, flags); dwe_clear_irq((struct dw200_subdev *)pdwe_dev, dwe_mis << 24); - atomic_set(&dwe_irq_trigger_mis, dwe_mis); + atomic_set(&pdriver_dev->dwe_irq_trigger_mis, dwe_mis); atomic_dec(&pdriver_dev->trigger_atom); - wake_up_interruptible_all(&dwe_irq_wait_q); - spin_unlock_irqrestore(&dwe_irq_lock, flags); + wake_up_interruptible_all(&pdriver_dev->dwe_irq_wait_q); + 
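[Editor's note] The fields added to struct es_dewarp_driver_dev above replace the former file-scope wait queues, spinlocks, semaphores and status flags, so the two per-die dewarp instances no longer share reservation state; the reserveDwe/releaseDwe and reserveVse/releaseVse helpers further down operate on them. A minimal sketch of that reservation pattern under simplified names (not the driver's exact code):

#include <linux/semaphore.h>
#include <linux/wait.h>

struct unit {
	struct semaphore sem;		/* one owner at a time */
	wait_queue_head_t idle_wq;	/* woken when status goes idle */
	int status;			/* 1 = idle, 0 = busy */
};

static int unit_reserve(struct unit *u)
{
	if (down_interruptible(&u->sem))
		return -ERESTARTSYS;
	/* wait until the engine is really idle, then mark it busy */
	if (wait_event_interruptible(u->idle_wq, u->status)) {
		up(&u->sem);		/* drop the claim if interrupted */
		return -ERESTARTSYS;
	}
	u->status = 0;
	return 0;
}

static void unit_release(struct unit *u)
{
	u->status = 1;
	up(&u->sem);
	wake_up_interruptible_all(&u->idle_wq);
}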
spin_unlock_irqrestore(&pdriver_dev->dwe_irq_lock, flags); ret |= dwe_mis; } return ret; @@ -349,33 +344,36 @@ static int triggerDweVse(struct es_dw200_private *pes_dw200_priv) static long reserveDwe(struct es_dw200_private *pes_dw200_priv) { + struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; pr_debug("%s before down_interruptible, sema.count: %d\n", __func__, - dwe_sem.count); + pdriver_dev->dwe_sem.count); /* reserve dwe */ - if (down_interruptible(&dwe_sem)) + if (down_interruptible(&pdriver_dev->dwe_sem)) return -ERESTARTSYS; pr_debug("%s after down_interruptible, sema.count: %d\n", __func__, - dwe_sem.count); + pdriver_dev->dwe_sem.count); /* lock a core that has specific format*/ - if (wait_event_interruptible(dwe_reserve_wait_q, dwe_status) != 0) + if (wait_event_interruptible(pdriver_dev->dwe_reserve_wait_q, + pdriver_dev->dwe_status) != 0) return -ERESTARTSYS; - dwe_status = ES_BUSY; + pdriver_dev->dwe_status = ES_BUSY; atomic_inc(&pes_dw200_priv->dwe_reserved_atom); - return dwe_status; + return pdriver_dev->dwe_status; } static void releaseDwe(struct es_dw200_private *pes_dw200_priv) { + struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; if (atomic_read(&pes_dw200_priv->dwe_reserved_atom) > 0) { pr_debug("%s before up, sema.count: %d\n", __func__, - dwe_sem.count); - dwe_status = ES_IDLE; - up(&dwe_sem); - wake_up_interruptible_all(&dwe_reserve_wait_q); + pdriver_dev->dwe_sem.count); + pdriver_dev->dwe_status = ES_IDLE; + up(&pdriver_dev->dwe_sem); + wake_up_interruptible_all(&pdriver_dev->dwe_reserve_wait_q); atomic_dec(&pes_dw200_priv->dwe_reserved_atom); pr_debug("%s after up, sema.count: %d\n", __func__, - dwe_sem.count); + pdriver_dev->dwe_sem.count); } else { pr_err("no reserved resources, no release.\n"); } @@ -383,57 +381,60 @@ static void releaseDwe(struct es_dw200_private *pes_dw200_priv) static long reserveVse(struct es_dw200_private *pes_dw200_priv) { + struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; pr_debug("%s before down_interruptible, sema.count: %d\n", __func__, - vse_sem.count); + pdriver_dev->vse_sem.count); /* reserve vse */ - if (down_interruptible(&vse_sem)) + if (down_interruptible(&pdriver_dev->vse_sem)) return -ERESTARTSYS; pr_debug("%s after down_interruptible, sema.count: %d\n", __func__, - vse_sem.count); + pdriver_dev->vse_sem.count); /* lock a core that has specific format*/ - if (wait_event_interruptible(vse_reserve_wait_q, vse_status) != 0) + if (wait_event_interruptible(pdriver_dev->vse_reserve_wait_q, + pdriver_dev->vse_status) != 0) return -ERESTARTSYS; - vse_status = ES_BUSY; + pdriver_dev->vse_status = ES_BUSY; atomic_inc(&pes_dw200_priv->vse_reserved_atom); - return vse_status; + return pdriver_dev->vse_status; } static void releaseVse(struct es_dw200_private *pes_dw200_priv) { + struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; if (atomic_read(&pes_dw200_priv->vse_reserved_atom) > 0) { pr_debug("%s before up, sema.count: %d\n", __func__, - vse_sem.count); - vse_status = ES_IDLE; - up(&vse_sem); - wake_up_interruptible_all(&vse_reserve_wait_q); + pdriver_dev->vse_sem.count); + pdriver_dev->vse_status = ES_IDLE; + up(&pdriver_dev->vse_sem); + wake_up_interruptible_all(&pdriver_dev->vse_reserve_wait_q); atomic_dec(&pes_dw200_priv->vse_reserved_atom); pr_debug("%s after up, sema.count: %d\n", __func__, - vse_sem.count); + pdriver_dev->vse_sem.count); } else { pr_err("no reserved resources, no release.\n"); } } -static int CheckIrq(int is_dwe) +static int 
CheckIrq(struct es_dewarp_driver_dev *pdriver_dev, int is_dwe) { unsigned long flags; u32 mis = 0; int rdy = 0; if (is_dwe) { - spin_lock_irqsave(&dwe_irq_lock, flags); - mis = atomic_read(&dwe_irq_trigger_mis); + spin_lock_irqsave(&pdriver_dev->dwe_irq_lock, flags); + mis = atomic_read(&pdriver_dev->dwe_irq_trigger_mis); if (mis) { rdy = 1; } - spin_unlock_irqrestore(&dwe_irq_lock, flags); + spin_unlock_irqrestore(&pdriver_dev->dwe_irq_lock, flags); } else { - spin_lock_irqsave(&vse_irq_lock, flags); - mis = atomic_read(&vse_irq_trigger_mis); + spin_lock_irqsave(&pdriver_dev->vse_irq_lock, flags); + mis = atomic_read(&pdriver_dev->vse_irq_trigger_mis); if (mis) { rdy = 1; } - spin_unlock_irqrestore(&vse_irq_lock, flags); + spin_unlock_irqrestore(&pdriver_dev->vse_irq_lock, flags); } return rdy; } @@ -441,52 +442,50 @@ static int CheckIrq(int is_dwe) irqreturn_t dwe_isr(int irq, void *dev_id) { struct es_dewarp_driver_dev *pdriver_dev = dev_id; - struct dw200_subdev *pdw200 = - (struct dw200_subdev *)pdriver_dev->private; + struct dw200_subdev *pdw200 = &pdriver_dev->hw_dev; u32 dwe_mis = 0; unsigned long flags; - spin_lock_irqsave(&dwe_irq_lock, flags); + spin_lock_irqsave(&pdriver_dev->dwe_irq_lock, flags); dwe_read_irq((struct dw200_subdev *)pdw200, &dwe_mis); dwe_mis = dwe_mis & 0x1; if (0 != dwe_mis) { dwe_clear_irq((struct dw200_subdev *)pdw200, dwe_mis << 24); - atomic_set(&dwe_irq_trigger_mis, dwe_mis); - wake_up_interruptible_all(&dwe_irq_wait_q); + atomic_set(&pdriver_dev->dwe_irq_trigger_mis, dwe_mis); + wake_up_interruptible_all(&pdriver_dev->dwe_irq_wait_q); atomic_dec(&pdriver_dev->trigger_atom); wake_up_interruptible(&pdriver_dev->trigger_wq); - spin_unlock_irqrestore(&dwe_irq_lock, flags); + spin_unlock_irqrestore(&pdriver_dev->dwe_irq_lock, flags); return IRQ_HANDLED; } - spin_unlock_irqrestore(&dwe_irq_lock, flags); + spin_unlock_irqrestore(&pdriver_dev->dwe_irq_lock, flags); return IRQ_NONE; } irqreturn_t vse_isr(int irq, void *dev_id) { struct es_dewarp_driver_dev *pdriver_dev = dev_id; - struct dw200_subdev *pdw200 = - (struct dw200_subdev *)pdriver_dev->private; + struct dw200_subdev *pdw200 = &pdriver_dev->hw_dev; u32 vse_mis = 0; unsigned long flags; DEBUG_PRINT("%s enter\n", __func__); - spin_lock_irqsave(&vse_irq_lock, flags); + spin_lock_irqsave(&pdriver_dev->vse_irq_lock, flags); vse_read_irq((struct dw200_subdev *)pdw200, &vse_mis); DEBUG_PRINT(" %s vse mis 0x%08x\n", __func__, vse_mis); if (vse_mis) { vse_clear_irq((struct dw200_subdev *)pdw200, vse_mis); - atomic_set(&vse_irq_trigger_mis, vse_mis); - wake_up_interruptible_all(&vse_irq_wait_q); + atomic_set(&pdriver_dev->vse_irq_trigger_mis, vse_mis); + wake_up_interruptible_all(&pdriver_dev->vse_irq_wait_q); atomic_dec(&pdriver_dev->trigger_atom); wake_up_interruptible(&pdriver_dev->trigger_wq); - spin_unlock_irqrestore(&vse_irq_lock, flags); + spin_unlock_irqrestore(&pdriver_dev->vse_irq_lock, flags); DEBUG_PRINT("%s vse frame ready, vse mis 0x%08x\n", __func__, vse_mis); return IRQ_HANDLED; } - spin_unlock_irqrestore(&vse_irq_lock, flags); + spin_unlock_irqrestore(&pdriver_dev->vse_irq_lock, flags); return IRQ_NONE; } @@ -497,9 +496,8 @@ static long waitDweDone(struct es_dw200_private *pes_dw200_priv, long timeout) u32 irq_trigger = 0; struct dw200_subdev *pdwe_dev = &pes_dw200_priv->dw200; struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; - ret = wait_event_interruptible_timeout(dwe_irq_wait_q, CheckIrq(1), - timeout); - // ret = wait_event_interruptible(dwe_irq_wait_q, 
atomic_read(&dwe_irq_trigger_mis)); + ret = wait_event_interruptible_timeout( + pdriver_dev->dwe_irq_wait_q, CheckIrq(pdriver_dev, 1), timeout); if (ret < 0) { pr_err("dwe wait_event_interruptible interrupted\n"); ret = -ERESTARTSYS; @@ -511,7 +509,8 @@ static long waitDweDone(struct es_dw200_private *pes_dw200_priv, long timeout) // when dwe timeout, need wait vse idle,than do top reset pr_err("do dw200 top reset\n"); - wait_event_interruptible(vse_reserve_wait_q, vse_status); + wait_event_interruptible(pdriver_dev->vse_reserve_wait_q, + pdriver_dev->vse_status); reset_control_reset(pdwe_dev->dw_crg.rstc_dwe); atomic_dec(&pdriver_dev->trigger_atom); ret = -ETIMEDOUT; @@ -519,12 +518,12 @@ static long waitDweDone(struct es_dw200_private *pes_dw200_priv, long timeout) dwe_disable_irq(pdwe_dev); dwe_enable_bus(pdwe_dev, false); } - irq_trigger = atomic_read(&dwe_irq_trigger_mis); + irq_trigger = atomic_read(&pdriver_dev->dwe_irq_trigger_mis); if (irq_trigger & INT_FRAME_DONE) { pr_debug("wait dwe done ok, irq_trigger:%08x\n", irq_trigger); } - atomic_set(&dwe_irq_trigger_mis, 0); + atomic_set(&pdriver_dev->dwe_irq_trigger_mis, 0); /*clean all irq*/ dwe_clear_irq(pdwe_dev, INT_RESET_MASK); reg = dwe_read_reg(pdwe_dev, BUS_CTRL); @@ -538,9 +537,8 @@ static long waitVseDone(struct es_dw200_private *pes_dw200_priv, long timeout) struct dw200_subdev *pdwe_dev = &pes_dw200_priv->dw200; struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; - ret = wait_event_interruptible_timeout(vse_irq_wait_q, CheckIrq(0), - timeout); - // ret = wait_event_interruptible(vse_irq_wait_q, CheckIrq(0)); + ret = wait_event_interruptible_timeout( + pdriver_dev->vse_irq_wait_q, CheckIrq(pdriver_dev, 0), timeout); if (ret < 0) { pr_err("wait_event_interruptible interrupted\n"); ret = -ERESTARTSYS; @@ -552,17 +550,19 @@ static long waitVseDone(struct es_dw200_private *pes_dw200_priv, long timeout) readVseReg(pdwe_dev); // when vse timeout, need wait dwe idle,than do top reset pr_err("do dw200 top reset\n"); - wait_event_interruptible(dwe_reserve_wait_q, dwe_status); + wait_event_interruptible(pdriver_dev->dwe_reserve_wait_q, + pdriver_dev->dwe_status); reset_control_reset(pdwe_dev->dw_crg.rstc_dwe); atomic_dec(&pdriver_dev->trigger_atom); ret = -ETIMEDOUT; } - atomic_set(&vse_irq_trigger_mis, 0); + atomic_set(&pdriver_dev->vse_irq_trigger_mis, 0); vse_write_reg(pdwe_dev, VSE_REG_MI_ICR, 0xffffffff); vse_write_reg(pdwe_dev, VSE_REG_MI_ICR1, 0xffffffff); /*stop the axi bus*/ vse_write_reg(pdwe_dev, VSE_REG_MI0_BUS_ID, 0); vse_write_reg(pdwe_dev, VSE_REG_MI1_BUS_ID, 0); + return ret; } @@ -570,6 +570,7 @@ long es_dw200_ioctl(struct es_dw200_private *pes_dw200_priv, unsigned int cmd, void *arg) { int ret = -1; + struct es_dewarp_driver_dev *pdriver_dev = pes_dw200_priv->pdriver_dev; struct dw200_subdev *pdwe_dev = &pes_dw200_priv->dw200; u64 addr; @@ -608,11 +609,11 @@ long es_dw200_ioctl(struct es_dw200_private *pes_dw200_priv, unsigned int cmd, mode); return -EINVAL; } - atomic_set(&vse_online_mode_atomic, mode); + atomic_set(&pdriver_dev->vse_online_mode_atomic, mode); return 0; } case VSEIOC_G_ONLINE_MODE: { - u32 mode = atomic_read(&vse_online_mode_atomic); + u32 mode = atomic_read(&pdriver_dev->vse_online_mode_atomic); DEBUG_PRINT("%s: get vse online mode:%d\n", __func__, mode); viv_check_retval(copy_to_user(arg, &mode, sizeof(mode))); return 0; @@ -685,8 +686,7 @@ irqreturn_t vivdw200_interrupt(int irq, void *dev_id) unsigned int dwe_mis, vse_mis; unsigned int dw200_fe_mis; struct es_dewarp_driver_dev 
*pdriver_dev = dev_id; - struct dw200_subdev *pdw200 = - (struct dw200_subdev *)pdriver_dev->private; + struct dw200_subdev *pdw200 = &pdriver_dev->hw_dev; int ret = 0; pr_info("%s enter\n", __func__); @@ -849,6 +849,7 @@ static int vvcam_dw200_smmu_sid_cfg(struct device *dev) u32 sid = 0; phandle phandle; struct device_node *vi_top_csr_np; + u32 reg_val = 0; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); @@ -881,6 +882,13 @@ static int vvcam_dw200_smmu_sid_cfg(struct device *dev) } of_node_put(vi_top_csr_np); + // VI dw200 top clk enable: + regmap_read(regmap, 0x40, ®_val); + if (!IS_BIT_SET(reg_val, 2)) { + SET_BIT(reg_val, 2); + regmap_write(regmap, 0x40, reg_val); + } + ret = of_property_read_u32_index(dev->of_node, "eswin,vi_top_csr", 1, &mmu_tbu0_vi_dw200_reg); if (ret) { @@ -923,7 +931,7 @@ static int dewarp_open(struct inode *inode, struct file *file) container_of(inode->i_cdev, struct es_dewarp_driver_dev, cdev); pdriver_dev = pes_dw200_priv->pdriver_dev; - pdw200 = (struct dw200_subdev *)pdriver_dev->private; + pdw200 = &pdriver_dev->hw_dev; memcpy(&pes_dw200_priv->dw200, pdw200, sizeof(pes_dw200_priv->dw200)); pm_runtime_get_sync(pdw200->dev); @@ -945,27 +953,16 @@ static int dewarp_open(struct inode *inode, struct file *file) static long dewarp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = 0; - struct es_dewarp_driver_dev *pdriver_dev; - struct dw200_subdev *pdwe_dev; struct es_dw200_private *pes_dw200_priv; - pes_dw200_priv = file->private_data; - pdriver_dev = pes_dw200_priv->pdriver_dev; - // pdriver_dev = file->private_data; - if (pdriver_dev == NULL) { - pr_err("%s:file private is null point error\n", __func__); - return -ENOMEM; + if (!pes_dw200_priv) { + pr_err("file private invalid\n"); + return -EFAULT; } - - pdwe_dev = pdriver_dev->private; - pdwe_dev->pheap_root = &pes_dw200_priv->heap_root; -#ifdef ES_DW200_SDK ret = es_dw200_ioctl(pes_dw200_priv, cmd, (void *)arg); if (ret != RET_HW_COMMOND) { return ret; } -#endif // ES_DW200_SDK - ret = dw200_priv_ioctl(pdwe_dev, cmd, (void *)arg); return ret; }; @@ -1131,7 +1128,7 @@ static int dewarp_release(struct inode *inode, struct file *file) pes_dw200_priv = file->private_data; pdriver_dev = pes_dw200_priv->pdriver_dev; - pdw200 = (struct dw200_subdev *)pdriver_dev->private; + pdw200 = &pdriver_dev->hw_dev; DEBUG_PRINT("enter %s\n", __func__); @@ -1147,7 +1144,7 @@ static int dewarp_release(struct inode *inode, struct file *file) /*stop the axi bus*/ vse_write_reg(pdw200, VSE_REG_MI0_BUS_ID, 0); vse_write_reg(pdw200, VSE_REG_MI1_BUS_ID, 0); - atomic_set(&vse_irq_trigger_mis, 0); + atomic_set(&pdriver_dev->vse_irq_trigger_mis, 0); while (atomic_read(&pes_dw200_priv->vse_reserved_atom) > 0) { releaseVse(pes_dw200_priv); } @@ -1163,7 +1160,7 @@ static int dewarp_release(struct inode *inode, struct file *file) reg = dwe_read_reg(pdw200, BUS_CTRL); dwe_write_reg(pdw200, BUS_CTRL, reg & ~DEWRAP_BUS_CTRL_ENABLE_MASK); - atomic_set(&dwe_irq_trigger_mis, 0); + atomic_set(&pdriver_dev->dwe_irq_trigger_mis, 0); while (atomic_read(&pes_dw200_priv->dwe_reserved_atom) > 0) { releaseDwe(pes_dw200_priv); } @@ -1215,12 +1212,9 @@ static int es_dewarp_probe(struct platform_device *pdev) struct es_dewarp_driver_dev *pdriver_dev; struct dw200_subdev *pdwe_dev; char debug_dw200_reset[64] = "dw200_reset"; - void __iomem *vi_top_addr = ioremap(VI_TOP_CLK, sizeof(u32)); - u32 reg = 0; - - // DEBUG_PRINT("enter %s\n", __func__); + int id = 0; - if (pdev->id >= VIVCAM_DWE_MAXCNT) { + if (pdev->id >= 
NUM_DEVICES) { pr_err("%s:pdev id is %d error\n", __func__, pdev->id); return -EINVAL; } @@ -1233,13 +1227,7 @@ static int es_dewarp_probe(struct platform_device *pdev) return -ENOMEM; } - pdwe_dev = devm_kzalloc(&pdev->dev, sizeof(struct dw200_subdev), - GFP_KERNEL); - if (pdwe_dev == NULL) { - pr_err("%s:alloc struct vvcam_soc_dev error\n", __func__); - return -ENOMEM; - } - + pdwe_dev = &pdriver_dev->hw_dev; ret = vvcam_sys_reset_init(pdev, &pdwe_dev->dw_crg); if (ret) { pr_err("%s: DW reset init failed\n", __func__); @@ -1271,13 +1259,7 @@ static int es_dewarp_probe(struct platform_device *pdev) return ret; } - // VI dw200 top clk enable: - reg = ioread32(vi_top_addr); - if (!IS_BIT_SET(reg, 2)) { - SET_BIT(reg, 2); - iowrite32(reg, vi_top_addr); - } - iounmap(vi_top_addr); + (void)vvcam_dw200_smmu_sid_cfg(&pdev->dev); /* DWE ioremap */ pdwe_dev->dwe_base = @@ -1299,13 +1281,10 @@ static int es_dewarp_probe(struct platform_device *pdev) pdwe_dev->vse_reset = ioremap(VSE_REG_RESET, 4); #endif - pdriver_dev->private = pdwe_dev; - // pdwe_dev->id = pdev->id; pdwe_dev->dev = &pdev->dev; + mutex_init(&pdriver_dev->vvmutex); - platform_set_drvdata(pdev, pdriver_dev); -#ifdef ES_DW200_SDK pdriver_dev->irq_num = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, pdriver_dev->irq_num, (irq_handler_t)dwe_isr, @@ -1325,86 +1304,35 @@ static int es_dewarp_probe(struct platform_device *pdev) pr_err("%s:request irq error\n", __func__); return ret; } - sema_init(&dwe_sem, 1); - sema_init(&vse_sem, 1); -#else - - pdriver_dev->irq_num = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, pdriver_dev->irq_num, - (irq_handler_t)vivdw200_interrupt, - IRQF_SHARED | IRQF_TRIGGER_RISING, - VIVCAM_DWE_NAME, (void *)pdriver_dev); - if (ret != 0) { - pr_err("%s:request irq error\n", __func__); - return ret; - } - - pdriver_dev->irq_num_vse = platform_get_irq(pdev, 1); - ret = devm_request_irq(&pdev->dev, pdriver_dev->irq_num_vse, - (irq_handler_t)vivdw200_interrupt, - IRQF_SHARED | IRQF_TRIGGER_RISING, - VIVCAM_DWE_NAME, (void *)pdriver_dev); - if (ret != 0) { - pr_err("%s:request irq error\n", __func__); - return ret; - } -#endif - init_waitqueue_head(&pdriver_dev->irq_wait); - if (devise_register_index == 0) { - if (es_dewarp_major == 0) { - ret = alloc_chrdev_region(&pdriver_dev->devt, 0, - VIVCAM_DWE_MAXCNT, - VIVCAM_DWE_NAME); - if (ret != 0) { - pr_err("%s:alloc_chrdev_region error\n", - __func__); - return ret; - } - es_dewarp_major = MAJOR(pdriver_dev->devt); - es_dewarp_minor = MINOR(pdriver_dev->devt); - } else { - pdriver_dev->devt = - MKDEV(es_dewarp_major, es_dewarp_minor); - ret = register_chrdev_region(pdriver_dev->devt, - VIVCAM_DWE_MAXCNT, - VIVCAM_DWE_NAME); - if (ret) { - pr_err("%s:register_chrdev_region error\n", - __func__); - return ret; - } - } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) - es_dewarp_class = class_create(VIVCAM_DWE_NAME); -#else - es_dewarp_class = class_create(THIS_MODULE, VIVCAM_DWE_NAME); -#endif - if (IS_ERR(es_dewarp_class)) { - pr_err("%s[%d]:class_create error!\n", __func__, - __LINE__); - return -EINVAL; - } - } - pdriver_dev->devt = MKDEV(es_dewarp_major, es_dewarp_minor); + ret = of_property_read_u32(pdev->dev.of_node, "numa-node-id", &id); + if (ret) { + dev_err(&pdev->dev, "Failed to read index property, ret = %d\n", ret); + return ret; + } + pr_info("dewarp dev is on die%d\n", id); cdev_init(&pdriver_dev->cdev, &es_dewarp_fops); - ret = cdev_add(&pdriver_dev->cdev, pdriver_dev->devt, 1); + pdriver_dev->cdev.owner = THIS_MODULE; + 
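[Editor's note] The registration flow in this probe path drops the old lazily allocated major/minor bookkeeping: module init (shown further down) reserves one chrdev region and one class, and each probe reads the DT numa-node-id and publishes its own node at devt + id. A condensed sketch of that per-die registration, reusing the driver's NUM_DEVICES/devt/es_dewarp_class names but otherwise simplified:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>

#define NUM_DEVICES 2

static dev_t devt;			/* allocated once at module init */
static struct class *es_dewarp_class;

static int register_die_node(struct cdev *cdev,
			     const struct file_operations *fops,
			     struct device *parent, int die_id)
{
	struct device *dev;
	int ret;

	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	ret = cdev_add(cdev, devt + die_id, 1);
	if (ret)
		return ret;

	dev = device_create(es_dewarp_class, parent, devt + die_id,
			    NULL, "es_dewarp%d", die_id);
	if (IS_ERR(dev)) {
		cdev_del(cdev);
		return PTR_ERR(dev);
	}
	return 0;
}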
+ ret = cdev_add(&pdriver_dev->cdev, devt + id, 1); if (ret) { - pr_err("%s[%d]:cdev_add error!\n", __func__, __LINE__); + dev_err(&pdev->dev, "Failed to add cdev\n"); return ret; } - pdriver_dev->class = es_dewarp_class; - device_create(pdriver_dev->class, NULL, pdriver_dev->devt, pdriver_dev, - "%s", VIVCAM_DWE_NAME); + pdriver_dev->device = device_create(es_dewarp_class, &pdev->dev, + devt + id, NULL, "es_dewarp%d", id); + if (IS_ERR(pdriver_dev->device)) { + cdev_del(&pdriver_dev->cdev); + return PTR_ERR(pdriver_dev->device); + } + platform_set_drvdata(pdev, pdriver_dev); pdwe_dev->fe.enable = fe_enable; // TODO: Get from input parameter if (pdwe_dev->fe.enable == true) { dw200_fe_init(pdwe_dev); } - (void)vvcam_dw200_smmu_sid_cfg(&pdev->dev); - pdwe_dev->dw200_reset = debugfs_create_file( debug_dw200_reset, 0644, NULL, pdwe_dev, &dw200_reset_fops); @@ -1414,10 +1342,25 @@ static int es_dewarp_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); + sema_init(&pdriver_dev->dwe_sem, 1); + sema_init(&pdriver_dev->vse_sem, 1); + spin_lock_init(&pdriver_dev->dwe_irq_lock); + spin_lock_init(&pdriver_dev->vse_irq_lock); + init_waitqueue_head(&pdriver_dev->irq_wait); + init_waitqueue_head(&pdriver_dev->dwe_irq_wait_q); + init_waitqueue_head(&pdriver_dev->vse_irq_wait_q); + init_waitqueue_head(&pdriver_dev->dwe_reserve_wait_q); + init_waitqueue_head(&pdriver_dev->vse_reserve_wait_q); + atomic_set(&pdriver_dev->vse_online_mode_atomic, 0); + atomic_set(&pdriver_dev->dwe_irq_trigger_mis, 0); + atomic_set(&pdriver_dev->dwe_irq_trigger_mis, 0); + + pdriver_dev->dwe_status = ES_IDLE; + pdriver_dev->vse_status = ES_IDLE; + atomic_set(&pdriver_dev->trigger_atom, 0); init_waitqueue_head(&pdriver_dev->trigger_wq); - devise_register_index++; return ret; } @@ -1426,20 +1369,16 @@ static int es_dewarp_remove(struct platform_device *pdev) struct es_dewarp_driver_dev *pdriver_dev; struct dw200_subdev *pdwe_dev; - devise_register_index--; pdriver_dev = platform_get_drvdata(pdev); - pdwe_dev = (struct dw200_subdev *)pdriver_dev->private; + pdwe_dev = &pdriver_dev->hw_dev; if (pdwe_dev->fe.enable == true) { dw200_fe_destory(pdwe_dev); } - debugfs_remove(pdwe_dev->dw200_reset); + device_destroy(es_dewarp_class, devt + pdriver_dev->id); cdev_del(&pdriver_dev->cdev); - device_destroy(pdriver_dev->class, pdriver_dev->devt); - unregister_chrdev_region(pdriver_dev->devt, VIVCAM_DWE_MAXCNT); - if (devise_register_index == 0) { - class_destroy(pdriver_dev->class); - } + + debugfs_remove(pdwe_dev->dw200_reset); vvcam_reset_fini(&pdwe_dev->dw_crg); @@ -1453,7 +1392,7 @@ static int dewarp_runtime_suspend(struct device *dev) struct es_dewarp_driver_dev *pdriver_dev = dev_get_drvdata(dev); struct dw200_subdev *pdwe_dev; - pdwe_dev = (struct dw200_subdev *)pdriver_dev->private; + pdwe_dev = &pdriver_dev->hw_dev; return vvcam_sys_clk_unprepare(&pdwe_dev->dw_crg); } @@ -1462,7 +1401,7 @@ static int dewarp_runtime_resume(struct device *dev) struct es_dewarp_driver_dev *pdriver_dev = dev_get_drvdata(dev); struct dw200_subdev *pdwe_dev; - pdwe_dev = (struct dw200_subdev *)pdriver_dev->private; + pdwe_dev = &pdriver_dev->hw_dev; return vvcam_sys_clk_prepare(&pdwe_dev->dw_crg); } @@ -1473,7 +1412,7 @@ static int dewarp_suspend(struct device *dev) int ret = 0; pdriver_dev->suspended = 0; - pdwe_dev = (struct dw200_subdev *)pdriver_dev->private; + pdwe_dev = &pdriver_dev->hw_dev; if (pm_runtime_status_suspended(dev)) { return 0; @@ -1510,7 +1449,7 @@ static int dewarp_resume(struct 
device *dev) struct dw200_subdev *pdwe_dev; int ret = 0; - pdwe_dev = (struct dw200_subdev *)pdriver_dev->private; + pdwe_dev = &pdriver_dev->hw_dev; if (pm_runtime_status_suspended(dev)) { return 0; @@ -1549,7 +1488,52 @@ static struct platform_driver viv_platform_driver = { .pm = &dewarp_pm_ops, }, }; -module_platform_driver(viv_platform_driver); + +static int __init es_dewarp_init(void) +{ + int ret; + + pr_info("es_dewarp_init: Entering\n"); + + ret = alloc_chrdev_region(&devt, 0, NUM_DEVICES, DEWARP_CLASS_NAME); + if (ret) { + pr_err("Failed to allocate char dev region\n"); + return ret; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) + es_dewarp_class = class_create(ES_DEWARP_NAME); +#else + es_dewarp_class = class_create(THIS_MODULE, ES_DEWARP_NAME); +#endif + if (IS_ERR(es_dewarp_class)) { + unregister_chrdev_region(devt, NUM_DEVICES); + return PTR_ERR(es_dewarp_class); + } + + ret = platform_driver_register(&viv_platform_driver); + if (ret) { + class_destroy(es_dewarp_class); + unregister_chrdev_region(devt, NUM_DEVICES); + } + + pr_info("es_dewarp_init: Exiting\n"); + return ret; +} + +static void __exit es_dewarp_exit(void) +{ + pr_info("es_dewarp_exit: Entering\n"); + + platform_driver_unregister(&viv_platform_driver); + class_destroy(es_dewarp_class); + unregister_chrdev_region(devt, NUM_DEVICES); + + pr_info("es_dewarp_exit: Exiting\n"); +} + +module_init(es_dewarp_init); +module_exit(es_dewarp_exit); MODULE_DESCRIPTION("DWE"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware.c b/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware.c index bebbbd4a7edf..0cd8c0ab4dae 100644 --- a/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware.c +++ b/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware.c @@ -2033,7 +2033,8 @@ gckHARDWARE_Construct(gckOS Os, gckKERNEL Kernel, gckHARDWARE *Hardware) /* Check if big endian */ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff); - gcmkONERROR(gckOS_CreateSignal(Os, gcvTRUE, &hardware->feIdleSignal)); + gcmkONERROR(gckOS_CreateSignal(Os, gcvFALSE, &hardware->feIdleSignal)); + gcmkONERROR(gckOS_Signal(Os, hardware->feIdleSignal, gcvTRUE)); /* Initialize the fast clear. 
*/ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1)); @@ -9664,6 +9665,7 @@ gckHARDWARE_ExecuteFunctions(gcsFUNCTION_EXECUTION_PTR Execution) gcmkONERROR(gckWLFE_Execute(hardware, address, Execution->funcCmd[i].bytes)); gcmkONERROR(gckOS_WaitSignal(hardware->os, hardware->feIdleSignal, gcvFALSE, gcdGPU_2D_TIMEOUT)); + gcmkONERROR(gckOS_Signal(hardware->os, hardware->feIdleSignal, gcvTRUE)); } #if gcdLINK_QUEUE_SIZE diff --git a/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware_waitlink_fe.c b/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware_waitlink_fe.c index bb92f5f58e0a..52907911bf20 100644 --- a/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware_waitlink_fe.c +++ b/drivers/staging/media/eswin/hae/hal/kernel/arch/gc_hal_kernel_hardware_waitlink_fe.c @@ -1130,7 +1130,7 @@ gckWLFE_Execute(gckHARDWARE Hardware, gctADDRESS Address, gctUINT32 Bytes) gcmkVERIFY_OK(gckOS_ReadRegisterEx(Hardware->os, Hardware->kernel, 0x00004, &idle)); if (idle != 0x7FFFFFFF) { - gcmkONERROR(gckOS_WaitSignal(Hardware->os, Hardware->feIdleSignal, gcvFALSE, gcdGPU_2D_TIMEOUT)); + gcmkONERROR(gckOS_WaitSignal(Hardware->os, Hardware->feIdleSignal, gcvTRUE, gcdGPU_2D_TIMEOUT)); } } while (idle != 0x7FFFFFFF); } @@ -1168,7 +1168,6 @@ gckWLFE_Execute(gckHARDWARE Hardware, gctADDRESS Address, gctUINT32 Bytes) gcmkONERROR(gckOS_MemoryBarrier(Hardware->os, gcvNULL)); if (Hardware->type == gcvHARDWARE_2D) { - gcmkONERROR(gckOS_Signal(Hardware->os, Hardware->feIdleSignal, gcvFALSE)); gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->kernel, 0x00658, control)); } else { diff --git a/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel.c b/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel.c index 8303e9a139c6..fb921fbd970e 100644 --- a/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel.c +++ b/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel.c @@ -732,6 +732,13 @@ gckKERNEL_Construct(gckOS Os, gceCORE Core, if (kernel->sharedPageTable) { gcmkONERROR(gckDEVICE_GetMMU(Device, kernel->hardware->type, &kernel->mmu)); + if(!kernel->mmu && Device->id == 1) { + /* let the die1 share die0 mmu table */ + gckGALDEVICE gal_device = (gckGALDEVICE)Context; + gckDEVICE die0_device = gal_device->devices[0]; + gcmkONERROR(gckDEVICE_GetMMU(die0_device, kernel->hardware->type, &kernel->mmu)); + gcmkONERROR(gckDEVICE_SetMMU(Device, kernel->hardware->type, kernel->mmu)); + } if (!kernel->mmu) { gcmkONERROR(gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu)); diff --git a/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel_event.c b/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel_event.c index c210a5237740..a1b78ba8e70b 100644 --- a/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel_event.c +++ b/drivers/staging/media/eswin/hae/hal/kernel/gc_hal_kernel_event.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /**************************************************************************** * * The MIT License (MIT) @@ -51,7 +52,25 @@ * version of this file. * *****************************************************************************/ - +/* + * ESWIN gc hal kernel event APIs + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Authors: Huan Sun + */ #include "gc_hal_kernel_precomp.h" @@ -2066,7 +2085,7 @@ gckEVENT_Notify(gckEVENT Event, gctUINT32 IDs, gceEVENT_FAULT *Fault) if (IDs == 0) { gcmkONERROR(gckOS_StartTimer(Event->kernel->os, Event->kernel->eventObj->tryIdleTimer, - 500)); + 1000)); } /* End of event handling. */ diff --git a/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_driver.c b/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_driver.c index 9386092809b5..a428dcd84af5 100644 --- a/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_driver.c +++ b/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_driver.c @@ -314,6 +314,8 @@ _SyncModuleParam(gcsMODULE_PARAMETERS *ModuleParam) irqLineVG = p->irqVG; registerMemBaseVG = (ulong)p->registerVGBase; registerMemSizeVG = (ulong)p->registerVGSize; + registerMemBase2D = p->register2DBases[0]; + registerMemSize2D = p->register2DSizes[0]; for (i = 0; i < gcvCORE_COUNT; i++) chipIDs[i] = p->chipIDs[i]; @@ -1178,9 +1180,9 @@ gceSTATUS viv_device_node_create(uint32_t dev_index) if (dev_index > 0) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) - device_create(gpu_class, NULL, MKDEV(major, dev_index), NULL, "galcore%d", dev_index); + device_create(gpu_class, NULL, MKDEV(major, dev_index), NULL, DEVICE_NAME "%d", dev_index); #else - device_create(gpu_class, NULL, MKDEV(major, dev_index), "galcore%d", dev_index); + device_create(gpu_class, NULL, MKDEV(major, dev_index), DEVICE_NAME "%d", dev_index); #endif } else { /* Compatible with old style. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) @@ -1346,9 +1348,7 @@ static int __devinit viv_dev_probe(struct platform_device *pdev) } /* Gather module parameters. 
*/ - if (activeDeviceCount == 0) { - _InitModuleParam(&platform->params); - } + _InitModuleParam(&platform->params); platform->params.devices[activeDeviceCount] = &pdev->dev; @@ -1396,6 +1396,7 @@ static int __devinit viv_dev_probe(struct platform_device *pdev) activeDeviceCount++; if(gcvSTATUS_MORE_DATA == platform->ops->adjustParam(platform, &platform->params)){ gcmkPRINT("hae loaded first device, waiting for another..."); + _SyncModuleParam(&platform->params); return 0; } } diff --git a/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_parameter.h b/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_parameter.h index 0adeae90bc10..db95354cb7c0 100644 --- a/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_parameter.h +++ b/drivers/staging/media/eswin/hae/hal/os/linux/kernel/gc_hal_kernel_parameter.h @@ -248,7 +248,7 @@ static uint devCoreCounts[gcdDEVICE_COUNT] = {gcdCORE_3D_COUNT, 0}; module_param_array(devCoreCounts, uint, NULL, 0644); MODULE_PARM_DESC(devCoreCounts, "Array of core count of each hardware device"); -static uint dev2DCoreCounts[gcdDEVICE_COUNT] = {gcdCORE_2D_COUNT, 0}; +static uint dev2DCoreCounts[gcdDEVICE_COUNT] = {0, 0}; module_param_array(dev2DCoreCounts, uint, NULL, 0644); MODULE_PARM_DESC(dev2DCoreCounts, "Array of core 2D count of each hardware device"); diff --git a/drivers/staging/media/eswin/hae/hal/os/linux/kernel/platform/eswin/gc_hal_kernel_platform_win2030.c b/drivers/staging/media/eswin/hae/hal/os/linux/kernel/platform/eswin/gc_hal_kernel_platform_win2030.c index 4b52d9679954..580267770992 100644 --- a/drivers/staging/media/eswin/hae/hal/os/linux/kernel/platform/eswin/gc_hal_kernel_platform_win2030.c +++ b/drivers/staging/media/eswin/hae/hal/os/linux/kernel/platform/eswin/gc_hal_kernel_platform_win2030.c @@ -163,7 +163,13 @@ struct gpu_power_domain { static struct _gpu_reset { struct reset_control *rsts[gcdDEVICE_COUNT][gcvRST_COUNT]; -}gpu_reset; +} gpu_reset; + +static struct _gpu_device { + struct device *devs[gcdDEVICE_COUNT]; +} gpu_device = { + .devs = { gcvNULL, gcvNULL }, +}; static struct _gcsPLATFORM default_platform = { .name = __FILE__, @@ -288,7 +294,7 @@ static int gpu_add_power_domains(struct platform_device *pdev, gcsMODULE_PARAMET int ret = 0; memset(&gpd, 0, sizeof(struct gpu_power_domain)); - num_domains = params->devCount; + num_domains = (gpu_device.devs[0] != gcvNULL) + (gpu_device.devs[1] != gcvNULL); gpd.num_domains = num_domains; /* If the num of domains is less than 2, the domain will be attached automatically */ @@ -300,13 +306,13 @@ static int gpu_add_power_domains(struct platform_device *pdev, gcsMODULE_PARAMET for (i = 0; i < num_domains; i++) { if (gpd.power_dev) { - gpd.power_dev[i] = dev_pm_domain_attach_by_id(dev, i); + gpd.power_dev[i] = dev_pm_domain_attach_by_id(gpu_device.devs[i], i); if (IS_ERR(gpd.power_dev[i])) goto error; } for (j = 0; j < nc_of_clks; j++) { - gpd.clks[i][j] = devm_clk_get(dev, clk_names[j]); + gpd.clks[i][j] = devm_clk_get(gpu_device.devs[i], clk_names[j]); if (IS_ERR(gpd.clks[i][j])) { ret = PTR_ERR(gpd.clks[i][j]); dev_err(dev, "failed to get die-%d:%s clock: %d\n", i, clk_names[j], ret); @@ -330,14 +336,19 @@ static int gpu_add_power_domains(struct platform_device *pdev, gcsMODULE_PARAMET static int g2d_device_node_scan(unsigned char *compatible) { struct device_node *np; + const char *str; + int ret = -1; np = of_find_compatible_node(NULL, NULL, compatible); if (!np) { - return -1; + return ret; + } + if (!of_property_read_string(np, "status", 
&str) && !strcmp(str, "okay")) { + ret = 0; } of_node_put(np); - return 0; + return ret; } static int g2d_reset(struct device *dev, int dieIndex, int enable) { @@ -399,6 +410,7 @@ static int gpu_parse_dt(struct platform_device *pdev, gcsMODULE_PARAMETERS *para const gctUINT32 *value; const char *str; int dieIndex = 0; + int peerDieIndex; gcmSTATIC_ASSERT(gcvCORE_COUNT == gcmCOUNTOF(core_names), "core_names array does not match core types"); @@ -410,6 +422,7 @@ static int gpu_parse_dt(struct platform_device *pdev, gcsMODULE_PARAMETERS *para } else if (!strcmp("eswin,galcore_d1", str)) { dieIndex = 1; } + peerDieIndex = (dieIndex + 1) % 2; /* parse the irqs config */ for (i = gcvCORE_2D; i <= gcvCORE_2D1; i++) { @@ -540,10 +553,17 @@ static int gpu_parse_dt(struct platform_device *pdev, gcsMODULE_PARAMETERS *para params->devCount++; - if (params->devCount == 1) { + params->devices[dieIndex] = gpu_device.devs[dieIndex] = &pdev->dev; + params->platformIDs[dieIndex] = dieIndex; + if(params->dev2DCoreCounts[dieIndex] && params->dev2DCoreCounts[peerDieIndex]){ + /*all device probe done, it need restore remembered peer device pointer*/ + params->devices[peerDieIndex] = gpu_device.devs[peerDieIndex]; + } + + if (params->devCount == 1 && params->dev2DCoreCounts[peerDieIndex] == 0) { unsigned char compatible[32] = { 0 }; - sprintf(compatible, "eswin,galcore_d%d", (dieIndex + 1) % 2); - if(!g2d_device_node_scan(compatible)){ + sprintf(compatible, "eswin,galcore_d%d", peerDieIndex); + if (!g2d_device_node_scan(compatible)) { return 1; } } diff --git a/drivers/staging/media/eswin/vdec/hantro_dec.c b/drivers/staging/media/eswin/vdec/hantro_dec.c index cc4e04b3c5c3..7c4de300d6e6 100644 --- a/drivers/staging/media/eswin/vdec/hantro_dec.c +++ b/drivers/staging/media/eswin/vdec/hantro_dec.c @@ -325,6 +325,7 @@ static struct apbfilter_cfg apbfilter_cfg[MAX_SUBSYS_NUM][HW_CORE_MAX]; static struct axife_cfg axife_cfg[MAX_SUBSYS_NUM]; static int elements = 2; +static int gdev_count = 0; #ifdef CLK_CFG struct clk *clk_cfg; @@ -2385,7 +2386,6 @@ static long hantrodec_ioctl(struct file *filp, unsigned int cmd, case HANTRODEC_IOC_DMA_HEAP_GET_IOVA: { struct dmabuf_cfg dbcfg; size_t buf_size = 0; - void *cpu_vaddr = NULL; struct heap_mem *hmem, *hmem_d1; struct filp_priv *fp_priv = (struct filp_priv *)filp->private_data; @@ -2401,52 +2401,30 @@ static long hantrodec_ioctl(struct file *filp, unsigned int cmd, LOG_ERR("dmabuf-heap alloc from userspace failed\n"); return -ENOMEM; } + dbcfg.iova = (unsigned long)sg_dma_address(hmem->sgt->sgl); + + /* get the size of the dmabuf allocated by dmabuf_heap */ + buf_size = common_dmabuf_heap_get_size(hmem); //LOG_INFO("import dmabuf_fd = %d, hmem=%px, filp=%px, platformdev_d1=%px\n", dbcfg.dmabuf_fd, hmem, filp, // platformdev_d1); if (platformdev_d1) { - hmem_d1 = common_dmabuf_heap_import_from_user(&fp_priv->root_d1, dbcfg.dmabuf_fd); + hmem_d1 = common_dmabuf_heap_rsv_iova_map(&fp_priv->root_d1, dbcfg.dmabuf_fd, dbcfg.iova, buf_size); if(IS_ERR(hmem_d1)) { + LOG_ERR("dmabuf-heap rsv iova map failed for d1\n"); common_dmabuf_heap_release(hmem); - LOG_ERR("dmabuf-heap alloc from userspace failed for d1\n"); return -ENOMEM; } } - /* map the pha to cpu vaddr*/ - cpu_vaddr = common_dmabuf_heap_map_vaddr(hmem); - if (cpu_vaddr == NULL) { - LOG_ERR("map to cpu_vaddr failed\n"); - common_dmabuf_heap_release(hmem); - if (platformdev_d1) - common_dmabuf_heap_release(hmem_d1); - - return -ENOMEM; - } - - /* get the size of the dmabuf allocated by dmabuf_heap */ - buf_size = 
common_dmabuf_heap_get_size(hmem); LOG_TRACE("dmabuf info: CPU VA:0x%lx, PA:0x%lx, DMA addr(iova):0x%lx, size=0x%lx\n", (unsigned long)hmem->vaddr, (unsigned long)sg_phys(hmem->sgt->sgl), (unsigned long)sg_dma_address(hmem->sgt->sgl), (unsigned long)buf_size); - dbcfg.iova = (unsigned long)sg_dma_address(hmem->sgt->sgl); - if (platformdev_d1) { - unsigned long iova_d1; - - iova_d1 = (unsigned long)sg_dma_address(hmem_d1->sgt->sgl); - if (dbcfg.iova != iova_d1) { - common_dmabuf_heap_release(hmem); - common_dmabuf_heap_release(hmem_d1); - LOG_ERR("IOVA addrs of d0 and d1 are not the same\n"); - return -EFAULT; - } - } - tmp = copy_to_user((u32 __user *)arg, &dbcfg, sizeof(struct dmabuf_cfg)); if (tmp) { - common_dmabuf_heap_release(hmem); if (platformdev_d1) - common_dmabuf_heap_release(hmem_d1); + common_dmabuf_heap_rsv_iova_unmap(hmem_d1); + common_dmabuf_heap_release(hmem); LOG_ERR("%s %d: copy_from_user failed, returned %li\n", __func__, __LINE__, tmp); return -EFAULT; } @@ -2454,6 +2432,7 @@ static long hantrodec_ioctl(struct file *filp, unsigned int cmd, return 0; } case HANTRODEC_IOC_DMA_HEAP_PUT_IOVA: { + struct dmabuf_cfg dbcfg; struct heap_mem *hmem, *hmem_d1; unsigned int dmabuf_fd; struct filp_priv *fp_priv = (struct filp_priv *)filp->private_data; @@ -2476,7 +2455,7 @@ static long hantrodec_ioctl(struct file *filp, unsigned int cmd, LOG_ERR("cannot find dmabuf-heap for dmabuf_fd %d on d1\n", dmabuf_fd); return -EFAULT; } - common_dmabuf_heap_release(hmem_d1); + common_dmabuf_heap_rsv_iova_unmap(hmem_d1); } //LOG_INFO("release dmabuf_fd = %d, hmem=%px, filp=%px, platformdev_d1=%px\n", dmabuf_fd, hmem, filp, // platformdev_d1); @@ -2650,6 +2629,12 @@ static int hantrodec_release(struct inode *inode, int n; hantrodec_t *dev = &hantrodec_data; struct filp_priv *fp_priv = (struct filp_priv *)filp->private_data; +#ifdef SUPPORT_DMA_HEAP + struct heap_mem *h, *tmp; + dma_addr_t iova; + size_t buf_size = 0; + struct heap_root *root = &fp_priv->root; +#endif LOG_DBG("closing ...\n"); @@ -2678,10 +2663,10 @@ static int hantrodec_release(struct inode *inode, end: #ifdef SUPPORT_DMA_HEAP - common_dmabuf_heap_import_uninit(&fp_priv->root); if (platformdev_d1) { - common_dmabuf_heap_import_uninit(&fp_priv->root_d1); + common_dmabuf_heap_rsv_iova_uninit(&fp_priv->root_d1); } + common_dmabuf_heap_import_uninit(&fp_priv->root); #endif for (u32 core_id = 0; core_id < DEC_CORE_NUM; core_id ++) { /** clear the tasks for pm*/ @@ -2833,12 +2818,12 @@ static int hantrodec_init(void) if (platformdev_d1) { of_dma_configure(&platformdev_d1->dev, platformdev_d1->dev.of_node, true); - if (dma_set_mask_and_coherent(&platformdev_d1->dev, DMA_BIT_MASK(48))) { - LOG_ERR("48bit dma dev: No suitable DMA available\n"); + if (dma_set_mask_and_coherent(&platformdev_d1->dev, DMA_BIT_MASK(41))) { + LOG_ERR("41bit dma dev: No suitable DMA available\n"); } - if (dma_set_coherent_mask(&platformdev_d1->dev, DMA_BIT_MASK(48))) { - LOG_ERR("48bit dma dev: No suitable DMA available\n"); + if (dma_set_coherent_mask(&platformdev_d1->dev, DMA_BIT_MASK(41))) { + LOG_ERR("41bit dma dev: No suitable DMA available\n"); } } } @@ -3061,12 +3046,13 @@ static int hantrodec_init(void) * Description : clean up * Return type : int */ -static void hantrodec_cleanup(void) +static void hantrodec_cleanup(struct platform_device *pdev) { hantrodec_t *dev = &hantrodec_data; int i, n = 0; volatile u8 *mmu_hwregs[MAX_SUBSYS_NUM][2]; int has_mmu = 0; + int cleanup = (gdev_count > 0) ? 
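[Editor's note] The HANTRODEC_IOC_DMA_HEAP_GET_IOVA path above now imports the dmabuf once on die 0 and then mirrors it on die 1 at the same IOVA via common_dmabuf_heap_rsv_iova_map(), instead of performing a second independent import and rejecting mismatched addresses afterwards. A condensed walk-through of that call order, using the vendor helpers exactly as they appear in this patch (their internals and the surrounding struct definitions are assumed from the ESWIN dmabuf-heap headers):

struct heap_mem *hmem, *hmem_d1;
dma_addr_t iova;
size_t size;

hmem = common_dmabuf_heap_import_from_user(&fp_priv->root, fd);
if (IS_ERR(hmem))
	return -ENOMEM;

iova = sg_dma_address(hmem->sgt->sgl);	/* die-0 mapping */
size = common_dmabuf_heap_get_size(hmem);

if (platformdev_d1) {
	/* map the same buffer through die 1's SMMU at the die-0 IOVA */
	hmem_d1 = common_dmabuf_heap_rsv_iova_map(&fp_priv->root_d1, fd,
						  iova, size);
	if (IS_ERR(hmem_d1)) {
		common_dmabuf_heap_release(hmem);
		return -ENOMEM;
	}
}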
0 : 1; for (i = 0; i < MAX_SUBSYS_NUM; i++) { mmu_hwregs[i][0] = dev->hwregs[i][HW_MMU]; @@ -3084,7 +3070,7 @@ static void hantrodec_cleanup(void) MMUCleanup(mmu_hwregs); if (vcmd) { - hantrovcmd_cleanup(); + hantrovcmd_cleanup(pdev, cleanup); } else { /* reset hardware */ ResetAsic(dev); @@ -3095,6 +3081,9 @@ static void hantrodec_cleanup(void) free_irq(dev->irq[n], (void *)dev); } } + if (!cleanup) { + return; + } ReleaseIO(); #ifdef CLK_CFG @@ -3960,7 +3949,6 @@ static int hantro_vdec_probe(struct platform_device *pdev) { int numa_id; int ret, vdec_dev_num = 0; - static int pdev_count = 0; vdec_clk_rst_t *vcrt = devm_kzalloc(&pdev->dev, sizeof(vdec_clk_rst_t), GFP_KERNEL); if (!vcrt) { LOG_ERR("malloc drvdata failed\n"); @@ -4033,8 +4021,8 @@ static int hantro_vdec_probe(struct platform_device *pdev) } #endif - pdev_count++; - if (vdec_dev_num > pdev_count) { + gdev_count++; + if (vdec_dev_num > gdev_count) { LOG_INFO("The first core loaded, waiting for another..."); return 0; } @@ -4074,7 +4062,9 @@ static int hantro_vdec_remove(struct platform_device *pdev) vdec_clk_rst_t *vcrt; pm_runtime_disable(&pdev->dev); - hantrodec_cleanup(); + + gdev_count--; + hantrodec_cleanup(pdev); #ifdef SUPPORT_DMA_HEAP ret = win2030_tbu_power(&pdev->dev, false); if (ret) { diff --git a/drivers/staging/media/eswin/vdec/hantro_vcmd.c b/drivers/staging/media/eswin/vdec/hantro_vcmd.c index ee87be45c346..6d6cb6dcb1ea 100644 --- a/drivers/staging/media/eswin/vdec/hantro_vcmd.c +++ b/drivers/staging/media/eswin/vdec/hantro_vcmd.c @@ -1153,7 +1153,7 @@ static int wait_abort_rdy(struct hantrovcmd_dev *dev) return dev->working_state == WORKING_STATE_IDLE; } -static int select_vcmd(bi_list_node *new_cmdbuf_node) +static int select_vcmd(bi_list_node *new_cmdbuf_node, u16 nid) { struct cmdbuf_obj *cmdbuf_obj = NULL; bi_list_node *curr_cmdbuf_node = NULL; @@ -1170,6 +1170,17 @@ static int select_vcmd(bi_list_node *new_cmdbuf_node) u32 cmdbuf_id = 0; cmdbuf_obj = (struct cmdbuf_obj *)new_cmdbuf_node->data; + if (nid < vcmd_type_core_num[cmdbuf_obj->module_type]) { + dev = vcmd_manager[cmdbuf_obj->module_type][nid]; + list = &dev->list_manager; + + spin_lock_irqsave(dev->spinlock, flags); + bi_list_insert_node_tail(list, new_cmdbuf_node); + spin_unlock_irqrestore(dev->spinlock, flags); + cmdbuf_obj->core_id = dev->core_id; + return 0; + } + //there is an empty vcmd to be used while (1) { dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]]; @@ -1814,7 +1825,7 @@ static long link_and_run_cmdbuf(struct file *filp, &vcmd_reserve_cmdbuf_sem[cmdbuf_obj->module_type])) return -ERESTARTSYS; - return_value = select_vcmd(new_cmdbuf_node); + return_value = select_vcmd(new_cmdbuf_node, input_para->nid); if (return_value) return return_value; @@ -3280,9 +3291,9 @@ static int vcmd_init(void) vcmd_buf_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; /* command buffer */ - vcmd_buf_mem_pool.virtual_address = (u32 *)dma_alloc_coherent(&platformdev->dev, + vcmd_buf_mem_pool.virtual_address = (u32 *)dma_alloc_attrs(&platformdev->dev, vcmd_buf_mem_pool.size, &dma_handle, - GFP_KERNEL | __GFP_DMA32); + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); vcmd_buf_mem_pool.bus_address = (unsigned long long)dma_handle; vcmd_buf_mem_pool.phy_address = pfn_to_phys(vmalloc_to_pfn(vcmd_buf_mem_pool.virtual_address)); @@ -3290,7 +3301,8 @@ static int vcmd_init(void) dma_handle_d1 = dma_map_page(&platformdev_d1->dev, vmalloc_to_page(vcmd_buf_mem_pool.virtual_address), 0, vcmd_buf_mem_pool.size, DMA_BIDIRECTIONAL); if (dma_handle != 
dma_handle_d1) { - LOG_ERR("vdec_vcmd: dma address of vcmd buf not the same between d0 and d1\n"); + LOG_ERR("vdec_vcmd: dma address of vcmd buf not the same between d0 and d1, 0x%llx, 0x%llx, %d\n", + dma_handle, dma_handle_d1, __LINE__); return -1; } } @@ -3316,10 +3328,11 @@ static int vcmd_init(void) /* status buffer */ vcmd_status_buf_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; vcmd_status_buf_mem_pool.virtual_address = - (u32 *)dma_alloc_coherent(&platformdev->dev, + (u32 *)dma_alloc_attrs(&platformdev->dev, vcmd_status_buf_mem_pool.size, &dma_handle, - GFP_KERNEL | __GFP_DMA32); + GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); vcmd_status_buf_mem_pool.bus_address = (unsigned long long)dma_handle; vcmd_status_buf_mem_pool.phy_address = pfn_to_phys(vmalloc_to_pfn(vcmd_status_buf_mem_pool.virtual_address)); @@ -3327,7 +3340,8 @@ static int vcmd_init(void) dma_handle_d1 = dma_map_page(&platformdev_d1->dev, vmalloc_to_page(vcmd_status_buf_mem_pool.virtual_address), 0, vcmd_status_buf_mem_pool.size, DMA_BIDIRECTIONAL); if (dma_handle != dma_handle_d1) { - LOG_ERR("vdec_vcmd: dma address of status buf not the same between d0 and d1\n"); + LOG_ERR("vdec_vcmd: dma address of status buf not the same between d0 and d1, 0x%llx, 0x%llx, %d\n", + dma_handle, dma_handle_d1, __LINE__); return -1; } } @@ -3355,10 +3369,11 @@ static int vcmd_init(void) /* register buffer */ vcmd_registers_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; vcmd_registers_mem_pool.virtual_address = - (u32 *)dma_alloc_coherent(&platformdev->dev, + (u32 *)dma_alloc_attrs(&platformdev->dev, vcmd_registers_mem_pool.size, &dma_handle, - GFP_KERNEL | __GFP_DMA32); + GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); vcmd_registers_mem_pool.bus_address = (unsigned long long)dma_handle; vcmd_registers_mem_pool.phy_address = pfn_to_phys(vmalloc_to_pfn(vcmd_registers_mem_pool.virtual_address)); @@ -3366,7 +3381,8 @@ static int vcmd_init(void) dma_handle_d1 = dma_map_page(&platformdev_d1->dev, vmalloc_to_page(vcmd_registers_mem_pool.virtual_address), 0, vcmd_registers_mem_pool.size, DMA_BIDIRECTIONAL); if (dma_handle != dma_handle_d1) { - LOG_ERR("vdec_vcmd: dma address of registers buf not the same between d0 and d1\n"); + LOG_ERR("vdec_vcmd: dma address of registers buf not the same between d0 and d1, 0x%llx, 0x%llx, %d\n", + dma_handle, dma_handle_d1, __LINE__); return -1; } } @@ -4273,13 +4289,15 @@ int hantrovcmd_init(void) return result; } -void hantrovcmd_cleanup(void) +void hantrovcmd_cleanup(struct platform_device *pdev, int cleanup) { int i = 0; u32 result; + u32 core_id = (platformdev_d1 == pdev) ? 
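[Editor's note] The vcmd pools above switch from dma_alloc_coherent(... | __GFP_DMA32) to dma_alloc_attrs(..., DMA_ATTR_FORCE_CONTIGUOUS) so the physically contiguous buffer can also be mapped through die 1 with dma_map_page(), and the code insists that both dies see the identical bus address. A minimal sketch of that shared-pool setup, assuming as the driver does that the attrs allocation is vmap-backed on this platform:

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

static int shared_pool_init(struct device *d0, struct device *d1,
			    size_t size, void **cpu, dma_addr_t *handle)
{
	dma_addr_t h0, h1;

	*cpu = dma_alloc_attrs(d0, size, &h0, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
	if (!*cpu)
		return -ENOMEM;

	if (d1) {
		/* map the same backing page on die 1; both handles must
		 * match so one bus address is valid from either die */
		h1 = dma_map_page(d1, vmalloc_to_page(*cpu), 0, size,
				  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(d1, h1)) {
			dma_free_attrs(d0, size, *cpu, h0, 0);
			return -EINVAL;
		}
		if (h1 != h0) {
			dma_unmap_page(d1, h1, size, DMA_BIDIRECTIONAL);
			dma_free_attrs(d0, size, *cpu, h0, 0);
			return -EINVAL;
		}
	}

	*handle = h0;
	return 0;
}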
2 : 0; for (i = 0; i < total_vcmd_core_num; i++) { - if (!hantrovcmd_data[i].hwregs) + if (!hantrovcmd_data[i].hwregs || (hantrovcmd_data[i].core_id != core_id && + hantrovcmd_data[i].core_id != (core_id) + 1)) continue; //disable interrupt at first vcmd_write_reg((const void *)hantrovcmd_data[i].hwregs, @@ -4301,10 +4319,12 @@ void hantrovcmd_cleanup(void) release_cmdbuf_node_cleanup(&hantrovcmd_data[i].list_manager); } - release_process_node_cleanup(&global_process_manager); - - vcmd_release_IO(); - vfree(hantrovcmd_data); + if (cleanup) { + vcmd_release_IO(); + release_process_node_cleanup(&global_process_manager); + vfree(hantrovcmd_data); + hantrovcmd_data = NULL; + } //release_vcmd_non_cachable_memory(); if (pcie) { @@ -4320,26 +4340,29 @@ void hantrovcmd_cleanup(void) release_mem_region(vcmd_registers_mem_pool.bus_address, vcmd_registers_mem_pool.size); } else { - if (vcmd_buf_mem_pool.virtual_address) - dma_free_coherent( - &platformdev->dev, vcmd_buf_mem_pool.size, - vcmd_buf_mem_pool.virtual_address, - (dma_addr_t)vcmd_buf_mem_pool.bus_address); - if (vcmd_status_buf_mem_pool.virtual_address) - dma_free_coherent( - &platformdev->dev, - vcmd_status_buf_mem_pool.size, - vcmd_status_buf_mem_pool.virtual_address, - (dma_addr_t) - vcmd_status_buf_mem_pool.bus_address); - if (vcmd_registers_mem_pool.virtual_address) - dma_free_coherent( - &platformdev->dev, vcmd_registers_mem_pool.size, - vcmd_registers_mem_pool.virtual_address, - (dma_addr_t) - vcmd_registers_mem_pool.bus_address); + if (pdev == platformdev) { + if (vcmd_buf_mem_pool.virtual_address) + dma_free_attrs( + &platformdev->dev, vcmd_buf_mem_pool.size, + vcmd_buf_mem_pool.virtual_address, + (dma_addr_t)vcmd_buf_mem_pool.bus_address, + 0); + if (vcmd_status_buf_mem_pool.virtual_address) + dma_free_attrs( + &platformdev->dev, + vcmd_status_buf_mem_pool.size, + vcmd_status_buf_mem_pool.virtual_address, + (dma_addr_t)vcmd_status_buf_mem_pool.bus_address, + 0); + if (vcmd_registers_mem_pool.virtual_address) + dma_free_attrs( + &platformdev->dev, vcmd_registers_mem_pool.size, + vcmd_registers_mem_pool.virtual_address, + (dma_addr_t)vcmd_registers_mem_pool.bus_address, + 0); + } - if (platformdev_d1) { + if (pdev == platformdev_d1) { dma_unmap_page(&platformdev_d1->dev, (dma_addr_t)vcmd_buf_mem_pool.bus_address, vcmd_buf_mem_pool.size, DMA_BIDIRECTIONAL); dma_unmap_page(&platformdev_d1->dev, @@ -4348,7 +4371,7 @@ void hantrovcmd_cleanup(void) (dma_addr_t)vcmd_registers_mem_pool.bus_address, vcmd_registers_mem_pool.size, DMA_BIDIRECTIONAL); } } - LOG_INFO("module removed\n"); + LOG_INFO("vcmd module removed %s\n", (pdev == platformdev_d1) ? "dev1" : "dev0"); return; } diff --git a/drivers/staging/media/eswin/vdec/hantrovcmd.h b/drivers/staging/media/eswin/vdec/hantrovcmd.h index aa6bc6abb367..5febac70d8dc 100644 --- a/drivers/staging/media/eswin/vdec/hantrovcmd.h +++ b/drivers/staging/media/eswin/vdec/hantrovcmd.h @@ -227,6 +227,8 @@ struct exchange_parameter { u16 cmdbuf_id; /* just used for polling. 
*/ u16 core_id; + /* which die will be used, 0xFFFF: not specify */ + u16 nid; }; #define DEC_DRIVER_NAME "es_vdec_drv" diff --git a/drivers/staging/media/eswin/vdec/subsys.h b/drivers/staging/media/eswin/vdec/subsys.h index 17a82a6169b0..92e777d40369 100644 --- a/drivers/staging/media/eswin/vdec/subsys.h +++ b/drivers/staging/media/eswin/vdec/subsys.h @@ -171,7 +171,7 @@ int hantrovcmd_release(struct inode *inode, struct file *filp); long hantrovcmd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); int hantrovcmd_mmap(struct file *filp, struct vm_area_struct *vma); int hantrovcmd_init(void); -void hantrovcmd_cleanup(void); +void hantrovcmd_cleanup(struct platform_device *pdev, int cleanup); int hantrovcmd_wait_core_idle(u32 core_id, long timeout); struct platform_device *vdec_get_platform_device(u32 core_id); diff --git a/drivers/staging/media/eswin/venc/vc8000_driver.h b/drivers/staging/media/eswin/venc/vc8000_driver.h index 2863a47ff0ff..3aad8ae83bec 100644 --- a/drivers/staging/media/eswin/venc/vc8000_driver.h +++ b/drivers/staging/media/eswin/venc/vc8000_driver.h @@ -297,6 +297,7 @@ struct exchange_parameter { u16 priority; //input,normal=0, high/live=1 u16 cmdbuf_id; //output ,it is unique in driver. u16 core_id; //just used for polling. + u16 numa_id; //bind the task to the specified numa_id }; typedef struct CoreWaitOut { diff --git a/drivers/staging/media/eswin/venc/vc8000_vcmd_driver.c b/drivers/staging/media/eswin/venc/vc8000_vcmd_driver.c index 5f88db259b5c..0686cc8dbd04 100644 --- a/drivers/staging/media/eswin/venc/vc8000_vcmd_driver.c +++ b/drivers/staging/media/eswin/venc/vc8000_vcmd_driver.c @@ -53,6 +53,25 @@ * ***************************************************************************** */ +/* + * ESWIN cipher serivce driver + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Authors: Xiaojun Zou + */ #include #include @@ -130,7 +149,10 @@ #define MAX_CMDBUF_INT_NUMBER 1 #define INT_MIN_SUM_OF_IMAGE_SIZE (4096 * 2160 * 1 * MAX_CMDBUF_INT_NUMBER) #define MAX_PROCESS_CORE_NUMBER (4 * 8) -#define PROCESS_MAX_SUM_OF_IMAGE_SIZE (4096 * 2160 * MAX_SAME_MODULE_TYPE_CORE_NUMBER * MAX_PROCESS_CORE_NUMBER) +#define PROCESS_MAX_VIDO_SIZE (4096 * 2160 * MAX_SAME_MODULE_TYPE_CORE_NUMBER * MAX_PROCESS_CORE_NUMBER) +#define PROCESS_MAX_JPEG_SIZE (2147483648U) //32K*32K*2 +#define PROCESS_MAX_SUM_OF_IMAGE_SIZE \ + (PROCESS_MAX_VIDO_SIZE > PROCESS_MAX_JPEG_SIZE ? 
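[Editor's note] Both the decoder and the encoder select_vcmd() paths above now honour a die hint from user space (nid for vdec, numa_id for venc): a valid index queues the command buffer straight onto that die's vcmd core and skips the round-robin scheduler, while an out-of-range value such as 0xFFFF falls through to the existing selection loop. A stripped-down sketch of that dispatch decision, with simplified stand-ins for the driver's manager and list types:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct vcmd_core {
	spinlock_t lock;
	struct list_head queue;
	u32 core_id;
};

static int queue_to_die(struct vcmd_core **cores, u16 ncores, u16 numa_id,
			struct list_head *job, u32 *core_id)
{
	struct vcmd_core *core;
	unsigned long flags;

	if (numa_id >= ncores)
		return -EINVAL;		/* no binding requested, use round robin */

	core = cores[numa_id];
	spin_lock_irqsave(&core->lock, flags);
	list_add_tail(job, &core->queue);
	spin_unlock_irqrestore(&core->lock, flags);

	*core_id = core->core_id;
	return 0;
}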
PROCESS_MAX_VIDO_SIZE : PROCESS_MAX_JPEG_SIZE) /** the wait time for the vcmd task list(in milliseconds): * 200x frame encoding time + 1000ms @@ -249,6 +271,7 @@ struct hantrovcmd_dev { #define VCMD_HW_ID 0x4342 + static struct noncache_mem vcmd_buf_mem_pool; static struct noncache_mem vcmd_status_buf_mem_pool; static struct noncache_mem vcmd_registers_mem_pool; @@ -1025,7 +1048,7 @@ static int wait_abort_rdy(struct hantrovcmd_dev *dev) return dev->working_state == WORKING_STATE_IDLE; } -static int select_vcmd(bi_list_node *new_cmdbuf_node) +static int select_vcmd(bi_list_node *new_cmdbuf_node, u16 numa_id) { struct cmdbuf_obj *cmdbuf_obj = NULL; bi_list_node *curr_cmdbuf_node = NULL; @@ -1041,6 +1064,19 @@ static int select_vcmd(bi_list_node *new_cmdbuf_node) u32 cmdbuf_id = 0; cmdbuf_obj = (struct cmdbuf_obj *)new_cmdbuf_node->data; + + /** bind the task to the specified core_id if necessary*/ + if (numa_id < vcmd_type_core_num[cmdbuf_obj->module_type]) { + dev = vcmd_manager[cmdbuf_obj->module_type][numa_id]; + list = &dev->list_manager; + spin_lock_irqsave(dev->spinlock, flags); + bi_list_insert_node_tail(list, new_cmdbuf_node); + spin_unlock_irqrestore(dev->spinlock, flags); + + cmdbuf_obj->core_id = dev->core_id; + return 0; + } + //there is an empty vcmd to be used while (1) { dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]]; @@ -1623,7 +1659,7 @@ static long link_and_run_cmdbuf(struct file *filp, return -ERESTARTSYS; } - return_value = select_vcmd(new_cmdbuf_node); + return_value = select_vcmd(new_cmdbuf_node, input_para->numa_id); if (return_value) { LOG_ERR("vcmd: error return from select_vcmd\n"); return return_value; @@ -2069,49 +2105,38 @@ static long hantrovcmd_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&dbcfg, (void __user *)arg, sizeof(struct dmabuf_cfg)) != 0) return -EFAULT; - LOG_DBG("import dmabuf_fd = %d\n", dbcfg.dmabuf_fd); - /* map the pha to dma addr(iova)*/ hmem = common_dmabuf_heap_import_from_user(&fp_priv->root, dbcfg.dmabuf_fd); if(IS_ERR(hmem)) { LOG_ERR("dmabuf-heap import from userspace failed\n"); return -ENOMEM; } - - if (venc_pdev_d1) { - hmem_d1 = common_dmabuf_heap_import_from_user(&fp_priv->root_d1, dbcfg.dmabuf_fd); - if(IS_ERR(hmem_d1)) { - common_dmabuf_heap_release(hmem); - LOG_ERR("dmabuf-heap alloc from userspace failed for d1\n"); - return -ENOMEM; - } - } - + dbcfg.iova = (unsigned long)sg_dma_address(hmem->sgt->sgl); /* get the size of the dmabuf allocated by dmabuf_heap */ buf_size = common_dmabuf_heap_get_size(hmem); - LOG_DBG("dmabuf info: CPU VA:0x%lx, PA:0x%lx, DMA addr(iova):0x%lx, size=0x%lx\n", - (unsigned long)hmem->vaddr, (unsigned long)sg_phys(hmem->sgt->sgl), (unsigned long)sg_dma_address(hmem->sgt->sgl), (unsigned long)buf_size); + LOG_DBG("fd = %d, dmabuf info: CPU VA:0x%lx, PA:0x%lx, DMA addr(iova):0x%lx, size=0x%lx\n" + , dbcfg.dmabuf_fd + , (unsigned long)hmem->vaddr + , (unsigned long)sg_phys(hmem->sgt->sgl) + , (unsigned long)sg_dma_address(hmem->sgt->sgl) + , (unsigned long)buf_size); - dbcfg.iova = (unsigned long)sg_dma_address(hmem->sgt->sgl); if (venc_pdev_d1) { - unsigned long iova_d1; - - iova_d1 = (unsigned long)sg_dma_address(hmem_d1->sgt->sgl); - if (dbcfg.iova != iova_d1) { + hmem_d1 = common_dmabuf_heap_rsv_iova_map(&fp_priv->root_d1, dbcfg.dmabuf_fd, dbcfg.iova, buf_size); + if ((IS_ERR(hmem_d1))) { + LOG_ERR("dmabuf-heap rsv iova map failed for d1\n"); common_dmabuf_heap_release(hmem); - common_dmabuf_heap_release(hmem_d1); - LOG_ERR("VENC_VCMD: IOVA addrs 
of d0 and d1 are not the same\n"); - return -EFAULT; + return err; } } - retval = copy_to_user((u32 __user *)arg, &dbcfg, sizeof(struct dmabuf_cfg)); if (retval) { - common_dmabuf_heap_release(hmem); if (venc_pdev_d1) - common_dmabuf_heap_release(hmem_d1); + common_dmabuf_heap_rsv_iova_unmap(hmem_d1); + + common_dmabuf_heap_release(hmem); LOG_DBG("copy_to_user failed, returned %li\n", retval); return -EFAULT; } @@ -2119,6 +2144,7 @@ static long hantrovcmd_ioctl(struct file *filp, unsigned int cmd, return 0; } case HANTRO_IOCH_DMA_HEAP_PUT_IOVA: { + struct dmabuf_cfg dbcfg; struct heap_mem *hmem, *hmem_d1; unsigned int dmabuf_fd; struct filp_priv *fp_priv = (struct filp_priv *)filp->private_data; @@ -2126,14 +2152,13 @@ static long hantrovcmd_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&dmabuf_fd, (void __user *)arg, sizeof(int)) != 0) return -EFAULT; - LOG_DBG("release dmabuf_fd = %d\n", dmabuf_fd); - /* find the heap_mem */ hmem = common_dmabuf_lookup_heapobj_by_fd(&fp_priv->root, dmabuf_fd); if(IS_ERR(hmem)) { LOG_ERR("cannot find dmabuf-heap for dmabuf_fd %d\n", dmabuf_fd); return -ENOMEM; } + LOG_DBG("release dmabuf_fd = %d, iova 0x%llx\n", dmabuf_fd, sg_dma_address(hmem->sgt->sgl)); if (venc_pdev_d1) { hmem_d1 = common_dmabuf_lookup_heapobj_by_fd(&fp_priv->root_d1, dmabuf_fd); @@ -2141,7 +2166,7 @@ static long hantrovcmd_ioctl(struct file *filp, unsigned int cmd, LOG_ERR("cannot find dmabuf-heap for dmabuf_fd %d on d1\n", dmabuf_fd); return -EFAULT; } - common_dmabuf_heap_release(hmem_d1); + common_dmabuf_heap_rsv_iova_unmap(hmem_d1); } common_dmabuf_heap_release(hmem); @@ -2818,22 +2843,24 @@ static int hantrovcmd_release(struct inode *inode, struct file *filp) } } #ifdef SUPPORT_DMA_HEAP - common_dmabuf_heap_import_uninit(&fp_priv->root); if (venc_pdev_d1) { - common_dmabuf_heap_import_uninit(&fp_priv->root_d1); + common_dmabuf_heap_rsv_iova_uninit(&fp_priv->root_d1); } + + common_dmabuf_heap_import_uninit(&fp_priv->root); #endif kfree(fp_priv); return 0; error: #ifdef SUPPORT_DMA_HEAP - common_dmabuf_heap_import_uninit(&fp_priv->root); if (venc_pdev_d1) { - common_dmabuf_heap_import_uninit(&fp_priv->root_d1); + common_dmabuf_heap_rsv_iova_uninit(&fp_priv->root_d1); } - kfree(fp_priv); + + common_dmabuf_heap_import_uninit(&fp_priv->root); #endif + kfree(fp_priv); return -ERESTARTSYS; } @@ -3579,12 +3606,14 @@ int vcmd_mem_init(void) dma_addr_t dma_handle = 0; dma_addr_t dma_handle_d1 = 0; + vcmd_buf_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; /* command buffer */ - vcmd_buf_mem_pool.virtualAddress = (u32 *)dma_alloc_coherent(&venc_pdev->dev, + vcmd_buf_mem_pool.virtualAddress = (u32 *)dma_alloc_attrs(&venc_pdev->dev, vcmd_buf_mem_pool.size, &dma_handle, - GFP_KERNEL | __GFP_DMA32); + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); + vcmd_buf_mem_pool.busAddress = (unsigned long long)dma_handle; vcmd_buf_mem_pool.phy_address = pfn_to_phys(vmalloc_to_pfn(vcmd_buf_mem_pool.virtualAddress)); @@ -3606,11 +3635,10 @@ int vcmd_mem_init(void) /* status buffer */ vcmd_status_buf_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; - vcmd_status_buf_mem_pool.virtualAddress = - (u32 *)dma_alloc_coherent(&venc_pdev->dev, - vcmd_status_buf_mem_pool.size, - &dma_handle, - GFP_KERNEL | __GFP_DMA32); + vcmd_status_buf_mem_pool.virtualAddress = (u32 *)dma_alloc_attrs(&venc_pdev->dev, + vcmd_status_buf_mem_pool.size, &dma_handle, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); + vcmd_status_buf_mem_pool.busAddress = (unsigned long long)dma_handle; vcmd_status_buf_mem_pool.phy_address = 
pfn_to_phys(vmalloc_to_pfn(vcmd_status_buf_mem_pool.virtualAddress)); @@ -3632,11 +3660,10 @@ int vcmd_mem_init(void) /* register buffer */ vcmd_registers_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; - vcmd_registers_mem_pool.virtualAddress = - (u32 *)dma_alloc_coherent(&venc_pdev->dev, - vcmd_registers_mem_pool.size, - &dma_handle, - GFP_KERNEL | __GFP_DMA32); + vcmd_registers_mem_pool.virtualAddress = (u32 *)dma_alloc_attrs(&venc_pdev->dev, + vcmd_registers_mem_pool.size, &dma_handle, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); + vcmd_registers_mem_pool.busAddress = (unsigned long long)dma_handle; vcmd_registers_mem_pool.phy_address = pfn_to_phys(vmalloc_to_pfn(vcmd_registers_mem_pool.virtualAddress)); @@ -3662,20 +3689,23 @@ int vcmd_mem_init(void) void vcmd_mem_cleanup(void) { if (vcmd_buf_mem_pool.virtualAddress) - dma_free_coherent( + dma_free_attrs( &venc_pdev->dev, vcmd_buf_mem_pool.size, vcmd_buf_mem_pool.virtualAddress, - (dma_addr_t)vcmd_buf_mem_pool.busAddress); + (dma_addr_t)vcmd_buf_mem_pool.busAddress, + 0); if (vcmd_status_buf_mem_pool.virtualAddress) - dma_free_coherent( + dma_free_attrs( &venc_pdev->dev, vcmd_status_buf_mem_pool.size, vcmd_status_buf_mem_pool.virtualAddress, - (dma_addr_t)vcmd_status_buf_mem_pool.busAddress); + (dma_addr_t)vcmd_status_buf_mem_pool.busAddress, + 0); if (vcmd_registers_mem_pool.virtualAddress) - dma_free_coherent( + dma_free_attrs( &venc_pdev->dev, vcmd_registers_mem_pool.size, vcmd_registers_mem_pool.virtualAddress, - (dma_addr_t)vcmd_registers_mem_pool.busAddress); + (dma_addr_t)vcmd_registers_mem_pool.busAddress, + 0); if (venc_pdev_d1) { dma_unmap_page(&venc_pdev_d1->dev, diff --git a/include/linux/dmabuf-heap-import-helper.h b/include/linux/dmabuf-heap-import-helper.h index 6fd339ee8ff3..9a474f6594e7 100644 --- a/include/linux/dmabuf-heap-import-helper.h +++ b/include/linux/dmabuf-heap-import-helper.h @@ -1,3 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Header File of ESWIN DMABUF heap helper APIs + * + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * Authors: Min Lin + */ + #ifndef _DMABUF_HEAP_IMPORT_H_ #define _DMABUF_HEAP_IMPORT_H_ @@ -7,6 +28,7 @@ #include #include #include +#include #define SYSTEM_DEV_NODE "system" #define CMA_DEV_NODE_RES "reserved" @@ -42,15 +64,24 @@ struct heap_mem { void *vaddr; enum dma_data_direction dir; + dma_addr_t iova; + size_t size; }; -struct esw_export_buffer_info { - char name[64]; - int fd_flags; - - int dbuf_fd; - struct dma_buf *dmabuf; +struct eswin_split_buffer { + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct sg_table sg_table; // for re-formatted split sgt + struct sg_table orig_sg_table; // for originally split sgt + struct page **pages; + int vmap_cnt; + void *vaddr; + unsigned long fd_flags; // for vmap to determine cached or non-cached mapping + char name[64]; // export name + int dbuf_fd; // parent dmabuf fd + struct dma_buf *dmabuf; // parent dmabuf struct esw_slice_buffer { __u64 offset; size_t len; @@ -97,4 +128,8 @@ struct heap_mem *common_dmabuf_heap_import_from_kernel(struct heap_root *root, c int esw_common_dmabuf_split_export(int dbuf_fd, unsigned int offset, size_t len, int fd_flags, char *name); +struct heap_mem *common_dmabuf_heap_rsv_iova_map(struct heap_root *root, int fd, dma_addr_t iova, size_t size); +void common_dmabuf_heap_rsv_iova_unmap(struct heap_mem *heap_obj); +void common_dmabuf_heap_rsv_iova_uninit(struct heap_root *root); + #endif diff --git a/drivers/memory/eswin/es_iommu_rsv/include/linux/es_iommu_rsv.h b/include/linux/es_iommu_rsv.h similarity index 77% rename from drivers/memory/eswin/es_iommu_rsv/include/linux/es_iommu_rsv.h rename to include/linux/es_iommu_rsv.h index 3e11a6421502..cda1c83d47a2 100644 --- a/drivers/memory/eswin/es_iommu_rsv/include/linux/es_iommu_rsv.h +++ b/include/linux/es_iommu_rsv.h @@ -6,5 +6,6 @@ int iommu_unmap_rsv_iova(struct device *dev, void *cpu_addr, dma_addr_t iova, unsigned long size); int iommu_map_rsv_iova_with_phys(struct device *dev, dma_addr_t iova, unsigned long size, phys_addr_t paddr, unsigned long attrs); void *iommu_map_rsv_iova(struct device *dev, dma_addr_t iova, unsigned long size, gfp_t gfp, unsigned long attrs); +ssize_t iommu_rsv_iova_map_sgt(struct device *dev, unsigned long iova, struct sg_table *sgt, unsigned long attrs, size_t buf_size); #endif diff --git a/include/linux/eswin_npu.h b/include/linux/eswin_npu.h index 44784eeefba3..1b226c3dfe17 100644 --- a/include/linux/eswin_npu.h +++ b/include/linux/eswin_npu.h @@ -1,14 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 /* * ESWIN NPU Clock Rate Definitaions * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. + * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd.. All rights reserved. * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Authors: Yang Wei */ #ifndef __LINUX_ESWIN_NPU_H #define __LINUX_ESWIN_NPU_H @@ -17,7 +25,11 @@ #define NPU_DEFAULT_VOLTAGE 800000 //uV #define NPU_LLC_CLK_RATE 800000000 //nvdla #define NPU_CORE_CLK_RATE 1040000000 //npu and e31 +#ifdef CONFIG_ARCH_ESWIN_EIC7702_SOC +#define NPU_1P5G_VOLTAGE 1080000 //uV +#else #define NPU_1P5G_VOLTAGE 1050000 //uV +#endif #define NPU_LLC_CLK_1P5G_RATE 1188000000 //nvdla #define NPU_CORE_CLK_1P5G_RATE 1500000000 //npu and e31 #define NPU_E31_CLK_RATE 1040000000 //llc diff --git a/sound/soc/eswin/esw-i2s.c b/sound/soc/eswin/esw-i2s.c index 5774a03a270b..efae45ee2f31 100755 --- a/sound/soc/eswin/esw-i2s.c +++ b/sound/soc/eswin/esw-i2s.c @@ -59,7 +59,8 @@ #define MAX_SAMPLE_RATE_SUPPORT (192000UL) #define MAX_SAMPLE_RATE_CLK (MAX_SAMPLE_RATE_SUPPORT * 32 * 2) // 32 bits, 2channels -#define VO_TOP_CSR 0x50280000UL +#define DIE0_VO_TOP_CSR 0x50280000UL +#define DIE1_VO_TOP_CSR 0x70280000UL #define VO_I2S0_DIV_NUM 0x2000 #define VO_I2S1_DIV_NUM 0x2004 #define VO_I2S2_DIV_NUM 0x2008 @@ -73,7 +74,11 @@ SNDRV_PCM_RATE_8000) #define ESW_I2S_FORMATS (SNDRV_PCM_FMTBIT_S32_LE) -#define I2S0_IO_ADDR 0x51600124 +#define DIE0_I2S0_IO_ADDR 0x51600124 +#define DIE1_I2S0_IO_ADDR 0x71600124 + +#define DIE0_DAI_DRIVER_OFFSET 0 +#define DIE1_DAI_DRIVER_OFFSET 4 #define HDMI_DAI_NAME "i2s0-hdmi" @@ -106,6 +111,67 @@ static const u32 bus_widths[COMP_MAX_DATA_WIDTH] = { DMA_SLAVE_BUSWIDTH_UNDEFINED }; +int i2s_get_nid(struct device *dev) +{ + int nid = dev_to_node(dev); + + if (nid == NUMA_NO_NODE) { + #ifdef CONFIG_NUMA + dev_err(dev, "%s:%d, NUMA_NO_NODE\n", __func__, __LINE__); + return NUMA_NO_NODE; + #else + dev_info(dev, "nid:NUMA_NO_NODE, single DIE\n"); + nid = 0; + #endif + } else { + dev_info(dev, "nid:%d\n", nid); + } + return nid; +} + +static int set_mclk(struct device *dev, struct clk **i2s_clk) +{ + const char *clk_id = "mclk"; + struct clk *clk = NULL; + int ret; + + clk = devm_clk_get(dev, clk_id); + if (IS_ERR(clk)) { + dev_err(dev, "Failed to get clock: %ld\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret < 0) { + dev_err(dev, "Failed to enable clock: %d\n", ret); + return ret; + } + + /* only set once */ + if (of_node_name_prefix(dev->of_node, "i2s0")) { + ret = clk_set_rate(clk, MAX_SAMPLE_RATE_CLK); + if (ret) { + dev_err(dev, "Can't set I2S clock rate: %d\n", ret); + clk_disable_unprepare(clk); + return ret; + } + } + + *i2s_clk = clk; + return 0; +} + + +unsigned long get_vo_top_csr(int nid) +{ + return (nid == 1) ? DIE1_VO_TOP_CSR : DIE0_VO_TOP_CSR; +} + +unsigned long get_i2s0_io_addr(int nid) +{ + return (nid == 1) ? 
DIE1_I2S0_IO_ADDR : DIE0_I2S0_IO_ADDR; +} + static inline u32 i2s_read_reg(void *io_base, int reg) { return readl((char *)io_base + reg); @@ -532,10 +598,11 @@ static const struct snd_soc_dai_ops i2s_dai_ops = { static int i2s_runtime_suspend(struct device *dev) { struct i2s_dev *i2s_drvdata = dev_get_drvdata(dev); + struct clk *clk = i2s_drvdata->clk; dev_dbg(i2s_drvdata->dev, "%s\n", __func__); - clk_disable(g_mclk); + clk_disable(clk); return 0; } @@ -543,9 +610,10 @@ static int i2s_runtime_suspend(struct device *dev) static int i2s_runtime_resume(struct device *dev) { struct i2s_dev *i2s_drvdata = dev_get_drvdata(dev); + struct clk *clk = i2s_drvdata->clk; dev_dbg(i2s_drvdata->dev, "%s\n", __func__); - clk_enable(g_mclk); + clk_enable(clk); return 0; } @@ -553,11 +621,12 @@ static int i2s_runtime_resume(struct device *dev) static int i2s_suspend(struct snd_soc_component *component) { struct i2s_dev *i2s_drvdata = snd_soc_component_get_drvdata(component); + struct clk *clk = i2s_drvdata->clk; dev_dbg(i2s_drvdata->dev, "%s\n", __func__); if(!pm_runtime_suspended(i2s_drvdata->dev)) { dev_dbg(i2s_drvdata->dev, "disable clk\n"); - clk_disable(g_mclk); + clk_disable(clk); } return 0; @@ -567,12 +636,13 @@ static int i2s_resume(struct snd_soc_component *component) { struct i2s_dev *i2s_drvdata = snd_soc_component_get_drvdata(component); struct snd_soc_dai *dai = NULL; + struct clk *clk = i2s_drvdata->clk; int stream; dev_dbg(i2s_drvdata->dev, "%s\n", __func__); if(!pm_runtime_suspended(i2s_drvdata->dev)) { dev_dbg(i2s_drvdata->dev, "enable clk\n"); - clk_enable(g_mclk); + clk_enable(clk); for_each_component_dais(component, dai) { for_each_pcm_streams(stream) if (snd_soc_dai_stream_active(dai, stream)) @@ -644,7 +714,7 @@ static const struct snd_soc_component_driver i2s_component = { .resume = i2s_resume, }; -static struct snd_soc_dai_driver i2s_dai[4] = { +static struct snd_soc_dai_driver i2s_dai[8] = { { .name = HDMI_DAI_NAME, .id = 0, @@ -714,14 +784,93 @@ static struct snd_soc_dai_driver i2s_dai[4] = { .formats = ESW_I2S_FORMATS, }, }, + { + .name = "d1-i2s0-hdmi", + .id = 0, + .ops = &i2s_dai_ops, + .playback = { + .stream_name = "Playback", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + }, + { + .name = "d1-i2s0", + .id = 1, + .ops = &i2s_dai_ops, + .playback = { + .stream_name = "Playback", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + }, + { + .name = "d1-i2s1", + .id = 0, + .ops = &i2s_dai_ops, + .playback = { + .stream_name = "Playback", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + }, + { + .name = "d1-i2s2", + .id = 0, + .ops = &i2s_dai_ops, + .playback = { + .stream_name = "Playback", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, 
+ .capture = { + .stream_name = "Capture", + .channels_min = MIN_CHANNEL_NUM, + .channels_max = MAX_CHANNEL_NUM, + .rates = ESW_I2S_RATES, + .formats = ESW_I2S_FORMATS, + }, + }, }; static int i2s_probe(struct platform_device *pdev) { struct i2s_dev *i2s_drvdata; struct resource *res; - int ret; - const char *clk_id; + int ret = 0; + int nid = 0; + unsigned long vo_top_csr; + unsigned long i2s0_io_addr; + int dai_offset = 0; struct snd_dmaengine_pcm_config *config; void __iomem *i2s0_io_base; int reg_val; @@ -747,20 +896,22 @@ static int i2s_probe(struct platform_device *pdev) } i2s_drvdata->dev = &pdev->dev; - if (of_node_name_prefix(pdev->dev.of_node, "i2s0")) { - clk_id = "mclk"; - g_mclk = devm_clk_get(&pdev->dev, clk_id); - if (IS_ERR(g_mclk)) - return PTR_ERR(g_mclk); - ret = clk_prepare_enable(g_mclk); - if (ret < 0) - return ret; - i2s_drvdata->clk = g_mclk; - ret = clk_set_rate(g_mclk, MAX_SAMPLE_RATE_CLK); - if (ret) { - dev_err(i2s_drvdata->dev, "Can't set I2S clock rate: %d\n", ret); - } + nid = i2s_get_nid(&pdev->dev); + if (nid == 0) { + dai_offset = DIE0_DAI_DRIVER_OFFSET; + } else if (nid == 1) { + dai_offset = DIE1_DAI_DRIVER_OFFSET; + } else { + dev_err(&pdev->dev, "%s:%d, NUMA_NO_NODE\n", __func__, __LINE__); + return -EFAULT; + } + ret = set_mclk(&pdev->dev, &i2s_drvdata->clk); + if (ret < 0) { + return ret; + } + + if (of_node_name_prefix(pdev->dev.of_node, "i2s0")) { ret = i2s_reset(pdev, i2s_drvdata); if (ret != 0) { dev_err(&pdev->dev, "i2s_reset failed\n"); @@ -768,7 +919,8 @@ static int i2s_probe(struct platform_device *pdev) } if (!of_property_read_bool(pdev->dev.of_node, "io_reuse_enable")) { - i2s0_io_base = devm_ioremap(&pdev->dev, I2S0_IO_ADDR, 12); + i2s0_io_addr = get_i2s0_io_addr(nid); + i2s0_io_base = devm_ioremap(&pdev->dev, i2s0_io_addr, 12); if (!i2s0_io_base) { dev_err(i2s_drvdata->dev, "failed to remap i2s0 io ctl\n"); return -ENOMEM; @@ -796,33 +948,31 @@ static int i2s_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, i2s_drvdata); + vo_top_csr = get_vo_top_csr(nid); if (of_node_name_prefix(pdev->dev.of_node, "i2s0")) { - i2s_drvdata->i2s_div_base = devm_ioremap(i2s_drvdata->dev, VO_TOP_CSR + VO_I2S0_DIV_NUM, 4); + i2s_drvdata->i2s_div_base = devm_ioremap(i2s_drvdata->dev, vo_top_csr + VO_I2S0_DIV_NUM, 4); if (!i2s_drvdata->i2s_div_base) { dev_err(&pdev->dev, "failed to remap i2s0 div config\n"); ret = -ENOMEM; goto err_probe; } - ret = devm_snd_soc_register_component(&pdev->dev, &i2s_component, - &i2s_dai[0], 2); + ret = devm_snd_soc_register_component(&pdev->dev, &i2s_component, &i2s_dai[dai_offset], 2); } else if (of_node_name_prefix(pdev->dev.of_node, "i2s1")) { - i2s_drvdata->i2s_div_base = devm_ioremap(i2s_drvdata->dev, VO_TOP_CSR + VO_I2S1_DIV_NUM, 4); + i2s_drvdata->i2s_div_base = devm_ioremap(i2s_drvdata->dev, vo_top_csr + VO_I2S1_DIV_NUM, 4); if (!i2s_drvdata->i2s_div_base) { dev_err(&pdev->dev, "failed to remap i2s1 div config\n"); ret = -ENOMEM; goto err_probe; } - ret = devm_snd_soc_register_component(&pdev->dev, &i2s_component, - &i2s_dai[2], 1); + ret = devm_snd_soc_register_component(&pdev->dev, &i2s_component, &i2s_dai[2 + dai_offset], 1); } else { - i2s_drvdata->i2s_div_base = devm_ioremap(i2s_drvdata->dev, VO_TOP_CSR + VO_I2S2_DIV_NUM, 4); + i2s_drvdata->i2s_div_base = devm_ioremap(i2s_drvdata->dev, vo_top_csr + VO_I2S2_DIV_NUM, 4); if (!i2s_drvdata->i2s_div_base) { dev_err(&pdev->dev, "failed to remap i2s2 div config\n"); ret = -ENOMEM; goto err_probe; } - ret = devm_snd_soc_register_component(&pdev->dev, 
&i2s_component, - &i2s_dai[3], 1); + ret = devm_snd_soc_register_component(&pdev->dev, &i2s_component, &i2s_dai[3 + dai_offset], 1); } if (ret != 0) { dev_err(&pdev->dev, "not able to register dai\n"); -- 2.47.0
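
Note on the vcmd memory-pool hunks above: both the encoder and decoder drivers move their command/status/register buffer pools from dma_alloc_coherent()/dma_free_coherent() to dma_alloc_attrs()/dma_free_attrs() with DMA_ATTR_FORCE_CONTIGUOUS, which forces the backing memory to be physically contiguous even when an IOMMU could otherwise map scattered pages. The minimal sketch below only illustrates that alloc/free pairing; it is not part of the patch, the demo_pool names are made up for illustration, and it passes the same attrs at free time as at allocation (the hunks above pass 0 on free).

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct demo_pool {
	u32 *vaddr;
	dma_addr_t dma;
	size_t size;
};

/* Allocate a physically contiguous, DMA-mapped buffer pool for @dev. */
static int demo_pool_alloc(struct device *dev, struct demo_pool *p, size_t size)
{
	p->size = size;
	p->vaddr = dma_alloc_attrs(dev, p->size, &p->dma, GFP_KERNEL,
				   DMA_ATTR_FORCE_CONTIGUOUS);
	return p->vaddr ? 0 : -ENOMEM;
}

/* Release the pool, passing the same size, handle and attrs used at allocation. */
static void demo_pool_free(struct device *dev, struct demo_pool *p)
{
	if (!p->vaddr)
		return;
	dma_free_attrs(dev, p->size, p->vaddr, p->dma,
		       DMA_ATTR_FORCE_CONTIGUOUS);
	p->vaddr = NULL;
}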